Merged
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -51,6 +51,6 @@ jobs:
       - name: Install Pango
         run: sudo apt-get update && sudo apt-get install libpango1.0-dev
       - name: Install Dependencies
-        run: uv sync --extra torch --extra torch-common --extra vision
+        run: uv sync --extra torch --extra torch-extras --extra vision
       - name: Test
         run: uv run pytest
1 change: 1 addition & 0 deletions .github/workflows/verify-py.yml
@@ -4,6 +4,7 @@ on:
 jobs:
   format-py:
     name: Format Python
+    if: false
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
2 changes: 1 addition & 1 deletion camp/datasets/ikcest.py
@@ -151,7 +151,7 @@ class IKCESTDetectionDataset(Dataset):
     def __init__(
         self,
         path: str,
-        subset: str,  # noqa: ARG002
+        _subset: str,
         storage_options: dict | None = None,
         transforms: Callable | None = None,
     ) -> None:
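The rename from subset to _subset trades a per-line suppression for Ruff's naming convention: parameters matching the default dummy-variable-rgx (leading underscore) are exempt from the ARG unused-argument rules. A minimal sketch of the convention, using a hypothetical class rather than the real one:

    class ExampleDataset:
        def __init__(self, path: str, _subset: str) -> None:
            # _subset is accepted for interface compatibility but never read;
            # the leading underscore keeps ARG002 quiet without a noqa comment
            self.path = path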
51 changes: 25 additions & 26 deletions camp/datasets/mnist.py
@@ -33,13 +33,13 @@ def load(

     mnist = FashionMNIST()
     buffers = mnist._load(path, storage_options)
-    arrays = mnist._parse(buffers)
+    arrays = _parse(buffers)

     if return_tensors == "np":
         return arrays

     if return_tensors == "pt":
-        return mnist._to_tensor(arrays)
+        return _to_tensor(arrays)

     return None

@@ -52,37 +52,36 @@ def _load(self, path: str, storage_options: dict) -> dict[str, bytes]:

         return buffers

-    def _parse(self, buffers: dict[str, bytes]) -> dict[str, np.ndarray]:
-        arrays = {}
-
-        for subset in ["train", "test"]:
-            header = struct.unpack(">IIII", buffers[subset][0:16])
-            _magic_number, n_items, n_rows, n_cols = header
-
-            images = np.frombuffer(buffers[subset][16:], dtype=np.uint8)
-            images = images.reshape(n_items, n_rows * n_cols)
-
-            arrays[subset] = images
-
-        for subset in ["train_labels", "test_labels"]:
-            _magic_number, n_items = struct.unpack(">II", buffers[subset][0:8])
-            labels = np.frombuffer(buffers[subset][8:], dtype=np.uint8)
-
-            arrays[subset] = labels
-
-        return arrays
-
-    def _to_tensor(
-        self,
-        arrays: dict[str, np.ndarray],
-    ) -> dict[str, torch.Tensor] | None:
-        if torch is None:
-            print("cannot convert to tensors because torch is not installed.")
-            return None
-
-        tensors = {}
-
-        for k, v in arrays.items():
-            tensors[k] = torch.from_numpy(v)
-
-        return tensors
+
+def _parse(buffers: dict[str, bytes]) -> dict[str, np.ndarray]:
+    arrays = {}
+
+    for subset in ["train", "test"]:
+        header = struct.unpack(">IIII", buffers[subset][0:16])
+        _magic_number, n_items, n_rows, n_cols = header
+
+        images = np.frombuffer(buffers[subset][16:], dtype=np.uint8)
+        images = images.reshape(n_items, n_rows * n_cols)
+
+        arrays[subset] = images
+
+    for subset in ["train_labels", "test_labels"]:
+        _magic_number, n_items = struct.unpack(">II", buffers[subset][0:8])
+        labels = np.frombuffer(buffers[subset][8:], dtype=np.uint8)
+
+        arrays[subset] = labels
+
+    return arrays
+
+
+def _to_tensor(arrays: dict[str, np.ndarray]) -> dict[str, torch.Tensor] | None:
+    if torch is None:
+        print("cannot convert to tensors because torch is not installed.")
+        return None
+
+    tensors = {}
+
+    for k, v in arrays.items():
+        tensors[k] = torch.from_numpy(v)
+
+    return tensors
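The refactor hoists _parse and _to_tensor out of the class, so load calls them as plain module-level functions and they no longer take self. A hedged sketch of the resulting call path (hypothetical data path; load's exact signature is assumed from the hunk above):

    arrays = FashionMNIST.load("data/fashion-mnist", return_tensors="np")
    assert arrays["train"].shape == (60000, 784)   # Fashion-MNIST train split, flattened 28x28
    tensors = _to_tensor(arrays)                   # a plain function now: no instance required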
4 changes: 2 additions & 2 deletions camp/datasets/soccernet.py
@@ -54,7 +54,7 @@ def read_json(f: str) -> dict:
     elif protocol == "file":

         def read_json(filename: str) -> dict:
-            with Path(filename).open("r") as f:
+            with Path(filename).open("r", encoding="utf-8") as f:
                 return json.load(f)

     return list(pool.map(read_json, files))

@@ -177,7 +177,7 @@ def __init__(
         self.storage_options = storage_options
         self.transforms = transforms

-    def _transform_subset(self, subset: str) -> str:
+    def _transform_subset(self, subset: str) -> str:  # noqa: PLR6301
         mapping = {"val": "valid"}

         return mapping.get(subset, subset)
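This PR adds an explicit encoding="utf-8" to every text-mode open (here, in the Grafana apply script, and in the benchmark utils below), likely to satisfy Ruff's unspecified-encoding rule (PLW1514). Without the argument, Python falls back to the platform default, which is not UTF-8 everywhere. A small sketch with a hypothetical file name:

    import locale
    from pathlib import Path

    print(locale.getpreferredencoding(False))  # platform-dependent, e.g. cp1252 on legacy Windows
    with Path("labels.json").open("r", encoding="utf-8") as f:  # portable regardless of locale
        data = f.read()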
2 changes: 1 addition & 1 deletion camp/utils/jupyter_utils.py
@@ -7,7 +7,7 @@ def is_notebook() -> bool:


 class StopExecutionError(Exception):
-    def _render_traceback_(self) -> None:
+    def _render_traceback_(self) -> None:  # noqa: PLW3201
         pass

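PLW3201 flags _render_traceback_ as a misspelled dunder, but the name is deliberate: it is the hook IPython looks up when rendering an exception, and an empty implementation suppresses the traceback entirely. A sketch of the pattern with a hypothetical guard function:

    def require_gpu() -> None:
        import torch

        if not torch.cuda.is_available():
            # halts the notebook cell quietly: IPython calls
            # _render_traceback_() and gets nothing to print
            raise StopExecutionError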
2 changes: 1 addition & 1 deletion examples/grafana/smartctl/apply.py
@@ -8,7 +8,7 @@

 json = JSONEncoder(sort_keys=True, indent=2).encode(manifest())

-with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as tmp:
+with tempfile.NamedTemporaryFile(encoding="utf-8", mode="w", suffix=".json", delete=False) as tmp:
     tmp.write(json)

 cmd = ["grafanactl", "resources", "push", "dashboards", "--path", tmp.name]
2 changes: 1 addition & 1 deletion examples/http/_bench/utils.py
@@ -167,7 +167,7 @@ def save_result(result: BenchmarkResult, results_file: Path) -> None:
         "mean_latency_us": result.mean_latency_us,
     }

-    with results_file.open("a") as f:
+    with results_file.open("a", encoding="utf-8") as f:
         f.write(json.dumps(data) + "\n")

2 changes: 1 addition & 1 deletion notebooks/random/color_models.py
@@ -7,7 +7,7 @@ def rgb_to_cmy(pixel: list | tuple, normalize: bool = False) -> tuple:
     pixel_np = np.array(pixel)

     if normalize:
-        pixel_np = pixel_np / 255
+        pixel_np = pixel_np / 255  # noqa: PLR6104

     return tuple(1 - pixel_np)
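PLR6104 would rewrite this as pixel_np /= 255, and the suppression is sound: np.array infers an integer dtype from RGB values, and NumPy refuses in-place true division because the float result cannot be cast back into an integer array. A quick demonstration:

    import numpy as np

    pixel_np = np.array([255, 128, 0])   # integer dtype inferred
    pixel_np = pixel_np / 255            # fine: allocates a new float array
    # pixel_np /= 255                    # TypeError: divide output cannot be cast to the int array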
4 changes: 2 additions & 2 deletions notebooks/reinforcement_learning/frozen_lake.py
@@ -126,7 +126,7 @@ def compute_q_value(

     _, next_state, reward, _ = env.unwrapped.P[state][action][0]

-    if next_state == state or next_state in [5, 7, 11, 12]:
+    if next_state == state or next_state in {5, 7, 11, 12}:
         reward = -1

     return reward + gamma * V[next_state]

@@ -149,7 +149,7 @@ def improve_policy(
     improved_policy = {}

     for state in range(n_states):
-        max_action = max(range(n_actions), key=lambda action: Q[(state, action)])
+        max_action = max(range(n_actions), key=lambda action: Q[state, action])
         improved_policy[state] = max_action

     return improved_policy, Q
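Both edits here are behavior-preserving style fixes: membership tests against a constant set literal hash instead of scanning a list, and subscripting a dict with a bare tuple is identical to the parenthesized form. A two-line check:

    Q = {(0, 1): 0.5}
    assert Q[0, 1] == Q[(0, 1)]          # d[a, b] is sugar for d[(a, b)]
    assert 7 in {5, 7, 11, 12}           # set-literal membership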
2 changes: 1 addition & 1 deletion notebooks/reinforcement_learning/lunar_lander.py
@@ -31,7 +31,7 @@
 # %%
 class ReplayBuffer:
     def __init__(self, capacity: int) -> None:
-        self.buffer = deque([], maxlen=capacity)
+        self.buffer = deque(maxlen=capacity)

     def __len__(self) -> int:
         return len(self.buffer)
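Dropping the empty-list argument changes nothing: deque(maxlen=capacity) also starts empty and evicts from the opposite end once full, which is exactly the ring-buffer behavior a replay buffer needs:

    from collections import deque

    buffer = deque(maxlen=3)
    buffer.extend([1, 2, 3, 4])
    assert list(buffer) == [2, 3, 4]     # oldest transition evicted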
2 changes: 1 addition & 1 deletion notebooks/text/ngram_language_models.py
@@ -228,7 +228,7 @@

 # %%
 def generate(text_seed: list[str], random_seed: int) -> str:
-    sentence = text_seed[:]
+    sentence = text_seed.copy()
     random_seed = random.Random(random_seed)  # noqa: S311

     while True:
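text_seed.copy() and text_seed[:] build the same shallow copy; the method form just states the intent explicitly (likely prompted by Ruff's refurb slice-copy rule). Either way, generate can grow the sentence without mutating the caller's seed:

    seed = ["<s>", "the"]
    sentence = seed.copy()
    sentence.append("cat")
    assert seed == ["<s>", "the"]        # caller's list untouched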
2 changes: 1 addition & 1 deletion notebooks/vision/fashion_mnist.py
@@ -102,7 +102,7 @@
         loss.backward()
         optimizer.step()

-        steps += 1
+        steps += 1  # noqa: SIM113

 # %%
 metrics = MetricCollection(
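SIM113 suggests replacing manual loop counters with enumerate(), but steps here (and in faster_rcnn.py below) appears to be a running step count used beyond a single pass over the data, so a per-loop enumerate index would not be an equivalent rewrite; hence the suppression. A sketch of the distinction, assuming a hypothetical two-epoch run:

    steps = 0
    for epoch in range(2):
        for batch in ("a", "b", "c"):
            steps += 1                   # keeps counting across epochs
    assert steps == 6                    # an enumerate index would restart at 0 each epoch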
18 changes: 9 additions & 9 deletions notebooks/vision/faster_rcnn.py
@@ -160,7 +160,7 @@ def collate_fn(batch: list) -> tuple:
         losses.backward()
         optimizer.step()

-        steps += 1
+        steps += 1  # noqa: SIM113

     lr_scheduler.step()
     epochs += 1

@@ -171,23 +171,23 @@ def collate_fn(batch: list) -> tuple:

 save_optimizer(checkpoint_path, optimizer)

-with Path(f"{checkpoint_path}/scheduler.json").open("w") as f:
-    f.write(json.dumps(lr_scheduler.state_dict()))
+Path(f"{checkpoint_path}/scheduler.json").write_text(
+    json.dumps(lr_scheduler.state_dict()),
+    encoding="utf-8",
+)

-with Path(f"{checkpoint_path}/model.safetensors").open("wb") as f:
-    f.write(save(model.state_dict()))
+Path(f"{checkpoint_path}/model.safetensors").write_bytes(save(model.state_dict()))

 # %%
 epochs = 20
 checkpoint_path = f"./checkpoint-{epochs}"

 load_optimizer(checkpoint_path, optimizer)

-with Path(f"{checkpoint_path}/scheduler.json").open("r") as f:
-    lr_scheduler.load_state_dict(json.loads(f.read()))
+scheduler_json = Path(f"{checkpoint_path}/scheduler.json").read_text(encoding="utf-8")
+lr_scheduler.load_state_dict(json.loads(scheduler_json))

-with Path(f"{checkpoint_path}/model.safetensors").open("rb") as f:
-    model.load_state_dict(load(f.read()))
+model.load_state_dict(load(Path(f"{checkpoint_path}/model.safetensors").read_bytes()))

 # %%
 # %%time
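The pathlib one-liners are drop-in replacements for the open/write and open/read pairs they remove; write_text and read_text take the explicit encoding, while the bytes variants involve no encoding at all. A round-trip sketch with hypothetical paths:

    from pathlib import Path

    p = Path("scheduler.json")
    p.write_text('{"last_epoch": 20}', encoding="utf-8")
    assert p.read_text(encoding="utf-8") == '{"last_epoch": 20}'

    b = Path("model.safetensors")
    b.write_bytes(b"\x00\x01")
    assert b.read_bytes() == b"\x00\x01"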
4 changes: 2 additions & 2 deletions packages/config/lint-staged.js
@@ -15,7 +15,7 @@ export default {
   '*.py': [
     'uv run ruff check',
     'uv run ruff check --select I',
-    'uv run ruff format --check',
-    'uv run mypy',
+    // 'uv run ruff format --check',
+    // 'uv run mypy',
   ],
 };
6 changes: 3 additions & 3 deletions packages/config/package.json
@@ -11,18 +11,18 @@
     "eslint": "9.39.2",
     "eslint-config-flat-gitignore": "2.1.0",
     "eslint-import-resolver-typescript": "4.4.4",
-    "eslint-plugin-better-tailwindcss": "4.1.1",
+    "eslint-plugin-better-tailwindcss": "4.2.0",
     "eslint-plugin-compat": "npm:@cmpx/eslint-plugin-compat@6.0.3",
     "eslint-plugin-import-x": "4.16.1",
     "eslint-plugin-jsx-a11y": "6.10.2",
     "eslint-plugin-perfectionist": "5.5.0",
     "eslint-plugin-react": "7.37.5",
     "eslint-plugin-react-hooks": "7.0.1",
     "eslint-plugin-react-you-might-not-need-an-effect": "0.8.5",
-    "eslint-plugin-unicorn": "62.0.0",
+    "eslint-plugin-unicorn": "63.0.0",
     "globals": "17.3.0",
     "lint-staged": "16.2.7",
-    "oxlint": "1.43.0",
+    "oxlint": "1.47.0",
     "postcss-scss": "4.0.9",
     "postcss-styled-syntax": "0.7.1",
     "prettier": "3.8.1",
75 changes: 70 additions & 5 deletions packages/config/ruff.toml
@@ -4,14 +4,78 @@ indent-width = 2
 line-ending = "lf"

 [lint]
-select = ["ALL"]
+preview = true
+select = [
+  "AIR",
+  # "ERA", # commented-out-code
+  "FAST",
+  "YTT",
+  "ANN",
+  "ASYNC",
+  "S",
+  "BLE",
+  "FBT",
+  "B",
+  "A",
+  "COM",
+  "C4",
+  # "CPY", # missing-copyright-notice
+  "DTZ",
+  "T10",
+  "DJ",
+  "EM",
+  "EXE",
+  "FIX",
+  "FA",
+  "INT",
+  "ISC",
+  "ICN",
+  "LOG",
+  "G",
+  # "INP", # implicit-namespace-package
+  "PIE",
+  # "T20", # print, p-print
+  "PYI",
+  "PT",
+  "Q",
+  "RSE",
+  "RET",
+  # "SLF", # private-member-access
+  "SIM",
+  "SLOT",
+  "TID",
+  "TD",
+  "TC",
+  "ARG",
+  "PTH",
+  "FLY",
+  "I",
+  "C90",
+  "NPY",
+  "PD",
+  "N",
+  "PERF",
+  "E",
+  "W",
+  # "DOC",
+  "D",
+  "F",
+  "PGH",
+  "PL",
+  "UP",
+  "FURB",
+  "RUF",
+  "TRY"
+]
 ignore = [
   "D100", # undocumented-public-module
   "D101", # undocumented-public-class
   "D102", # undocumented-public-method
   "D103", # undocumented-public-function
   "D105", # undocumented-magic-method
   "D107", # undocumented-public-init
+  "E111", # indentation-with-invalid-multiple
+  "E114", # indentation-with-invalid-multiple-comment
   "E501", # line-too-long
   "E731", # lambda-assignment
   "E741", # ambiguous-variable-name

@@ -20,16 +84,17 @@ ignore = [
   "N806", # non-lowercase-variable-in-function
   "N812", # lowercase-imported-as-non-lowercase
   "S101", # assert
-  "T201", # print
-  "ERA001", # commented-out-code
+  "S403", # suspicious-pickle-import
+  "S404", # suspicious-subprocess-import
   "FBT001", # boolean-type-hint-positional-argument
   "FBT002", # boolean-default-value-positional-argument
-  "INP001", # implicit-namespace-package
-  "SLF001", # private-member-access
+  "PERF401", # manual-list-comprehension
+  "PLC2701", # import-private-name
+  "PLR0914", # too-many-locals
   "PLR2004", # magic-value-comparison
 ]
 allowed-confusables = [
   "µ",
   "σ",
   "∗",
 ]