Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,15 @@ ___Note__: Axion files have extra data corresponding to mass of axion used in si

<br>

# __Setup__

Install the Python dependencies for this project before running training or evaluation:
```bash
python3 -m pip install -r requirements.txt
```

If PyTorch is not installed yet, install a compatible `torch` and `torchvision` build for your platform first from the [official PyTorch install guide](https://pytorch.org/get-started/locally/), then install the remaining project requirements.

# __Training__

Use the train.py script to train a particular model (specified by its timm model name). The script will prompt for a WandB login key, so a WandB account is required. Example:
Expand Down
24 changes: 21 additions & 3 deletions DeepLense_Classification_Transformers_Archil_Srivastava/eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,9 +56,27 @@ def evaluate(model, data_loader, loss_fn, device):
# Concatenate all results
logits, y = torch.cat(logits), torch.cat(y)
loss.append(loss_fn(logits, y))
accuracy.append(accuracy_fn(logits, y, num_classes=NUM_CLASSES))
class_auroc.append(auroc_fn(logits, y, num_classes=NUM_CLASSES, average=None))
macro_auroc.append(auroc_fn(logits, y, num_classes=NUM_CLASSES, average="macro"))
accuracy.append(
accuracy_fn(logits, y, task="multiclass", num_classes=NUM_CLASSES)
)
class_auroc.append(
auroc_fn(
logits,
y,
task="multiclass",
num_classes=NUM_CLASSES,
average=None,
)
)
macro_auroc.append(
auroc_fn(
logits,
y,
task="multiclass",
num_classes=NUM_CLASSES,
average="macro",
)
)

result = {
"ground_truth": y,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Install PyTorch and torchvision for your platform first if needed:
# https://pytorch.org/get-started/locally/

einops>=0.6,<1
matplotlib>=3.7,<4
numpy>=1.24,<3
scikit-learn>=1.3,<2
timm>=0.9,<1
torch>=2.1,<3
torchmetrics>=1.3,<2
torchvision>=0.16,<1
tqdm>=4.66,<5
wandb>=0.16,<1
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,7 @@ def train(
# Scheduler
if run_config.decay_lr:
scheduler = CosineAnnealingWarmRestarts(
optimizer, T_0=15, T_mult=1, eta_min=1e-6, verbose=True
optimizer, T_0=15, T_mult=1, eta_min=1e-6
)
else:
scheduler = None
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,11 @@ def get_device(device):
return xm.xla_device()
if (device == "cuda" or device == "best") and torch.cuda.is_available():
return "cuda"
if (device == "mps" or device == "best") and torch.has_mps:
if (
(device == "mps" or device == "best")
and hasattr(torch.backends, "mps")
and torch.backends.mps.is_available()
):
return "mps"
if device == "cpu" or device == "best":
return "cpu"
Expand Down