# helper_evaluation.py
# standard-library and third-party imports
import os
import numpy as np
import random
import torch
from distutils.version import LooseVersion as Version  # note: distutils was removed in Python 3.12; packaging.version is the modern replacement
from itertools import product
from tqdm import tqdm


def set_all_seeds(seed):
    """Seed Python, NumPy, and PyTorch (CPU and all CUDA devices) for reproducibility."""
    os.environ["PL_GLOBAL_SEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def set_deterministic():
    """Disable non-deterministic cuDNN behavior and enable PyTorch's deterministic mode."""
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
    if Version(torch.__version__) <= Version("1.7"):
        torch.set_deterministic(True)
    else:
        # Some CUDA operations additionally require the CUBLAS_WORKSPACE_CONFIG
        # environment variable (e.g. ":4096:8") to run deterministically.
        torch.use_deterministic_algorithms(True)
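
# Example (illustrative usage, not executed at import time): call the two
# helpers above once at the start of a training script, before the model and
# data loaders are built. The seed value 123 is arbitrary.
#
#     set_all_seeds(123)
#     set_deterministic()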


def compute_accuracy(model, data_loader, device):
    """Return the classification accuracy (in percent) over all batches in data_loader.

    The caller is expected to switch the model to evaluation mode (model.eval()) first.
    """
    with torch.no_grad():
        correct_pred, num_examples = 0, 0
        for features, targets in tqdm(data_loader):
            features = features.to(device)
            targets = targets.to(device)
            logits = model(features)
            _, predicted_labels = torch.max(logits, 1)
            num_examples += targets.size(0)
            correct_pred += (predicted_labels == targets).sum()
    return correct_pred.float() / num_examples * 100


def compute_confusion_matrix(model, data_loader, device):
    """Return the confusion matrix (rows = true class, columns = predicted class)."""
    all_targets, all_predictions = [], []
    with torch.no_grad():
        for features, targets in data_loader:
            features = features.to(device)
            logits = model(features)
            _, predicted_labels = torch.max(logits, 1)
            all_targets.extend(targets.to('cpu'))
            all_predictions.extend(predicted_labels.to('cpu'))

    all_predictions = np.array(all_predictions)
    all_targets = np.array(all_targets)

    # Ensure at least two class labels so the matrix is always at least 2x2.
    class_labels = np.unique(np.concatenate((all_targets, all_predictions)))
    if class_labels.shape[0] == 1:
        if class_labels[0] != 0:
            class_labels = np.array([0, class_labels[0]])
        else:
            class_labels = np.array([class_labels[0], 1])
    n_labels = class_labels.shape[0]

    # Count every (true, predicted) combination and arrange the counts
    # into an n_labels x n_labels matrix.
    z = list(zip(all_targets, all_predictions))
    lst = [z.count(combi) for combi in product(class_labels, repeat=2)]
    mat = np.asarray(lst).reshape(n_labels, n_labels)
    return mat
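

# Minimal smoke test (illustrative only, not part of the original training
# pipeline): exercises the helpers above on a tiny linear model with random
# data, so the module can be sanity-checked without the ResNeXt notebook.
if __name__ == "__main__":
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset

    set_all_seeds(123)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Dummy 3-class classification problem: 64 samples with 10 features each.
    dummy_features = torch.randn(64, 10)
    dummy_targets = torch.randint(0, 3, (64,))
    loader = DataLoader(TensorDataset(dummy_features, dummy_targets), batch_size=16)

    model = nn.Linear(10, 3).to(device)
    model.eval()

    acc = compute_accuracy(model, loader, device)
    cmat = compute_confusion_matrix(model, loader, device)
    print(f"accuracy: {acc.item():.2f}%")
    print("confusion matrix:\n", cmat)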