From ec6acc9f0b4684f65855ccdb2615ac2eeed46298 Mon Sep 17 00:00:00 2001
From: Justin Shenk
Date: Tue, 16 Jul 2019 19:34:59 +0200
Subject: [PATCH] Format with black

---
 alternative_loaders.py |  53 +++--
 csv_logger.py          |  38 ++-
 extract_history.py     |  17 +-
 main_experiment.py     | 328 ++++++++++++++++----------
 models.py              | 511 ++++++++++++++++++++++++++++++-----------
 5 files changed, 662 insertions(+), 285 deletions(-)

diff --git a/alternative_loaders.py b/alternative_loaders.py
index f626135..acf4c40 100644
--- a/alternative_loaders.py
+++ b/alternative_loaders.py
@@ -18,8 +18,12 @@
 transform_no_aug = transforms.Compose([TT, NRM])
 
 # Downloading/Loading CIFAR10 data
-trainset = CIFAR10(root='./data', train=True, download=True)  # , transform = transform_with_aug)
-testset = CIFAR10(root='./data', train=False, download=True)  # , transform = transform_no_aug)
+trainset = CIFAR10(
+    root="./data", train=True, download=True
+)  # , transform = transform_with_aug)
+testset = CIFAR10(
+    root="./data", train=False, download=True
+)  # , transform = transform_no_aug)
 classDict = trainset.class_to_idx
 
 # Separating trainset/testset data/label
@@ -31,6 +35,7 @@
 
 # Define a function to separate CIFAR classes by class index
 
+
 def get_class_i(x, y, i):
     """
     x: trainset.train_data or testset.test_data
@@ -89,35 +94,43 @@ def index_of_which_bin(self, bin_sizes, absolute_index, verbose=False):
 
 # ================== Usage ================== #
 
-def get_n_fold_datasets_train(t, batch_size, class_names=['cat', 'dog']):
+
+def get_n_fold_datasets_train(t, batch_size, class_names=["cat", "dog"]):
     # Let's choose cats (class 3 of CIFAR) and dogs (class 5 of CIFAR) as trainset/testset
-    cat_dog_trainset = \
-        DatasetMaker(
-            [get_class_i(x_train, y_train, classDict[class_names[0]]), get_class_i(x_train, y_train, classDict[class_names[1]])],
-            transform_with_aug
-        )
+    cat_dog_trainset = DatasetMaker(
+        [
+            get_class_i(x_train, y_train, classDict[class_names[0]]),
+            get_class_i(x_train, y_train, classDict[class_names[1]]),
+        ],
+        transform_with_aug,
+    )
 
-    kwargs = {'num_workers': 3, 'pin_memory': False}
+    kwargs = {"num_workers": 3, "pin_memory": False}
 
     # Create datasetLoaders from trainset and testset
-    trainsetLoader = DataLoader(cat_dog_trainset, batch_size=batch_size, shuffle=True, **kwargs)
+    trainsetLoader = DataLoader(
+        cat_dog_trainset, batch_size=batch_size, shuffle=True, **kwargs
+    )
     return trainsetLoader
 
 
-def get_n_fold_datasets_test(t, batch_size, class_names=['cat', 'dog']):
+def get_n_fold_datasets_test(t, batch_size, class_names=["cat", "dog"]):
     # Let's choose cats (class 3 of CIFAR) and dogs (class 5 of CIFAR) as trainset/testset
-    cat_dog_testset = \
-        DatasetMaker(
-            [get_class_i(x_test, y_test, classDict[class_names[0]]),
-             get_class_i(x_test, y_test, classDict[class_names[1]])],
-            transform_no_aug
-        )
+    cat_dog_testset = DatasetMaker(
+        [
+            get_class_i(x_test, y_test, classDict[class_names[0]]),
+            get_class_i(x_test, y_test, classDict[class_names[1]]),
+        ],
+        transform_no_aug,
+    )
 
-    kwargs = {'num_workers': 3, 'pin_memory': False}
+    kwargs = {"num_workers": 3, "pin_memory": False}
 
     # Create datasetLoaders from trainset and testset
-    testsetLoader = DataLoader(cat_dog_testset, batch_size=batch_size, shuffle=False, **kwargs)
-    return testsetLoader
\ No newline at end of file
+    testsetLoader = DataLoader(
+        cat_dog_testset, batch_size=batch_size, shuffle=False, **kwargs
+    )
+    return testsetLoader
diff --git a/csv_logger.py b/csv_logger.py
index efc4e84..84ac298 100644
--- a/csv_logger.py
+++ b/csv_logger.py
@@ -1,31 +1,45 @@
 import csv
 import pandas as pd
 
-def extract_metrics_from_ordeered_dict(ordered_dict, mode='train', result={}):
+
+def extract_metrics_from_ordeered_dict(ordered_dict, mode="train", result={}):
     for key in ordered_dict.keys():
-        name = key.split('/')[-1]
-        result[mode+'_'+name] = [ordered_dict[key]]
+        name = key.split("/")[-1]
+        result[mode + "_" + name] = [ordered_dict[key]]
     return result
+
+
 def extract_metrics_from_scalaer_dict(log_dict):
     result_dict = {}
     for key in log_dict.keys():
-        mode = key.split('-')[0]
+        mode = key.split("-")[0]
         extract_metrics_from_ordeered_dict(log_dict[key], mode, result_dict)
     return result_dict
 
+
 def log_to_csv(value_dict, savename):
     df = pd.DataFrame.from_dict(value_dict)
-    df.to_csv(savename+'.csv', sep=';')
+    df.to_csv(savename + ".csv", sep=";")
+
 
-def record_metrics(value_dict, log_dict, train_accuracy, train_loss, test_accuracy, test_loss, epoch, time):
+def record_metrics(
+    value_dict,
+    log_dict,
+    train_accuracy,
+    train_loss,
+    test_accuracy,
+    test_loss,
+    epoch,
+    time,
+):
     result_dict = extract_metrics_from_scalaer_dict(log_dict)
-    result_dict['train_accuracy'] = [train_accuracy]
-    result_dict['test_accuracy'] = [test_accuracy]
-    result_dict['train_loss'] = [train_loss]
-    result_dict['test_loss'] = [test_loss]
-    result_dict['epoch'] = [epoch]
-    result_dict['time_per_step'] = [time]
+    result_dict["train_accuracy"] = [train_accuracy]
+    result_dict["test_accuracy"] = [test_accuracy]
+    result_dict["train_loss"] = [train_loss]
+    result_dict["test_loss"] = [test_loss]
+    result_dict["epoch"] = [epoch]
+    result_dict["time_per_step"] = [time]
     if value_dict is None:
         return result_dict
     else:
diff --git a/extract_history.py b/extract_history.py
index f5b142e..cc352ed 100644
--- a/extract_history.py
+++ b/extract_history.py
@@ -7,12 +7,15 @@
 
 
 def tabulate_events(dpath):
-    summary_iterators = [EventAccumulator(os.path.join(dpath, dname)).Reload() for dname in os.listdir(dpath)]
+    summary_iterators = [
+        EventAccumulator(os.path.join(dpath, dname)).Reload()
+        for dname in os.listdir(dpath)
+    ]
 
-    tags = summary_iterators[0].Tags()['scalars']
+    tags = summary_iterators[0].Tags()["scalars"]
 
     for it in summary_iterators:
-        assert it.Tags()['scalars'] == tags
+        assert it.Tags()["scalars"] == tags
 
     out = defaultdict(list)
     steps = []
@@ -41,13 +44,13 @@ def to_csv(dpath):
 
 
 def get_file_path(dpath, tag):
-    file_name = tag.replace("/", "_") + '.csv'
-    folder_path = os.path.join(dpath, 'csv')
+    file_name = tag.replace("/", "_") + ".csv"
+    folder_path = os.path.join(dpath, "csv")
     if not os.path.exists(folder_path):
         os.makedirs(folder_path)
     return os.path.join(folder_path, file_name)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     path = "train_run_576_1152_2304"
-    to_csv(path)
\ No newline at end of file
+    to_csv(path)
diff --git a/main_experiment.py b/main_experiment.py
index 97876d8..95043f1 100644
--- a/main_experiment.py
+++ b/main_experiment.py
@@ -29,18 +29,25 @@
 global IMBALANCE
 SAMPLER = None
 
+
 def _shuffle_classes(class_list):
     c_samples = np.random.choice(class_list, len(class_list), replace=False)
     class_list = np.asarray(c_samples)[class_list]
     return class_list
 
+
 def get_class_probas(class_list, skew_val=1.0):
     class_list = _shuffle_classes(class_list)
-    probas = np.linspace(-len(class_list)//2, len(class_list)//2, len(class_list)) * 0.01 * skew_val
+    probas = (
+        np.linspace(-len(class_list) // 2, len(class_list) // 2, len(class_list))
+        * 0.01
+        * skew_val
+    )
     probas += 0.1
     print(probas)
     return probas
 
+
 def get_sampler_with_random_imbalance(skew_val, num_samples, n_classes, labels):
     classes = list(range(n_classes))
     class_probas = get_class_probas(classes, skew_val)
@@ -54,18 +61,20 @@ def get_sampler_with_random_imbalance(skew_val, num_samples, n_classes, labels):
     print(class_probas)
     return WeightedRandomSampler(weights, num_samples, replacement=True)
 
+
 def train_set_cifar(transform, batch_size):
     train_set = torchvision.datasets.CIFAR10(
-        root='./data', train=True, download=True, transform=transform
+        root="./data", train=True, download=True, transform=transform
     )
     train_loader = torch.utils.data.DataLoader(
         train_set, batch_size=batch_size, shuffle=True, num_workers=2
     )
     return train_loader
 
+
 def test_set_cifar(transform, batch_size):
     test_set = torchvision.datasets.CIFAR10(
-        root='./data', train=False, download=True, transform=transform
+        root="./data", train=False, download=True, transform=transform
     )
     test_loader = torch.utils.data.DataLoader(
         test_set, batch_size=batch_size, shuffle=False, num_workers=2
@@ -76,16 +85,17 @@ def test_set_cifar(transform, batch_size):
 
 
 def train_set_cifar100(transform, batch_size):
     train_set = torchvision.datasets.CIFAR100(
-        root='./data', train=True, download=True, transform=transform
+        root="./data", train=True, download=True, transform=transform
     )
     train_loader = torch.utils.data.DataLoader(
         train_set, batch_size=batch_size, shuffle=True, num_workers=2
     )
     return train_loader
 
+
 def test_set_cifar100(transform, batch_size):
     test_set = torchvision.datasets.CIFAR100(
-        root='./data', train=False, download=True, transform=transform
+        root="./data", train=False, download=True, transform=transform
     )
     test_loader = torch.utils.data.DataLoader(
         test_set, batch_size=batch_size, shuffle=False, num_workers=2
@@ -96,26 +106,30 @@ def test_set_cifar100(transform, batch_size):
 
 
 def train_set_imbalanced_cifar(transformer, batch_size, skew_val):
     train_set = torchvision.datasets.CIFAR10(
-        root='./data', train=True, download=True, transform=transformer
+        root="./data", train=True, download=True, transform=transformer
    )
     global SAMPLER
     if SAMPLER is None:
-        SAMPLER = get_sampler_with_random_imbalance(skew_val, len(train_set.targets), n_classes=10, labels=train_set.targets)
+        SAMPLER = get_sampler_with_random_imbalance(
+            skew_val, len(train_set.targets), n_classes=10, labels=train_set.targets
+        )
     train_loader = torch.utils.data.DataLoader(
         train_set, batch_size=batch_size, sampler=SAMPLER
     )
     return train_loader
 
+
 def train(network, dataset, test_set, logging_dir, batch_size):
     network.to(device)
     criterion = nn.CrossEntropyLoss()
     optimizer = optim.Adam(network.parameters())
-    #stats = CheckLayerSat(logging_dir, network, log_interval=len(dataset)//batch_size)
-    stats = CheckLayerSat(logging_dir, network, log_interval=60, sat_method='cumvar99', conv_method='mean')
-
+    # stats = CheckLayerSat(logging_dir, network, log_interval=len(dataset)//batch_size)
+    stats = CheckLayerSat(
+        logging_dir, network, log_interval=60, sat_method="cumvar99", conv_method="mean"
+    )
 
     epoch_acc = 0
     thresh = 0.95
@@ -124,7 +138,7 @@ def train(network, dataset, test_set, logging_dir, batch_size):
     correct = 0
     value_dict = None
     while epoch <= 20:
-        print('Start Training Epoch', epoch, '\n')
+        print("Start Training Epoch", epoch, "\n")
         start = t.time()
         epoch_acc = 0
         train_loss = 0
@@ -132,7 +146,7 @@ def train(network, dataset, test_set, logging_dir, batch_size):
         correct = 0
         network.train()
         for i, data in enumerate(dataset):
-            step = epoch*len(dataset) + i
+            step = epoch * len(dataset) + i
             inputs, labels = data
             inputs, labels = inputs.to(device), labels.to(device)
             optimizer.zero_grad()
@@ -146,21 +160,39 @@ def train(network, dataset, test_set, logging_dir, batch_size):
             _, predicted = outputs.max(1)
             total += labels.size(0)
             correct += predicted.eq(labels).sum().item()
-            #if i % 2000 == 1999:  # print every 2000 mini-batches
-            print(i,'of', len(dataset),'acc:', correct/total)
+            # if i % 2000 == 1999:  # print every 2000 mini-batches
+            print(i, "of", len(dataset), "acc:", correct / total)
         # display layer saturation levels
         end = t.time()
         stats.saturation()
         test_loss, test_acc = test(network, test_set, criterion, stats, epoch)
         epoch_acc = correct / total
-        print('Epoch', epoch, 'finished', 'Acc:', epoch_acc, 'Loss:', train_loss / total,'\n')
-        stats.add_scalar('train_loss', train_loss / total, epoch)  # optional
-        stats.add_scalar('train_acc', epoch_acc, epoch)  # optional
-        value_dict = record_metrics(value_dict, stats.logs, epoch_acc, train_loss/total, test_acc, test_loss, epoch, (end-start) / total)
+        print(
+            "Epoch",
+            epoch,
+            "finished",
+            "Acc:",
+            epoch_acc,
+            "Loss:",
+            train_loss / total,
+            "\n",
+        )
+        stats.add_scalar("train_loss", train_loss / total, epoch)  # optional
+        stats.add_scalar("train_acc", epoch_acc, epoch)  # optional
+        value_dict = record_metrics(
+            value_dict,
+            stats.logs,
+            epoch_acc,
+            train_loss / total,
+            test_acc,
+            test_loss,
+            epoch,
+            (end - start) / total,
+        )
         log_to_csv(value_dict, logging_dir)
         epoch += 1
     stats.close()
-# test_stats.close()
+    # test_stats.close()
     return criterion
@@ -182,17 +214,29 @@ def test(network, dataset, criterion, stats, epoch):
             total += targets.size(0)
             correct += predicted.eq(targets).sum().item()
 
-            #if batch_idx % 200 == 199:  # print every 200 mini-batches
-            print(batch_idx,'of', len(dataset),'acc:', correct/total)
+            # if batch_idx % 200 == 199:  # print every 200 mini-batches
+            print(batch_idx, "of", len(dataset), "acc:", correct / total)
     stats.saturation()
-    print('Test finished', 'Acc:', correct / total, 'Loss:', test_loss/total,'\n')
-    stats.add_scalar('test_loss', test_loss/total, epoch)  # optional
-    stats.add_scalar('test_acc', correct/total, epoch)  # optional
-    return test_loss/total, correct/total
-
-def execute_experiment(network: nn.Module, in_channels: int, n_classes: int, l1: int, l2: int , l3: int, train_set: FunctionType, test_set: FunctionType):
-
-    print('Experiment has started')
+    print(
+        "Test finished", "Acc:", correct / total, "Loss:", test_loss / total, "\n"
+    )
+    stats.add_scalar("test_loss", test_loss / total, epoch)  # optional
+    stats.add_scalar("test_acc", correct / total, epoch)  # optional
+    return test_loss / total, correct / total
+
+
+def execute_experiment(
+    network: nn.Module,
+    in_channels: int,
+    n_classes: int,
+    l1: int,
+    l2: int,
+    l3: int,
+    train_set: FunctionType,
+    test_set: FunctionType,
+):
+
+    print("Experiment has started")
 
     batch_size = 128
@@ -207,144 +251,192 @@ def execute_experiment(network: nn.Module, in_channels: int, n_classes: int, l1:
             i += 1
             if i <= check:
                 continue
-            print('Creating Network')
-
-            net = network(in_channels=in_channels,
-                          l1=l1_config,
-                          l2=l2_config,
-                          l3=l3_config,
-                          n_classes=n_classes)
-            print('Network created')
-
+            print("Creating Network")
+            net = network(
+                in_channels=in_channels,
+                l1=l1_config,
+                l2=l2_config,
+                l3=l3_config,
+                n_classes=n_classes,
+            )
+            print("Network created")
 
             train_loader = train_set(transform, batch_size)
             test_loader = test_set(transform, batch_size)
-
-            print('Datasets fetched')
-            train(net, train_loader, test_loader, '{}_{}_{}'.format(l1_config, l2_config, l3_config), batch_size)
+            print("Datasets fetched")
+            train(
+                net,
+                train_loader,
+                test_loader,
+                "{}_{}_{}".format(l1_config, l2_config, l3_config),
+                batch_size,
+            )
 
             del net
 
 
-def execute_experiment_vgg(network: nn.Module, net_name: str, train_set: FunctionType, test_set: FunctionType, n_claases=2):
-    print('Experiment has started')
+def execute_experiment_vgg(
+    network: nn.Module,
+    net_name: str,
+    train_set: FunctionType,
+    test_set: FunctionType,
+    n_claases=2,
+):
+
+    print("Experiment has started")
 
     batch_size = 128
 
-    transform_train = transforms.Compose([
-        transforms.RandomCrop(32, padding=4),
-        transforms.RandomHorizontalFlip(),
-        #transforms.Resize(224),
-        transforms.ToTensor(),
-        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
-    ])
-
-    transform_test = transforms.Compose([
-        #transforms.Resize(224),
-        transforms.ToTensor(),
-        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
-    ])
-    print('Creating Network')
+    transform_train = transforms.Compose(
+        [
+            transforms.RandomCrop(32, padding=4),
+            transforms.RandomHorizontalFlip(),
+            # transforms.Resize(224),
+            transforms.ToTensor(),
+            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+        ]
+    )
+
+    transform_test = transforms.Compose(
+        [
+            # transforms.Resize(224),
+            transforms.ToTensor(),
+            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+        ]
+    )
+    print("Creating Network")
 
     train_loader = train_set(transform_train, batch_size)
     test_loader = test_set(transform_test, batch_size)
 
-    net = network(num_classes=n_claases)#create_cnn(data, vgg16).model#network()
+    net = network(num_classes=n_claases)  # create_cnn(data, vgg16).model#network()
     print(net)
-    print('Network created')
-
+    print("Network created")
 
-    print('Datasets fetched')
+    print("Datasets fetched")
     train(net, train_loader, test_loader, net_name, batch_size)
 
     return
 
 
-if '__main__' == __name__:
-
-    executions = [vgg11_XXXS, vgg13_XXXS, vgg16_XXXS, vgg19_XXXS,
-                  vgg11_XXS, vgg13_XXS, vgg16_XXS, vgg19_XXS,
-                  vgg11_XS, vgg13_XS, vgg16_XS, vgg19_XS,
-                  vgg11_S, vgg13_S, vgg16_S, vgg19_S,
-                  vgg11, vgg13, vgg16, vgg19,
-                  ]
-    names = ['11_XXXS', '13_XXXS', '16_XXXS', '19_XXXS',
-             '11_XXS', '13_XXS', '16_XXS', '19_XXS',
-             '11_XS', '13_XS', '16_XS', '19_XS',
-             '11_S', '13_S', '16_S', '19_S',
-             '11', '13', '16', '19',
-             ]
-
-    train_set = lambda transform_train, batch_size: get_n_fold_datasets_train(transform_train, batch_size, ['automobile', 'frog'])
-    test_set = lambda transform_test, batch_size: get_n_fold_datasets_test(transform_test, batch_size, ['automobile', 'frog'])
+if "__main__" == __name__:
+
+    executions = [
+        vgg11_XXXS,
+        vgg13_XXXS,
+        vgg16_XXXS,
+        vgg19_XXXS,
+        vgg11_XXS,
+        vgg13_XXS,
+        vgg16_XXS,
+        vgg19_XXS,
+        vgg11_XS,
+        vgg13_XS,
+        vgg16_XS,
+        vgg19_XS,
+        vgg11_S,
+        vgg13_S,
+        vgg16_S,
+        vgg19_S,
+        vgg11,
+        vgg13,
+        vgg16,
+        vgg19,
+    ]
+    names = [
+        "11_XXXS",
+        "13_XXXS",
+        "16_XXXS",
+        "19_XXXS",
+        "11_XXS",
+        "13_XXS",
+        "16_XXS",
+        "19_XXS",
+        "11_XS",
+        "13_XS",
+        "16_XS",
+        "19_XS",
+        "11_S",
+        "13_S",
+        "16_S",
+        "19_S",
+        "11",
+        "13",
+        "16",
+        "19",
+    ]
 
-    #executions.reverse()
-    #names.reverse()
+    train_set = lambda transform_train, batch_size: get_n_fold_datasets_train(
+        transform_train, batch_size, ["automobile", "frog"]
+    )
+    test_set = lambda transform_test, batch_size: get_n_fold_datasets_test(
+        transform_test, batch_size, ["automobile", "frog"]
+    )
 
+    # executions.reverse()
+    # names.reverse()
     train_set_imbalanced_cifar
     counter = 0
     for j in [0, 1]:
-        #sampler = WeightedRandomSampler()
+        # sampler = WeightedRandomSampler()
         for i in range(len(names)):
             counter += 1
-            print('COUNTER:',counter)
+            print("COUNTER:", counter)
             print(names[i])
-            #configVGG_cifar = {
+            # configVGG_cifar = {
             #    'network': executions[i],
             #    'train_set': train_set,
             #    'test_set': test_set,
             #    'net_name': 'automobilefrog_VGG' + names[i] + '_A' + str(j),
             #    'n_claases': 2
-            #}
+            # }
 
-           # execute_experiment_vgg(**configVGG_cifar)
+            # execute_experiment_vgg(**configVGG_cifar)
 
             configVGG_cifar = {
-                'network': executions[i],
-                'train_set': train_set_cifar100,#lambda t, batch_size: train_set_imbalanced_cifar(t, batch_size=batch_size, skew_val=1.0),
-                'test_set': test_set_cifar100,
-                'net_name': '100_VGG' + names[i] + '_A' + str(j),
-                'n_claases': 100
+                "network": executions[i],
+                "train_set": train_set_cifar100,  # lambda t, batch_size: train_set_imbalanced_cifar(t, batch_size=batch_size, skew_val=1.0),
+                "test_set": test_set_cifar100,
+                "net_name": "100_VGG" + names[i] + "_A" + str(j),
+                "n_claases": 100,
             }
 
             execute_experiment_vgg(**configVGG_cifar)
 
-
     configCNN_cifar = {
-        'network': SimpleCNN,
-        'in_channels': 3,
-        'n_classes': 10,
-        'l1' : [4, 16, 64],
-        'l2' : [8, 32, 128],
-        'l3' : [16, 64, 256],
-        'train_set': train_set_cifar,
-        'test_set': test_set_cifar
+        "network": SimpleCNN,
+        "in_channels": 3,
+        "n_classes": 10,
+        "l1": [4, 16, 64],
+        "l2": [8, 32, 128],
+        "l3": [16, 64, 256],
+        "train_set": train_set_cifar,
+        "test_set": test_set_cifar,
    }
 
     configCNNKernel_cifar = {
-        'network': SimpleCNNKernel,
-        'in_channels': 3,
-        'n_classes': 10,
-        'l1': [3, 5, 7],
-        'l2': [3, 5, 7],
-        'l3': [3, 5, 7],
-        'train_set': train_set_cifar,
-        'test_set': test_set_cifar
+        "network": SimpleCNNKernel,
+        "in_channels": 3,
+        "n_classes": 10,
+        "l1": [3, 5, 7],
+        "l2": [3, 5, 7],
+        "l3": [3, 5, 7],
+        "train_set": train_set_cifar,
+        "test_set": test_set_cifar,
     }
 
     configFCN_cifar = {
-        'network': SimpleFCNet,
-        'in_channels': 32*32*3,
-        'n_classes': 10,
-        'l1' : [4*3*3, 16*3*3, 64*3*3],
-        'l2' : [8*3*3, 32*3*3, 128*3*3],
-        'l3' : [16*3*3, 64*3*3, 256*3*3],
-        'train_set': train_set_cifar,
-        'test_set': test_set_cifar
+        "network": SimpleFCNet,
+        "in_channels": 32 * 32 * 3,
+        "n_classes": 10,
+        "l1": [4 * 3 * 3, 16 * 3 * 3, 64 * 3 * 3],
+        "l2": [8 * 3 * 3, 32 * 3 * 3, 128 * 3 * 3],
+        "l3": [16 * 3 * 3, 64 * 3 * 3, 256 * 3 * 3],
+        "train_set": train_set_cifar,
+        "test_set": test_set_cifar,
     }
 
-    #execute_experiment(**configCNN_cifar)
-
-
+    # execute_experiment(**configCNN_cifar)
diff --git a/models.py b/models.py
index 88a198e..80b5ded 100644
--- a/models.py
+++ b/models.py
@@ -13,15 +13,17 @@
 
 
 class SimpleFCNet(nn.Module):
-    def __init__(self,
-                 in_channels: int,
-                 l1: int = 1024,
-                 l2: int = 512,
-                 l3: int = 256,
-                 n_classes: int = 10):
+    def __init__(
+        self,
+        in_channels: int,
+        l1: int = 1024,
+        l2: int = 512,
+        l3: int = 256,
+        n_classes: int = 10,
+    ):
         super(SimpleFCNet, self).__init__()
 
-        print('Setting up FCN with: l1', l1, 'l2', l2, 'l3', l3)
+        print("Setting up FCN with: l1", l1, "l2", l2, "l3", l3)
 
         # feature extractor
         self.fc0 = nn.Linear(in_channels, l1)
@@ -42,122 +44,319 @@ def forward(self, x):
 
 
 class SimpleCNN(nn.Module):
-    def __init__(self,
-                 in_channels: int = 3,
-                 l1: int = 8,
-                 l2: int = 16,
-                 l3: int = 32,
-                 n_classes: int = 10):
+    def __init__(
+        self,
+        in_channels: int = 3,
+        l1: int = 8,
+        l2: int = 16,
+        l3: int = 32,
+        n_classes: int = 10,
+    ):
         super(SimpleCNN, self).__init__()
-        print('Setting up CNN with: l1',l1,'l2',l2,'l3',l3)
+        print("Setting up CNN with: l1", l1, "l2", l2, "l3", l3)
 
         # feature extractor
         self.conv00 = nn.Conv2d(in_channels=in_channels, out_channels=l1, kernel_size=5)
-        self.pool1 = nn.MaxPool2d(2,2)
+        self.pool1 = nn.MaxPool2d(2, 2)
         self.conv10 = nn.Conv2d(in_channels=l1, out_channels=l2, kernel_size=5)
         self.pool2 = nn.MaxPool2d(2, 2)
         self.conv20 = nn.Conv2d(in_channels=l2, out_channels=l3, kernel_size=5)
         self.pool3 = nn.MaxPool2d(2, 2)
 
         # readout + head
-       # self.pool = AdaptiveConcatPool2d(1)
+        # self.pool = AdaptiveConcatPool2d(1)
         self.fc0 = nn.Linear(l3 * 400, l3)
-       # self.fc1 = nn.Linear(l3, l3//2)
+        # self.fc1 = nn.Linear(l3, l3//2)
         self.out = nn.Linear(l3, n_classes)
 
     def forward(self, x):
         x = F.relu(self.conv00(x))
-        #x = self.pool1(x)
+        # x = self.pool1(x)
         x = F.relu(self.conv10(x))
-        #x = self.pool2(x)
+        # x = self.pool2(x)
         x = F.relu(self.conv20(x))
-        #x = self.pool3(x)
+        # x = self.pool3(x)
         x = x.view(x.size(0), -1)
         x = F.relu(self.fc0(x))
-       # x = F.relu(self.fc1(x))
+        # x = F.relu(self.fc1(x))
         x = self.out(x)
         return x
 
 
 class SimpleCNNKernel(nn.Module):
-    def __init__(self, in_channels: int = 3, l1: int = 5, l2: int = 5, l3: int = 5, n_classes: int = 10):
+    def __init__(
+        self,
+        in_channels: int = 3,
+        l1: int = 5,
+        l2: int = 5,
+        l3: int = 5,
+        n_classes: int = 10,
+    ):
         super(SimpleCNNKernel, self).__init__()
-        print('Setting up CNN with: kernel1', l1, 'kernel2', l2, 'kernel3', l3)
+        print("Setting up CNN with: kernel1", l1, "kernel2", l2, "kernel3", l3)
 
-        out_res = 32 - (2*(l1 // 2)) - (2*(l2 // 2)) - (2*(l3 // 2))
-        out_res = out_res**2 * 32
+        out_res = 32 - (2 * (l1 // 2)) - (2 * (l2 // 2)) - (2 * (l3 // 2))
+        out_res = out_res ** 2 * 32
 
         # feature extractor
         self.conv00 = nn.Conv2d(in_channels=in_channels, out_channels=8, kernel_size=l1)
         self.conv10 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=l2)
         self.conv20 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=l3)
 
         # readout + head
-       # self.pool = AdaptiveConcatPool2d(1)
+        # self.pool = AdaptiveConcatPool2d(1)
         self.fc0 = nn.Linear(out_res, l3)
-       # self.fc1 = nn.Linear(l3, l3//2)
+        # self.fc1 = nn.Linear(l3, l3//2)
         self.out = nn.Linear(l3, n_classes)
 
     def forward(self, x):
         x = F.relu(self.conv00(x))
-        #x = self.pool1(x)
+        # x = self.pool1(x)
         x = F.relu(self.conv10(x))
-        #x = self.pool2(x)
+        # x = self.pool2(x)
         x = F.relu(self.conv20(x))
-        #x = self.pool3(x)
+        # x = self.pool3(x)
         x = x.view(x.size(0), -1)
         x = F.relu(self.fc0(x))
-       # x = F.relu(self.fc1(x))
+        # x = F.relu(self.fc1(x))
         x = self.out(x)
         return x
 
+
 cfg = {
-    'V': [64, 'M'],
-    'VS': [32, 'M'],
-    'W': [64, 'M', 128, 'M'],
-    'WS': [32, 'M', 64, 'M'],
-    'X': [64, 'M', 128, 'M', 256, 'M'],
-    'XS': [32, 'M', 64, 'M', 128, 'M'],
-    'Y': [64, 'M', 128, 'M', 256, 'M', 512, 'M'],
-    'YS': [32, 'M', 64, 'M', 128, 'M', 256, 'M'],
-    'YXS': [16, 'M', 32, 'M', 64, 'M', 128, 'M'],
-    'Z': [64, 'M', 128, 'M', 256, 'M', 512, 'M', 512, 'M'],
-    'ZXS': [16, 'M', 32, 'M', 64, 'M', 128, 'M', 128, 'M'],
-    'ZXXS': [8, 'M', 16, 'M', 32, 'M', 64, 'M', 64, 'M'],
-    'ZXXXS': [4, 'M', 8, 'M', 16, 'M', 32, 'M', 32, 'M'],
-    'ZS': [32, 'M', 64, 'M', 128, 'M', 256, 'M', 256, 'M'],
-    'AS': [32, 'M', 64, 'M', 126, 126, 'M', 256, 256, 'M', 256, 256, 'M'],
-    'AXS': [16, 'M', 32, 'M', 64, 64, 'M', 128, 128, 'M', 128, 128, 'M'],
-    'AXXS': [8, 'M', 16, 'M', 32, 32, 'M', 64, 64, 'M', 64, 64, 'M'],
-    'AXXXS': [4, 'M', 8, 'M', 16, 16, 'M', 32, 32, 'M', 32, 32, 'M'],
-    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
-    'AL': [128, 'M', 256, 'M', 512, 512, 'M', 1024, 1024, 'M', 1024, 1024, 'M'],
-    'BS': [32, 32, 'M', 64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 256, 256, 'M'],
-    'BXS': [16, 16, 'M', 32, 32, 'M', 64, 64, 'M', 128, 128, 'M', 128, 128, 'M'],
-    'BXXS': [8, 8, 'M', 16, 16, 'M', 32, 32, 'M', 64, 64, 'M', 64, 64, 'M'],
-    'BXXXS': [4, 4, 'M', 8, 8, 'M', 16, 16, 'M', 32, 32, 'M', 32, 32, 'M'],
-    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
-    'DS': [32, 32, 'M', 64, 64, 'M', 128, 128, 128, 'M', 256, 256, 256, 'M', 256, 256, 256, 'M'],
-    'DXXXS': [4, 4, 'M', 8, 8, 'M', 16, 16, 16, 'M', 32, 32, 32, 'M', 32, 32, 32, 'M'],
-    'DXXS': [8, 8, 'M', 16, 16, 'M', 32, 32, 32, 'M', 64, 64, 64, 'M', 64, 64, 64, 'M'],
-    'DXS': [16, 16, 'M', 32, 32, 'M', 64, 64, 64, 'M', 128, 128, 128, 'M', 128, 128, 128, 'M'],
-    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
-    'DL': [128, 128, 'M', 256, 256, 'M', 512, 512, 512, 'M', 1024, 1024, 1024, 'M', 1024, 1024, 1024, 'M'],
-    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
-    'ES': [32, 32, 'M', 64, 64, 'M', 128, 128, 128, 128, 'M', 256, 256, 256, 256, 'M', 256, 256, 256, 256, 'M'],
-    'EXS': [16, 16, 'M', 32, 32, 'M', 64, 64, 64, 64, 'M', 128, 128, 128, 128, 'M', 128, 128, 128, 128, 'M'],
-    'EXXS': [8, 8, 'M', 16, 16, 'M', 32, 32, 32, 32, 'M', 64, 64, 64, 64, 'M', 64, 64, 64, 64, 'M'],
-    'EXXXS': [4, 4, 'M', 8, 8, 'M', 16, 16, 16, 16, 'M', 32, 32, 32, 32, 'M', 32, 32, 32, 32, 'M'],
+    "V": [64, "M"],
+    "VS": [32, "M"],
+    "W": [64, "M", 128, "M"],
+    "WS": [32, "M", 64, "M"],
+    "X": [64, "M", 128, "M", 256, "M"],
+    "XS": [32, "M", 64, "M", 128, "M"],
+    "Y": [64, "M", 128, "M", 256, "M", 512, "M"],
+    "YS": [32, "M", 64, "M", 128, "M", 256, "M"],
+    "YXS": [16, "M", 32, "M", 64, "M", 128, "M"],
+    "Z": [64, "M", 128, "M", 256, "M", 512, "M", 512, "M"],
+    "ZXS": [16, "M", 32, "M", 64, "M", 128, "M", 128, "M"],
+    "ZXXS": [8, "M", 16, "M", 32, "M", 64, "M", 64, "M"],
+    "ZXXXS": [4, "M", 8, "M", 16, "M", 32, "M", 32, "M"],
+    "ZS": [32, "M", 64, "M", 128, "M", 256, "M", 256, "M"],
+    "AS": [32, "M", 64, "M", 126, 126, "M", 256, 256, "M", 256, 256, "M"],
+    "AXS": [16, "M", 32, "M", 64, 64, "M", 128, 128, "M", 128, 128, "M"],
+    "AXXS": [8, "M", 16, "M", 32, 32, "M", 64, 64, "M", 64, 64, "M"],
+    "AXXXS": [4, "M", 8, "M", 16, 16, "M", 32, 32, "M", 32, 32, "M"],
+    "A": [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"],
+    "AL": [128, "M", 256, "M", 512, 512, "M", 1024, 1024, "M", 1024, 1024, "M"],
+    "BS": [32, 32, "M", 64, 64, "M", 128, 128, "M", 256, 256, "M", 256, 256, "M"],
+    "BXS": [16, 16, "M", 32, 32, "M", 64, 64, "M", 128, 128, "M", 128, 128, "M"],
+    "BXXS": [8, 8, "M", 16, 16, "M", 32, 32, "M", 64, 64, "M", 64, 64, "M"],
+    "BXXXS": [4, 4, "M", 8, 8, "M", 16, 16, "M", 32, 32, "M", 32, 32, "M"],
+    "B": [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512, "M"],
+    "DS": [
+        32,
+        32,
+        "M",
+        64,
+        64,
+        "M",
+        128,
+        128,
+        128,
+        "M",
+        256,
+        256,
+        256,
+        "M",
+        256,
+        256,
+        256,
+        "M",
+    ],
+    "DXXXS": [4, 4, "M", 8, 8, "M", 16, 16, 16, "M", 32, 32, 32, "M", 32, 32, 32, "M"],
+    "DXXS": [8, 8, "M", 16, 16, "M", 32, 32, 32, "M", 64, 64, 64, "M", 64, 64, 64, "M"],
+    "DXS": [
+        16,
+        16,
+        "M",
+        32,
+        32,
+        "M",
+        64,
+        64,
+        64,
+        "M",
+        128,
+        128,
+        128,
+        "M",
+        128,
+        128,
+        128,
+        "M",
+    ],
+    "D": [
+        64,
+        64,
+        "M",
+        128,
+        128,
+        "M",
+        256,
+        256,
+        256,
+        "M",
+        512,
+        512,
+        512,
+        "M",
+        512,
+        512,
+        512,
+        "M",
+    ],
+    "DL": [
+        128,
+        128,
+        "M",
+        256,
+        256,
+        "M",
+        512,
+        512,
+        512,
+        "M",
+        1024,
+        1024,
+        1024,
+        "M",
+        1024,
+        1024,
+        1024,
+        "M",
+    ],
+    "E": [
+        64,
+        64,
+        "M",
+        128,
+        128,
+        "M",
+        256,
+        256,
+        256,
+        256,
+        "M",
+        512,
+        512,
+        512,
+        512,
+        "M",
+        512,
+        512,
+        512,
+        512,
+        "M",
+    ],
+    "ES": [
+        32,
+        32,
+        "M",
+        64,
+        64,
+        "M",
+        128,
+        128,
+        128,
+        128,
+        "M",
+        256,
+        256,
+        256,
+        256,
+        "M",
+        256,
+        256,
+        256,
+        256,
+        "M",
+    ],
+    "EXS": [
+        16,
+        16,
+        "M",
+        32,
+        32,
+        "M",
+        64,
+        64,
+        64,
+        64,
+        "M",
+        128,
+        128,
+        128,
+        128,
+        "M",
+        128,
+        128,
+        128,
+        128,
+        "M",
+    ],
+    "EXXS": [
+        8,
+        8,
+        "M",
+        16,
+        16,
+        "M",
+        32,
+        32,
+        32,
+        32,
+        "M",
+        64,
+        64,
+        64,
+        64,
+        "M",
+        64,
+        64,
+        64,
+        64,
+        "M",
+    ],
+    "EXXXS": [
+        4,
+        4,
+        "M",
+        8,
+        8,
+        "M",
+        16,
+        16,
+        16,
+        16,
+        "M",
+        32,
+        32,
+        32,
+        32,
+        "M",
+        32,
+        32,
+        32,
+        32,
+        "M",
+    ],
 }
 
+
 def make_layers(cfg, batch_norm=True, k_size=3):
     layers = []
     in_channels = 3
     for v in cfg:
-        if v == 'M':
+        if v == "M":
             layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
         else:
-            conv2d = nn.Conv2d(in_channels, v, kernel_size=k_size, padding=k_size-2)
+            conv2d = nn.Conv2d(in_channels, v, kernel_size=k_size, padding=k_size - 2)
             if batch_norm:
                 layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
             else:
@@ -165,20 +364,27 @@ def make_layers(cfg, batch_norm=True, k_size=3):
         in_channels = v
     return nn.Sequential(*layers)
 
-class VGG(nn.Module):
-
-    def __init__(self, features, num_classes=10, init_weights=True, final_filter: int = 512, pretrained=False):
+
+class VGG(nn.Module):
+    def __init__(
+        self,
+        features,
+        num_classes=10,
+        init_weights=True,
+        final_filter: int = 512,
+        pretrained=False,
+    ):
         super(VGG, self).__init__()
         self.features = features
         self.avgpool = nn.AdaptiveAvgPool2d(1)
         self.classifier = nn.Sequential(
             nn.BatchNorm1d(final_filter),
             nn.Dropout(0.25),
-            nn.Linear(final_filter, final_filter//2),
+            nn.Linear(final_filter, final_filter // 2),
             nn.ReLU(True),
-            nn.BatchNorm1d(final_filter//2),
+            nn.BatchNorm1d(final_filter // 2),
             nn.Dropout(0.25),
-            nn.Linear(final_filter//2, num_classes)
+            nn.Linear(final_filter // 2, num_classes),
         )
         if init_weights:
             self._initialize_weights()
@@ -193,7 +399,7 @@ def forward(self, x):
     def _initialize_weights(self):
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
-                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                 if m.bias is not None:
                     nn.init.constant_(m.bias, 0)
             elif isinstance(m, nn.BatchNorm2d):
@@ -203,100 +409,112 @@ def _initialize_weights(self):
                 nn.init.normal_(m.weight, 0, 0.01)
                 nn.init.constant_(m.bias, 0)
 
+
 def vgg16(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['D']), **kwargs)
+    model = VGG(make_layers(cfg["D"]), **kwargs)
     return model
 
+
 def vgg16_L(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['DL']), **kwargs)
+    model = VGG(make_layers(cfg["DL"]), **kwargs)
     return model
 
+
 def vgg16_S(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['DS']), final_filter=256, **kwargs)
+    model = VGG(make_layers(cfg["DS"]), final_filter=256, **kwargs)
     return model
 
+
 def vgg16_XS(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['DXS']), final_filter=128, **kwargs)
+    model = VGG(make_layers(cfg["DXS"]), final_filter=128, **kwargs)
    return model
 
+
 def vgg16_XXS(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['DXXS']), final_filter=64, **kwargs)
+    model = VGG(make_layers(cfg["DXXS"]), final_filter=64, **kwargs)
     return model
 
+
 def vgg16_XXXS(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['DXXXS']), final_filter=32, **kwargs)
+    model = VGG(make_layers(cfg["DXXXS"]), final_filter=32, **kwargs)
     return model
 
+
 def vgg16_5(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['D'], k_size=5), **kwargs)
+    model = VGG(make_layers(cfg["D"], k_size=5), **kwargs)
     return model
 
+
 def vgg16_7(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['D'], k_size=7), **kwargs)
+    model = VGG(make_layers(cfg["D"], k_size=7), **kwargs)
     return model
 
+
 def vgg5(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['V']), final_filter=64, **kwargs)
+    model = VGG(make_layers(cfg["V"]), final_filter=64, **kwargs)
     return model
 
+
 def vgg5_S(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['VS']), final_filter=32, **kwargs)
+    model = VGG(make_layers(cfg["VS"]), final_filter=32, **kwargs)
     return model
 
+
 def vgg6(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['W']), final_filter=128, **kwargs)
+    model = VGG(make_layers(cfg["W"]), final_filter=128, **kwargs)
     return model
 
+
 def vgg6_S(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['WS']), final_filter=64, **kwargs)
+    model = VGG(make_layers(cfg["WS"]), final_filter=64, **kwargs)
     return model
 
@@ -305,86 +523,97 @@ def vgg7(*args, **kwargs):
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['X']), final_filter=256, **kwargs)
+    model = VGG(make_layers(cfg["X"]), final_filter=256, **kwargs)
     return model
 
+
 def vgg7_S(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['XS']), final_filter=128, **kwargs)
+    model = VGG(make_layers(cfg["XS"]), final_filter=128, **kwargs)
     return model
 
+
 def vgg8(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
     """
-    model = VGG(make_layers(cfg['Y']), **kwargs)
+    model = VGG(make_layers(cfg["Y"]), **kwargs)
     return model
 
+
 def vgg8_S(*args, **kwargs):
     """VGG 16-layer model (configuration "D")
 
     Args:
         pretrained (bool): If True, returns a model pre-trained on ImageNet
""" - model = VGG(make_layers(cfg['YS']), final_filter=256, **kwargs) + model = VGG(make_layers(cfg["YS"]), final_filter=256, **kwargs) return model + def vgg8_XS(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['YXS']), final_filter=128, **kwargs) + model = VGG(make_layers(cfg["YXS"]), final_filter=128, **kwargs) return model + def vgg9(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['Z']), **kwargs) + model = VGG(make_layers(cfg["Z"]), **kwargs) return model + def vgg9_S(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['ZS']), final_filter=256, **kwargs) + model = VGG(make_layers(cfg["ZS"]), final_filter=256, **kwargs) return model + def vgg9_XS(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['ZXS']), final_filter=128, **kwargs) + model = VGG(make_layers(cfg["ZXS"]), final_filter=128, **kwargs) return model + def vgg9_XXS(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['ZXXS']), final_filter=64, **kwargs) + model = VGG(make_layers(cfg["ZXXS"]), final_filter=64, **kwargs) return model + + def vgg9_5(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['Z'], k_size=5), **kwargs) + model = VGG(make_layers(cfg["Z"], k_size=5), **kwargs) return model + def vgg9_7(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['Z'], k_size=7), **kwargs) + model = VGG(make_layers(cfg["Z"], k_size=7), **kwargs) return model @@ -393,71 +622,79 @@ def vgg9(*args, **kwargs): Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['Z']), **kwargs) + model = VGG(make_layers(cfg["Z"]), **kwargs) return model + def vgg11(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['A']), **kwargs) + model = VGG(make_layers(cfg["A"]), **kwargs) return model + def vgg11_L(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['AL']), **kwargs) + model = VGG(make_layers(cfg["AL"]), **kwargs) return model + def vgg11_S(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['AS']), final_filter=256, **kwargs) + model = VGG(make_layers(cfg["AS"]), final_filter=256, **kwargs) return model + def vgg11_XS(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['AXS']), final_filter=128, **kwargs) + model = VGG(make_layers(cfg["AXS"]), final_filter=128, **kwargs) return model + def vgg11_XXS(*args, **kwargs): """VGG 
16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['AXXS']), final_filter=64, **kwargs) + model = VGG(make_layers(cfg["AXXS"]), final_filter=64, **kwargs) return model + def vgg11_XXXS(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['AXXXS']), final_filter=32, **kwargs) + model = VGG(make_layers(cfg["AXXXS"]), final_filter=32, **kwargs) return model + def vgg11_5(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['A'], k_size=5), **kwargs) + model = VGG(make_layers(cfg["A"], k_size=5), **kwargs) return model + def vgg11_7(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['A'], k_size=7), **kwargs) + model = VGG(make_layers(cfg["A"], k_size=7), **kwargs) return model @@ -466,39 +703,43 @@ def vgg11nbn(*args, **kwargs): Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['A'], batch_norm=False), **kwargs) + model = VGG(make_layers(cfg["A"], batch_norm=False), **kwargs) return model + def vgg11nbn_L(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['AL'], batch_norm=False), **kwargs) + model = VGG(make_layers(cfg["AL"], batch_norm=False), **kwargs) return model + def vgg11nbn_S(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['AS'], batch_norm=False), final_filter=256, **kwargs) + model = VGG(make_layers(cfg["AS"], batch_norm=False), final_filter=256, **kwargs) return model + def vgg11nbn_5(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['A'], k_size=5, batch_norm=False), **kwargs) + model = VGG(make_layers(cfg["A"], k_size=5, batch_norm=False), **kwargs) return model + def vgg11nbn_7(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['A'], k_size=7, batch_norm=False), **kwargs) + model = VGG(make_layers(cfg["A"], k_size=7, batch_norm=False), **kwargs) return model @@ -507,117 +748,131 @@ def vgg13(*args, **kwargs): Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['B']), **kwargs) + model = VGG(make_layers(cfg["B"]), **kwargs) return model + def vgg13_S(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['BS']), final_filter=256, **kwargs) + model = VGG(make_layers(cfg["BS"]), final_filter=256, **kwargs) return model + def vgg13_XS(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['BXS']), final_filter=128, **kwargs) + model = VGG(make_layers(cfg["BXS"]), final_filter=128, **kwargs) return model + def vgg13_XXS(*args, **kwargs): """VGG 16-layer 
model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['BXXS']), final_filter=64, **kwargs) + model = VGG(make_layers(cfg["BXXS"]), final_filter=64, **kwargs) return model + def vgg13_XXXS(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['BXXXS']), final_filter=32, **kwargs) + model = VGG(make_layers(cfg["BXXXS"]), final_filter=32, **kwargs) return model + def vgg13_5(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['B'], k_size=5), **kwargs) + model = VGG(make_layers(cfg["B"], k_size=5), **kwargs) return model + def vgg13_7(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['B'], k_size=7), **kwargs) + model = VGG(make_layers(cfg["B"], k_size=7), **kwargs) return model + def vgg19(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['E']), **kwargs) + model = VGG(make_layers(cfg["E"]), **kwargs) return model + def vgg19_S(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['ES']), final_filter=256, **kwargs) + model = VGG(make_layers(cfg["ES"]), final_filter=256, **kwargs) return model + def vgg19_XS(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['EXS']), final_filter=128, **kwargs) + model = VGG(make_layers(cfg["EXS"]), final_filter=128, **kwargs) return model + def vgg19_XXS(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['EXXS']), final_filter=64, **kwargs) + model = VGG(make_layers(cfg["EXXS"]), final_filter=64, **kwargs) return model + def vgg19_XXXS(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['EXXXS']), final_filter=32, **kwargs) + model = VGG(make_layers(cfg["EXXXS"]), final_filter=32, **kwargs) return model + def vgg19_5(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['E'], k_size=5), **kwargs) + model = VGG(make_layers(cfg["E"], k_size=5), **kwargs) return model + def vgg19_5(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['E'], k_size=5), **kwargs) + model = VGG(make_layers(cfg["E"], k_size=5), **kwargs) return model + def vgg19_7(*args, **kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ - model = VGG(make_layers(cfg['E'], k_size=7), **kwargs) - return model \ No newline at end of file + model = VGG(make_layers(cfg["E"], k_size=7), **kwargs) + return model
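A minimal sketch of how a patch like this can be reproduced or spot-checked, assuming black is installed (pip install black). format_str and FileMode are part of black's public Python API, though defaults drift between releases, so a modern black may not match this 2019 patch byte for byte:

    import black

    # One of the single-quoted lines this patch rewrites.
    src = "kwargs = {'num_workers': 3, 'pin_memory': False}\n"

    # black normalizes quotes and spacing, producing the "+" side of the diff.
    print(black.format_str(src, mode=black.FileMode()))
    # -> kwargs = {"num_workers": 3, "pin_memory": False}

Running black over the five files in the diffstat (black alternative_loaders.py csv_logger.py extract_history.py main_experiment.py models.py) regenerates changes of exactly this shape.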