diff --git a/Chan's LCB/LCB_Modified_Dynamic__00090000.ipynb b/Chan's LCB/LCB_Modified_Dynamic__00090000.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..a74a8c091e02a13082caf0f470d7d527fc73efc5 --- /dev/null +++ b/Chan's LCB/LCB_Modified_Dynamic__00090000.ipynb @@ -0,0 +1,1031 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "#1#Header\n", + "import csv\n", + "import numpy as np\n", + "import os \n", + "from os import urandom\n", + "from keras.models import model_from_json" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [], + "source": [ + "#2#Defining Global Variables\n", + "num_rounds = 20\n", + "m = 0\n", + "o = 0" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "#3#Defining WORDSIZE\n", + "def WORD_SIZE():\n", + " return(16);" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "#4#Defining S-Box\n", + "s_box_mapping_np = np.array([12, 5, 6, 11, 9, 0, 10, 13, 3, 14, 15, 8, 4, 7, 1, 2], dtype=np.uint8)\n", + "\n", + "def s_box(input_bits):\n", + " input_bits_int = int(input_bits)\n", + " output_bits_int = s_box_mapping_np[input_bits_int]\n", + " return output_bits_int" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "#5#Defining P-Box\n", + "def decimal_to_binary_list(value, num_bits=4):\n", + " return np.array([int(x) for x in format(value, f'0{num_bits}b')], dtype=np.uint8)\n", + "\n", + "def p_box(c_decimal, d_decimal):\n", + " c = decimal_to_binary_list(c_decimal)\n", + " d = decimal_to_binary_list(d_decimal)\n", + "\n", + " e = np.zeros(8, dtype=np.uint8)\n", + "\n", + " e[0] = c[0]\n", + " e[1] = d[0]\n", + " e[2] = c[3]\n", + " e[3] = d[3]\n", + " e[4] = c[1]\n", + " e[5] = d[1]\n", + " e[6] = c[2]\n", + " e[7] = d[2]\n", + "\n", + " return e" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "#6#Defining L-Box\n", + "def l_box(f, g):\n", + " if len(f) != 8 or len(g) != 8:\n", + " raise ValueError(\"Both input arrays f and g should have exactly 8 elements\")\n", + "\n", + " h = np.zeros(16, dtype=np.uint8)\n", + " h[0] = f[0]\n", + " h[1] = g[0]\n", + " h[2] = f[7]\n", + " h[3] = g[7]\n", + " h[4] = f[1]\n", + " h[5] = g[1]\n", + " h[6] = f[6]\n", + " h[7] = g[6]\n", + " h[8] = f[2]\n", + " h[9] = g[2]\n", + " h[10] = f[5]\n", + " h[11] = g[5]\n", + " h[12] = f[3]\n", + " h[13] = g[3]\n", + " h[14] = f[4]\n", + " h[15] = g[4]\n", + " #print(h)\n", + " return h" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "#7#Defining F-function for Right Side of Plaintext\n", + "def binary_array_to_integer(output):\n", + " int_output = ''.join(map(str, output))\n", + " return int(int_output, 2)\n", + "\n", + "def f_function(x, key, d):\n", + " q=0\n", + " global m\n", + " if isinstance(x, int):\n", + " x = [x]\n", + " input_parts = np.zeros((len(x), 4), dtype=np.uint16)\n", + " for i, val in enumerate(x):\n", + " input_parts[i] = np.array([val >> 12, (val >> 8) & 0xF, (val >> 4) & 0xF, val & 0xF])\n", + " \n", + " s_box_outputs = np.array([[s_box(element) for element in part] for part in input_parts])\n", + " p_box_outputs = np.zeros((len(x), 2, 8), dtype=np.uint8)\n", + " for i in range(len(x)):\n", + " p_box_outputs[i] = 
np.array([p_box(s_box_outputs[i][0], s_box_outputs[i][1]), p_box(s_box_outputs[i][2], s_box_outputs[i][3])])\n", + " \n", + " final_outputs = np.zeros(len(x), dtype=np.uint32)\n", + " for i in range(len(x)):\n", + " final_output = np.array(l_box(p_box_outputs[i][0], p_box_outputs[i][1]))\n", + " k = key[q][(m+1) % 4]\n", + " output = final_output ^ k\n", + " output = binary_array_to_integer(output)\n", + " final_outputs[i] = output\n", + " q +=1 \n", + " if (m < 2):\n", + " m +=2\n", + " else:\n", + " m = 0\n", + " return final_outputs" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "#8#Defining F-function for Left Side of Plaintext\n", + "def binary_array_to_integer(output):\n", + " int_output = ''.join(map(str, output))\n", + " return int(int_output, 2)\n", + "\n", + "def ff_function(x, key, d):\n", + " q=0\n", + " global o\n", + " if isinstance(x, int):\n", + " x = [x]\n", + " \n", + " input_parts = np.zeros((len(x), 4), dtype=np.uint16)\n", + " for i, val in enumerate(x):\n", + " input_parts[i] = np.array([val >> 12, (val >> 8) & 0xF, (val >> 4) & 0xF, val & 0xF])\n", + " \n", + " s_box_outputs = np.array([[s_box(element) for element in part] for part in input_parts])\n", + " p_box_outputs = np.zeros((len(x), 2, 8), dtype=np.uint8)\n", + " for i in range(len(x)):\n", + " p_box_outputs[i] = np.array([p_box(s_box_outputs[i][0], s_box_outputs[i][1]), p_box(s_box_outputs[i][2], s_box_outputs[i][3])])\n", + " \n", + " final_outputs = np.zeros(len(x), dtype=np.uint32)\n", + " for i in range(len(x)):\n", + " final_output = np.array(l_box(p_box_outputs[i][0], p_box_outputs[i][1]))\n", + " k = key[q][o % 4]\n", + " output = final_output ^ k\n", + " output = binary_array_to_integer(output)\n", + " final_outputs[i] = output\n", + " q +=1 \n", + " if (o < 2):\n", + " o +=2\n", + " else:\n", + " o = 0\n", + " return final_outputs" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "#9#Convert the ciphertext pairs into a binary array\n", + "def convert_to_binary(row):\n", + " bin_array = np.zeros(64, dtype=np.uint8)\n", + " for i, num in enumerate(row):\n", + " binary_str = format(num, '016b')\n", + " for j, b in enumerate(binary_str):\n", + " bin_array[i * 16 + j] = int(b)\n", + " return bin_array" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "#10#Encryption Function\n", + "def lcb_encrypt(plaintext, key, rounds, d):\n", + " \n", + " left_plaintext = np.uint16(plaintext[0])\n", + " right_plaintext = np.uint16(plaintext[1])\n", + " L, R = left_plaintext, right_plaintext\n", + "\n", + " n = 0\n", + " \n", + " while n < rounds:\n", + " L, R = f_function(R, key, d), ff_function(L, key, d)\n", + " n += 1\n", + " \n", + " return (L, R)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "#11#Function for Key Generation\n", + "import random\n", + "\n", + "def generate_hex_keys(num_keys, length=16):\n", + " hex_chars = \"0123456789ABCDEF\"\n", + " keys_str = [\"\".join(random.choices(hex_chars, k=length)) for _ in range(num_keys)]\n", + "\n", + " return keys_str\n", + "\n", + "\n", + "def to_binary(value, bits):\n", + " return format(value, f'0{bits}b')\n", + "\n", + "def generate_round_keys(num_keys):\n", + " random_keys_hex = generate_hex_keys(num_keys)\n", + " round_keys = []\n", + " \n", + " for random_key_hex in random_keys_hex:\n", + " random_key = 
int(random_key_hex, 16)\n", + "\n", + " K1 = (random_key >> 48) & 0xFFFF\n", + " K2 = (random_key >> 32) & 0xFFFF\n", + " K3 = (random_key >> 16) & 0xFFFF\n", + " K4 = random_key & 0xFFFF\n", + " \n", + " k1_bin = to_binary(K1, 16)\n", + " k2_bin = to_binary(K2, 16)\n", + " k3_bin = to_binary(K3, 16)\n", + " k4_bin = to_binary(K4, 16)\n", + "\n", + " k1_np_array = np.array([int(bit) for bit in k1_bin])\n", + " k2_np_array = np.array([int(bit) for bit in k2_bin])\n", + " k3_np_array = np.array([int(bit) for bit in k3_bin])\n", + " k4_np_array = np.array([int(bit) for bit in k4_bin])\n", + "\n", + " round_key = np.array([k1_np_array, k2_np_array, k3_np_array, k4_np_array])\n", + " round_keys.append(round_key)\n", + " round_key = np.array(round_keys)\n", + " \n", + " return round_key" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [], + "source": [ + "#12#Make dataset\n", + "\n", + "def make_train_data(n, nr, diff=(0x0009,0)):\n", + " Y = np.frombuffer(urandom(n), dtype=np.uint8); \n", + " Y = Y & 1;\n", + " plaintext = np.frombuffer(urandom(4*n), dtype=np.uint32);\n", + " plain0l = np.empty(n, dtype=np.uint16)\n", + " plain0r = np.empty(n, dtype=np.uint16)\n", + " \n", + " for i in range(n):\n", + " plain0l[i] = (plaintext[i] >> 16) & 0xffff\n", + " plain0r[i] = plaintext[i] & 0xffff\n", + " \n", + " plain1l = plain0l ^ diff[0]; plain1r = plain0r ^ diff[1];\n", + " \n", + " num_rand_samples = np.sum(Y==0);\n", + " plain1l[Y==0] = np.frombuffer(urandom(2*num_rand_samples),dtype=np.uint16);\n", + " plain1r[Y==0] = np.frombuffer(urandom(2*num_rand_samples),dtype=np.uint16);\n", + " \n", + " round_key = generate_round_keys(n)\n", + " \n", + " ctdata0l, ctdata0r = lcb_encrypt((plain0l, plain0r), round_key, nr, n)\n", + " ctdata1l, ctdata1r = lcb_encrypt((plain1l, plain1r), round_key, nr, n)\n", + "\n", + " ctdata = np.vstack((ctdata0l, ctdata0r, ctdata1l, ctdata1r)).T\n", + " X = np.array([convert_to_binary(row) for row in ctdata])\n", + " \n", + " \"\"\"\n", + " with open(\"Dataset_NewP.csv\", \"w\", newline='') as f:\n", + " writer = csv.writer(f)\n", + " writer.writerow([\"plain0l\", \"plain0r\", \"plain1l\", \"plain1r\",\"Y\"])\n", + " for i in range(n):\n", + " writer.writerow([plain0l[i], plain0r[i], plain1l[i], plain1r[i],Y[i]])\n", + "\n", + " with open(\"Dataset_NewC.csv\", \"w\", newline='') as f:\n", + " writer = csv.writer(f)\n", + " writer.writerow([\"ctdata0l\", \"ctdata0r\", \"ctdata1l\", \"ctdata1r\",\"Y\"])\n", + " for i in range(n):\n", + " writer.writerow([ctdata0l[i], ctdata0r[i], ctdata1l[i], ctdata1r[i],Y[i]])\n", + " \"\"\"\n", + " return(X,Y);" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(array([[0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0,\n", + " 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0,\n", + " 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0],\n", + " [0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n", + " 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0,\n", + " 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1],\n", + " [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1,\n", + " 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1,\n", + " 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0],\n", + " [1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1,\n", + " 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 
1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0,\n", + " 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],\n", + " [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1,\n", + " 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0,\n", + " 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1],\n", + " [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0,\n", + " 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0,\n", + " 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0],\n", + " [1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,\n", + " 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1],\n", + " [1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1,\n", + " 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0,\n", + " 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0],\n", + " [1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0,\n", + " 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,\n", + " 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n", + " [0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,\n", + " 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0,\n", + " 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1]],\n", + " dtype=uint8),\n", + " array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0], dtype=uint8))" + ] + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "make_train_data(10,10)" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "#13#Creation of Model\n", + "\n", + "from pickle import dump\n", + "\n", + "from keras.callbacks import ModelCheckpoint, LearningRateScheduler\n", + "from keras.models import Model\n", + "from keras.optimizers import Adam\n", + "from keras.layers import Dense, Conv1D, Input, Reshape, Permute, Add, Flatten, BatchNormalization, Activation\n", + "from keras import backend as K\n", + "from keras.regularizers import l2\n", + "\n", + "bs = 5000;\n", + "wdir = './freshly_trained_nets/'\n", + "\n", + "def cyclic_lr(num_epochs, high_lr, low_lr):\n", + " res = lambda i: low_lr + ((num_epochs-1) - i % num_epochs)/(num_epochs-1) * (high_lr - low_lr);\n", + " return(res);\n", + "\n", + "def make_checkpoint(datei):\n", + " res = ModelCheckpoint(datei, monitor='val_loss', save_best_only = True);\n", + " return(res);\n", + "\n", + "#make residual tower of convolutional blocks\n", + "def make_resnet(num_blocks=2, num_filters=32, num_outputs=1, d1=64, d2=64, word_size=16, ks=3,depth=5, reg_param=0.0001, final_activation='sigmoid'):\n", + " #Input and preprocessing layers\n", + " inp = Input(shape=(num_blocks * word_size * 2,));\n", + " rs = Reshape((2 * num_blocks, word_size))(inp);\n", + " perm = Permute((2,1))(rs);\n", + " #add a single residual layer that will expand the data to num_filters channels\n", + " #this is a bit-sliced layer\n", + " conv0 = Conv1D(num_filters, kernel_size=1, padding='same', kernel_regularizer=l2(reg_param))(perm);\n", + " conv0 = BatchNormalization()(conv0);\n", + " conv0 = Activation('relu')(conv0);\n", + " #add residual blocks\n", + " shortcut = conv0;\n", + " for i in range(depth):\n", + " conv1 = Conv1D(num_filters, kernel_size=ks, padding='same', kernel_regularizer=l2(reg_param))(shortcut);\n", + " conv1 = BatchNormalization()(conv1);\n", + " conv1 = 
Activation('relu')(conv1);\n", + " conv2 = Conv1D(num_filters, kernel_size=ks, padding='same',kernel_regularizer=l2(reg_param))(conv1);\n", + " conv2 = BatchNormalization()(conv2);\n", + " conv2 = Activation('relu')(conv2);\n", + " shortcut = Add()([shortcut, conv2]);\n", + " #add prediction head\n", + " flat1 = Flatten()(shortcut);\n", + " dense1 = Dense(d1,kernel_regularizer=l2(reg_param))(flat1);\n", + " dense1 = BatchNormalization()(dense1);\n", + " dense1 = Activation('relu')(dense1);\n", + " dense2 = Dense(d2, kernel_regularizer=l2(reg_param))(dense1);\n", + " dense2 = BatchNormalization()(dense2);\n", + " dense2 = Activation('relu')(dense2);\n", + " out = Dense(num_outputs, activation=final_activation, kernel_regularizer=l2(reg_param))(dense2);\n", + " model = Model(inputs=inp, outputs=out);\n", + " return(model);\n", + "\n", + "def train_LCB_distinguisher(num_epochs, num_rounds, depth):\n", + " #create the network\n", + " print(num_rounds)\n", + " print(depth)\n", + " net = make_resnet(depth=depth, reg_param=10**-5);\n", + " net.compile(optimizer='adam',loss='mse',metrics=['acc']);\n", + " #generate training and validation data\n", + " X, Y = make_train_data(10**6,num_rounds);\n", + " X_eval, Y_eval = make_train_data(10**5, num_rounds);\n", + " #set up model checkpoint\n", + " check = make_checkpoint(wdir+'ghor_Rk_0009_0000_Round_'+str(num_rounds)+'_depth_'+str(depth)+'.h5');\n", + " #create learning rate schedule\n", + " lr = LearningRateScheduler(cyclic_lr(10,0.002, 0.0001));\n", + " #train and evaluate\n", + " #print(X_eval)\n", + " h = net.fit(X,Y,epochs=num_epochs,batch_size=bs,validation_data=(X_eval, Y_eval), callbacks=[lr,check]);\n", + " #save validation accuracy and loss histories under distinct filenames\n", + " np.save(wdir+'h'+str(num_rounds)+'r_depth'+str(depth)+'_val_acc.npy', h.history['val_acc']);\n", + " np.save(wdir+'h'+str(num_rounds)+'r_depth'+str(depth)+'_val_loss.npy', h.history['val_loss']);\n", + " dump(h.history,open(wdir+'hist'+str(num_rounds)+'r_depth'+str(depth)+'.p','wb'));\n", + " print(\"Best validation accuracy: \", np.max(h.history['val_acc']));\n", + " return(net, h);\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "20\n", + "10\n", + "Epoch 1/200\n", + "200/200 [==============================] - 187s 917ms/step - loss: 0.0136 - acc: 0.9922 - val_loss: 0.4953 - val_acc: 0.5009 - lr: 0.0020\n", + "Epoch 2/200\n", + "200/200 [==============================] - 182s 911ms/step - loss: 0.0067 - acc: 1.0000 - val_loss: 0.0608 - val_acc: 0.9279 - lr: 0.0018\n", + "Epoch 3/200\n", + "200/200 [==============================] - 181s 906ms/step - loss: 0.0052 - acc: 1.0000 - val_loss: 0.0241 - val_acc: 0.9817 - lr: 0.0016\n", + "Epoch 4/200\n", + "200/200 [==============================] - 181s 908ms/step - loss: 0.0041 - acc: 1.0000 - val_loss: 0.0061 - val_acc: 0.9997 - lr: 0.0014\n", + "Epoch 5/200\n", + "200/200 [==============================] - 181s 907ms/step - loss: 0.0032 - acc: 1.0000 - val_loss: 0.0084 - val_acc: 0.9984 - lr: 0.0012\n", + "Epoch 6/200\n", + "200/200 [==============================] - 181s 907ms/step - loss: 0.0026 - acc: 1.0000 - val_loss: 0.0040 - val_acc: 0.9999 - lr: 9.4444e-04\n", + "Epoch 7/200\n", + "200/200 [==============================] - 182s 911ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0025 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 8/200\n", + "200/200 [==============================] - 182s 908ms/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 
5.2222e-04\n", + "Epoch 9/200\n", + "200/200 [==============================] - 184s 920ms/step - loss: 0.0017 - acc: 1.0000 - val_loss: 0.0017 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 10/200\n", + "200/200 [==============================] - 182s 911ms/step - loss: 0.0016 - acc: 1.0000 - val_loss: 0.0016 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 11/200\n", + "200/200 [==============================] - 180s 901ms/step - loss: 0.0028 - acc: 0.9993 - val_loss: 0.0196 - val_acc: 0.9782 - lr: 0.0020\n", + "Epoch 12/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 0.0025 - acc: 1.0000 - val_loss: 0.0033 - val_acc: 1.0000 - lr: 0.0018\n", + "Epoch 13/200\n", + "200/200 [==============================] - 181s 903ms/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0310 - val_acc: 0.9963 - lr: 0.0016\n", + "Epoch 14/200\n", + "200/200 [==============================] - 181s 903ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0087 - val_acc: 1.0000 - lr: 0.0014\n", + "Epoch 15/200\n", + "200/200 [==============================] - 180s 898ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0022 - val_acc: 1.0000 - lr: 0.0012\n", + "Epoch 16/200\n", + "200/200 [==============================] - 180s 901ms/step - loss: 0.0011 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 9.4444e-04\n", + "Epoch 17/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 9.7560e-04 - acc: 1.0000 - val_loss: 0.0016 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 18/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 8.8757e-04 - acc: 1.0000 - val_loss: 8.5963e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 19/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 8.3019e-04 - acc: 1.0000 - val_loss: 8.0374e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 20/200\n", + "200/200 [==============================] - 181s 905ms/step - loss: 8.0170e-04 - acc: 1.0000 - val_loss: 7.8842e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 21/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 6.6720e-04 - acc: 1.0000 - val_loss: 0.4827 - val_acc: 0.5009 - lr: 0.0020\n", + "Epoch 22/200\n", + "200/200 [==============================] - 180s 900ms/step - loss: 6.3044e-04 - acc: 0.9999 - val_loss: 0.4824 - val_acc: 0.5182 - lr: 0.0018\n", + "Epoch 23/200\n", + "200/200 [==============================] - 180s 900ms/step - loss: 0.0014 - acc: 1.0000 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 0.0016\n", + "Epoch 24/200\n", + "200/200 [==============================] - 180s 898ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 0.0038 - val_acc: 1.0000 - lr: 0.0014\n", + "Epoch 25/200\n", + "200/200 [==============================] - 180s 898ms/step - loss: 7.6366e-04 - acc: 1.0000 - val_loss: 0.0119 - val_acc: 1.0000 - lr: 0.0012\n", + "Epoch 26/200\n", + "200/200 [==============================] - 184s 919ms/step - loss: 6.1937e-04 - acc: 1.0000 - val_loss: 0.0047 - val_acc: 1.0000 - lr: 9.4444e-04\n", + "Epoch 27/200\n", + "200/200 [==============================] - 182s 910ms/step - loss: 5.2626e-04 - acc: 1.0000 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 28/200\n", + "200/200 [==============================] - 181s 905ms/step - loss: 4.6562e-04 - acc: 1.0000 - val_loss: 6.1518e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 29/200\n", + "200/200 [==============================] - 180s 901ms/step - loss: 4.2823e-04 - acc: 1.0000 - val_loss: 4.3905e-04 - 
val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 30/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 4.0991e-04 - acc: 1.0000 - val_loss: 4.0107e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 31/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 8.3233e-04 - acc: 0.9998 - val_loss: 0.0076 - val_acc: 0.9922 - lr: 0.0020\n", + "Epoch 32/200\n", + "200/200 [==============================] - 181s 903ms/step - loss: 8.1173e-04 - acc: 1.0000 - val_loss: 0.4859 - val_acc: 0.5009 - lr: 0.0018\n", + "Epoch 33/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 5.3618e-04 - acc: 1.0000 - val_loss: 0.4987 - val_acc: 0.5009 - lr: 0.0016\n", + "Epoch 34/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 0.0013 - acc: 0.9998 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 0.0014\n", + "Epoch 35/200\n", + "200/200 [==============================] - 181s 906ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 0.4449 - val_acc: 0.5012 - lr: 0.0012\n", + "Epoch 36/200\n", + "200/200 [==============================] - 185s 923ms/step - loss: 8.5272e-04 - acc: 1.0000 - val_loss: 0.4265 - val_acc: 0.5010 - lr: 9.4444e-04\n", + "Epoch 37/200\n", + "200/200 [==============================] - 181s 903ms/step - loss: 7.3529e-04 - acc: 1.0000 - val_loss: 0.3936 - val_acc: 0.5115 - lr: 7.3333e-04\n", + "Epoch 38/200\n", + "200/200 [==============================] - 186s 932ms/step - loss: 6.6209e-04 - acc: 1.0000 - val_loss: 0.0399 - val_acc: 0.9732 - lr: 5.2222e-04\n", + "Epoch 39/200\n", + "200/200 [==============================] - 182s 909ms/step - loss: 6.1525e-04 - acc: 1.0000 - val_loss: 6.2619e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 40/200\n", + "200/200 [==============================] - 182s 908ms/step - loss: 5.9258e-04 - acc: 1.0000 - val_loss: 5.8337e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 41/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 7.4854e-04 - acc: 1.0000 - val_loss: 7.5987e-04 - val_acc: 0.9999 - lr: 0.0020\n", + "Epoch 42/200\n", + "200/200 [==============================] - 181s 906ms/step - loss: 5.8856e-04 - acc: 1.0000 - val_loss: 5.6850e-04 - val_acc: 1.0000 - lr: 0.0018\n", + "Epoch 43/200\n", + "200/200 [==============================] - 185s 927ms/step - loss: 4.2083e-04 - acc: 1.0000 - val_loss: 0.4435 - val_acc: 0.5009 - lr: 0.0016\n", + "Epoch 44/200\n", + "200/200 [==============================] - 183s 913ms/step - loss: 3.1822e-04 - acc: 1.0000 - val_loss: 0.4967 - val_acc: 0.5009 - lr: 0.0014\n", + "Epoch 45/200\n", + "200/200 [==============================] - 186s 933ms/step - loss: 2.5712e-04 - acc: 1.0000 - val_loss: 0.3924 - val_acc: 0.5044 - lr: 0.0012\n", + "Epoch 46/200\n", + "200/200 [==============================] - 182s 910ms/step - loss: 2.1630e-04 - acc: 1.0000 - val_loss: 0.1669 - val_acc: 0.6991 - lr: 9.4444e-04\n", + "Epoch 47/200\n", + "200/200 [==============================] - 184s 919ms/step - loss: 8.7610e-04 - acc: 0.9993 - val_loss: 0.3860 - val_acc: 0.6121 - lr: 7.3333e-04\n", + "Epoch 48/200\n", + "200/200 [==============================] - 184s 921ms/step - loss: 6.3017e-04 - acc: 1.0000 - val_loss: 6.5010e-04 - val_acc: 0.9999 - lr: 5.2222e-04\n", + "Epoch 49/200\n", + "200/200 [==============================] - 182s 910ms/step - loss: 5.9965e-04 - acc: 1.0000 - val_loss: 5.9395e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 50/200\n", + "200/200 [==============================] - 182s 
908ms/step - loss: 5.8631e-04 - acc: 1.0000 - val_loss: 5.8630e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 51/200\n", + "200/200 [==============================] - 181s 905ms/step - loss: 5.2949e-04 - acc: 1.0000 - val_loss: 0.0036 - val_acc: 1.0000 - lr: 0.0020\n", + "Epoch 52/200\n", + "200/200 [==============================] - 181s 905ms/step - loss: 4.4946e-04 - acc: 1.0000 - val_loss: 0.1455 - val_acc: 0.7500 - lr: 0.0018\n", + "Epoch 53/200\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "200/200 [==============================] - 181s 905ms/step - loss: 3.9248e-04 - acc: 1.0000 - val_loss: 0.2130 - val_acc: 0.6466 - lr: 0.0016\n", + "Epoch 54/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 3.5169e-04 - acc: 1.0000 - val_loss: 0.0026 - val_acc: 1.0000 - lr: 0.0014\n", + "Epoch 55/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 3.2237e-04 - acc: 1.0000 - val_loss: 4.2931e-04 - val_acc: 1.0000 - lr: 0.0012\n", + "Epoch 56/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 2.9846e-04 - acc: 1.0000 - val_loss: 7.9015e-04 - val_acc: 1.0000 - lr: 9.4444e-04\n", + "Epoch 57/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 2.8065e-04 - acc: 1.0000 - val_loss: 3.5286e-04 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 58/200\n", + "200/200 [==============================] - 180s 901ms/step - loss: 2.6776e-04 - acc: 1.0000 - val_loss: 2.9611e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 59/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 2.5907e-04 - acc: 1.0000 - val_loss: 2.6061e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 60/200\n", + "200/200 [==============================] - 183s 917ms/step - loss: 2.5460e-04 - acc: 1.0000 - val_loss: 2.5688e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 61/200\n", + "200/200 [==============================] - 188s 942ms/step - loss: 0.0031 - acc: 0.9990 - val_loss: 0.0033 - val_acc: 0.9991 - lr: 0.0020\n", + "Epoch 62/200\n", + "200/200 [==============================] - 335s 2s/step - loss: 0.0025 - acc: 1.0000 - val_loss: 0.0024 - val_acc: 1.0000 - lr: 0.0018\n", + "Epoch 63/200\n", + "200/200 [==============================] - 245s 1s/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0075 - val_acc: 0.9994 - lr: 0.0016\n", + "Epoch 64/200\n", + "200/200 [==============================] - 237s 1s/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0134 - val_acc: 0.9949 - lr: 0.0014\n", + "Epoch 65/200\n", + "200/200 [==============================] - 235s 1s/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0035 - val_acc: 0.9999 - lr: 0.0012\n", + "Epoch 66/200\n", + "200/200 [==============================] - 230s 1s/step - loss: 0.0018 - acc: 1.0000 - val_loss: 0.0022 - val_acc: 1.0000 - lr: 9.4444e-04\n", + "Epoch 67/200\n", + "200/200 [==============================] - 341s 2s/step - loss: 0.0017 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 68/200\n", + "200/200 [==============================] - 181s 903ms/step - loss: 0.0016 - acc: 1.0000 - val_loss: 0.0016 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 69/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 0.0016 - acc: 1.0000 - val_loss: 0.0016 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 70/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 0.0016 - acc: 1.0000 - val_loss: 0.0016 - val_acc: 1.0000 - lr: 
1.0000e-04\n", + "Epoch 71/200\n", + "200/200 [==============================] - 180s 900ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.4930 - val_acc: 0.5009 - lr: 0.0020\n", + "Epoch 72/200\n", + "200/200 [==============================] - 181s 903ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.3443 - val_acc: 0.5019 - lr: 0.0018\n", + "Epoch 73/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 0.0011 - acc: 1.0000 - val_loss: 0.4989 - val_acc: 0.5009 - lr: 0.0016\n", + "Epoch 74/200\n", + "200/200 [==============================] - 180s 899ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 0.0447 - val_acc: 0.9846 - lr: 0.0014\n", + "Epoch 75/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 9.6001e-04 - acc: 1.0000 - val_loss: 0.0436 - val_acc: 0.9851 - lr: 0.0012\n", + "Epoch 76/200\n", + "200/200 [==============================] - 181s 903ms/step - loss: 8.9517e-04 - acc: 1.0000 - val_loss: 0.0058 - val_acc: 1.0000 - lr: 9.4444e-04\n", + "Epoch 77/200\n", + "200/200 [==============================] - 180s 900ms/step - loss: 8.4502e-04 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 78/200\n", + "200/200 [==============================] - 186s 933ms/step - loss: 8.0784e-04 - acc: 1.0000 - val_loss: 7.9338e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 79/200\n", + "200/200 [==============================] - 191s 956ms/step - loss: 7.8263e-04 - acc: 1.0000 - val_loss: 7.8492e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 80/200\n", + "200/200 [==============================] - 200s 1s/step - loss: 7.6964e-04 - acc: 1.0000 - val_loss: 7.6086e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 81/200\n", + "200/200 [==============================] - 197s 986ms/step - loss: 0.0029 - acc: 0.9990 - val_loss: 0.0057 - val_acc: 0.9962 - lr: 0.0020\n", + "Epoch 82/200\n", + "200/200 [==============================] - 196s 979ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0023 - val_acc: 1.0000 - lr: 0.0018\n", + "Epoch 83/200\n", + "200/200 [==============================] - 186s 928ms/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0403 - val_acc: 0.9864 - lr: 0.0016\n", + "Epoch 84/200\n", + "200/200 [==============================] - 180s 900ms/step - loss: 0.0016 - acc: 1.0000 - val_loss: 0.0080 - val_acc: 0.9999 - lr: 0.0014\n", + "Epoch 85/200\n", + "200/200 [==============================] - 181s 906ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0035 - val_acc: 0.9999 - lr: 0.0012\n", + "Epoch 86/200\n", + "200/200 [==============================] - 180s 899ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0092 - val_acc: 0.9990 - lr: 9.4444e-04\n", + "Epoch 87/200\n", + "200/200 [==============================] - 179s 897ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 88/200\n", + "200/200 [==============================] - 179s 896ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0086 - val_acc: 0.9991 - lr: 5.2222e-04\n", + "Epoch 89/200\n", + "200/200 [==============================] - 179s 896ms/step - loss: 0.0011 - acc: 1.0000 - val_loss: 0.0011 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 90/200\n", + "200/200 [==============================] - 179s 894ms/step - loss: 0.0011 - acc: 1.0000 - val_loss: 0.0011 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 91/200\n", + "200/200 [==============================] - 179s 896ms/step - loss: 9.8396e-04 - acc: 1.0000 - val_loss: 0.3802 - val_acc: 0.5009 - lr: 
0.0020\n", + "Epoch 92/200\n", + "200/200 [==============================] - 179s 896ms/step - loss: 8.0579e-04 - acc: 1.0000 - val_loss: 0.4646 - val_acc: 0.5009 - lr: 0.0018\n", + "Epoch 93/200\n", + "200/200 [==============================] - 180s 899ms/step - loss: 6.8073e-04 - acc: 1.0000 - val_loss: 0.4997 - val_acc: 0.5009 - lr: 0.0016\n", + "Epoch 94/200\n", + "200/200 [==============================] - 179s 897ms/step - loss: 6.6352e-04 - acc: 1.0000 - val_loss: 0.0204 - val_acc: 1.0000 - lr: 0.0014\n", + "Epoch 95/200\n", + "200/200 [==============================] - 179s 897ms/step - loss: 5.5704e-04 - acc: 1.0000 - val_loss: 0.4337 - val_acc: 0.5009 - lr: 0.0012\n", + "Epoch 96/200\n", + "200/200 [==============================] - 179s 896ms/step - loss: 4.9229e-04 - acc: 1.0000 - val_loss: 0.0383 - val_acc: 0.9947 - lr: 9.4444e-04\n", + "Epoch 97/200\n", + "200/200 [==============================] - 179s 897ms/step - loss: 4.4810e-04 - acc: 1.0000 - val_loss: 0.0022 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 98/200\n", + "200/200 [==============================] - 179s 896ms/step - loss: 4.1782e-04 - acc: 1.0000 - val_loss: 0.0010 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 99/200\n", + "200/200 [==============================] - 180s 898ms/step - loss: 3.9800e-04 - acc: 1.0000 - val_loss: 4.0696e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 100/200\n", + "200/200 [==============================] - 179s 895ms/step - loss: 3.8809e-04 - acc: 1.0000 - val_loss: 3.8087e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 101/200\n", + "200/200 [==============================] - 179s 897ms/step - loss: 7.7288e-04 - acc: 1.0000 - val_loss: 0.0069 - val_acc: 0.9924 - lr: 0.0020\n", + "Epoch 102/200\n", + "200/200 [==============================] - 180s 899ms/step - loss: 7.6231e-04 - acc: 1.0000 - val_loss: 0.4987 - val_acc: 0.5009 - lr: 0.0018\n", + "Epoch 103/200\n", + "200/200 [==============================] - 179s 896ms/step - loss: 4.9780e-04 - acc: 1.0000 - val_loss: 0.4994 - val_acc: 0.5009 - lr: 0.0016\n", + "Epoch 104/200\n", + "200/200 [==============================] - 180s 898ms/step - loss: 3.8039e-04 - acc: 1.0000 - val_loss: 0.4990 - val_acc: 0.5009 - lr: 0.0014\n", + "Epoch 105/200\n", + "200/200 [==============================] - 179s 897ms/step - loss: 3.2063e-04 - acc: 1.0000 - val_loss: 0.4955 - val_acc: 0.5009 - lr: 0.0012\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 106/200\n", + "200/200 [==============================] - 179s 897ms/step - loss: 2.6720e-04 - acc: 1.0000 - val_loss: 0.4970 - val_acc: 0.5009 - lr: 9.4444e-04\n", + "Epoch 107/200\n", + "200/200 [==============================] - 180s 900ms/step - loss: 2.2903e-04 - acc: 1.0000 - val_loss: 0.4908 - val_acc: 0.5009 - lr: 7.3333e-04\n", + "Epoch 108/200\n", + "200/200 [==============================] - 180s 899ms/step - loss: 2.0587e-04 - acc: 1.0000 - val_loss: 0.1540 - val_acc: 0.6001 - lr: 5.2222e-04\n", + "Epoch 109/200\n", + "200/200 [==============================] - 180s 899ms/step - loss: 1.9189e-04 - acc: 1.0000 - val_loss: 2.3968e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 110/200\n", + "200/200 [==============================] - 180s 900ms/step - loss: 1.8512e-04 - acc: 1.0000 - val_loss: 1.7941e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 111/200\n", + "200/200 [==============================] - 179s 896ms/step - loss: 0.0020 - acc: 0.9988 - val_loss: 0.0618 - val_acc: 0.9248 - lr: 0.0020\n", + "Epoch 112/200\n", + 
"200/200 [==============================] - 179s 897ms/step - loss: 0.0029 - acc: 1.0000 - val_loss: 0.0025 - val_acc: 1.0000 - lr: 0.0018\n", + "Epoch 113/200\n", + "200/200 [==============================] - 179s 896ms/step - loss: 0.0023 - acc: 1.0000 - val_loss: 0.0040 - val_acc: 1.0000 - lr: 0.0016\n", + "Epoch 114/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0051 - val_acc: 1.0000 - lr: 0.0014\n", + "Epoch 115/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 0.0017 - acc: 1.0000 - val_loss: 0.0561 - val_acc: 0.9596 - lr: 0.0012\n", + "Epoch 116/200\n", + "200/200 [==============================] - 180s 900ms/step - loss: 0.0016 - acc: 1.0000 - val_loss: 0.0060 - val_acc: 0.9999 - lr: 9.4444e-04\n", + "Epoch 117/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 118/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 0.0014 - acc: 1.0000 - val_loss: 0.0014 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 119/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 120/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 121/200\n", + "200/200 [==============================] - 184s 919ms/step - loss: 0.0011 - acc: 1.0000 - val_loss: 0.4613 - val_acc: 0.5009 - lr: 0.0020\n", + "Epoch 122/200\n", + "200/200 [==============================] - 181s 905ms/step - loss: 9.1262e-04 - acc: 1.0000 - val_loss: 0.4962 - val_acc: 0.5009 - lr: 0.0018\n", + "Epoch 123/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 7.6331e-04 - acc: 1.0000 - val_loss: 0.4982 - val_acc: 0.5009 - lr: 0.0016\n", + "Epoch 124/200\n", + "200/200 [==============================] - 180s 901ms/step - loss: 8.8428e-04 - acc: 0.9998 - val_loss: 0.5020 - val_acc: 0.4990 - lr: 0.0014\n", + "Epoch 125/200\n", + "200/200 [==============================] - 182s 911ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0011 - val_acc: 1.0000 - lr: 0.0012\n", + "Epoch 126/200\n", + "200/200 [==============================] - 182s 910ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 0.0011 - val_acc: 1.0000 - lr: 9.4444e-04\n", + "Epoch 127/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 9.1584e-04 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 128/200\n", + "200/200 [==============================] - 180s 901ms/step - loss: 8.4356e-04 - acc: 1.0000 - val_loss: 9.1924e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 129/200\n", + "200/200 [==============================] - 180s 900ms/step - loss: 7.9755e-04 - acc: 1.0000 - val_loss: 7.9224e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 130/200\n", + "200/200 [==============================] - 180s 900ms/step - loss: 7.7464e-04 - acc: 1.0000 - val_loss: 7.6398e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 131/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 6.6648e-04 - acc: 1.0000 - val_loss: 0.4899 - val_acc: 0.5009 - lr: 0.0020\n", + "Epoch 132/200\n", + "200/200 [==============================] - 180s 900ms/step - loss: 5.1358e-04 - acc: 1.0000 - val_loss: 0.4965 - val_acc: 0.5009 - lr: 0.0018\n", + 
"Epoch 133/200\n", + "200/200 [==============================] - 181s 903ms/step - loss: 4.1115e-04 - acc: 1.0000 - val_loss: 0.4968 - val_acc: 0.5009 - lr: 0.0016\n", + "Epoch 134/200\n", + "200/200 [==============================] - 180s 901ms/step - loss: 3.4201e-04 - acc: 1.0000 - val_loss: 0.4875 - val_acc: 0.5009 - lr: 0.0014\n", + "Epoch 135/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 2.9266e-04 - acc: 1.0000 - val_loss: 0.4929 - val_acc: 0.5009 - lr: 0.0012\n", + "Epoch 136/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 2.5684e-04 - acc: 1.0000 - val_loss: 0.4574 - val_acc: 0.5009 - lr: 9.4444e-04\n", + "Epoch 137/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 3.7795e-04 - acc: 0.9999 - val_loss: 5.5103e-04 - val_acc: 0.9998 - lr: 7.3333e-04\n", + "Epoch 138/200\n", + "200/200 [==============================] - 182s 909ms/step - loss: 3.7845e-04 - acc: 1.0000 - val_loss: 3.5098e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 139/200\n", + "200/200 [==============================] - 181s 907ms/step - loss: 3.4421e-04 - acc: 1.0000 - val_loss: 3.2789e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 140/200\n", + "200/200 [==============================] - 182s 911ms/step - loss: 3.2858e-04 - acc: 1.0000 - val_loss: 3.1910e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 141/200\n", + "200/200 [==============================] - 182s 908ms/step - loss: 2.6441e-04 - acc: 1.0000 - val_loss: 0.4622 - val_acc: 0.5009 - lr: 0.0020\n", + "Epoch 142/200\n", + "200/200 [==============================] - 183s 913ms/step - loss: 1.8700e-04 - acc: 1.0000 - val_loss: 0.4229 - val_acc: 0.5009 - lr: 0.0018\n", + "Epoch 143/200\n", + "200/200 [==============================] - 181s 903ms/step - loss: 1.4435e-04 - acc: 1.0000 - val_loss: 0.3152 - val_acc: 0.5009 - lr: 0.0016\n", + "Epoch 144/200\n", + "200/200 [==============================] - 182s 908ms/step - loss: 1.1780e-04 - acc: 1.0000 - val_loss: 0.2472 - val_acc: 0.5092 - lr: 0.0014\n", + "Epoch 145/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 1.0020e-04 - acc: 1.0000 - val_loss: 0.0510 - val_acc: 0.9942 - lr: 0.0012\n", + "Epoch 146/200\n", + "200/200 [==============================] - 181s 905ms/step - loss: 8.8017e-05 - acc: 1.0000 - val_loss: 2.5845e-04 - val_acc: 1.0000 - lr: 9.4444e-04\n", + "Epoch 147/200\n", + "200/200 [==============================] - 181s 903ms/step - loss: 7.9554e-05 - acc: 1.0000 - val_loss: 7.8138e-05 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 148/200\n", + "200/200 [==============================] - 181s 906ms/step - loss: 7.3738e-05 - acc: 1.0000 - val_loss: 1.1118e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 149/200\n", + "200/200 [==============================] - 181s 905ms/step - loss: 7.0031e-05 - acc: 1.0000 - val_loss: 8.8437e-05 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 150/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 6.8191e-05 - acc: 1.0000 - val_loss: 6.2082e-05 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 151/200\n", + "200/200 [==============================] - 181s 903ms/step - loss: 5.9770e-05 - acc: 1.0000 - val_loss: 0.1230 - val_acc: 0.8043 - lr: 0.0020\n", + "Epoch 152/200\n", + "200/200 [==============================] - 180s 902ms/step - loss: 4.7915e-05 - acc: 1.0000 - val_loss: 0.0062 - val_acc: 1.0000 - lr: 0.0018\n", + "Epoch 153/200\n", + "200/200 [==============================] - 181s 
904ms/step - loss: 4.0668e-05 - acc: 1.0000 - val_loss: 4.4193e-05 - val_acc: 1.0000 - lr: 0.0016\n", + "Epoch 154/200\n", + "200/200 [==============================] - 181s 905ms/step - loss: 0.0024 - acc: 0.9981 - val_loss: 0.4956 - val_acc: 0.5037 - lr: 0.0014\n", + "Epoch 155/200\n", + "200/200 [==============================] - 181s 904ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 0.0012\n", + "Epoch 156/200\n", + "200/200 [==============================] - 181s 905ms/step - loss: 0.0014 - acc: 1.0000 - val_loss: 0.0014 - val_acc: 1.0000 - lr: 9.4444e-04\n", + "Epoch 157/200\n", + "200/200 [==============================] - 181s 906ms/step - loss: 0.0014 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 7.3333e-04\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 158/200\n", + "200/200 [==============================] - 182s 909ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 159/200\n", + "200/200 [==============================] - 182s 908ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 160/200\n", + "200/200 [==============================] - 181s 905ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 161/200\n", + "200/200 [==============================] - 182s 908ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0011 - val_acc: 1.0000 - lr: 0.0020\n", + "Epoch 162/200\n", + "200/200 [==============================] - 182s 908ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 0.0018\n", + "Epoch 163/200\n", + "200/200 [==============================] - 181s 907ms/step - loss: 9.1347e-04 - acc: 1.0000 - val_loss: 8.8741e-04 - val_acc: 1.0000 - lr: 0.0016\n", + "Epoch 164/200\n", + "200/200 [==============================] - 189s 946ms/step - loss: 8.2854e-04 - acc: 1.0000 - val_loss: 8.2963e-04 - val_acc: 1.0000 - lr: 0.0014\n", + "Epoch 165/200\n", + "200/200 [==============================] - 201s 1s/step - loss: 7.6230e-04 - acc: 1.0000 - val_loss: 7.8478e-04 - val_acc: 1.0000 - lr: 0.0012\n", + "Epoch 166/200\n", + "200/200 [==============================] - 203s 1s/step - loss: 7.1002e-04 - acc: 1.0000 - val_loss: 6.8492e-04 - val_acc: 1.0000 - lr: 9.4444e-04\n", + "Epoch 167/200\n", + "200/200 [==============================] - 192s 959ms/step - loss: 6.6918e-04 - acc: 1.0000 - val_loss: 6.4804e-04 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 168/200\n", + "200/200 [==============================] - 185s 924ms/step - loss: 6.3903e-04 - acc: 1.0000 - val_loss: 6.2087e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 169/200\n", + "200/200 [==============================] - 183s 917ms/step - loss: 6.1830e-04 - acc: 1.0000 - val_loss: 6.0446e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 170/200\n", + "200/200 [==============================] - 183s 916ms/step - loss: 6.0753e-04 - acc: 1.0000 - val_loss: 5.9887e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 171/200\n", + "200/200 [==============================] - 183s 914ms/step - loss: 5.6534e-04 - acc: 1.0000 - val_loss: 5.2013e-04 - val_acc: 1.0000 - lr: 0.0020\n", + "Epoch 172/200\n", + "200/200 [==============================] - 183s 913ms/step - loss: 4.7182e-04 - acc: 1.0000 - val_loss: 0.0024 - val_acc: 0.9999 - lr: 0.0018\n", + "Epoch 173/200\n", + "200/200 [==============================] - 182s 911ms/step - loss: 4.0822e-04 - acc: 1.0000 - 
val_loss: 4.1420e-04 - val_acc: 1.0000 - lr: 0.0016\n", + "Epoch 174/200\n", + "200/200 [==============================] - 183s 917ms/step - loss: 3.5842e-04 - acc: 1.0000 - val_loss: 6.5389e-04 - val_acc: 1.0000 - lr: 0.0014\n", + "Epoch 175/200\n", + "200/200 [==============================] - 183s 913ms/step - loss: 3.1872e-04 - acc: 1.0000 - val_loss: 4.6086e-04 - val_acc: 1.0000 - lr: 0.0012\n", + "Epoch 176/200\n", + "200/200 [==============================] - 183s 914ms/step - loss: 2.8938e-04 - acc: 1.0000 - val_loss: 0.2113 - val_acc: 0.6461 - lr: 9.4444e-04\n", + "Epoch 177/200\n", + "200/200 [==============================] - 183s 913ms/step - loss: 2.6750e-04 - acc: 1.0000 - val_loss: 0.0032 - val_acc: 0.9998 - lr: 7.3333e-04\n", + "Epoch 178/200\n", + "200/200 [==============================] - 183s 914ms/step - loss: 2.5151e-04 - acc: 1.0000 - val_loss: 2.4030e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 179/200\n", + "200/200 [==============================] - 183s 915ms/step - loss: 2.4081e-04 - acc: 1.0000 - val_loss: 2.3143e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 180/200\n", + "200/200 [==============================] - 183s 917ms/step - loss: 2.3538e-04 - acc: 1.0000 - val_loss: 2.2867e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 181/200\n", + "200/200 [==============================] - 183s 914ms/step - loss: 6.1482e-04 - acc: 0.9999 - val_loss: 0.1093 - val_acc: 0.8825 - lr: 0.0020\n", + "Epoch 182/200\n", + "200/200 [==============================] - 183s 916ms/step - loss: 4.7975e-04 - acc: 1.0000 - val_loss: 0.4328 - val_acc: 0.5009 - lr: 0.0018\n", + "Epoch 183/200\n", + "200/200 [==============================] - 183s 916ms/step - loss: 3.3621e-04 - acc: 1.0000 - val_loss: 0.4934 - val_acc: 0.5009 - lr: 0.0016\n", + "Epoch 184/200\n", + "200/200 [==============================] - 183s 914ms/step - loss: 2.5863e-04 - acc: 1.0000 - val_loss: 0.4763 - val_acc: 0.5009 - lr: 0.0014\n", + "Epoch 185/200\n", + "200/200 [==============================] - 183s 913ms/step - loss: 2.1425e-04 - acc: 1.0000 - val_loss: 0.4480 - val_acc: 0.5009 - lr: 0.0012\n", + "Epoch 186/200\n", + "200/200 [==============================] - 183s 913ms/step - loss: 1.8059e-04 - acc: 1.0000 - val_loss: 0.1734 - val_acc: 0.5981 - lr: 9.4444e-04\n", + "Epoch 187/200\n", + "200/200 [==============================] - 183s 916ms/step - loss: 1.5917e-04 - acc: 1.0000 - val_loss: 0.0044 - val_acc: 1.0000 - lr: 7.3333e-04\n", + "Epoch 188/200\n", + "200/200 [==============================] - 183s 914ms/step - loss: 1.4467e-04 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 5.2222e-04\n", + "Epoch 189/200\n", + "200/200 [==============================] - 183s 914ms/step - loss: 1.3561e-04 - acc: 1.0000 - val_loss: 1.8315e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 190/200\n", + "200/200 [==============================] - 183s 915ms/step - loss: 1.3111e-04 - acc: 1.0000 - val_loss: 1.2495e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Epoch 191/200\n", + "200/200 [==============================] - 183s 915ms/step - loss: 5.8874e-04 - acc: 0.9999 - val_loss: 0.4786 - val_acc: 0.4942 - lr: 0.0020\n", + "Epoch 192/200\n", + "200/200 [==============================] - 183s 914ms/step - loss: 7.1856e-04 - acc: 1.0000 - val_loss: 0.2866 - val_acc: 0.5099 - lr: 0.0018\n", + "Epoch 193/200\n", + "200/200 [==============================] - 183s 916ms/step - loss: 4.0349e-04 - acc: 1.0000 - val_loss: 0.4929 - val_acc: 0.5009 - lr: 0.0016\n", + "Epoch 194/200\n", + 
"200/200 [==============================] - 183s 913ms/step - loss: 2.7399e-04 - acc: 1.0000 - val_loss: 0.4874 - val_acc: 0.5009 - lr: 0.0014\n", + "Epoch 195/200\n", + "200/200 [==============================] - 183s 913ms/step - loss: 2.0520e-04 - acc: 1.0000 - val_loss: 0.4753 - val_acc: 0.5009 - lr: 0.0012\n", + "Epoch 196/200\n", + "200/200 [==============================] - 183s 913ms/step - loss: 1.6416e-04 - acc: 1.0000 - val_loss: 0.4276 - val_acc: 0.5009 - lr: 9.4444e-04\n", + "Epoch 197/200\n", + "200/200 [==============================] - 183s 914ms/step - loss: 1.3822e-04 - acc: 1.0000 - val_loss: 0.3467 - val_acc: 0.5009 - lr: 7.3333e-04\n", + "Epoch 198/200\n", + "200/200 [==============================] - 182s 911ms/step - loss: 1.6206e-04 - acc: 1.0000 - val_loss: 4.4510e-04 - val_acc: 0.9996 - lr: 5.2222e-04\n", + "Epoch 199/200\n", + "200/200 [==============================] - 183s 914ms/step - loss: 1.5997e-04 - acc: 1.0000 - val_loss: 1.6303e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n", + "Epoch 200/200\n", + "200/200 [==============================] - 183s 913ms/step - loss: 1.4846e-04 - acc: 1.0000 - val_loss: 1.4764e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n", + "Best validation accuracy: 1.0\n" + ] + } + ], + "source": [ + "#14#Training the Model\n", + "num_epochs = 200\n", + "depth = 10\n", + "trained_net, history = train_LCB_distinguisher(num_epochs, num_rounds, depth)" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ghor_Rk_0009_0000_Round_20_depth_10.json\n" + ] + } + ], + "source": [ + "#15#Create JSON File \n", + "# Convert the model architecture to JSON format\n", + "import json\n", + "from keras.models import model_from_json\n", + "model_json = trained_net.to_json()\n", + "\n", + " # Save the model architecture as a JSON file (optional)\n", + "filename = f'ghor_Rk_0009_0000_Round_{num_rounds}_depth_10.json'\n", + "print(filename)\n", + "with open(filename, \"w\") as json_file:\n", + " json.dump(json.loads(model_json), json_file, indent=4)" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "#16#Evaluate Function\n", + "def evaluate(net,X,Y):\n", + " Z = net.predict(X,batch_size=10000).flatten();\n", + " Zbin = (Z > 0.5);\n", + " diff = Y - Z; mse = np.mean(diff*diff);\n", + " n = len(Z); n0 = np.sum(Y==0); n1 = np.sum(Y==1);\n", + " acc = np.sum(Zbin == Y) / n;\n", + " tpr = np.sum(Zbin[Y==1]) / n1;\n", + " tnr = np.sum(Zbin[Y==0] == 0) / n0;\n", + " mreal = np.median(Z[Y==1]);\n", + " high_random = np.sum(Z[Y==0] > mreal) / n0;\n", + " print(\"Accuracy: \", acc, \"TPR: \", tpr, \"TNR: \", tnr, \"MSE:\", mse);\n", + " print(\"Percentage of random pairs with score higher than median of real pairs:\", 100*high_random);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "10/10 [==============================] - 3s 266ms/step\n", + "Accuracy: 0.99999 TPR: 1.0 TNR: 0.9999800279608548 MSE: 1.3477956e-05\n", + "Percentage of random pairs with score higher than median of real pairs: 0.0019972039145196726\n" + ] + } + ], + "source": [ + "#17#Evaluate Function Call\n", + "import numpy as np\n", + "\n", + "from keras.models import model_from_json\n", + "\n", + 
"#load distinguishers\n", + "json_file = open('ghor_Rk_0009_0000_Round_20_depth_10.json','r');\n", + "json_model = json_file.read();\n", + "\n", + "net20 = model_from_json(json_model);\n", + "\n", + "net20.load_weights('ghor_Rk_0009_0000_Round_20_depth_10.h5');\n", + "\n", + "X_test_stacked, Y_test_stacked = make_train_data(100000, num_rounds)\n", + "evaluate(net20, X_test_stacked, Y_test_stacked);\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.13" + }, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}