Neural-LCB / LCB Cipher, commit 63aae23e
Authored Aug 10, 2023 by Indrakanti Aishwarya (parent 3430e876)
Commit message: Upload New File
1 changed file: LCB/LCB_Dynamic_0000836F.ipynb, new file mode 100644, 996 additions, 0 deletions
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"#1#Header\n",
"import csv\n",
"import numpy as np\n",
"import os \n",
"from os import urandom\n",
"from keras.models import model_from_json"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"#2#Defining Global Variables\n",
"num_rounds = 10\n",
"m = 0\n",
"o = 0"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"#3#Defining WORDSIZE\n",
"def WORD_SIZE():\n",
" return(16);"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"#4#Defining S-Box\n",
"s_box_mapping_np = np.array([0, 4, 1, 5, 2, 6, 3, 7, 8, 12, 9, 13, 10, 14, 11, 15], dtype=np.uint8)\n",
"\n",
"def s_box(input_bits):\n",
" input_bits_int = int(input_bits)\n",
" output_bits_int = s_box_mapping_np[input_bits_int]\n",
" return output_bits_int"
]
},
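{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#4a#Sanity check for the S-Box (added sketch, not part of the original run):\n",
"# a 4-bit S-box must be a permutation of 0..15, otherwise decryption is impossible.\n",
"assert sorted(s_box(i) for i in range(16)) == list(range(16))\n",
"print([s_box(i) for i in range(16)])"
]
},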
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"#5#Defining P-Box\n",
"def decimal_to_binary_list(value, num_bits=4):\n",
" return np.array([int(x) for x in format(value, f'0{num_bits}b')], dtype=np.uint8)\n",
"\n",
"def p_box(c_decimal, d_decimal):\n",
" c = decimal_to_binary_list(c_decimal)\n",
" d = decimal_to_binary_list(d_decimal)\n",
"\n",
" e = np.zeros(8, dtype=np.uint8)\n",
"\n",
" e[0] = c[0]\n",
" e[1] = d[0]\n",
" e[2] = c[3]\n",
" e[3] = d[3]\n",
" e[4] = c[1]\n",
" e[5] = d[1]\n",
" e[6] = c[2]\n",
" e[7] = d[2]\n",
"\n",
" return e"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"#6#Defining L-Box\n",
"def l_box(f, g):\n",
" if len(f) != 8 or len(g) != 8:\n",
" raise ValueError(\"Both input arrays f and g should have exactly 8 elements\")\n",
"\n",
" h = np.zeros(16, dtype=np.uint8)\n",
" h[0] = f[0]\n",
" h[1] = g[0]\n",
" h[2] = f[7]\n",
" h[3] = g[7]\n",
" h[4] = f[1]\n",
" h[5] = g[1]\n",
" h[6] = f[6]\n",
" h[7] = g[6]\n",
" h[8] = f[2]\n",
" h[9] = g[2]\n",
" h[10] = f[5]\n",
" h[11] = g[5]\n",
" h[12] = f[3]\n",
" h[13] = g[3]\n",
" h[14] = f[4]\n",
" h[15] = g[4]\n",
" #print(h)\n",
" return h"
]
},
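{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#6a#Illustration (added sketch, not from the original notebook): trace one\n",
"# 16-bit word through the confusion/diffusion pipeline used by the F-functions\n",
"# below: four S-box lookups, two P-box interleavings, one L-box merge.\n",
"word = 0x1234\n",
"nibbles = [word >> 12, (word >> 8) & 0xF, (word >> 4) & 0xF, word & 0xF]\n",
"subs = [s_box(nb) for nb in nibbles]\n",
"left8  = p_box(subs[0], subs[1])   #8-bit interleaving of the first two nibbles\n",
"right8 = p_box(subs[2], subs[3])   #8-bit interleaving of the last two nibbles\n",
"print(l_box(left8, right8))        #final 16-bit bit vector"
]
},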
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"#7#Defining F-function for Right Side of Plaintext\n",
"def binary_array_to_integer(output):\n",
" int_output = ''.join(map(str, output))\n",
" return int(int_output, 2)\n",
"\n",
"def f_function(x, key, d):\n",
" q=0\n",
" global m\n",
" if isinstance(x, int):\n",
" x = [x]\n",
" input_parts = np.zeros((len(x), 4), dtype=np.uint16)\n",
" for i, val in enumerate(x):\n",
" input_parts[i] = np.array([val >> 12, (val >> 8) & 0xF, (val >> 4) & 0xF, val & 0xF])\n",
" \n",
" s_box_outputs = np.array([[s_box(element) for element in part] for part in input_parts])\n",
" p_box_outputs = np.zeros((len(x), 2, 8), dtype=np.uint8)\n",
" for i in range(len(x)):\n",
" p_box_outputs[i] = np.array([p_box(s_box_outputs[i][0], s_box_outputs[i][1]), p_box(s_box_outputs[i][2], s_box_outputs[i][3])])\n",
" \n",
" final_outputs = np.zeros(len(x), dtype=np.uint32)\n",
" for i in range(len(x)):\n",
" final_output = np.array(l_box(p_box_outputs[i][0], p_box_outputs[i][1]))\n",
" k = key[q][(m+1) % 4]\n",
" output = final_output ^ k\n",
" output = binary_array_to_integer(output)\n",
" final_outputs[i] = output\n",
" q +=1 \n",
" if (m < 2):\n",
" m +=2\n",
" else:\n",
" m = 0\n",
" return final_outputs"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"#8#Defining F-function for Left Side of Plaintext\n",
"def binary_array_to_integer(output):\n",
" int_output = ''.join(map(str, output))\n",
" return int(int_output, 2)\n",
"\n",
"def ff_function(x, key, d):\n",
" q=0\n",
" global o\n",
" if isinstance(x, int):\n",
" x = [x]\n",
" \n",
" input_parts = np.zeros((len(x), 4), dtype=np.uint16)\n",
" for i, val in enumerate(x):\n",
" input_parts[i] = np.array([val >> 12, (val >> 8) & 0xF, (val >> 4) & 0xF, val & 0xF])\n",
" \n",
" s_box_outputs = np.array([[s_box(element) for element in part] for part in input_parts])\n",
" p_box_outputs = np.zeros((len(x), 2, 8), dtype=np.uint8)\n",
" for i in range(len(x)):\n",
" p_box_outputs[i] = np.array([p_box(s_box_outputs[i][0], s_box_outputs[i][1]), p_box(s_box_outputs[i][2], s_box_outputs[i][3])])\n",
" \n",
" final_outputs = np.zeros(len(x), dtype=np.uint32)\n",
" for i in range(len(x)):\n",
" final_output = np.array(l_box(p_box_outputs[i][0], p_box_outputs[i][1]))\n",
" k = key[q][o % 4]\n",
" output = final_output ^ k\n",
" output = binary_array_to_integer(output)\n",
" final_outputs[i] = output\n",
" q +=1 \n",
" if (o < 2):\n",
" o +=2\n",
" else:\n",
" o = 0\n",
" return final_outputs"
]
},
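{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#8a#Key-schedule illustration (added sketch, assumes m and o start at 0 as in cell #2):\n",
"# f_function XORs in subkey index (m+1) % 4 and ff_function subkey index o % 4,\n",
"# and each counter steps 0 -> 2 -> 0 -> ... once per word processed. Simulated\n",
"# here without touching the real globals.\n",
"m_sim, o_sim = 0, 0\n",
"for step in range(4):\n",
"    print(f'word {step}: f_function XORs K{(m_sim + 1) % 4 + 1}, ff_function XORs K{o_sim % 4 + 1}')\n",
"    m_sim = m_sim + 2 if m_sim < 2 else 0\n",
"    o_sim = o_sim + 2 if o_sim < 2 else 0"
]
},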
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"#9#Convert the ciphertext pairs into Binary array\n",
"def convert_to_binary(row):\n",
" bin_array = np.zeros(64, dtype=np.uint8)\n",
" for i, num in enumerate(row):\n",
" binary_str = format(num, '016b')\n",
" for j, b in enumerate(binary_str):\n",
" bin_array[i * 16 + j] = int(b)\n",
" return bin_array"
]
},
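{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#9a#Illustration (added sketch): a ciphertext-pair row (c0l, c0r, c1l, c1r)\n",
"# becomes one 64-bit feature vector, 16 bits per word, most significant bit first.\n",
"demo = convert_to_binary([0x8000, 0x0001, 0xFFFF, 0x0000])\n",
"print(demo.reshape(4, 16))"
]
},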
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"#10#Encryption Function\n",
"def lcb_encrypt(plaintext, key, rounds, d):\n",
" \n",
" left_plaintext = np.uint16(plaintext[0])\n",
" right_plaintext = np.uint16(plaintext[1])\n",
" L, R = left_plaintext, right_plaintext\n",
"\n",
" n = 0\n",
" \n",
" while n < rounds:\n",
" L, R = f_function(R, key, d), ff_function(L, key, d)\n",
" n += 1\n",
" \n",
" return (L, R)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"#11#Fuction for generation of keys\n",
"import random\n",
"\n",
"def generate_hex_keys(num_keys, length=16):\n",
" hex_chars = \"0123456789ABCDEF\"\n",
" keys_str = [\"\".join(random.choices(hex_chars, k=length)) for _ in range(num_keys)]\n",
"\n",
" return keys_str\n",
"\n",
"\n",
"def to_binary(value, bits):\n",
" return format(value, f'0{bits}b')\n",
"\n",
"def generate_round_keys(num_keys):\n",
" random_keys_hex = generate_hex_keys(num_keys)\n",
" round_keys = []\n",
" \n",
" for random_key_hex in random_keys_hex:\n",
" random_key = int(random_key_hex, 16)\n",
"\n",
" K1 = (random_key >> 48) & 0xFFFF\n",
" K2 = (random_key >> 32) & 0xFFFF\n",
" K3 = (random_key >> 16) & 0xFFFF\n",
" K4 = random_key & 0xFFFF\n",
" \n",
" k1_bin = to_binary(K1, 16)\n",
" k2_bin = to_binary(K2, 16)\n",
" k3_bin = to_binary(K3, 16)\n",
" k4_bin = to_binary(K4, 16)\n",
"\n",
" k1_np_array = np.array([int(bit) for bit in k1_bin])\n",
" k2_np_array = np.array([int(bit) for bit in k2_bin])\n",
" k3_np_array = np.array([int(bit) for bit in k3_bin])\n",
" k4_np_array = np.array([int(bit) for bit in k4_bin])\n",
"\n",
" round_key = np.array([k1_np_array, k2_np_array, k3_np_array, k4_np_array])\n",
" round_keys.append(round_key)\n",
" round_key = np.array(round_keys)\n",
" \n",
" return round_key"
]
},
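{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#11a#Usage sketch (added, not part of the original run): encrypt two 32-bit\n",
"# blocks under fresh per-sample 64-bit keys. The counters m and o are reset\n",
"# first so the key schedule starts from the same state as a fresh kernel.\n",
"m, o = 0, 0\n",
"rk = generate_round_keys(2)   #shape (2, 4, 16): per-sample subkeys K1..K4 as bit arrays\n",
"pt = (np.array([0x0123, 0x89AB], dtype=np.uint16), np.array([0x4567, 0xCDEF], dtype=np.uint16))\n",
"ct_l, ct_r = lcb_encrypt(pt, rk, num_rounds, 2)\n",
"print([hex(int(v)) for v in ct_l], [hex(int(v)) for v in ct_r])"
]
},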
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"#12#Make dataset\n",
"\n",
"def make_train_data(n, nr, diff=(0,0x836F)):\n",
" Y = np.frombuffer(urandom(n), dtype=np.uint8); \n",
" Y = Y & 1;\n",
" plaintext = np.frombuffer(urandom(4*n), dtype=np.uint32);\n",
" plain0l = np.empty(n, dtype=np.uint16)\n",
" plain0r = np.empty(n, dtype=np.uint16)\n",
" \n",
" for i in range(n):\n",
" plain0l[i] = (plaintext[i] >> 16) & 0xffff\n",
" plain0r[i] = plaintext[i] & 0xffff\n",
" \n",
" plain1l = plain0l ^ diff[0]; plain1r = plain0r ^ diff[1];\n",
" \n",
" num_rand_samples = np.sum(Y==0);\n",
" plain1l[Y==0] = np.frombuffer(urandom(2*num_rand_samples),dtype=np.uint16);\n",
" plain1r[Y==0] = np.frombuffer(urandom(2*num_rand_samples),dtype=np.uint16);\n",
" \n",
" round_key = generate_round_keys(n)\n",
" \n",
" ctdata0l, ctdata0r = lcb_encrypt((plain0l, plain0r), round_key, nr, n)\n",
" ctdata1l, ctdata1r = lcb_encrypt((plain1l, plain1r), round_key, nr, n)\n",
" \n",
" ctdata = np.vstack((ctdata0l, ctdata0r, ctdata1l, ctdata1r)).T\n",
" X = np.array([convert_to_binary(row) for row in ctdata])\n",
" \n",
"\n",
" with open(\"VDataset_NewP.csv\", \"w\", newline='') as f:\n",
" writer = csv.writer(f)\n",
" writer.writerow([\"plain0l\", \"plain0r\", \"plain1l\", \"plain1r\",\"Y\"])\n",
" for i in range(n):\n",
" writer.writerow([plain0l[i], plain0r[i], plain1l[i], plain1r[i],Y[i]])\n",
"\n",
" with open(\"VDataset_NewC.csv\", \"w\", newline='') as f:\n",
" writer = csv.writer(f)\n",
" writer.writerow([\"ctdata0l\", \"ctdata0r\", \"ctdata1l\", \"ctdata1r\",\"Y\"])\n",
" for i in range(n):\n",
" writer.writerow([ctdata0l[i], ctdata0r[i], ctdata1l[i], ctdata1r[i],Y[i]])\n",
" \n",
" return(X,Y);"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(array([[1, 1, 0, ..., 0, 1, 0],\n",
" [0, 0, 0, ..., 1, 0, 1],\n",
" [1, 1, 1, ..., 1, 1, 0],\n",
" ...,\n",
" [0, 1, 0, ..., 0, 1, 0],\n",
" [1, 0, 1, ..., 0, 0, 0],\n",
" [1, 0, 0, ..., 1, 0, 0]], dtype=uint8),\n",
" array([1, 1, 1, ..., 0, 0, 0], dtype=uint8))"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"make_train_data(10**5, num_rounds)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"#13#Creation of Model\n",
"\n",
"from pickle import dump\n",
"\n",
"from keras.callbacks import ModelCheckpoint, LearningRateScheduler\n",
"from keras.models import Model\n",
"from keras.optimizers import Adam\n",
"from keras.layers import Dense, Conv1D, Input, Reshape, Permute, Add, Flatten, BatchNormalization, Activation\n",
"from keras import backend as K\n",
"from keras.regularizers import l2\n",
"\n",
"bs = 5000;\n",
"wdir = './freshly_trained_nets/'\n",
"\n",
"def cyclic_lr(num_epochs, high_lr, low_lr):\n",
" res = lambda i: low_lr + ((num_epochs-1) - i % num_epochs)/(num_epochs-1) * (high_lr - low_lr);\n",
" return(res);\n",
"\n",
"def make_checkpoint(datei):\n",
" res = ModelCheckpoint(datei, monitor='val_loss', save_best_only = True);\n",
" return(res);\n",
"\n",
"#make residual tower of convolutional blocks\n",
"def make_resnet(num_blocks=2, num_filters=32, num_outputs=1, d1=64, d2=64, word_size=16, ks=3,depth=5, reg_param=0.0001, final_activation='sigmoid'):\n",
" #Input and preprocessing layers\n",
" inp = Input(shape=(num_blocks * word_size * 2,));\n",
" rs = Reshape((2 * num_blocks, word_size))(inp);\n",
" perm = Permute((2,1))(rs);\n",
" #add a single residual layer that will expand the data to num_filters channels\n",
" #this is a bit-sliced layer\n",
" conv0 = Conv1D(num_filters, kernel_size=1, padding='same', kernel_regularizer=l2(reg_param))(perm);\n",
" conv0 = BatchNormalization()(conv0);\n",
" conv0 = Activation('relu')(conv0);\n",
" #add residual blocks\n",
" shortcut = conv0;\n",
" for i in range(depth):\n",
" conv1 = Conv1D(num_filters, kernel_size=ks, padding='same', kernel_regularizer=l2(reg_param))(shortcut);\n",
" conv1 = BatchNormalization()(conv1);\n",
" conv1 = Activation('relu')(conv1);\n",
" conv2 = Conv1D(num_filters, kernel_size=ks, padding='same',kernel_regularizer=l2(reg_param))(conv1);\n",
" conv2 = BatchNormalization()(conv2);\n",
" conv2 = Activation('relu')(conv2);\n",
" shortcut = Add()([shortcut, conv2]);\n",
" #add prediction head\n",
" flat1 = Flatten()(shortcut);\n",
" dense1 = Dense(d1,kernel_regularizer=l2(reg_param))(flat1);\n",
" dense1 = BatchNormalization()(dense1);\n",
" dense1 = Activation('relu')(dense1);\n",
" dense2 = Dense(d2, kernel_regularizer=l2(reg_param))(dense1);\n",
" dense2 = BatchNormalization()(dense2);\n",
" dense2 = Activation('relu')(dense2);\n",
" out = Dense(num_outputs, activation=final_activation, kernel_regularizer=l2(reg_param))(dense2);\n",
" model = Model(inputs=inp, outputs=out);\n",
" return(model);\n",
"\n",
"def train_LCB_distinguisher(num_epochs, num_rounds, depth):\n",
" #create the network\n",
" print(num_rounds)\n",
" print(depth)\n",
" net = make_resnet(depth=depth, reg_param=10**-5);\n",
" net.compile(optimizer='adam',loss='mse',metrics=['acc']);\n",
" #generate training and validation data\n",
" X, Y = make_train_data(10**6,num_rounds);\n",
" X_eval, Y_eval = make_train_data(10**5, num_rounds);\n",
" #set up model checkpoint\n",
" check = make_checkpoint(wdir+'ghor_Rk_0000_836F_Round_'+str(num_rounds)+'_depth_'+str(depth)+'.h5');\n",
" #create learnrate schedule\n",
" lr = LearningRateScheduler(cyclic_lr(10,0.002, 0.0001));\n",
" #train and evaluate\n",
" #print(X_eval)\n",
" h = net.fit(X,Y,epochs=num_epochs,batch_size=bs,validation_data=(X_eval, Y_eval), callbacks=[lr,check]);\n",
" np.save(wdir+'h'+str(num_rounds)+'r_depth'+str(depth)+'.npy', h.history['val_acc']);\n",
" np.save(wdir+'h'+str(num_rounds)+'r_depth'+str(depth)+'.npy', h.history['val_loss']);\n",
" dump(h.history,open(wdir+'hist'+str(num_rounds)+'r_depth'+str(depth)+'.p','wb'));\n",
" print(\"Best validation accuracy: \", np.max(h.history['val_acc']));\n",
" return(net, h);\n",
"\n"
]
},
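{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#13a#Optional probe (added sketch): build the network once and inspect its\n",
"# size before committing to a multi-hour training run.\n",
"probe_net = make_resnet(depth=10, reg_param=10**-5)\n",
"print(probe_net.count_params())"
]
},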
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"10\n",
"10\n",
"Epoch 1/200\n",
"200/200 [==============================] - 185s 909ms/step - loss: 0.0114 - acc: 0.9959 - val_loss: 0.0240 - val_acc: 0.9776 - lr: 0.0020\n",
"Epoch 2/200\n",
"200/200 [==============================] - 182s 909ms/step - loss: 0.0064 - acc: 1.0000 - val_loss: 0.0056 - val_acc: 0.9999 - lr: 0.0018\n",
"Epoch 3/200\n",
"200/200 [==============================] - 190s 951ms/step - loss: 0.0049 - acc: 1.0000 - val_loss: 0.0044 - val_acc: 1.0000 - lr: 0.0016\n",
"Epoch 4/200\n",
"200/200 [==============================] - 183s 916ms/step - loss: 0.0038 - acc: 1.0000 - val_loss: 0.0037 - val_acc: 1.0000 - lr: 0.0014\n",
"Epoch 5/200\n",
"200/200 [==============================] - 183s 914ms/step - loss: 0.0030 - acc: 1.0000 - val_loss: 0.0030 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 6/200\n",
"200/200 [==============================] - 185s 923ms/step - loss: 0.0024 - acc: 1.0000 - val_loss: 0.0023 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 7/200\n",
"200/200 [==============================] - 183s 916ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 8/200\n",
"200/200 [==============================] - 180s 902ms/step - loss: 0.0017 - acc: 1.0000 - val_loss: 0.0017 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 9/200\n",
"200/200 [==============================] - 181s 903ms/step - loss: 0.0016 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 10/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 11/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 0.0011 - acc: 1.0000 - val_loss: 0.4747 - val_acc: 0.5018 - lr: 0.0020\n",
"Epoch 12/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 6.9676e-04 - acc: 1.0000 - val_loss: 0.1912 - val_acc: 0.6449 - lr: 0.0018\n",
"Epoch 13/200\n",
"200/200 [==============================] - 186s 931ms/step - loss: 5.0213e-04 - acc: 1.0000 - val_loss: 8.2996e-04 - val_acc: 0.9995 - lr: 0.0016\n",
"Epoch 14/200\n",
"200/200 [==============================] - 181s 908ms/step - loss: 3.9845e-04 - acc: 1.0000 - val_loss: 0.3903 - val_acc: 0.5018 - lr: 0.0014\n",
"Epoch 15/200\n",
"200/200 [==============================] - 183s 915ms/step - loss: 2.6665e-04 - acc: 1.0000 - val_loss: 0.4915 - val_acc: 0.5018 - lr: 0.0012\n",
"Epoch 16/200\n",
"200/200 [==============================] - 182s 910ms/step - loss: 2.0211e-04 - acc: 1.0000 - val_loss: 0.4945 - val_acc: 0.5018 - lr: 9.4444e-04\n",
"Epoch 17/200\n",
"200/200 [==============================] - 185s 926ms/step - loss: 1.6426e-04 - acc: 1.0000 - val_loss: 0.4659 - val_acc: 0.5018 - lr: 7.3333e-04\n",
"Epoch 18/200\n",
"200/200 [==============================] - 185s 926ms/step - loss: 1.4096e-04 - acc: 1.0000 - val_loss: 0.1450 - val_acc: 0.7232 - lr: 5.2222e-04\n",
"Epoch 19/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 1.2714e-04 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 20/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 1.2059e-04 - acc: 1.0000 - val_loss: 1.2217e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 21/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 0.0044 - acc: 0.9980 - val_loss: 0.0059 - val_acc: 0.9974 - lr: 0.0020\n",
"Epoch 22/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 0.0037 - acc: 1.0000 - val_loss: 0.0036 - val_acc: 1.0000 - lr: 0.0018\n",
"Epoch 23/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 0.0034 - acc: 1.0000 - val_loss: 0.0033 - val_acc: 1.0000 - lr: 0.0016\n",
"Epoch 24/200\n",
"200/200 [==============================] - 181s 903ms/step - loss: 0.0032 - acc: 1.0000 - val_loss: 0.0031 - val_acc: 1.0000 - lr: 0.0014\n",
"Epoch 25/200\n",
"200/200 [==============================] - 181s 903ms/step - loss: 0.0031 - acc: 1.0000 - val_loss: 0.0030 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 26/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 0.0029 - acc: 1.0000 - val_loss: 0.0029 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 27/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 0.0028 - acc: 1.0000 - val_loss: 0.0028 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 28/200\n",
"200/200 [==============================] - 181s 903ms/step - loss: 0.0027 - acc: 1.0000 - val_loss: 0.0027 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 29/200\n",
"200/200 [==============================] - 180s 902ms/step - loss: 0.0027 - acc: 1.0000 - val_loss: 0.0027 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 30/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 0.0026 - acc: 1.0000 - val_loss: 0.0026 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 31/200\n",
"200/200 [==============================] - 183s 917ms/step - loss: 0.0025 - acc: 1.0000 - val_loss: 0.0034 - val_acc: 1.0000 - lr: 0.0020\n",
"Epoch 32/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0025 - val_acc: 1.0000 - lr: 0.0018\n",
"Epoch 33/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 0.0016\n",
"Epoch 34/200\n",
"200/200 [==============================] - 181s 907ms/step - loss: 0.0018 - acc: 1.0000 - val_loss: 0.0018 - val_acc: 1.0000 - lr: 0.0014\n",
"Epoch 35/200\n",
"200/200 [==============================] - 181s 907ms/step - loss: 0.0017 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 36/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 0.0016 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 37/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 38/200\n",
"200/200 [==============================] - 182s 909ms/step - loss: 0.0014 - acc: 1.0000 - val_loss: 0.0014 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 39/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 0.0014 - acc: 1.0000 - val_loss: 0.0014 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 40/200\n",
"200/200 [==============================] - 181s 907ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 41/200\n",
"200/200 [==============================] - 182s 909ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0375 - val_acc: 0.9868 - lr: 0.0020\n",
"Epoch 42/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 0.1601 - val_acc: 0.6503 - lr: 0.0018\n",
"Epoch 43/200\n",
"200/200 [==============================] - 182s 909ms/step - loss: 8.7272e-04 - acc: 1.0000 - val_loss: 0.0358 - val_acc: 0.9965 - lr: 0.0016\n",
"Epoch 44/200\n",
"200/200 [==============================] - 199s 996ms/step - loss: 7.5967e-04 - acc: 1.0000 - val_loss: 0.2835 - val_acc: 0.5024 - lr: 0.0014\n",
"Epoch 45/200\n",
"200/200 [==============================] - 187s 934ms/step - loss: 6.7187e-04 - acc: 1.0000 - val_loss: 0.0131 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 46/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 6.0448e-04 - acc: 1.0000 - val_loss: 0.0045 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 47/200\n",
"200/200 [==============================] - 187s 938ms/step - loss: 5.5327e-04 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 48/200\n",
"200/200 [==============================] - 204s 1s/step - loss: 5.1591e-04 - acc: 1.0000 - val_loss: 5.5068e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 49/200\n",
"200/200 [==============================] - 191s 956ms/step - loss: 4.9090e-04 - acc: 1.0000 - val_loss: 5.0161e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 50/200\n",
"200/200 [==============================] - 187s 934ms/step - loss: 4.7809e-04 - acc: 1.0000 - val_loss: 4.7770e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 51/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 0.0014 - acc: 0.9998 - val_loss: 0.1151 - val_acc: 0.8641 - lr: 0.0020\n",
"Epoch 52/200\n",
"200/200 [==============================] - 187s 934ms/step - loss: 9.5738e-04 - acc: 1.0000 - val_loss: 0.3482 - val_acc: 0.5020 - lr: 0.0018\n",
"Epoch 53/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 6.7179e-04 - acc: 1.0000 - val_loss: 0.4190 - val_acc: 0.5018 - lr: 0.0016\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 54/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 5.1030e-04 - acc: 1.0000 - val_loss: 0.3700 - val_acc: 0.5019 - lr: 0.0014\n",
"Epoch 55/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 4.0714e-04 - acc: 1.0000 - val_loss: 0.2277 - val_acc: 0.5630 - lr: 0.0012\n",
"Epoch 56/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 3.3943e-04 - acc: 1.0000 - val_loss: 0.0590 - val_acc: 0.9308 - lr: 9.4444e-04\n",
"Epoch 57/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 2.9440e-04 - acc: 1.0000 - val_loss: 0.0220 - val_acc: 0.9963 - lr: 7.3333e-04\n",
"Epoch 58/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 2.6228e-04 - acc: 1.0000 - val_loss: 4.3736e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 59/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 2.4230e-04 - acc: 1.0000 - val_loss: 2.6776e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 60/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 2.3241e-04 - acc: 1.0000 - val_loss: 2.3119e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 61/200\n",
"200/200 [==============================] - 180s 902ms/step - loss: 3.1112e-04 - acc: 1.0000 - val_loss: 8.1905e-04 - val_acc: 0.9997 - lr: 0.0020\n",
"Epoch 62/200\n",
"200/200 [==============================] - 181s 903ms/step - loss: 3.3318e-04 - acc: 1.0000 - val_loss: 0.4971 - val_acc: 0.5018 - lr: 0.0018\n",
"Epoch 63/200\n",
"200/200 [==============================] - 183s 914ms/step - loss: 1.6852e-04 - acc: 1.0000 - val_loss: 0.4981 - val_acc: 0.5018 - lr: 0.0016\n",
"Epoch 64/200\n",
"200/200 [==============================] - 187s 934ms/step - loss: 1.1411e-04 - acc: 1.0000 - val_loss: 0.4975 - val_acc: 0.5018 - lr: 0.0014\n",
"Epoch 65/200\n",
"200/200 [==============================] - 189s 943ms/step - loss: 8.4985e-05 - acc: 1.0000 - val_loss: 0.4952 - val_acc: 0.5018 - lr: 0.0012\n",
"Epoch 66/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 1.0603e-04 - acc: 1.0000 - val_loss: 0.0023 - val_acc: 0.9974 - lr: 9.4444e-04\n",
"Epoch 67/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 1.2690e-04 - acc: 1.0000 - val_loss: 0.0074 - val_acc: 0.9999 - lr: 7.3333e-04\n",
"Epoch 68/200\n",
"200/200 [==============================] - 187s 933ms/step - loss: 8.4066e-05 - acc: 1.0000 - val_loss: 0.0879 - val_acc: 0.8975 - lr: 5.2222e-04\n",
"Epoch 69/200\n",
"200/200 [==============================] - 181s 907ms/step - loss: 6.8841e-05 - acc: 1.0000 - val_loss: 5.1311e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 70/200\n",
"200/200 [==============================] - 181s 907ms/step - loss: 6.3039e-05 - acc: 1.0000 - val_loss: 6.8196e-05 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 71/200\n",
"200/200 [==============================] - 181s 906ms/step - loss: 2.2853e-04 - acc: 1.0000 - val_loss: 0.5024 - val_acc: 0.4984 - lr: 0.0020\n",
"Epoch 72/200\n",
"200/200 [==============================] - 180s 900ms/step - loss: 4.0886e-04 - acc: 1.0000 - val_loss: 0.4943 - val_acc: 0.5018 - lr: 0.0018\n",
"Epoch 73/200\n",
"200/200 [==============================] - 183s 913ms/step - loss: 1.9104e-04 - acc: 1.0000 - val_loss: 0.4975 - val_acc: 0.5018 - lr: 0.0016\n",
"Epoch 74/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 1.1183e-04 - acc: 1.0000 - val_loss: 0.4980 - val_acc: 0.5018 - lr: 0.0014\n",
"Epoch 75/200\n",
"200/200 [==============================] - 188s 939ms/step - loss: 6.2444e-05 - acc: 1.0000 - val_loss: 0.4977 - val_acc: 0.5018 - lr: 0.0012\n",
"Epoch 76/200\n",
"200/200 [==============================] - 189s 943ms/step - loss: 4.3502e-05 - acc: 1.0000 - val_loss: 0.4976 - val_acc: 0.5018 - lr: 9.4444e-04\n",
"Epoch 77/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 3.2875e-05 - acc: 1.0000 - val_loss: 0.4972 - val_acc: 0.5018 - lr: 7.3333e-04\n",
"Epoch 78/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 2.6406e-05 - acc: 1.0000 - val_loss: 0.4931 - val_acc: 0.5018 - lr: 5.2222e-04\n",
"Epoch 79/200\n",
"200/200 [==============================] - 181s 906ms/step - loss: 2.3363e-05 - acc: 1.0000 - val_loss: 0.4424 - val_acc: 0.5018 - lr: 3.1111e-04\n",
"Epoch 80/200\n",
"200/200 [==============================] - 184s 920ms/step - loss: 2.2037e-05 - acc: 1.0000 - val_loss: 2.0193e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 81/200\n",
"200/200 [==============================] - 193s 966ms/step - loss: 1.7932e-05 - acc: 1.0000 - val_loss: 0.4637 - val_acc: 0.5018 - lr: 0.0020\n",
"Epoch 82/200\n",
"200/200 [==============================] - 189s 944ms/step - loss: 0.0020 - acc: 0.9994 - val_loss: 0.5345 - val_acc: 0.4482 - lr: 0.0018\n",
"Epoch 83/200\n",
"200/200 [==============================] - 182s 910ms/step - loss: 0.0025 - acc: 1.0000 - val_loss: 0.0024 - val_acc: 1.0000 - lr: 0.0016\n",
"Epoch 84/200\n",
"200/200 [==============================] - 181s 907ms/step - loss: 0.0023 - acc: 1.0000 - val_loss: 0.0022 - val_acc: 1.0000 - lr: 0.0014\n",
"Epoch 85/200\n",
"200/200 [==============================] - 186s 930ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0021 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 86/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0020 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 87/200\n",
"200/200 [==============================] - 182s 910ms/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 88/200\n",
"200/200 [==============================] - 183s 915ms/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0018 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 89/200\n",
"200/200 [==============================] - 183s 914ms/step - loss: 0.0018 - acc: 1.0000 - val_loss: 0.0018 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 90/200\n",
"200/200 [==============================] - 182s 911ms/step - loss: 0.0018 - acc: 1.0000 - val_loss: 0.0018 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 91/200\n",
"200/200 [==============================] - 182s 912ms/step - loss: 0.0017 - acc: 1.0000 - val_loss: 0.0016 - val_acc: 1.0000 - lr: 0.0020\n",
"Epoch 92/200\n",
"200/200 [==============================] - 182s 910ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 0.0018\n",
"Epoch 93/200\n",
"200/200 [==============================] - 182s 911ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 0.0016\n",
"Epoch 94/200\n",
"200/200 [==============================] - 185s 925ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 0.0014\n",
"Epoch 95/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 0.0011 - acc: 1.0000 - val_loss: 0.0011 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 96/200\n",
"200/200 [==============================] - 180s 902ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 0.0010 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 97/200\n",
"200/200 [==============================] - 180s 902ms/step - loss: 9.7535e-04 - acc: 1.0000 - val_loss: 9.5283e-04 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 98/200\n",
"200/200 [==============================] - 180s 901ms/step - loss: 9.2974e-04 - acc: 1.0000 - val_loss: 9.1339e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 99/200\n",
"200/200 [==============================] - 181s 907ms/step - loss: 8.9887e-04 - acc: 1.0000 - val_loss: 8.8789e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 100/200\n",
"200/200 [==============================] - 187s 934ms/step - loss: 8.8294e-04 - acc: 1.0000 - val_loss: 8.7880e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 101/200\n",
"200/200 [==============================] - 180s 902ms/step - loss: 0.0012 - acc: 0.9999 - val_loss: 0.0013 - val_acc: 0.9998 - lr: 0.0020\n",
"Epoch 102/200\n",
"200/200 [==============================] - 180s 902ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 9.3628e-04 - val_acc: 1.0000 - lr: 0.0018\n",
"Epoch 103/200\n",
"200/200 [==============================] - 179s 897ms/step - loss: 8.4824e-04 - acc: 1.0000 - val_loss: 8.5005e-04 - val_acc: 1.0000 - lr: 0.0016\n",
"Epoch 104/200\n",
"200/200 [==============================] - 180s 900ms/step - loss: 7.1619e-04 - acc: 1.0000 - val_loss: 7.0104e-04 - val_acc: 1.0000 - lr: 0.0014\n",
"Epoch 105/200\n",
"200/200 [==============================] - 214s 1s/step - loss: 6.2045e-04 - acc: 1.0000 - val_loss: 6.2361e-04 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 106/200\n",
"200/200 [==============================] - 183s 915ms/step - loss: 5.5093e-04 - acc: 1.0000 - val_loss: 5.3495e-04 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 107/200\n",
"200/200 [==============================] - 185s 926ms/step - loss: 4.9948e-04 - acc: 1.0000 - val_loss: 4.9466e-04 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 108/200\n",
"200/200 [==============================] - 187s 935ms/step - loss: 4.6283e-04 - acc: 1.0000 - val_loss: 5.3213e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 109/200\n",
"200/200 [==============================] - 188s 941ms/step - loss: 4.3871e-04 - acc: 1.0000 - val_loss: 4.3065e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 110/200\n",
"200/200 [==============================] - 194s 973ms/step - loss: 4.2627e-04 - acc: 1.0000 - val_loss: 4.2329e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 111/200\n",
"200/200 [==============================] - 206s 1s/step - loss: 3.6728e-04 - acc: 1.0000 - val_loss: 0.0929 - val_acc: 0.8698 - lr: 0.0020\n",
"Epoch 112/200\n",
"200/200 [==============================] - 214s 1s/step - loss: 2.8893e-04 - acc: 1.0000 - val_loss: 2.6767e-04 - val_acc: 1.0000 - lr: 0.0018\n",
"Epoch 113/200\n",
"200/200 [==============================] - 214s 1s/step - loss: 2.3273e-04 - acc: 1.0000 - val_loss: 0.1472 - val_acc: 0.6903 - lr: 0.0016\n",
"Epoch 114/200\n",
"200/200 [==============================] - 213s 1s/step - loss: 1.8670e-04 - acc: 1.0000 - val_loss: 0.1450 - val_acc: 0.7015 - lr: 0.0014\n",
"Epoch 115/200\n",
"200/200 [==============================] - 213s 1s/step - loss: 1.5695e-04 - acc: 1.0000 - val_loss: 0.0115 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 116/200\n",
"200/200 [==============================] - 214s 1s/step - loss: 1.3597e-04 - acc: 1.0000 - val_loss: 3.2649e-04 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 117/200\n",
"200/200 [==============================] - 214s 1s/step - loss: 1.2097e-04 - acc: 1.0000 - val_loss: 0.4788 - val_acc: 0.5018 - lr: 7.3333e-04\n",
"Epoch 118/200\n",
"200/200 [==============================] - 214s 1s/step - loss: 2.2300e-04 - acc: 1.0000 - val_loss: 2.4725e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 119/200\n",
"200/200 [==============================] - 214s 1s/step - loss: 2.1805e-04 - acc: 1.0000 - val_loss: 2.0893e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 120/200\n",
"200/200 [==============================] - 214s 1s/step - loss: 2.0614e-04 - acc: 1.0000 - val_loss: 2.0339e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 121/200\n",
"200/200 [==============================] - 185s 926ms/step - loss: 1.6093e-04 - acc: 1.0000 - val_loss: 0.4219 - val_acc: 0.5018 - lr: 0.0020\n",
"Epoch 122/200\n",
"200/200 [==============================] - 185s 926ms/step - loss: 1.1050e-04 - acc: 1.0000 - val_loss: 0.4460 - val_acc: 0.5018 - lr: 0.0018\n",
"Epoch 123/200\n",
"200/200 [==============================] - 183s 914ms/step - loss: 3.3806e-04 - acc: 1.0000 - val_loss: 2.9386e-04 - val_acc: 1.0000 - lr: 0.0016\n",
"Epoch 124/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 1.9949e-04 - acc: 1.0000 - val_loss: 0.4264 - val_acc: 0.5018 - lr: 0.0014\n",
"Epoch 125/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 1.4096e-04 - acc: 1.0000 - val_loss: 0.2692 - val_acc: 0.5107 - lr: 0.0012\n",
"Epoch 126/200\n",
"200/200 [==============================] - 181s 903ms/step - loss: 1.0977e-04 - acc: 1.0000 - val_loss: 0.0105 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 127/200\n",
"200/200 [==============================] - 182s 909ms/step - loss: 9.1792e-05 - acc: 1.0000 - val_loss: 4.5129e-04 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 128/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 8.0692e-05 - acc: 1.0000 - val_loss: 1.1738e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 129/200\n",
"200/200 [==============================] - 181s 907ms/step - loss: 7.4031e-05 - acc: 1.0000 - val_loss: 7.6682e-05 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 130/200\n",
"200/200 [==============================] - 181s 906ms/step - loss: 7.0827e-05 - acc: 1.0000 - val_loss: 7.0510e-05 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 131/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 5.6762e-05 - acc: 1.0000 - val_loss: 0.4818 - val_acc: 0.5018 - lr: 0.0020\n",
"Epoch 132/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 3.9235e-05 - acc: 1.0000 - val_loss: 0.4837 - val_acc: 0.5018 - lr: 0.0018\n",
"Epoch 133/200\n",
"200/200 [==============================] - 181s 907ms/step - loss: 2.9483e-05 - acc: 1.0000 - val_loss: 0.4621 - val_acc: 0.5018 - lr: 0.0016\n",
"Epoch 134/200\n",
"200/200 [==============================] - 181s 906ms/step - loss: 8.8404e-04 - acc: 0.9999 - val_loss: 9.7526e-04 - val_acc: 1.0000 - lr: 0.0014\n",
"Epoch 135/200\n",
"200/200 [==============================] - 181s 906ms/step - loss: 8.3623e-04 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 136/200\n",
"200/200 [==============================] - 181s 906ms/step - loss: 6.6218e-04 - acc: 1.0000 - val_loss: 0.0236 - val_acc: 0.9904 - lr: 9.4444e-04\n",
"Epoch 137/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 5.6179e-04 - acc: 1.0000 - val_loss: 8.9467e-04 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 138/200\n",
"200/200 [==============================] - 181s 906ms/step - loss: 4.9843e-04 - acc: 1.0000 - val_loss: 5.4970e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 139/200\n",
"200/200 [==============================] - 181s 906ms/step - loss: 4.6030e-04 - acc: 1.0000 - val_loss: 4.5565e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 140/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 4.4187e-04 - acc: 1.0000 - val_loss: 4.3794e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 141/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 3.5985e-04 - acc: 1.0000 - val_loss: 0.4717 - val_acc: 0.5018 - lr: 0.0020\n",
"Epoch 142/200\n",
"200/200 [==============================] - 212s 1s/step - loss: 2.5639e-04 - acc: 1.0000 - val_loss: 0.3537 - val_acc: 0.5018 - lr: 0.0018\n",
"Epoch 143/200\n",
"200/200 [==============================] - 190s 952ms/step - loss: 1.9486e-04 - acc: 1.0000 - val_loss: 0.0238 - val_acc: 0.9944 - lr: 0.0016\n",
"Epoch 144/200\n",
"200/200 [==============================] - 192s 961ms/step - loss: 1.5588e-04 - acc: 1.0000 - val_loss: 5.5978e-04 - val_acc: 0.9998 - lr: 0.0014\n",
"Epoch 145/200\n",
"200/200 [==============================] - 181s 906ms/step - loss: 1.2962e-04 - acc: 1.0000 - val_loss: 9.1726e-04 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 146/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 1.1111e-04 - acc: 1.0000 - val_loss: 2.5750e-04 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 147/200\n",
"200/200 [==============================] - 181s 903ms/step - loss: 9.8085e-05 - acc: 1.0000 - val_loss: 9.7593e-05 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 148/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 1.1093e-04 - acc: 1.0000 - val_loss: 0.6840 - val_acc: 0.2697 - lr: 5.2222e-04\n",
"Epoch 149/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 1.7264e-04 - acc: 1.0000 - val_loss: 1.5816e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 150/200\n",
"200/200 [==============================] - 180s 902ms/step - loss: 1.5343e-04 - acc: 1.0000 - val_loss: 1.4886e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 151/200\n",
"200/200 [==============================] - 180s 900ms/step - loss: 1.1442e-04 - acc: 1.0000 - val_loss: 0.4817 - val_acc: 0.5018 - lr: 0.0020\n",
"Epoch 152/200\n",
"200/200 [==============================] - 180s 900ms/step - loss: 7.5607e-05 - acc: 1.0000 - val_loss: 0.4857 - val_acc: 0.5018 - lr: 0.0018\n",
"Epoch 153/200\n",
"200/200 [==============================] - 180s 900ms/step - loss: 5.0521e-05 - acc: 1.0000 - val_loss: 0.4959 - val_acc: 0.5018 - lr: 0.0016\n",
"Epoch 154/200\n",
"200/200 [==============================] - 182s 909ms/step - loss: 3.6988e-05 - acc: 1.0000 - val_loss: 0.4636 - val_acc: 0.5018 - lr: 0.0014\n",
"Epoch 155/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 2.9382e-05 - acc: 1.0000 - val_loss: 0.1145 - val_acc: 0.8209 - lr: 0.0012\n",
"Epoch 156/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 2.4769e-05 - acc: 1.0000 - val_loss: 5.0902e-04 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 157/200\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"200/200 [==============================] - 181s 904ms/step - loss: 2.1742e-05 - acc: 1.0000 - val_loss: 4.3060e-05 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 158/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 1.9691e-05 - acc: 1.0000 - val_loss: 2.2383e-05 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 159/200\n",
"200/200 [==============================] - 181s 903ms/step - loss: 1.8412e-05 - acc: 1.0000 - val_loss: 1.8346e-05 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 160/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 1.7785e-05 - acc: 1.0000 - val_loss: 1.7664e-05 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 161/200\n",
"200/200 [==============================] - 181s 904ms/step - loss: 0.0051 - acc: 0.9974 - val_loss: 0.0642 - val_acc: 0.9316 - lr: 0.0020\n",
"Epoch 162/200\n",
"200/200 [==============================] - 181s 907ms/step - loss: 0.0029 - acc: 1.0000 - val_loss: 0.0028 - val_acc: 1.0000 - lr: 0.0018\n",
"Epoch 163/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 0.0027 - acc: 1.0000 - val_loss: 0.0026 - val_acc: 1.0000 - lr: 0.0016\n",
"Epoch 164/200\n",
"200/200 [==============================] - 181s 906ms/step - loss: 0.0025 - acc: 1.0000 - val_loss: 0.0025 - val_acc: 1.0000 - lr: 0.0014\n",
"Epoch 165/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 0.0024 - acc: 1.0000 - val_loss: 0.0023 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 166/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 0.0023 - acc: 1.0000 - val_loss: 0.0022 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 167/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0022 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 168/200\n",
"200/200 [==============================] - 181s 907ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0021 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 169/200\n",
"200/200 [==============================] - 181s 905ms/step - loss: 0.0021 - acc: 1.0000 - val_loss: 0.0021 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 170/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 0.0021 - acc: 1.0000 - val_loss: 0.0021 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 171/200\n",
"200/200 [==============================] - 182s 908ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 0.0020\n",
"Epoch 172/200\n",
"200/200 [==============================] - 182s 911ms/step - loss: 0.0018 - acc: 1.0000 - val_loss: 0.0017 - val_acc: 1.0000 - lr: 0.0018\n",
"Epoch 173/200\n",
"200/200 [==============================] - 183s 915ms/step - loss: 0.0017 - acc: 1.0000 - val_loss: 0.0016 - val_acc: 1.0000 - lr: 0.0016\n",
"Epoch 174/200\n",
"200/200 [==============================] - 183s 917ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 0.0014\n",
"Epoch 175/200\n",
"200/200 [==============================] - 184s 920ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0014 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 176/200\n",
"200/200 [==============================] - 184s 918ms/step - loss: 0.0014 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 177/200\n",
"200/200 [==============================] - 183s 915ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 178/200\n",
"200/200 [==============================] - 183s 913ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 179/200\n",
"200/200 [==============================] - 182s 912ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 180/200\n",
"200/200 [==============================] - 184s 918ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 181/200\n",
"200/200 [==============================] - 187s 934ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0011 - val_acc: 1.0000 - lr: 0.0020\n",
"Epoch 182/200\n",
"200/200 [==============================] - 185s 927ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 9.6330e-04 - val_acc: 1.0000 - lr: 0.0018\n",
"Epoch 183/200\n",
"200/200 [==============================] - 185s 926ms/step - loss: 9.1766e-04 - acc: 1.0000 - val_loss: 8.7060e-04 - val_acc: 1.0000 - lr: 0.0016\n",
"Epoch 184/200\n",
"200/200 [==============================] - 185s 927ms/step - loss: 8.3431e-04 - acc: 1.0000 - val_loss: 7.9592e-04 - val_acc: 1.0000 - lr: 0.0014\n",
"Epoch 185/200\n",
"200/200 [==============================] - 185s 926ms/step - loss: 7.6764e-04 - acc: 1.0000 - val_loss: 7.3644e-04 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 186/200\n",
"200/200 [==============================] - 185s 927ms/step - loss: 7.1441e-04 - acc: 1.0000 - val_loss: 6.8889e-04 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 187/200\n",
"200/200 [==============================] - 185s 926ms/step - loss: 6.7345e-04 - acc: 1.0000 - val_loss: 6.5429e-04 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 188/200\n",
"200/200 [==============================] - 185s 925ms/step - loss: 6.4192e-04 - acc: 1.0000 - val_loss: 6.2633e-04 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 189/200\n",
"200/200 [==============================] - 185s 926ms/step - loss: 6.2029e-04 - acc: 1.0000 - val_loss: 6.0985e-04 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 190/200\n",
"200/200 [==============================] - 185s 926ms/step - loss: 6.0916e-04 - acc: 1.0000 - val_loss: 6.0429e-04 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Epoch 191/200\n",
"200/200 [==============================] - 184s 920ms/step - loss: 5.5169e-04 - acc: 1.0000 - val_loss: 5.0742e-04 - val_acc: 1.0000 - lr: 0.0020\n",
"Epoch 192/200\n",
"200/200 [==============================] - 184s 922ms/step - loss: 0.0027 - acc: 0.9986 - val_loss: 0.5014 - val_acc: 0.5005 - lr: 0.0018\n",
"Epoch 193/200\n",
"200/200 [==============================] - 184s 921ms/step - loss: 0.0024 - acc: 1.0000 - val_loss: 0.0023 - val_acc: 1.0000 - lr: 0.0016\n",
"Epoch 194/200\n",
"200/200 [==============================] - 185s 923ms/step - loss: 0.0023 - acc: 1.0000 - val_loss: 0.0022 - val_acc: 1.0000 - lr: 0.0014\n",
"Epoch 195/200\n",
"200/200 [==============================] - 183s 916ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0021 - val_acc: 1.0000 - lr: 0.0012\n",
"Epoch 196/200\n",
"200/200 [==============================] - 183s 917ms/step - loss: 0.0021 - acc: 1.0000 - val_loss: 0.0020 - val_acc: 1.0000 - lr: 9.4444e-04\n",
"Epoch 197/200\n",
"200/200 [==============================] - 183s 917ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0020 - val_acc: 1.0000 - lr: 7.3333e-04\n",
"Epoch 198/200\n",
"200/200 [==============================] - 184s 918ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 5.2222e-04\n",
"Epoch 199/200\n",
"200/200 [==============================] - 184s 918ms/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 3.1111e-04\n",
"Epoch 200/200\n",
"200/200 [==============================] - 183s 917ms/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 1.0000e-04\n",
"Best validation accuracy: 1.0\n"
]
}
],
"source": [
"#14#Training the Model\n",
"num_epochs = 200\n",
"depth = 10\n",
"trained_net, history = train_LCB_distinguisher(num_epochs, num_rounds, depth)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"ghor_Rk_0000_836F_Round_10_depth_10.json\n"
]
}
],
"source": [
"#15#Create JSON File \n",
"# Convert the model architecture to JSON format\n",
"import json\n",
"from keras.models import model_from_json\n",
"model_json = trained_net.to_json()\n",
"\n",
" # Save the model architecture as a JSON file (optional)\n",
"filename = f'ghor_Rk_0000_836F_Round_{num_rounds}_depth_10.json'\n",
"print(filename)\n",
"with open(filename, \"w\") as json_file:\n",
" json.dump(json.loads(model_json), json_file, indent=4)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"#16#Evaluate Function\n",
"def evaluate(net,X,Y):\n",
" Z = net.predict(X,batch_size=10000).flatten();\n",
" Zbin = (Z > 0.5);\n",
" diff = Y - Z; mse = np.mean(diff*diff);\n",
" n = len(Z); n0 = np.sum(Y==0); n1 = np.sum(Y==1);\n",
" acc = np.sum(Zbin == Y) / n;\n",
" tpr = np.sum(Zbin[Y==1]) / n1;\n",
" tnr = np.sum(Zbin[Y==0] == 0) / n0;\n",
" mreal = np.median(Z[Y==1]);\n",
" high_random = np.sum(Z[Y==0] > mreal) / n0;\n",
" print(\"Accuracy: \", acc, \"TPR: \", tpr, \"TNR: \", tnr, \"MSE:\", mse);\n",
" print(\"Percentage of random pairs with score higher than median of real pairs:\", 100*high_random);"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"10/10 [==============================] - 3s 273ms/step\n",
"Accuracy: 1.0 TPR: 1.0 TNR: 1.0 MSE: 3.327519e-07\n",
"Percentage of random pairs with score higher than median of real pairs: 0.0\n"
]
}
],
"source": [
"#17#Evaluate Function Call\n",
"import numpy as np\n",
"\n",
"from keras.models import model_from_json\n",
"\n",
"#load distinguishers\n",
"json_file = open('ghor_Rk_0000_836F_Round_10_depth_10.json','r');\n",
"json_model = json_file.read();\n",
"\n",
"net10 = model_from_json(json_model);\n",
"\n",
"net10.load_weights('ghor_Rk_0000_836F_Round_10_depth_10.h5');\n",
"\n",
"X_test_stacked, Y_test_stacked = make_train_data(100000, num_rounds)\n",
"evaluate(net10, X_test_stacked, Y_test_stacked);\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
},
"vscode": {
"interpreter": {
"hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
%% Cell type:code id: tags:
```
python
#1#Header
import
csv
import
numpy
as
np
import
os
from
os
import
urandom
from
keras.models
import
model_from_json
```
%% Cell type:code id: tags:
```
python
#2#Defining Global Variables
num_rounds
=
10
m
=
0
o
=
0
```
%% Cell type:code id: tags:
```
python
#3#Defining WORDSIZE
def
WORD_SIZE
():
return
(
16
);
```
%% Cell type:code id: tags:
```
python
#4#Defining S-Box
s_box_mapping_np
=
np
.
array
([
0
,
4
,
1
,
5
,
2
,
6
,
3
,
7
,
8
,
12
,
9
,
13
,
10
,
14
,
11
,
15
],
dtype
=
np
.
uint8
)
def
s_box
(
input_bits
):
input_bits_int
=
int
(
input_bits
)
output_bits_int
=
s_box_mapping_np
[
input_bits_int
]
return
output_bits_int
```
%% Cell type:code id: tags:
```
python
#5#Defining P-Box
def
decimal_to_binary_list
(
value
,
num_bits
=
4
):
return
np
.
array
([
int
(
x
)
for
x
in
format
(
value
,
f
'
0
{
num_bits
}
b
'
)],
dtype
=
np
.
uint8
)
def
p_box
(
c_decimal
,
d_decimal
):
c
=
decimal_to_binary_list
(
c_decimal
)
d
=
decimal_to_binary_list
(
d_decimal
)
e
=
np
.
zeros
(
8
,
dtype
=
np
.
uint8
)
e
[
0
]
=
c
[
0
]
e
[
1
]
=
d
[
0
]
e
[
2
]
=
c
[
3
]
e
[
3
]
=
d
[
3
]
e
[
4
]
=
c
[
1
]
e
[
5
]
=
d
[
1
]
e
[
6
]
=
c
[
2
]
e
[
7
]
=
d
[
2
]
return
e
```
%% Cell type:code id: tags:
```
python
#6#Defining L-Box
def
l_box
(
f
,
g
):
if
len
(
f
)
!=
8
or
len
(
g
)
!=
8
:
raise
ValueError
(
"
Both input arrays f and g should have exactly 8 elements
"
)
h
=
np
.
zeros
(
16
,
dtype
=
np
.
uint8
)
h
[
0
]
=
f
[
0
]
h
[
1
]
=
g
[
0
]
h
[
2
]
=
f
[
7
]
h
[
3
]
=
g
[
7
]
h
[
4
]
=
f
[
1
]
h
[
5
]
=
g
[
1
]
h
[
6
]
=
f
[
6
]
h
[
7
]
=
g
[
6
]
h
[
8
]
=
f
[
2
]
h
[
9
]
=
g
[
2
]
h
[
10
]
=
f
[
5
]
h
[
11
]
=
g
[
5
]
h
[
12
]
=
f
[
3
]
h
[
13
]
=
g
[
3
]
h
[
14
]
=
f
[
4
]
h
[
15
]
=
g
[
4
]
#print(h)
return
h
```
%% Cell type:code id: tags:
```
python
#7#Defining F-function for Right Side of Plaintext
def
binary_array_to_integer
(
output
):
int_output
=
''
.
join
(
map
(
str
,
output
))
return
int
(
int_output
,
2
)
def
f_function
(
x
,
key
,
d
):
q
=
0
global
m
if
isinstance
(
x
,
int
):
x
=
[
x
]
input_parts
=
np
.
zeros
((
len
(
x
),
4
),
dtype
=
np
.
uint16
)
for
i
,
val
in
enumerate
(
x
):
input_parts
[
i
]
=
np
.
array
([
val
>>
12
,
(
val
>>
8
)
&
0xF
,
(
val
>>
4
)
&
0xF
,
val
&
0xF
])
s_box_outputs
=
np
.
array
([[
s_box
(
element
)
for
element
in
part
]
for
part
in
input_parts
])
p_box_outputs
=
np
.
zeros
((
len
(
x
),
2
,
8
),
dtype
=
np
.
uint8
)
for
i
in
range
(
len
(
x
)):
p_box_outputs
[
i
]
=
np
.
array
([
p_box
(
s_box_outputs
[
i
][
0
],
s_box_outputs
[
i
][
1
]),
p_box
(
s_box_outputs
[
i
][
2
],
s_box_outputs
[
i
][
3
])])
final_outputs
=
np
.
zeros
(
len
(
x
),
dtype
=
np
.
uint32
)
for
i
in
range
(
len
(
x
)):
final_output
=
np
.
array
(
l_box
(
p_box_outputs
[
i
][
0
],
p_box_outputs
[
i
][
1
]))
k
=
key
[
q
][(
m
+
1
)
%
4
]
output
=
final_output
^
k
output
=
binary_array_to_integer
(
output
)
final_outputs
[
i
]
=
output
q
+=
1
if
(
m
<
2
):
m
+=
2
else
:
m
=
0
return
final_outputs
```
%% Cell type:code id: tags:
```
python
#8#Defining F-function for Left Side of Plaintext
def
binary_array_to_integer
(
output
):
int_output
=
''
.
join
(
map
(
str
,
output
))
return
int
(
int_output
,
2
)
def
ff_function
(
x
,
key
,
d
):
q
=
0
global
o
if
isinstance
(
x
,
int
):
x
=
[
x
]
input_parts
=
np
.
zeros
((
len
(
x
),
4
),
dtype
=
np
.
uint16
)
for
i
,
val
in
enumerate
(
x
):
input_parts
[
i
]
=
np
.
array
([
val
>>
12
,
(
val
>>
8
)
&
0xF
,
(
val
>>
4
)
&
0xF
,
val
&
0xF
])
s_box_outputs
=
np
.
array
([[
s_box
(
element
)
for
element
in
part
]
for
part
in
input_parts
])
p_box_outputs
=
np
.
zeros
((
len
(
x
),
2
,
8
),
dtype
=
np
.
uint8
)
for
i
in
range
(
len
(
x
)):
p_box_outputs
[
i
]
=
np
.
array
([
p_box
(
s_box_outputs
[
i
][
0
],
s_box_outputs
[
i
][
1
]),
p_box
(
s_box_outputs
[
i
][
2
],
s_box_outputs
[
i
][
3
])])
final_outputs
=
np
.
zeros
(
len
(
x
),
dtype
=
np
.
uint32
)
for
i
in
range
(
len
(
x
)):
final_output
=
np
.
array
(
l_box
(
p_box_outputs
[
i
][
0
],
p_box_outputs
[
i
][
1
]))
k
=
key
[
q
][
o
%
4
]
output
=
final_output
^
k
output
=
binary_array_to_integer
(
output
)
final_outputs
[
i
]
=
output
q
+=
1
if
(
o
<
2
):
o
+=
2
else
:
o
=
0
return
final_outputs
```
%% Cell type:code id: tags:
```
python
#9#Convert the ciphertext pairs into Binary array
def
convert_to_binary
(
row
):
bin_array
=
np
.
zeros
(
64
,
dtype
=
np
.
uint8
)
for
i
,
num
in
enumerate
(
row
):
binary_str
=
format
(
num
,
'
016b
'
)
for
j
,
b
in
enumerate
(
binary_str
):
bin_array
[
i
*
16
+
j
]
=
int
(
b
)
return
bin_array
```
%% Cell type:code id: tags:
```
python
#10#Encryption Function
def
lcb_encrypt
(
plaintext
,
key
,
rounds
,
d
):
left_plaintext
=
np
.
uint16
(
plaintext
[
0
])
right_plaintext
=
np
.
uint16
(
plaintext
[
1
])
L
,
R
=
left_plaintext
,
right_plaintext
n
=
0
while
n
<
rounds
:
L
,
R
=
f_function
(
R
,
key
,
d
),
ff_function
(
L
,
key
,
d
)
n
+=
1
return
(
L
,
R
)
```
%% Cell type:code id: tags:
``` python
#11#Function for generation of keys
import random

def generate_hex_keys(num_keys, length=16):
    hex_chars = "0123456789ABCDEF"
    keys_str = ["".join(random.choices(hex_chars, k=length)) for _ in range(num_keys)]
    return keys_str

def to_binary(value, bits):
    return format(value, f'0{bits}b')

def generate_round_keys(num_keys):
    random_keys_hex = generate_hex_keys(num_keys)
    round_keys = []
    for random_key_hex in random_keys_hex:
        random_key = int(random_key_hex, 16)
        K1 = (random_key >> 48) & 0xFFFF
        K2 = (random_key >> 32) & 0xFFFF
        K3 = (random_key >> 16) & 0xFFFF
        K4 = random_key & 0xFFFF
        k1_bin = to_binary(K1, 16)
        k2_bin = to_binary(K2, 16)
        k3_bin = to_binary(K3, 16)
        k4_bin = to_binary(K4, 16)
        k1_np_array = np.array([int(bit) for bit in k1_bin])
        k2_np_array = np.array([int(bit) for bit in k2_bin])
        k3_np_array = np.array([int(bit) for bit in k3_bin])
        k4_np_array = np.array([int(bit) for bit in k4_bin])
        round_key = np.array([k1_np_array, k2_np_array, k3_np_array, k4_np_array])
        round_keys.append(round_key)
    round_key = np.array(round_keys)
    return round_key
```
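%% Cell type:markdown id: tags:
Putting the pieces together: generate one 64-bit master key, split it into four 16-bit subkeys, and encrypt a single plaintext pair. This is an illustrative sketch; it assumes the cells above have been run, and the key array must have one entry per plaintext because `f_function`/`ff_function` index it with the per-element counter `q`.
%% Cell type:code id: tags:
``` python
# Hypothetical single-block encryption sketch.
rk = generate_round_keys(1)                  # shape (1, 4, 16)
pt_l = np.array([0x0123], dtype=np.uint16)
pt_r = np.array([0x4567], dtype=np.uint16)
ct_l, ct_r = lcb_encrypt((pt_l, pt_r), rk, num_rounds, 1)
print(hex(int(ct_l[0])), hex(int(ct_r[0])))  # the two 16-bit ciphertext halves
```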
%% Cell type:code id: tags:
``` python
#12#Make dataset
def make_train_data(n, nr, diff=(0, 0x836F)):
    Y = np.frombuffer(urandom(n), dtype=np.uint8)
    Y = Y & 1
    plaintext = np.frombuffer(urandom(4 * n), dtype=np.uint32)
    plain0l = np.empty(n, dtype=np.uint16)
    plain0r = np.empty(n, dtype=np.uint16)
    for i in range(n):
        plain0l[i] = (plaintext[i] >> 16) & 0xffff
        plain0r[i] = plaintext[i] & 0xffff
    plain1l = plain0l ^ diff[0]
    plain1r = plain0r ^ diff[1]
    num_rand_samples = np.sum(Y == 0)
    plain1l[Y == 0] = np.frombuffer(urandom(2 * num_rand_samples), dtype=np.uint16)
    plain1r[Y == 0] = np.frombuffer(urandom(2 * num_rand_samples), dtype=np.uint16)
    round_key = generate_round_keys(n)
    ctdata0l, ctdata0r = lcb_encrypt((plain0l, plain0r), round_key, nr, n)
    ctdata1l, ctdata1r = lcb_encrypt((plain1l, plain1r), round_key, nr, n)
    ctdata = np.vstack((ctdata0l, ctdata0r, ctdata1l, ctdata1r)).T
    X = np.array([convert_to_binary(row) for row in ctdata])
    with open("VDataset_NewP.csv", "w", newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["plain0l", "plain0r", "plain1l", "plain1r", "Y"])
        for i in range(n):
            writer.writerow([plain0l[i], plain0r[i], plain1l[i], plain1r[i], Y[i]])
    with open("VDataset_NewC.csv", "w", newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["ctdata0l", "ctdata0r", "ctdata1l", "ctdata1r", "Y"])
        for i in range(n):
            writer.writerow([ctdata0l[i], ctdata0r[i], ctdata1l[i], ctdata1r[i], Y[i]])
    return (X, Y)
```
%% Cell type:code id: tags:
``` python
make_train_data(10**5, num_rounds)
```
%% Output
(array([[1, 1, 0, ..., 0, 1, 0],
[0, 0, 0, ..., 1, 0, 1],
[1, 1, 1, ..., 1, 1, 0],
...,
[0, 1, 0, ..., 0, 1, 0],
[1, 0, 1, ..., 0, 0, 0],
[1, 0, 0, ..., 1, 0, 0]], dtype=uint8),
array([1, 1, 1, ..., 0, 0, 0], dtype=uint8))
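%% Cell type:markdown id: tags:
The labels follow the usual real-vs-random convention for neural distinguishers: Y = 1 marks a ciphertext pair whose plaintexts differ by the fixed difference (0, 0x836F), while Y = 0 marks a pair built from unrelated random plaintexts. An illustrative way to inspect a fresh draw (note that every call rewrites VDataset_NewP.csv and VDataset_NewC.csv):
%% Cell type:code id: tags:
``` python
# Hypothetical inspection of a small dataset draw.
X_small, Y_small = make_train_data(1000, num_rounds)
print(X_small.shape)   # (1000, 64): four 16-bit ciphertext words as bits
print(Y_small.mean())  # close to 0.5: the labels are unbiased coin flips
```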
%% Cell type:code id: tags:
``` python
#13#Creation of Model
from pickle import dump
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.models import Model
from keras.optimizers import Adam
from keras.layers import Dense, Conv1D, Input, Reshape, Permute, Add, Flatten, BatchNormalization, Activation
from keras import backend as K
from keras.regularizers import l2

bs = 5000
wdir = './freshly_trained_nets/'

def cyclic_lr(num_epochs, high_lr, low_lr):
    res = lambda i: low_lr + ((num_epochs - 1) - i % num_epochs) / (num_epochs - 1) * (high_lr - low_lr)
    return res

def make_checkpoint(datei):
    res = ModelCheckpoint(datei, monitor='val_loss', save_best_only=True)
    return res

#make residual tower of convolutional blocks
def make_resnet(num_blocks=2, num_filters=32, num_outputs=1, d1=64, d2=64, word_size=16,
                ks=3, depth=5, reg_param=0.0001, final_activation='sigmoid'):
    #Input and preprocessing layers
    inp = Input(shape=(num_blocks * word_size * 2,))
    rs = Reshape((2 * num_blocks, word_size))(inp)
    perm = Permute((2, 1))(rs)
    #add a single residual layer that will expand the data to num_filters channels
    #this is a bit-sliced layer
    conv0 = Conv1D(num_filters, kernel_size=1, padding='same', kernel_regularizer=l2(reg_param))(perm)
    conv0 = BatchNormalization()(conv0)
    conv0 = Activation('relu')(conv0)
    #add residual blocks
    shortcut = conv0
    for i in range(depth):
        conv1 = Conv1D(num_filters, kernel_size=ks, padding='same', kernel_regularizer=l2(reg_param))(shortcut)
        conv1 = BatchNormalization()(conv1)
        conv1 = Activation('relu')(conv1)
        conv2 = Conv1D(num_filters, kernel_size=ks, padding='same', kernel_regularizer=l2(reg_param))(conv1)
        conv2 = BatchNormalization()(conv2)
        conv2 = Activation('relu')(conv2)
        shortcut = Add()([shortcut, conv2])
    #add prediction head
    flat1 = Flatten()(shortcut)
    dense1 = Dense(d1, kernel_regularizer=l2(reg_param))(flat1)
    dense1 = BatchNormalization()(dense1)
    dense1 = Activation('relu')(dense1)
    dense2 = Dense(d2, kernel_regularizer=l2(reg_param))(dense1)
    dense2 = BatchNormalization()(dense2)
    dense2 = Activation('relu')(dense2)
    out = Dense(num_outputs, activation=final_activation, kernel_regularizer=l2(reg_param))(dense2)
    model = Model(inputs=inp, outputs=out)
    return model

def train_LCB_distinguisher(num_epochs, num_rounds, depth):
    #create the network
    print(num_rounds)
    print(depth)
    net = make_resnet(depth=depth, reg_param=10**-5)
    net.compile(optimizer='adam', loss='mse', metrics=['acc'])
    #generate training and validation data
    X, Y = make_train_data(10**6, num_rounds)
    X_eval, Y_eval = make_train_data(10**5, num_rounds)
    #set up model checkpoint
    check = make_checkpoint(wdir + 'ghor_Rk_0000_836F_Round_' + str(num_rounds) + '_depth_' + str(depth) + '.h5')
    #create learning rate schedule
    lr = LearningRateScheduler(cyclic_lr(10, 0.002, 0.0001))
    #train and evaluate
    #print(X_eval)
    h = net.fit(X, Y, epochs=num_epochs, batch_size=bs,
                validation_data=(X_eval, Y_eval), callbacks=[lr, check])
    np.save(wdir + 'h' + str(num_rounds) + 'r_depth' + str(depth) + '.npy', h.history['val_acc'])
    #give the loss history its own filename; the original saved both arrays to the same path,
    #so the val_acc history was silently overwritten
    np.save(wdir + 'h' + str(num_rounds) + 'r_depth' + str(depth) + '_val_loss.npy', h.history['val_loss'])
    dump(h.history, open(wdir + 'hist' + str(num_rounds) + 'r_depth' + str(depth) + '.p', 'wb'))
    print("Best validation accuracy: ", np.max(h.history['val_acc']))
    return (net, h)
```
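%% Cell type:markdown id: tags:
The cyclic schedule decays linearly from high_lr to low_lr over ten epochs and then resets, which is exactly the `lr:` column in the training log below. A quick illustrative check, assuming the cell above has been run:
%% Cell type:code id: tags:
``` python
# Hypothetical check of the cyclic learning-rate schedule.
sched = cyclic_lr(10, 0.002, 0.0001)
print([round(sched(i), 6) for i in range(12)])
# first cycle: 0.002, 0.001789, ..., 0.0001, then it wraps back to 0.002
```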
%% Cell type:code id: tags:
``` python
#14#Training the Model
num_epochs = 200
depth = 10
trained_net, history = train_LCB_distinguisher(num_epochs, num_rounds, depth)
```
%% Output
10
10
Epoch 1/200
200/200 [==============================] - 185s 909ms/step - loss: 0.0114 - acc: 0.9959 - val_loss: 0.0240 - val_acc: 0.9776 - lr: 0.0020
Epoch 2/200
200/200 [==============================] - 182s 909ms/step - loss: 0.0064 - acc: 1.0000 - val_loss: 0.0056 - val_acc: 0.9999 - lr: 0.0018
Epoch 3/200
200/200 [==============================] - 190s 951ms/step - loss: 0.0049 - acc: 1.0000 - val_loss: 0.0044 - val_acc: 1.0000 - lr: 0.0016
Epoch 4/200
200/200 [==============================] - 183s 916ms/step - loss: 0.0038 - acc: 1.0000 - val_loss: 0.0037 - val_acc: 1.0000 - lr: 0.0014
Epoch 5/200
200/200 [==============================] - 183s 914ms/step - loss: 0.0030 - acc: 1.0000 - val_loss: 0.0030 - val_acc: 1.0000 - lr: 0.0012
Epoch 6/200
200/200 [==============================] - 185s 923ms/step - loss: 0.0024 - acc: 1.0000 - val_loss: 0.0023 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 7/200
200/200 [==============================] - 183s 916ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 8/200
200/200 [==============================] - 180s 902ms/step - loss: 0.0017 - acc: 1.0000 - val_loss: 0.0017 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 9/200
200/200 [==============================] - 181s 903ms/step - loss: 0.0016 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 10/200
200/200 [==============================] - 181s 905ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 11/200
200/200 [==============================] - 181s 904ms/step - loss: 0.0011 - acc: 1.0000 - val_loss: 0.4747 - val_acc: 0.5018 - lr: 0.0020
Epoch 12/200
200/200 [==============================] - 181s 904ms/step - loss: 6.9676e-04 - acc: 1.0000 - val_loss: 0.1912 - val_acc: 0.6449 - lr: 0.0018
Epoch 13/200
200/200 [==============================] - 186s 931ms/step - loss: 5.0213e-04 - acc: 1.0000 - val_loss: 8.2996e-04 - val_acc: 0.9995 - lr: 0.0016
Epoch 14/200
200/200 [==============================] - 181s 908ms/step - loss: 3.9845e-04 - acc: 1.0000 - val_loss: 0.3903 - val_acc: 0.5018 - lr: 0.0014
Epoch 15/200
200/200 [==============================] - 183s 915ms/step - loss: 2.6665e-04 - acc: 1.0000 - val_loss: 0.4915 - val_acc: 0.5018 - lr: 0.0012
Epoch 16/200
200/200 [==============================] - 182s 910ms/step - loss: 2.0211e-04 - acc: 1.0000 - val_loss: 0.4945 - val_acc: 0.5018 - lr: 9.4444e-04
Epoch 17/200
200/200 [==============================] - 185s 926ms/step - loss: 1.6426e-04 - acc: 1.0000 - val_loss: 0.4659 - val_acc: 0.5018 - lr: 7.3333e-04
Epoch 18/200
200/200 [==============================] - 185s 926ms/step - loss: 1.4096e-04 - acc: 1.0000 - val_loss: 0.1450 - val_acc: 0.7232 - lr: 5.2222e-04
Epoch 19/200
200/200 [==============================] - 181s 904ms/step - loss: 1.2714e-04 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 20/200
200/200 [==============================] - 182s 908ms/step - loss: 1.2059e-04 - acc: 1.0000 - val_loss: 1.2217e-04 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 21/200
200/200 [==============================] - 181s 905ms/step - loss: 0.0044 - acc: 0.9980 - val_loss: 0.0059 - val_acc: 0.9974 - lr: 0.0020
Epoch 22/200
200/200 [==============================] - 181s 904ms/step - loss: 0.0037 - acc: 1.0000 - val_loss: 0.0036 - val_acc: 1.0000 - lr: 0.0018
Epoch 23/200
200/200 [==============================] - 181s 904ms/step - loss: 0.0034 - acc: 1.0000 - val_loss: 0.0033 - val_acc: 1.0000 - lr: 0.0016
Epoch 24/200
200/200 [==============================] - 181s 903ms/step - loss: 0.0032 - acc: 1.0000 - val_loss: 0.0031 - val_acc: 1.0000 - lr: 0.0014
Epoch 25/200
200/200 [==============================] - 181s 903ms/step - loss: 0.0031 - acc: 1.0000 - val_loss: 0.0030 - val_acc: 1.0000 - lr: 0.0012
Epoch 26/200
200/200 [==============================] - 182s 908ms/step - loss: 0.0029 - acc: 1.0000 - val_loss: 0.0029 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 27/200
200/200 [==============================] - 181s 905ms/step - loss: 0.0028 - acc: 1.0000 - val_loss: 0.0028 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 28/200
200/200 [==============================] - 181s 903ms/step - loss: 0.0027 - acc: 1.0000 - val_loss: 0.0027 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 29/200
200/200 [==============================] - 180s 902ms/step - loss: 0.0027 - acc: 1.0000 - val_loss: 0.0027 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 30/200
200/200 [==============================] - 181s 904ms/step - loss: 0.0026 - acc: 1.0000 - val_loss: 0.0026 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 31/200
200/200 [==============================] - 183s 917ms/step - loss: 0.0025 - acc: 1.0000 - val_loss: 0.0034 - val_acc: 1.0000 - lr: 0.0020
Epoch 32/200
200/200 [==============================] - 181s 905ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0025 - val_acc: 1.0000 - lr: 0.0018
Epoch 33/200
200/200 [==============================] - 182s 908ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 0.0016
Epoch 34/200
200/200 [==============================] - 181s 907ms/step - loss: 0.0018 - acc: 1.0000 - val_loss: 0.0018 - val_acc: 1.0000 - lr: 0.0014
Epoch 35/200
200/200 [==============================] - 181s 907ms/step - loss: 0.0017 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 0.0012
Epoch 36/200
200/200 [==============================] - 181s 905ms/step - loss: 0.0016 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 37/200
200/200 [==============================] - 182s 908ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 38/200
200/200 [==============================] - 182s 909ms/step - loss: 0.0014 - acc: 1.0000 - val_loss: 0.0014 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 39/200
200/200 [==============================] - 182s 908ms/step - loss: 0.0014 - acc: 1.0000 - val_loss: 0.0014 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 40/200
200/200 [==============================] - 181s 907ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 41/200
200/200 [==============================] - 182s 909ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0375 - val_acc: 0.9868 - lr: 0.0020
Epoch 42/200
200/200 [==============================] - 182s 908ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 0.1601 - val_acc: 0.6503 - lr: 0.0018
Epoch 43/200
200/200 [==============================] - 182s 909ms/step - loss: 8.7272e-04 - acc: 1.0000 - val_loss: 0.0358 - val_acc: 0.9965 - lr: 0.0016
Epoch 44/200
200/200 [==============================] - 199s 996ms/step - loss: 7.5967e-04 - acc: 1.0000 - val_loss: 0.2835 - val_acc: 0.5024 - lr: 0.0014
Epoch 45/200
200/200 [==============================] - 187s 934ms/step - loss: 6.7187e-04 - acc: 1.0000 - val_loss: 0.0131 - val_acc: 1.0000 - lr: 0.0012
Epoch 46/200
200/200 [==============================] - 182s 908ms/step - loss: 6.0448e-04 - acc: 1.0000 - val_loss: 0.0045 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 47/200
200/200 [==============================] - 187s 938ms/step - loss: 5.5327e-04 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 48/200
200/200 [==============================] - 204s 1s/step - loss: 5.1591e-04 - acc: 1.0000 - val_loss: 5.5068e-04 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 49/200
200/200 [==============================] - 191s 956ms/step - loss: 4.9090e-04 - acc: 1.0000 - val_loss: 5.0161e-04 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 50/200
200/200 [==============================] - 187s 934ms/step - loss: 4.7809e-04 - acc: 1.0000 - val_loss: 4.7770e-04 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 51/200
200/200 [==============================] - 181s 904ms/step - loss: 0.0014 - acc: 0.9998 - val_loss: 0.1151 - val_acc: 0.8641 - lr: 0.0020
Epoch 52/200
200/200 [==============================] - 187s 934ms/step - loss: 9.5738e-04 - acc: 1.0000 - val_loss: 0.3482 - val_acc: 0.5020 - lr: 0.0018
Epoch 53/200
200/200 [==============================] - 181s 904ms/step - loss: 6.7179e-04 - acc: 1.0000 - val_loss: 0.4190 - val_acc: 0.5018 - lr: 0.0016
Epoch 54/200
200/200 [==============================] - 181s 904ms/step - loss: 5.1030e-04 - acc: 1.0000 - val_loss: 0.3700 - val_acc: 0.5019 - lr: 0.0014
Epoch 55/200
200/200 [==============================] - 181s 904ms/step - loss: 4.0714e-04 - acc: 1.0000 - val_loss: 0.2277 - val_acc: 0.5630 - lr: 0.0012
Epoch 56/200
200/200 [==============================] - 181s 905ms/step - loss: 3.3943e-04 - acc: 1.0000 - val_loss: 0.0590 - val_acc: 0.9308 - lr: 9.4444e-04
Epoch 57/200
200/200 [==============================] - 181s 904ms/step - loss: 2.9440e-04 - acc: 1.0000 - val_loss: 0.0220 - val_acc: 0.9963 - lr: 7.3333e-04
Epoch 58/200
200/200 [==============================] - 181s 904ms/step - loss: 2.6228e-04 - acc: 1.0000 - val_loss: 4.3736e-04 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 59/200
200/200 [==============================] - 181s 904ms/step - loss: 2.4230e-04 - acc: 1.0000 - val_loss: 2.6776e-04 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 60/200
200/200 [==============================] - 181s 904ms/step - loss: 2.3241e-04 - acc: 1.0000 - val_loss: 2.3119e-04 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 61/200
200/200 [==============================] - 180s 902ms/step - loss: 3.1112e-04 - acc: 1.0000 - val_loss: 8.1905e-04 - val_acc: 0.9997 - lr: 0.0020
Epoch 62/200
200/200 [==============================] - 181s 903ms/step - loss: 3.3318e-04 - acc: 1.0000 - val_loss: 0.4971 - val_acc: 0.5018 - lr: 0.0018
Epoch 63/200
200/200 [==============================] - 183s 914ms/step - loss: 1.6852e-04 - acc: 1.0000 - val_loss: 0.4981 - val_acc: 0.5018 - lr: 0.0016
Epoch 64/200
200/200 [==============================] - 187s 934ms/step - loss: 1.1411e-04 - acc: 1.0000 - val_loss: 0.4975 - val_acc: 0.5018 - lr: 0.0014
Epoch 65/200
200/200 [==============================] - 189s 943ms/step - loss: 8.4985e-05 - acc: 1.0000 - val_loss: 0.4952 - val_acc: 0.5018 - lr: 0.0012
Epoch 66/200
200/200 [==============================] - 181s 904ms/step - loss: 1.0603e-04 - acc: 1.0000 - val_loss: 0.0023 - val_acc: 0.9974 - lr: 9.4444e-04
Epoch 67/200
200/200 [==============================] - 181s 905ms/step - loss: 1.2690e-04 - acc: 1.0000 - val_loss: 0.0074 - val_acc: 0.9999 - lr: 7.3333e-04
Epoch 68/200
200/200 [==============================] - 187s 933ms/step - loss: 8.4066e-05 - acc: 1.0000 - val_loss: 0.0879 - val_acc: 0.8975 - lr: 5.2222e-04
Epoch 69/200
200/200 [==============================] - 181s 907ms/step - loss: 6.8841e-05 - acc: 1.0000 - val_loss: 5.1311e-04 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 70/200
200/200 [==============================] - 181s 907ms/step - loss: 6.3039e-05 - acc: 1.0000 - val_loss: 6.8196e-05 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 71/200
200/200 [==============================] - 181s 906ms/step - loss: 2.2853e-04 - acc: 1.0000 - val_loss: 0.5024 - val_acc: 0.4984 - lr: 0.0020
Epoch 72/200
200/200 [==============================] - 180s 900ms/step - loss: 4.0886e-04 - acc: 1.0000 - val_loss: 0.4943 - val_acc: 0.5018 - lr: 0.0018
Epoch 73/200
200/200 [==============================] - 183s 913ms/step - loss: 1.9104e-04 - acc: 1.0000 - val_loss: 0.4975 - val_acc: 0.5018 - lr: 0.0016
Epoch 74/200
200/200 [==============================] - 182s 908ms/step - loss: 1.1183e-04 - acc: 1.0000 - val_loss: 0.4980 - val_acc: 0.5018 - lr: 0.0014
Epoch 75/200
200/200 [==============================] - 188s 939ms/step - loss: 6.2444e-05 - acc: 1.0000 - val_loss: 0.4977 - val_acc: 0.5018 - lr: 0.0012
Epoch 76/200
200/200 [==============================] - 189s 943ms/step - loss: 4.3502e-05 - acc: 1.0000 - val_loss: 0.4976 - val_acc: 0.5018 - lr: 9.4444e-04
Epoch 77/200
200/200 [==============================] - 181s 905ms/step - loss: 3.2875e-05 - acc: 1.0000 - val_loss: 0.4972 - val_acc: 0.5018 - lr: 7.3333e-04
Epoch 78/200
200/200 [==============================] - 181s 904ms/step - loss: 2.6406e-05 - acc: 1.0000 - val_loss: 0.4931 - val_acc: 0.5018 - lr: 5.2222e-04
Epoch 79/200
200/200 [==============================] - 181s 906ms/step - loss: 2.3363e-05 - acc: 1.0000 - val_loss: 0.4424 - val_acc: 0.5018 - lr: 3.1111e-04
Epoch 80/200
200/200 [==============================] - 184s 920ms/step - loss: 2.2037e-05 - acc: 1.0000 - val_loss: 2.0193e-04 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 81/200
200/200 [==============================] - 193s 966ms/step - loss: 1.7932e-05 - acc: 1.0000 - val_loss: 0.4637 - val_acc: 0.5018 - lr: 0.0020
Epoch 82/200
200/200 [==============================] - 189s 944ms/step - loss: 0.0020 - acc: 0.9994 - val_loss: 0.5345 - val_acc: 0.4482 - lr: 0.0018
Epoch 83/200
200/200 [==============================] - 182s 910ms/step - loss: 0.0025 - acc: 1.0000 - val_loss: 0.0024 - val_acc: 1.0000 - lr: 0.0016
Epoch 84/200
200/200 [==============================] - 181s 907ms/step - loss: 0.0023 - acc: 1.0000 - val_loss: 0.0022 - val_acc: 1.0000 - lr: 0.0014
Epoch 85/200
200/200 [==============================] - 186s 930ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0021 - val_acc: 1.0000 - lr: 0.0012
Epoch 86/200
200/200 [==============================] - 182s 908ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0020 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 87/200
200/200 [==============================] - 182s 910ms/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 88/200
200/200 [==============================] - 183s 915ms/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0018 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 89/200
200/200 [==============================] - 183s 914ms/step - loss: 0.0018 - acc: 1.0000 - val_loss: 0.0018 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 90/200
200/200 [==============================] - 182s 911ms/step - loss: 0.0018 - acc: 1.0000 - val_loss: 0.0018 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 91/200
200/200 [==============================] - 182s 912ms/step - loss: 0.0017 - acc: 1.0000 - val_loss: 0.0016 - val_acc: 1.0000 - lr: 0.0020
Epoch 92/200
200/200 [==============================] - 182s 910ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 0.0018
Epoch 93/200
200/200 [==============================] - 182s 911ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 0.0016
Epoch 94/200
200/200 [==============================] - 185s 925ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 0.0014
Epoch 95/200
200/200 [==============================] - 181s 904ms/step - loss: 0.0011 - acc: 1.0000 - val_loss: 0.0011 - val_acc: 1.0000 - lr: 0.0012
Epoch 96/200
200/200 [==============================] - 180s 902ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 0.0010 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 97/200
200/200 [==============================] - 180s 902ms/step - loss: 9.7535e-04 - acc: 1.0000 - val_loss: 9.5283e-04 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 98/200
200/200 [==============================] - 180s 901ms/step - loss: 9.2974e-04 - acc: 1.0000 - val_loss: 9.1339e-04 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 99/200
200/200 [==============================] - 181s 907ms/step - loss: 8.9887e-04 - acc: 1.0000 - val_loss: 8.8789e-04 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 100/200
200/200 [==============================] - 187s 934ms/step - loss: 8.8294e-04 - acc: 1.0000 - val_loss: 8.7880e-04 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 101/200
200/200 [==============================] - 180s 902ms/step - loss: 0.0012 - acc: 0.9999 - val_loss: 0.0013 - val_acc: 0.9998 - lr: 0.0020
Epoch 102/200
200/200 [==============================] - 180s 902ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 9.3628e-04 - val_acc: 1.0000 - lr: 0.0018
Epoch 103/200
200/200 [==============================] - 179s 897ms/step - loss: 8.4824e-04 - acc: 1.0000 - val_loss: 8.5005e-04 - val_acc: 1.0000 - lr: 0.0016
Epoch 104/200
200/200 [==============================] - 180s 900ms/step - loss: 7.1619e-04 - acc: 1.0000 - val_loss: 7.0104e-04 - val_acc: 1.0000 - lr: 0.0014
Epoch 105/200
200/200 [==============================] - 214s 1s/step - loss: 6.2045e-04 - acc: 1.0000 - val_loss: 6.2361e-04 - val_acc: 1.0000 - lr: 0.0012
Epoch 106/200
200/200 [==============================] - 183s 915ms/step - loss: 5.5093e-04 - acc: 1.0000 - val_loss: 5.3495e-04 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 107/200
200/200 [==============================] - 185s 926ms/step - loss: 4.9948e-04 - acc: 1.0000 - val_loss: 4.9466e-04 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 108/200
200/200 [==============================] - 187s 935ms/step - loss: 4.6283e-04 - acc: 1.0000 - val_loss: 5.3213e-04 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 109/200
200/200 [==============================] - 188s 941ms/step - loss: 4.3871e-04 - acc: 1.0000 - val_loss: 4.3065e-04 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 110/200
200/200 [==============================] - 194s 973ms/step - loss: 4.2627e-04 - acc: 1.0000 - val_loss: 4.2329e-04 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 111/200
200/200 [==============================] - 206s 1s/step - loss: 3.6728e-04 - acc: 1.0000 - val_loss: 0.0929 - val_acc: 0.8698 - lr: 0.0020
Epoch 112/200
200/200 [==============================] - 214s 1s/step - loss: 2.8893e-04 - acc: 1.0000 - val_loss: 2.6767e-04 - val_acc: 1.0000 - lr: 0.0018
Epoch 113/200
200/200 [==============================] - 214s 1s/step - loss: 2.3273e-04 - acc: 1.0000 - val_loss: 0.1472 - val_acc: 0.6903 - lr: 0.0016
Epoch 114/200
200/200 [==============================] - 213s 1s/step - loss: 1.8670e-04 - acc: 1.0000 - val_loss: 0.1450 - val_acc: 0.7015 - lr: 0.0014
Epoch 115/200
200/200 [==============================] - 213s 1s/step - loss: 1.5695e-04 - acc: 1.0000 - val_loss: 0.0115 - val_acc: 1.0000 - lr: 0.0012
Epoch 116/200
200/200 [==============================] - 214s 1s/step - loss: 1.3597e-04 - acc: 1.0000 - val_loss: 3.2649e-04 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 117/200
200/200 [==============================] - 214s 1s/step - loss: 1.2097e-04 - acc: 1.0000 - val_loss: 0.4788 - val_acc: 0.5018 - lr: 7.3333e-04
Epoch 118/200
200/200 [==============================] - 214s 1s/step - loss: 2.2300e-04 - acc: 1.0000 - val_loss: 2.4725e-04 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 119/200
200/200 [==============================] - 214s 1s/step - loss: 2.1805e-04 - acc: 1.0000 - val_loss: 2.0893e-04 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 120/200
200/200 [==============================] - 214s 1s/step - loss: 2.0614e-04 - acc: 1.0000 - val_loss: 2.0339e-04 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 121/200
200/200 [==============================] - 185s 926ms/step - loss: 1.6093e-04 - acc: 1.0000 - val_loss: 0.4219 - val_acc: 0.5018 - lr: 0.0020
Epoch 122/200
200/200 [==============================] - 185s 926ms/step - loss: 1.1050e-04 - acc: 1.0000 - val_loss: 0.4460 - val_acc: 0.5018 - lr: 0.0018
Epoch 123/200
200/200 [==============================] - 183s 914ms/step - loss: 3.3806e-04 - acc: 1.0000 - val_loss: 2.9386e-04 - val_acc: 1.0000 - lr: 0.0016
Epoch 124/200
200/200 [==============================] - 181s 904ms/step - loss: 1.9949e-04 - acc: 1.0000 - val_loss: 0.4264 - val_acc: 0.5018 - lr: 0.0014
Epoch 125/200
200/200 [==============================] - 182s 908ms/step - loss: 1.4096e-04 - acc: 1.0000 - val_loss: 0.2692 - val_acc: 0.5107 - lr: 0.0012
Epoch 126/200
200/200 [==============================] - 181s 903ms/step - loss: 1.0977e-04 - acc: 1.0000 - val_loss: 0.0105 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 127/200
200/200 [==============================] - 182s 909ms/step - loss: 9.1792e-05 - acc: 1.0000 - val_loss: 4.5129e-04 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 128/200
200/200 [==============================] - 181s 905ms/step - loss: 8.0692e-05 - acc: 1.0000 - val_loss: 1.1738e-04 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 129/200
200/200 [==============================] - 181s 907ms/step - loss: 7.4031e-05 - acc: 1.0000 - val_loss: 7.6682e-05 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 130/200
200/200 [==============================] - 181s 906ms/step - loss: 7.0827e-05 - acc: 1.0000 - val_loss: 7.0510e-05 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 131/200
200/200 [==============================] - 181s 905ms/step - loss: 5.6762e-05 - acc: 1.0000 - val_loss: 0.4818 - val_acc: 0.5018 - lr: 0.0020
Epoch 132/200
200/200 [==============================] - 182s 908ms/step - loss: 3.9235e-05 - acc: 1.0000 - val_loss: 0.4837 - val_acc: 0.5018 - lr: 0.0018
Epoch 133/200
200/200 [==============================] - 181s 907ms/step - loss: 2.9483e-05 - acc: 1.0000 - val_loss: 0.4621 - val_acc: 0.5018 - lr: 0.0016
Epoch 134/200
200/200 [==============================] - 181s 906ms/step - loss: 8.8404e-04 - acc: 0.9999 - val_loss: 9.7526e-04 - val_acc: 1.0000 - lr: 0.0014
Epoch 135/200
200/200 [==============================] - 181s 906ms/step - loss: 8.3623e-04 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 0.0012
Epoch 136/200
200/200 [==============================] - 181s 906ms/step - loss: 6.6218e-04 - acc: 1.0000 - val_loss: 0.0236 - val_acc: 0.9904 - lr: 9.4444e-04
Epoch 137/200
200/200 [==============================] - 181s 905ms/step - loss: 5.6179e-04 - acc: 1.0000 - val_loss: 8.9467e-04 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 138/200
200/200 [==============================] - 181s 906ms/step - loss: 4.9843e-04 - acc: 1.0000 - val_loss: 5.4970e-04 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 139/200
200/200 [==============================] - 181s 906ms/step - loss: 4.6030e-04 - acc: 1.0000 - val_loss: 4.5565e-04 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 140/200
200/200 [==============================] - 181s 905ms/step - loss: 4.4187e-04 - acc: 1.0000 - val_loss: 4.3794e-04 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 141/200
200/200 [==============================] - 181s 905ms/step - loss: 3.5985e-04 - acc: 1.0000 - val_loss: 0.4717 - val_acc: 0.5018 - lr: 0.0020
Epoch 142/200
200/200 [==============================] - 212s 1s/step - loss: 2.5639e-04 - acc: 1.0000 - val_loss: 0.3537 - val_acc: 0.5018 - lr: 0.0018
Epoch 143/200
200/200 [==============================] - 190s 952ms/step - loss: 1.9486e-04 - acc: 1.0000 - val_loss: 0.0238 - val_acc: 0.9944 - lr: 0.0016
Epoch 144/200
200/200 [==============================] - 192s 961ms/step - loss: 1.5588e-04 - acc: 1.0000 - val_loss: 5.5978e-04 - val_acc: 0.9998 - lr: 0.0014
Epoch 145/200
200/200 [==============================] - 181s 906ms/step - loss: 1.2962e-04 - acc: 1.0000 - val_loss: 9.1726e-04 - val_acc: 1.0000 - lr: 0.0012
Epoch 146/200
200/200 [==============================] - 181s 904ms/step - loss: 1.1111e-04 - acc: 1.0000 - val_loss: 2.5750e-04 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 147/200
200/200 [==============================] - 181s 903ms/step - loss: 9.8085e-05 - acc: 1.0000 - val_loss: 9.7593e-05 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 148/200
200/200 [==============================] - 181s 905ms/step - loss: 1.1093e-04 - acc: 1.0000 - val_loss: 0.6840 - val_acc: 0.2697 - lr: 5.2222e-04
Epoch 149/200
200/200 [==============================] - 181s 904ms/step - loss: 1.7264e-04 - acc: 1.0000 - val_loss: 1.5816e-04 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 150/200
200/200 [==============================] - 180s 902ms/step - loss: 1.5343e-04 - acc: 1.0000 - val_loss: 1.4886e-04 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 151/200
200/200 [==============================] - 180s 900ms/step - loss: 1.1442e-04 - acc: 1.0000 - val_loss: 0.4817 - val_acc: 0.5018 - lr: 0.0020
Epoch 152/200
200/200 [==============================] - 180s 900ms/step - loss: 7.5607e-05 - acc: 1.0000 - val_loss: 0.4857 - val_acc: 0.5018 - lr: 0.0018
Epoch 153/200
200/200 [==============================] - 180s 900ms/step - loss: 5.0521e-05 - acc: 1.0000 - val_loss: 0.4959 - val_acc: 0.5018 - lr: 0.0016
Epoch 154/200
200/200 [==============================] - 182s 909ms/step - loss: 3.6988e-05 - acc: 1.0000 - val_loss: 0.4636 - val_acc: 0.5018 - lr: 0.0014
Epoch 155/200
200/200 [==============================] - 181s 904ms/step - loss: 2.9382e-05 - acc: 1.0000 - val_loss: 0.1145 - val_acc: 0.8209 - lr: 0.0012
Epoch 156/200
200/200 [==============================] - 181s 905ms/step - loss: 2.4769e-05 - acc: 1.0000 - val_loss: 5.0902e-04 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 157/200
200/200 [==============================] - 181s 904ms/step - loss: 2.1742e-05 - acc: 1.0000 - val_loss: 4.3060e-05 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 158/200
200/200 [==============================] - 181s 905ms/step - loss: 1.9691e-05 - acc: 1.0000 - val_loss: 2.2383e-05 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 159/200
200/200 [==============================] - 181s 903ms/step - loss: 1.8412e-05 - acc: 1.0000 - val_loss: 1.8346e-05 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 160/200
200/200 [==============================] - 181s 904ms/step - loss: 1.7785e-05 - acc: 1.0000 - val_loss: 1.7664e-05 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 161/200
200/200 [==============================] - 181s 904ms/step - loss: 0.0051 - acc: 0.9974 - val_loss: 0.0642 - val_acc: 0.9316 - lr: 0.0020
Epoch 162/200
200/200 [==============================] - 181s 907ms/step - loss: 0.0029 - acc: 1.0000 - val_loss: 0.0028 - val_acc: 1.0000 - lr: 0.0018
Epoch 163/200
200/200 [==============================] - 181s 905ms/step - loss: 0.0027 - acc: 1.0000 - val_loss: 0.0026 - val_acc: 1.0000 - lr: 0.0016
Epoch 164/200
200/200 [==============================] - 181s 906ms/step - loss: 0.0025 - acc: 1.0000 - val_loss: 0.0025 - val_acc: 1.0000 - lr: 0.0014
Epoch 165/200
200/200 [==============================] - 181s 905ms/step - loss: 0.0024 - acc: 1.0000 - val_loss: 0.0023 - val_acc: 1.0000 - lr: 0.0012
Epoch 166/200
200/200 [==============================] - 181s 905ms/step - loss: 0.0023 - acc: 1.0000 - val_loss: 0.0022 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 167/200
200/200 [==============================] - 182s 908ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0022 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 168/200
200/200 [==============================] - 181s 907ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0021 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 169/200
200/200 [==============================] - 181s 905ms/step - loss: 0.0021 - acc: 1.0000 - val_loss: 0.0021 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 170/200
200/200 [==============================] - 182s 908ms/step - loss: 0.0021 - acc: 1.0000 - val_loss: 0.0021 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 171/200
200/200 [==============================] - 182s 908ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 0.0020
Epoch 172/200
200/200 [==============================] - 182s 911ms/step - loss: 0.0018 - acc: 1.0000 - val_loss: 0.0017 - val_acc: 1.0000 - lr: 0.0018
Epoch 173/200
200/200 [==============================] - 183s 915ms/step - loss: 0.0017 - acc: 1.0000 - val_loss: 0.0016 - val_acc: 1.0000 - lr: 0.0016
Epoch 174/200
200/200 [==============================] - 183s 917ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0015 - val_acc: 1.0000 - lr: 0.0014
Epoch 175/200
200/200 [==============================] - 184s 920ms/step - loss: 0.0015 - acc: 1.0000 - val_loss: 0.0014 - val_acc: 1.0000 - lr: 0.0012
Epoch 176/200
200/200 [==============================] - 184s 918ms/step - loss: 0.0014 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 177/200
200/200 [==============================] - 183s 915ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 178/200
200/200 [==============================] - 183s 913ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0013 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 179/200
200/200 [==============================] - 182s 912ms/step - loss: 0.0013 - acc: 1.0000 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 180/200
200/200 [==============================] - 184s 918ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0012 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 181/200
200/200 [==============================] - 187s 934ms/step - loss: 0.0012 - acc: 1.0000 - val_loss: 0.0011 - val_acc: 1.0000 - lr: 0.0020
Epoch 182/200
200/200 [==============================] - 185s 927ms/step - loss: 0.0010 - acc: 1.0000 - val_loss: 9.6330e-04 - val_acc: 1.0000 - lr: 0.0018
Epoch 183/200
200/200 [==============================] - 185s 926ms/step - loss: 9.1766e-04 - acc: 1.0000 - val_loss: 8.7060e-04 - val_acc: 1.0000 - lr: 0.0016
Epoch 184/200
200/200 [==============================] - 185s 927ms/step - loss: 8.3431e-04 - acc: 1.0000 - val_loss: 7.9592e-04 - val_acc: 1.0000 - lr: 0.0014
Epoch 185/200
200/200 [==============================] - 185s 926ms/step - loss: 7.6764e-04 - acc: 1.0000 - val_loss: 7.3644e-04 - val_acc: 1.0000 - lr: 0.0012
Epoch 186/200
200/200 [==============================] - 185s 927ms/step - loss: 7.1441e-04 - acc: 1.0000 - val_loss: 6.8889e-04 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 187/200
200/200 [==============================] - 185s 926ms/step - loss: 6.7345e-04 - acc: 1.0000 - val_loss: 6.5429e-04 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 188/200
200/200 [==============================] - 185s 925ms/step - loss: 6.4192e-04 - acc: 1.0000 - val_loss: 6.2633e-04 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 189/200
200/200 [==============================] - 185s 926ms/step - loss: 6.2029e-04 - acc: 1.0000 - val_loss: 6.0985e-04 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 190/200
200/200 [==============================] - 185s 926ms/step - loss: 6.0916e-04 - acc: 1.0000 - val_loss: 6.0429e-04 - val_acc: 1.0000 - lr: 1.0000e-04
Epoch 191/200
200/200 [==============================] - 184s 920ms/step - loss: 5.5169e-04 - acc: 1.0000 - val_loss: 5.0742e-04 - val_acc: 1.0000 - lr: 0.0020
Epoch 192/200
200/200 [==============================] - 184s 922ms/step - loss: 0.0027 - acc: 0.9986 - val_loss: 0.5014 - val_acc: 0.5005 - lr: 0.0018
Epoch 193/200
200/200 [==============================] - 184s 921ms/step - loss: 0.0024 - acc: 1.0000 - val_loss: 0.0023 - val_acc: 1.0000 - lr: 0.0016
Epoch 194/200
200/200 [==============================] - 185s 923ms/step - loss: 0.0023 - acc: 1.0000 - val_loss: 0.0022 - val_acc: 1.0000 - lr: 0.0014
Epoch 195/200
200/200 [==============================] - 183s 916ms/step - loss: 0.0022 - acc: 1.0000 - val_loss: 0.0021 - val_acc: 1.0000 - lr: 0.0012
Epoch 196/200
200/200 [==============================] - 183s 917ms/step - loss: 0.0021 - acc: 1.0000 - val_loss: 0.0020 - val_acc: 1.0000 - lr: 9.4444e-04
Epoch 197/200
200/200 [==============================] - 183s 917ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0020 - val_acc: 1.0000 - lr: 7.3333e-04
Epoch 198/200
200/200 [==============================] - 184s 918ms/step - loss: 0.0020 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 5.2222e-04
Epoch 199/200
200/200 [==============================] - 184s 918ms/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 3.1111e-04
Epoch 200/200
200/200 [==============================] - 183s 917ms/step - loss: 0.0019 - acc: 1.0000 - val_loss: 0.0019 - val_acc: 1.0000 - lr: 1.0000e-04
Best validation accuracy: 1.0
%% Cell type:code id: tags:
``` python
#15#Create JSON File
# Convert the model architecture to JSON format
import json
from keras.models import model_from_json

model_json = trained_net.to_json()
# Save the model architecture as a JSON file (optional)
filename = f'ghor_Rk_0000_836F_Round_{num_rounds}_depth_10.json'
print(filename)
with open(filename, "w") as json_file:
    json.dump(json.loads(model_json), json_file, indent=4)
```
%% Output
ghor_Rk_0000_836F_Round_10_depth_10.json
%% Cell type:code id: tags:
``` python
#16#Evaluate Function
def evaluate(net, X, Y):
    Z = net.predict(X, batch_size=10000).flatten()
    Zbin = (Z > 0.5)
    diff = Y - Z
    mse = np.mean(diff * diff)
    n = len(Z)
    n0 = np.sum(Y == 0)
    n1 = np.sum(Y == 1)
    acc = np.sum(Zbin == Y) / n
    tpr = np.sum(Zbin[Y == 1]) / n1
    tnr = np.sum(Zbin[Y == 0] == 0) / n0
    mreal = np.median(Z[Y == 1])
    high_random = np.sum(Z[Y == 0] > mreal) / n0
    print("Accuracy: ", acc, "TPR: ", tpr, "TNR: ", tnr, "MSE: ", mse)
    print("Percentage of random pairs with score higher than median of real pairs: ", 100 * high_random)
```
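%% Cell type:markdown id: tags:
The same statistics can be cross-checked with scikit-learn. This is an optional illustrative sketch on dummy scores; it assumes scikit-learn is installed, which the notebook itself does not require.
%% Cell type:code id: tags:
``` python
# Hypothetical cross-check of the evaluate() metrics on dummy data.
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix

Y_true = np.array([0, 0, 1, 1])
Z_score = np.array([0.1, 0.7, 0.8, 0.4])
Z_bin = (Z_score > 0.5).astype(int)

tn, fp, fn, tp = confusion_matrix(Y_true, Z_bin).ravel()
print(accuracy_score(Y_true, Z_bin))   # 0.5, matching np.sum(Zbin == Y) / n
print(tp / (tp + fn), tn / (tn + fp))  # TPR and TNR as computed above
```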
%% Cell type:code id: tags:
``` python
#17#Evaluate Function Call
import numpy as np
from keras.models import model_from_json

#load distinguishers
with open('ghor_Rk_0000_836F_Round_10_depth_10.json', 'r') as json_file:
    json_model = json_file.read()
net10 = model_from_json(json_model)
net10.load_weights('ghor_Rk_0000_836F_Round_10_depth_10.h5')

X_test_stacked, Y_test_stacked = make_train_data(100000, num_rounds)
evaluate(net10, X_test_stacked, Y_test_stacked)
```
%% Output
10/10 [==============================] - 3s 273ms/step
Accuracy: 1.0 TPR: 1.0 TNR: 1.0 MSE: 3.327519e-07
Percentage of random pairs with score higher than median of real pairs: 0.0
%% Cell type:code id: tags:
``` python
```