%% Cell type:code id: tags:
``` python
#1#Header
import csv
import numpy as np
import os
from os import urandom
from keras.models import model_from_json
```
%% Cell type:code id: tags:
``` python
#2#Defining Global Variables
num_rounds = 20
#num_rounds = 10
m = 0
o = 0
counter = 0
k_int = 0
k_int1 = 0
```
%% Cell type:code id: tags:
``` python
#3#Defining WORDSIZE
def WORD_SIZE():
    return(16);
```
%% Cell type:code id: tags:
``` python
#4#Defining S-Box
#s_box_mapping_np = np.array([12, 5, 6, 11, 9, 0, 10, 13, 3, 14, 15, 8, 4, 7, 1, 2], dtype=np.uint8)
s_box_mapping_np = np.array([0, 4, 1, 5, 2, 6, 3, 7, 8, 12, 9, 13, 10, 14, 11, 15], dtype=np.uint8)
def s_box(input_bits):
    input_bits_int = int(input_bits)
    output_bits_int = s_box_mapping_np[input_bits_int]
    return output_bits_int
```
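%% Cell type:markdown id: tags:
A quick, illustrative sanity check (assuming the cell above has been run): the S-box table should be a bijection on the sixteen 4-bit values, i.e. a permutation of 0–15.
%% Cell type:code id: tags:
``` python
# Illustrative check only: verify the S-box is a permutation of 0..15
print(sorted(s_box(x) for x in range(16)) == list(range(16)))  # expected: True
print([int(s_box(x)) for x in range(16)])                      # full lookup table
```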
%% Cell type:code id: tags:
``` python
#5#Defining P-Box
def decimal_to_binary_list(value, num_bits=4):
    return np.array([int(x) for x in format(value, f'0{num_bits}b')], dtype=np.uint8)
def p_box(c_decimal, d_decimal, x_decimal, y_decimal):
    c = decimal_to_binary_list(c_decimal)
    d = decimal_to_binary_list(d_decimal)
    x = decimal_to_binary_list(x_decimal)
    y = decimal_to_binary_list(y_decimal)
    e = np.zeros(16, dtype=np.uint8)
    e[0] = d[0]
    e[1] = y[0]
    e[2] = c[3]
    e[3] = x[3]
    e[4] = x[1]
    e[5] = y[1]
    e[6] = c[2]
    e[7] = d[2]
    e[8] = d[0]
    e[9] = x[0]
    e[10] = y[3]
    e[11] = c[3]
    e[12] = c[1]
    e[13] = d[1]
    e[14] = x[2]
    e[15] = y[2]
    return e
```
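%% Cell type:markdown id: tags:
The P-box interleaves the bits of the four 4-bit S-box outputs into a single 16-bit word. A small illustration with hypothetical nibble values:
%% Cell type:code id: tags:
``` python
# Illustrative only: four example nibbles (c, d, x, y) through the P-box
example = p_box(0x3, 0xA, 0x5, 0xC)        # hypothetical inputs
print(example, example.shape)              # length-16 bit array
print(int(''.join(map(str, example)), 2))  # the same bits read as a 16-bit integer
```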
%% Cell type:code id: tags:
``` python
#6#Defining L-Box
def l_box(f):
    h = np.zeros(16, dtype=np.uint8)
    h[0] = f[0]
    h[1] = f[8]
    h[2] = f[7]
    h[3] = f[15]
    h[4] = f[1]
    h[5] = f[9]
    h[6] = f[6]
    h[7] = f[14]
    h[8] = f[2]
    h[9] = f[10]
    h[10] = f[5]
    h[11] = f[13]
    h[12] = f[3]
    h[13] = f[11]
    h[14] = f[4]
    h[15] = f[12]
    return h
```
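%% Cell type:markdown id: tags:
The L-box is a fixed wire permutation on 16 bit positions. Feeding it the index sequence 0–15 makes the wiring explicit; this is only a check of the mapping, not part of the cipher.
%% Cell type:code id: tags:
``` python
# Illustrative check: output position i takes its bit from the printed input position
print(l_box(np.arange(16, dtype=np.uint8)))
# expected: [ 0  8  7 15  1  9  6 14  2 10  5 13  3 11  4 12]
```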
%% Cell type:code id: tags:
``` python
#7#Defining F-function for Right Side of Plaintext
def binary_array_to_integer(output):
    int_output = ''.join(map(str, output))
    return int(int_output, 2)
def to_binary(value, bits):
    return format(value, f'0{bits}b')
def f_function(x, key, d):
    q = 0
    global m, counter, k_int
    if isinstance(x, int):
        x = [x]
    # split each 16-bit word into four 4-bit nibbles
    input_parts = np.zeros((len(x), 4), dtype=np.uint16)
    for i, val in enumerate(x):
        input_parts[i] = np.array([val >> 12, (val >> 8) & 0xF, (val >> 4) & 0xF, val & 0xF])
    # substitute every nibble through the S-box
    s_box_outputs = np.array([[s_box(element) for element in part] for part in input_parts])
    # interleave the substituted nibbles through the P-box
    p_box_outputs = np.zeros((len(x), 1, 16), dtype=np.uint8)
    for i in range(len(x)):
        p_box_outputs[i] = np.array(p_box(s_box_outputs[i][0], s_box_outputs[i][1], s_box_outputs[i][2], s_box_outputs[i][3]))
    final_outputs = np.zeros(len(x), dtype=np.uint32)
    for i in range(len(x)):
        final_output = np.array(l_box(p_box_outputs[i][0]))
        k = key[q][(m+1) % 4]
        if (counter > 1):
            # later rounds: derive the round key with the key-schedule function
            k_bin, k_int = subsequent_key(k_int)
            output = final_output ^ k_bin
        else:
            # first rounds: XOR with the selected master-key word
            k = to_binary(k, 16)
            k = np.array([int(bit) for bit in k])
            output = final_output ^ k
        output = binary_array_to_integer(output)
        final_outputs[i] = output
        q += 1
    # rotate the key-word index for the next call
    if (m < 2):
        m += 2
    else:
        m = 0
    return final_outputs
```
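%% Cell type:markdown id: tags:
A minimal, illustrative call of f_function on one 16-bit word. The (1, 4) round-key array and the input value are hypothetical; the globals m, counter and k_int are reset first because the function mutates them, and cell #2 should be re-run before the real training cells.
%% Cell type:code id: tags:
``` python
# Illustrative only: run the right-side round function on a single word
m, counter, k_int = 0, 0, 0                              # reset the globals the function mutates
demo_key = np.array([[0x1111, 0x2222, 0x3333, 0x4444]])  # hypothetical key words K1..K4
print(f_function(np.array([0xABCD], dtype=np.uint16), demo_key, 1))
```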
%% Cell type:code id: tags:
``` python
#Key Generation Algorithm (subsequent round keys)
def to_binary(value, bits):
    return format(value, f'0{bits}b')
def binary_array_to_integer(output):
    int_output = ''.join(map(str, output))
    return int(int_output, 2)
def subsequent_key(x):
    if isinstance(x, int):
        x = [x]
    input_parts = np.zeros((len(x), 4), dtype=np.uint16)
    for i, val in enumerate(x):
        input_parts[i] = np.array([val >> 12, (val >> 8) & 0xF, (val >> 4) & 0xF, val & 0xF])
    s_box_outputs = np.array([[s_box(element) for element in part] for part in input_parts])
    p_box_outputs = np.zeros((len(x), 1, 16), dtype=np.uint8)
    for i in range(len(x)):
        p_box_outputs[i] = np.array(p_box(s_box_outputs[i][0], s_box_outputs[i][1], s_box_outputs[i][2], s_box_outputs[i][3]))
    bin_output = np.zeros(len(x), dtype=np.uint16)
    final_output = np.zeros(len(x), dtype=np.uint16)
    for i in range(len(x)):
        bin_output = np.array(l_box(p_box_outputs[i][0]))
        output = binary_array_to_integer(bin_output)
        final_output[i] = output
    return bin_output, final_output
```
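%% Cell type:markdown id: tags:
subsequent_key pushes a 16-bit key word through the same S-box / P-box / L-box pipeline and returns both the bit-array and integer forms that f_function and ff_function use in later rounds. A small illustrative call with a hypothetical key word:
%% Cell type:code id: tags:
``` python
# Illustrative only: derive one subsequent key from a hypothetical 16-bit key word
k_bits, k_next = subsequent_key(0x2222)
print(k_bits)   # bit-array form, XORed with the round-function output
print(k_next)   # integer form, fed back into subsequent_key on the next round
```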
%% Cell type:code id: tags:
``` python
#8#Defining F-function for Left Side of Plaintext
def binary_array_to_integer(output):
    int_output = ''.join(map(str, output))
    return int(int_output, 2)
def ff_function(x, key, d):
    q = 0
    global o, counter, k_int1
    if isinstance(x, int):
        x = [x]
    input_parts = np.zeros((len(x), 4), dtype=np.uint16)
    for i, val in enumerate(x):
        input_parts[i] = np.array([val >> 12, (val >> 8) & 0xF, (val >> 4) & 0xF, val & 0xF])
    s_box_outputs = np.array([[s_box(element) for element in part] for part in input_parts])
    p_box_outputs = np.zeros((len(x), 1, 16), dtype=np.uint8)
    for i in range(len(x)):
        p_box_outputs[i] = np.array(p_box(s_box_outputs[i][0], s_box_outputs[i][1], s_box_outputs[i][2], s_box_outputs[i][3]))
    final_outputs = np.zeros(len(x), dtype=np.uint32)
    for i in range(len(x)):
        final_output = np.array(l_box(p_box_outputs[i][0]))
        k = key[q][o % 4]
        if (counter > 1):
            # later rounds: derive the round key with the key-schedule function
            k_bin, k_int1 = subsequent_key(k_int1)
            output = final_output ^ k_bin
        else:
            # first rounds: XOR with the selected master-key word
            k = to_binary(k, 16)
            k = np.array([int(bit) for bit in k])
            output = final_output ^ k
        output = binary_array_to_integer(output)
        final_outputs[i] = output
        q += 1
    # counter tracks how many times ff_function has been called (one call per round)
    counter += 1
    # rotate the key-word index for the next call
    if (o < 2):
        o += 2
    else:
        o = 0
    return final_outputs
```
%% Cell type:code id: tags:
``` python
#9#Convert the ciphertext pairs into Binary array
def convert_to_binary(row):
    bin_array = np.zeros(64, dtype=np.uint8)
    for i, num in enumerate(row):
        binary_str = format(num, '016b')
        for j, b in enumerate(binary_str):
            bin_array[i * 16 + j] = int(b)
    return bin_array
```
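%% Cell type:markdown id: tags:
convert_to_binary flattens a row of four 16-bit ciphertext words into the 64-entry bit vector the network consumes. A quick illustrative check with arbitrary values:
%% Cell type:code id: tags:
``` python
# Illustrative only: four 16-bit words -> one 64-entry bit vector
row = np.array([0x0001, 0x8000, 0xFFFF, 0x0000], dtype=np.uint16)  # example values
bits = convert_to_binary(row)
print(bits.shape)  # (64,)
print(bits[:16])   # 0x0001 -> fifteen 0s followed by a single 1
```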
%% Cell type:code id: tags:
``` python
#10#Encryption Function
def lcb_encrypt(plaintext, key, rounds, d):
    left_plaintext = np.uint16(plaintext[0])
    right_plaintext = np.uint16(plaintext[1])
    L, R = left_plaintext, right_plaintext
    n = 0
    while n < rounds:
        L, R = f_function(R, key, d), ff_function(L, key, d)
        n += 1
    print("Encryption done per round")
    return (L, R)
```
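%% Cell type:markdown id: tags:
A minimal sketch of encrypting a single 32-bit block for a few rounds. The key words and plaintext halves are hypothetical, and the round-state globals are reset first; re-run cell #2 afterwards, before the training cells, since the round functions advance those globals.
%% Cell type:code id: tags:
``` python
# Illustrative only: one block, a fixed hypothetical key, 4 rounds
m, o, counter, k_int, k_int1 = 0, 0, 0, 0, 0             # reset round-state globals
demo_key = np.array([[0xD63A, 0x529E, 0xCC92, 0xD353]])  # four hypothetical 16-bit key words
L_ct, R_ct = lcb_encrypt((np.array([0xCED4], dtype=np.uint16),
                          np.array([0xB5C6], dtype=np.uint16)),
                         demo_key, 4, 1)
print(hex(int(L_ct[0])), hex(int(R_ct[0])))
```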
%% Cell type:code id: tags:
``` python
#11#Function for generation of keys
import random
def generate_hex_keys(num_keys, length=16):
    hex_chars = "0123456789ABCDEF"
    keys_str = ["".join(random.choices(hex_chars, k=length)) for _ in range(num_keys)]
    return keys_str
def generate_round_keys(num_keys):
    random_keys_hex = generate_hex_keys(num_keys)
    #random_keys_hex = ['D63A529ECC92D353', '563A529ECC92D353', '163A529ECC92D353', 'D67AD296CC92DB53', '76BA569EDC9BD353']
    #random_keys_hex = ['163A529D687529EC']
    round_keys = []
    for random_key_hex in random_keys_hex:
        random_key = int(random_key_hex, 16)
        # split the 64-bit master key into four 16-bit key words
        K1 = (random_key >> 48) & 0xFFFF
        K2 = (random_key >> 32) & 0xFFFF
        K3 = (random_key >> 16) & 0xFFFF
        K4 = random_key & 0xFFFF
        round_key = np.array([K1, K2, K3, K4])
        round_keys.append(round_key)
    round_key = np.array(round_keys)
    return round_key
```
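%% Cell type:markdown id: tags:
generate_round_keys draws one random 64-bit master key per sample and splits it into the four 16-bit words K1–K4. A quick illustrative call:
%% Cell type:code id: tags:
``` python
# Illustrative only: key-word arrays for 3 samples
demo_keys = generate_round_keys(3)
print(demo_keys.shape)                      # (3, 4): four 16-bit words per sample
print([hex(int(w)) for w in demo_keys[0]])  # K1..K4 of the first sample
```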
%% Cell type:code id: tags:
``` python
#12#Make dataset
def make_train_data(n, nr, diff=(0x0020,0)):
    global counter
    Y = np.frombuffer(urandom(n), dtype=np.uint8);
    Y = Y & 1;
    plaintext = np.frombuffer(urandom(4*n), dtype=np.uint32);
    #plaintext = [0xEED4B555]
    #plaintext = [0xCED4B5C6, 0xCED4B5C6, 0xCED4B5C6, 0xCED4B5C6, 0xCED4B5C6]
    plain0l = np.empty(n, dtype=np.uint16)
    plain0r = np.empty(n, dtype=np.uint16)
    for i in range(n):
        plain0l[i] = (plaintext[i] >> 16) & 0xffff
        plain0r[i] = plaintext[i] & 0xffff
    plain1l = plain0l ^ diff[0]; plain1r = plain0r ^ diff[1];
    print(plain0l)
    print(plain0r)
    num_rand_samples = np.sum(Y==0);
    plain1l[Y==0] = np.frombuffer(urandom(2*num_rand_samples), dtype=np.uint16);
    plain1r[Y==0] = np.frombuffer(urandom(2*num_rand_samples), dtype=np.uint16);
    round_key = generate_round_keys(n)
    ctdata0l, ctdata0r = lcb_encrypt((plain0l, plain0r), round_key, nr, n)
    counter = 0
    ctdata1l, ctdata1r = lcb_encrypt((plain1l, plain1r), round_key, nr, n)
    print("All encryption done")
    ctdata = np.vstack((ctdata0l, ctdata0r, ctdata1l, ctdata1r)).T
    X = np.array([convert_to_binary(row) for row in ctdata])
    # commented-out CSV export of the plaintext/ciphertext pairs
    """
    with open("Dataset_NewP.csv", "w", newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["plain0l", "plain0r", "plain1l", "plain1r", "Y"])
        for i in range(n):
            writer.writerow([plain0l[i], plain0r[i], plain1l[i], plain1r[i], Y[i]])
    with open("Dataset_NewC.csv", "w", newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["ctdata0l", "ctdata0r", "ctdata1l", "ctdata1r", "Y"])
        for i in range(n):
            writer.writerow([ctdata0l[i], ctdata0r[i], ctdata1l[i], ctdata1r[i], Y[i]])
    """
    return (X, Y);
```
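%% Cell type:markdown id: tags:
Before launching the full 10-million-sample run, the pipeline can be smoke-tested on a tiny batch; the sizes below are arbitrary and only check shapes and label balance. Re-run cell #2 before the real training cell so the round-state globals start clean.
%% Cell type:code id: tags:
``` python
# Illustrative only: a small smoke-test batch (not used for training)
m, o, counter, k_int, k_int1 = 0, 0, 0, 0, 0  # reset round-state globals
X_small, Y_small = make_train_data(16, 3)     # 16 samples, 3 rounds
print(X_small.shape, Y_small.shape)           # (16, 64) and (16,)
print(Y_small.mean())                         # label balance, roughly 0.5
```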
%% Cell type:code id: tags:
``` python
#13#Creation of Model
from pickle import dump
from sklearn.model_selection import KFold
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.layers import Dense, Conv1D, Input, Reshape, Permute, Add, Flatten, BatchNormalization, Activation, Dropout
from keras import backend as K
from keras.regularizers import l2
dropout_rate = 0.5;
bs = 2000;
wdir = './Final_h5_file/'
def cyclic_lr(num_epochs, high_lr, low_lr):
    res = lambda i: low_lr + ((num_epochs-1) - i % num_epochs)/(num_epochs-1) * (high_lr - low_lr);
    return(res);
def make_checkpoint(datei):
    res = ModelCheckpoint(datei, monitor='val_loss', save_best_only=True);
    return(res);
#make residual tower of convolutional blocks
def make_resnet(num_blocks=2, num_filters=32, num_outputs=1, d1=64, d2=64, word_size=16, ks=3, depth=5, reg_param=0.0001, final_activation='sigmoid'):
    #Input and preprocessing layers
    inp = Input(shape=(num_blocks * word_size * 2,));
    rs = Reshape((2 * num_blocks, word_size))(inp);
    perm = Permute((2,1))(rs);
    #add a single residual layer that will expand the data to num_filters channels
    #this is a bit-sliced layer
    conv0 = Conv1D(num_filters, kernel_size=1, padding='same', kernel_regularizer=l2(reg_param))(perm);
    conv0 = BatchNormalization()(conv0);
    conv0 = Activation('relu')(conv0);
    #add residual blocks
    shortcut = conv0;
    for i in range(depth):
        conv1 = Conv1D(num_filters, kernel_size=ks, padding='same', kernel_regularizer=l2(reg_param))(shortcut);
        conv1 = BatchNormalization()(conv1);
        conv1 = Activation('relu')(conv1);
        conv2 = Conv1D(num_filters, kernel_size=ks, padding='same', kernel_regularizer=l2(reg_param))(conv1);
        conv2 = BatchNormalization()(conv2);
        conv2 = Activation('relu')(conv2);
        conv2 = Dropout(dropout_rate)(conv2)
        shortcut = Add()([shortcut, conv2]);
    #add prediction head
    flat1 = Flatten()(shortcut);
    dense1 = Dense(d1, kernel_regularizer=l2(reg_param))(flat1);
    dense1 = BatchNormalization()(dense1);
    dense1 = Activation('relu')(dense1);
    dense1 = Dropout(dropout_rate)(dense1)  # dropout after the first dense layer
    dense2 = Dense(d2, kernel_regularizer=l2(reg_param))(dense1);
    dense2 = Dropout(dropout_rate)(dense2)
    dense2 = BatchNormalization()(dense2);
    dense2 = Activation('relu')(dense2);
    out = Dense(num_outputs, activation=final_activation, kernel_regularizer=l2(reg_param))(dense2);
    model = Model(inputs=inp, outputs=out);
    return(model);
def train_LCB_distinguisher(num_epochs, num_rounds, depth):
    #create the network
    print(num_rounds)
    print(depth)
    net = make_resnet(depth=depth, reg_param=0.00007);
    opt = SGD(learning_rate=0.00001, momentum=0.5)
    net.compile(optimizer=opt, loss='binary_crossentropy', metrics=['acc']);
    #net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc']);
    #generate training and validation data
    X, Y = make_train_data(10000000, num_rounds);
    X_eval, Y_eval = make_train_data(1000000, num_rounds);
    #set up model checkpoint
    check = make_checkpoint(wdir+'FINAL_SECURE_'+str(num_rounds)+'_depth_'+str(depth)+'.h5');
    #create learning-rate schedule
    lr = LearningRateScheduler(cyclic_lr(10, 0.00004, 0.000019));
    #train and evaluate
    h = net.fit(X, Y, epochs=num_epochs, batch_size=bs, validation_data=(X_eval, Y_eval), callbacks=[lr, check]);
    #save val_acc and val_loss under distinct filenames so the second save does not overwrite the first
    np.save(wdir+'h'+str(num_rounds)+'r_depth'+str(depth)+'_val_acc.npy', h.history['val_acc']);
    np.save(wdir+'h'+str(num_rounds)+'r_depth'+str(depth)+'_val_loss.npy', h.history['val_loss']);
    dump(h.history, open(wdir+'hist'+str(num_rounds)+'r_depth'+str(depth)+'.p', 'wb'));
    print("Best validation accuracy: ", np.max(h.history['val_acc']));
    return(net, h);
```
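%% Cell type:markdown id: tags:
The cyclic learning-rate schedule ramps linearly from high_lr down to low_lr over a 10-epoch period and then restarts, which is the lr pattern visible in the training log below. A quick illustrative print of the first few per-epoch values:
%% Cell type:code id: tags:
``` python
# Illustrative only: per-epoch learning rates from cyclic_lr(10, 4e-5, 1.9e-5)
schedule = cyclic_lr(10, 0.00004, 0.000019)
print([round(schedule(i), 9) for i in range(12)])
# expected: 4e-05, 3.7667e-05, ..., 1.9e-05, then back to 4e-05 at epoch 10
```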
%% Cell type:code id: tags:
``` python
#14#Training the Model
#10,000,000 training samples, 1,000,000 validation samples, batch size 2000
num_epochs = 30
depth = 3
trained_net, history = train_LCB_distinguisher(num_epochs, num_rounds, depth)
```
%% Output
20
3
[13182 2913 24737 ... 62401 10591 23589]
[49387 49338 18711 ... 59833 42322 63243]
Encryption done per round
Encryption done per round
All encryption done
[31785 45398 1696 ... 3215 62633 4958]
[43601 25042 12208 ... 26164 4492 32270]
Encryption done per round
Encryption done per round
All encryption done
Epoch 1/30
5000/5000 [==============================] - 548s 109ms/step - loss: 0.7906 - acc: 0.4983 - val_loss: 0.7267 - val_acc: 0.5008 - lr: 4.0000e-05
Epoch 2/30
5000/5000 [==============================] - 554s 111ms/step - loss: 0.7498 - acc: 0.5286 - val_loss: 0.7254 - val_acc: 0.5005 - lr: 3.7667e-05
Epoch 3/30
5000/5000 [==============================] - 558s 112ms/step - loss: 0.7037 - acc: 0.5777 - val_loss: 0.7262 - val_acc: 0.5005 - lr: 3.5333e-05
Epoch 4/30
5000/5000 [==============================] - 565s 113ms/step - loss: 0.6467 - acc: 0.6458 - val_loss: 0.7289 - val_acc: 0.5006 - lr: 3.3000e-05
Epoch 5/30
5000/5000 [==============================] - 566s 113ms/step - loss: 0.5843 - acc: 0.7248 - val_loss: 0.7334 - val_acc: 0.5005 - lr: 3.0667e-05
Epoch 6/30
5000/5000 [==============================] - 567s 113ms/step - loss: 0.5266 - acc: 0.7962 - val_loss: 0.7396 - val_acc: 0.5001 - lr: 2.8333e-05
Epoch 7/30
5000/5000 [==============================] - 566s 113ms/step - loss: 0.4780 - acc: 0.8504 - val_loss: 0.7467 - val_acc: 0.5002 - lr: 2.6000e-05
Epoch 8/30
5000/5000 [==============================] - 573s 115ms/step - loss: 0.4380 - acc: 0.8877 - val_loss: 0.7544 - val_acc: 0.5001 - lr: 2.3667e-05
Epoch 9/30
5000/5000 [==============================] - 573s 115ms/step - loss: 0.4061 - acc: 0.9115 - val_loss: 0.7618 - val_acc: 0.5000 - lr: 2.1333e-05
Epoch 10/30
5000/5000 [==============================] - 565s 113ms/step - loss: 0.3803 - acc: 0.9271 - val_loss: 0.7685 - val_acc: 0.5001 - lr: 1.9000e-05
Epoch 11/30
5000/5000 [==============================] - 565s 113ms/step - loss: 0.3476 - acc: 0.9420 - val_loss: 0.7845 - val_acc: 0.5000 - lr: 4.0000e-05
Epoch 12/30
5000/5000 [==============================] - 569s 114ms/step - loss: 0.3110 - acc: 0.9546 - val_loss: 0.8003 - val_acc: 0.5002 - lr: 3.7667e-05
Epoch 13/30
5000/5000 [==============================] - 566s 113ms/step - loss: 0.2826 - acc: 0.9620 - val_loss: 0.8148 - val_acc: 0.5002 - lr: 3.5333e-05
Epoch 14/30
5000/5000 [==============================] - 569s 114ms/step - loss: 0.2600 - acc: 0.9669 - val_loss: 0.8288 - val_acc: 0.5003 - lr: 3.3000e-05
Epoch 15/30
5000/5000 [==============================] - 568s 114ms/step - loss: 0.2419 - acc: 0.9704 - val_loss: 0.8424 - val_acc: 0.5002 - lr: 3.0667e-05
Epoch 16/30
5000/5000 [==============================] - 571s 114ms/step - loss: 0.2271 - acc: 0.9731 - val_loss: 0.8532 - val_acc: 0.5001 - lr: 2.8333e-05
Epoch 17/30
5000/5000 [==============================] - 571s 114ms/step - loss: 0.2149 - acc: 0.9753 - val_loss: 0.8656 - val_acc: 0.5001 - lr: 2.6000e-05
Epoch 18/30
5000/5000 [==============================] - 567s 113ms/step - loss: 0.2049 - acc: 0.9770 - val_loss: 0.8737 - val_acc: 0.5001 - lr: 2.3667e-05
Epoch 19/30
5000/5000 [==============================] - 566s 113ms/step - loss: 0.1966 - acc: 0.9783 - val_loss: 0.8828 - val_acc: 0.5001 - lr: 2.1333e-05
Epoch 20/30
5000/5000 [==============================] - 566s 113ms/step - loss: 0.1896 - acc: 0.9795 - val_loss: 0.8920 - val_acc: 0.5001 - lr: 1.9000e-05
Epoch 21/30
5000/5000 [==============================] - 568s 114ms/step - loss: 0.1805 - acc: 0.9810 - val_loss: 0.9073 - val_acc: 0.5000 - lr: 4.0000e-05
Epoch 22/30
5000/5000 [==============================] - 568s 114ms/step - loss: 0.1696 - acc: 0.9827 - val_loss: 0.9234 - val_acc: 0.5000 - lr: 3.7667e-05
Epoch 23/30
5000/5000 [==============================] - 569s 114ms/step - loss: 0.1605 - acc: 0.9842 - val_loss: 0.9369 - val_acc: 0.5000 - lr: 3.5333e-05
Epoch 24/30
5000/5000 [==============================] - 570s 114ms/step - loss: 0.1529 - acc: 0.9853 - val_loss: 0.9508 - val_acc: 0.5001 - lr: 3.3000e-05
Epoch 25/30
5000/5000 [==============================] - 570s 114ms/step - loss: 0.1464 - acc: 0.9862 - val_loss: 0.9615 - val_acc: 0.5000 - lr: 3.0667e-05
Epoch 26/30
5000/5000 [==============================] - 570s 114ms/step - loss: 0.1410 - acc: 0.9870 - val_loss: 0.9729 - val_acc: 0.5000 - lr: 2.8333e-05
Epoch 27/30
5000/5000 [==============================] - 571s 114ms/step - loss: 0.1364 - acc: 0.9878 - val_loss: 0.9803 - val_acc: 0.5000 - lr: 2.6000e-05
Epoch 28/30
5000/5000 [==============================] - 569s 114ms/step - loss: 0.1324 - acc: 0.9884 - val_loss: 0.9913 - val_acc: 0.5001 - lr: 2.3667e-05
Epoch 29/30
5000/5000 [==============================] - 569s 114ms/step - loss: 0.1290 - acc: 0.9888 - val_loss: 0.9994 - val_acc: 0.5001 - lr: 2.1333e-05
Epoch 30/30
5000/5000 [==============================] - 568s 114ms/step - loss: 0.1260 - acc: 0.9893 - val_loss: 1.0050 - val_acc: 0.5000 - lr: 1.9000e-05
Best validation accuracy: 0.5007830262184143
%% Cell type:code id: tags:
``` python
#15#Create JSON File
# Convert the model architecture to JSON format
import json
from keras.models import model_from_json
model_json = trained_net.to_json()
# Save the model architecture as a JSON file (optional)
filename = 'FINAL_SECURE_20_depth_3.json'
print(filename)
with open(filename, "w") as json_file:
    json.dump(json.loads(model_json), json_file, indent=4)
```
%% Output
FINAL_SECURE_20_depth_3.json
%% Cell type:code id: tags:
``` python
#16#Evaluate Function
def evaluate(net, X, Y):
    Z = net.predict(X, batch_size=4000).flatten();
    Zbin = (Z > 0.5);
    diff = Y - Z; mse = np.mean(diff*diff);
    n = len(Z); n0 = np.sum(Y==0); n1 = np.sum(Y==1);
    acc = np.sum(Zbin == Y) / n;
    tpr = np.sum(Zbin[Y==1]) / n1;
    tnr = np.sum(Zbin[Y==0] == 0) / n0;
    mreal = np.median(Z[Y==1]);
    high_random = np.sum(Z[Y==0] > mreal) / n0;
    print("Accuracy: ", acc, "TPR: ", tpr, "TNR: ", tnr, "MSE:", mse);
    print("Percentage of random pairs with score higher than median of real pairs:", 100*high_random);
```
%% Cell type:code id: tags:
``` python
#17#Evaluate Function Call
import numpy as np
from keras.models import model_from_json
#load distinguishers
json_file = open('Vul_Best_20_depth_3.json','r');
json_model = json_file.read();
net20 = model_from_json(json_model);
net20.load_weights('Vul_Best_10_depth_3.h5');
X_test_stacked, Y_test_stacked = make_train_data(100000, num_rounds)
evaluate(net20, X_test_stacked, Y_test_stacked);
```
%% Output
Key generation done
Encryption done per round
Encryption done per round
All encryption done
[[1 0 1 ... 0 1 1]
[0 1 0 ... 0 1 1]
[1 0 1 ... 1 0 0]
...
[1 0 0 ... 0 0 1]
[1 0 1 ... 1 1 0]
[1 1 1 ... 1 1 0]]
25/25 [==============================] - 1s 40ms/step
Accuracy: 0.96585 TPR: 1.0 TNR: 0.9312143734767458 MSE: 0.03562397
Percentage of random pairs with score higher than median of real pairs: 0.0
%% Cell type:code id: tags:
``` python
```