Oussama SBAI / SIP-DDoS · Commits

Commit 1e98acb9, authored Nov 10, 2023 by Oussama SBAI

Initial commit

Pipeline #2210 failed with stages
Changes: 5 · Pipelines: 1

Showing 5 changed files with 9 additions and 0 deletions:
GRU_W10-step2_Rgl-2 (1).ipynb (+2, -0)
LSTM_W10-step2_Rgl-2 (1).ipynb (+2, -0)
README.md (+3, -0)
RNN_W10-step2_Rgl-2 (1).ipynb (+2, -0)
sipp-3.3.zip (+0, -0)
GRU_W10-step2_Rgl-2 (1).ipynb (new file, mode 0 → 100644)
{"cells":[{"cell_type":"code","execution_count":1,"metadata":{"id":"pL8kU9d45Zwn","executionInfo":{"status":"ok","timestamp":1699023724631,"user_tz":-60,"elapsed":6466,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["import pandas as pd\n","import numpy as np\n","import seaborn as sns\n","import matplotlib.pyplot as plt\n","from sklearn.preprocessing import LabelEncoder,OneHotEncoder\n","from sklearn.metrics import classification_report\n","import sklearn.metrics as metrics\n","import os\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from torch.utils.data import Dataset\n","from torch.utils.data import DataLoader\n","#from pytorchtools import EarlyStopping\n","from tqdm.notebook import tqdm\n","import time\n","import datetime\n","import io\n","import random\n","import statistics\n","from sklearn.model_selection import KFold\n","from sklearn.model_selection import StratifiedKFold\n","from sklearn.model_selection import train_test_split\n","from statistics import mean\n","from collections import Counter\n","\n","def seed_everything(seed=0):\n"," os.environ['PYTHONHASHSEED'] = str(seed)\n"," np.random.seed(seed)\n"," torch.manual_seed(seed)\n"," random.seed(seed)\n"," torch.cuda.manual_seed(seed)\n"," torch.cuda.manual_seed_all(seed)\n"," torch.backends.cudnn.deterministic = True\n"," torch.backends.cudnn.benchmark = False\n","\n","seed_everything(230)"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')\n","%cd /content/drive/My Drive/Colab Notebooks/Data"],"metadata":{"id":"yMuqHvkjtKDm"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"executionInfo":{"elapsed":9,"status":"aborted","timestamp":1699023727583,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"},"user_tz":-60},"id":"d0ClOl-YYoM-"},"outputs":[],"source":["df1 = pd.read_csv('sample_2_1_F.csv')\n","df2 = pd.read_csv('sample_2_2_F.csv')\n","df3 = pd.read_csv('sample_2_3_F.csv')\n","df4 = pd.read_csv('sample_2_4_F.csv')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"r02nNgBGTRlb","executionInfo":{"status":"aborted","timestamp":1699023727584,"user_tz":-60,"elapsed":10,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["# Define the custom dataset\n","class CustomDataset(Dataset):\n"," def __init__(self, X, Y, sequence_length, forward_step):\n"," self.X = X\n"," self.Y = Y\n"," self.sequence_length = sequence_length\n"," self.forward_step = forward_step\n","\n"," def __len__(self):\n"," return len(self.X)\n","\n"," def pad_sequence(self, x, seq_length):\n"," diff = seq_length - x.shape[0]\n"," x = F.pad(x, (0, 0, 0, diff))\n"," return x\n","\n"," def __getitem__(self, idx):\n"," idx = min(idx*self.forward_step, len(self.X)-1 - self.sequence_length)\n"," x = torch.from_numpy(self.X[idx : idx + self.sequence_length]).type(torch.float32)\n"," x = self.pad_sequence(x, self.sequence_length)\n","\n"," y = torch.from_numpy(self.Y[idx : idx + self.sequence_length]).type(torch.long)\n","\n"," if (y == 1).sum().item() >= 2:\n"," y = y.any().type(torch.long)\n"," else:\n"," y = torch.tensor(0, dtype=torch.long)\n","\n"," return x, y, torch.from_numpy(self.Y[idx : idx + self.sequence_length]).type(torch.long), self.X[idx : idx + self.sequence_length]\n","\n","# Define the RNN model\n","class DeepRNNClassifier(nn.Module):\n"," def __init__(self, input_size, hidden_size, output_size, num_layers, 
# %% Assemble the working frame
df = pd.concat([df1, df3, df2, df4], ignore_index=True)

# %% Encode features, derive labels, and split the data
# Encode data
le_y = LabelEncoder()

X = df.loc[:, ['Temps', 'Source', 'From', 'Call-ID', 'Contact']].apply(le_y.fit_transform)

def label_encoder(x):
    return x == 'Attack'

Y = df.loc[:, ['Label']].apply(label_encoder).astype('int')

# Split data
X_train = X.loc[len(df1) + 1 + len(df3):, :].values
y_train = Y.loc[len(df1) + 1 + len(df3):, :].values

X_val = X.loc[:len(df1), :].values
y_val = Y.loc[:len(df1), :].values

X_test = X.loc[:len(df1) + len(df3), :].values
y_test = Y.loc[:len(df1) + len(df3), :].values

# Second test set: the same traffic with subnet 192.168.56 rewritten to 199.162.59
df11 = pd.concat([df1, df3], ignore_index=True)

df11['Source'] = df11['Source'].str.replace('192.168.56', '199.162.59')
df11['From'] = df11['From'].str.replace('192.168.56', '199.162.59')
df11['Call-ID'] = df11['Call-ID'].str.replace('192.168.56', '199.162.59')
df11['Contact'] = df11['Contact'].str.replace('192.168.56', '199.162.59')

# Encode data
le_y = LabelEncoder()

X1 = df11.loc[:, ['Temps', 'Source', 'From', 'Call-ID', 'Contact']].apply(le_y.fit_transform)

Y1 = df11.loc[:, ['Label']].apply(label_encoder).astype('int')

X1_test = X1.values
y1_test = Y1.values
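One subtlety worth noting: DataFrame.apply calls fit_transform column by column, so the single LabelEncoder is refit for each field and the integer codes are only comparable within a column, never across columns. Note too that the encoder is fit on the full concatenated frame before the split, so train and test rows share one code space. A tiny sketch with made-up values (not from the dataset):

# Sketch: apply() refits the encoder once per column.
demo = pd.DataFrame({'Source': ['10.0.0.1', '10.0.0.2', '10.0.0.1'],
                     'Call-ID': ['a@host', 'a@host', 'b@host']})
print(demo.apply(LabelEncoder().fit_transform))
#    Source  Call-ID
# 0       0        0
# 1       1        0
# 2       0        1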
# %% Window size and forward step
sequence_length = 10
forward_step = 2
forward_step_inf = 2

# Create the training-validation dataset and dataloader
batch_size = 240
training_dataset = CustomDataset(X_train, y_train, sequence_length, forward_step)
train_dataloader = DataLoader(training_dataset, batch_size=batch_size, shuffle=True)

validation_dataset = CustomDataset(X_val, y_val, sequence_length, forward_step)
val_dataloader = DataLoader(validation_dataset, batch_size=batch_size, shuffle=False)

test_dataset = CustomDataset(X_test, y_test, sequence_length, forward_step_inf)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

test_dataset_Bis = CustomDataset(X1_test, y1_test, sequence_length, forward_step_inf)
test_dataloader_Bis = DataLoader(test_dataset_Bis, batch_size=batch_size, shuffle=False)

# %% Class balance of the three loaders
labels = []
for x, y, Y, X in train_dataloader:
    labels.extend(list(y.detach().cpu().numpy()))

values, counts = np.unique(labels, return_counts=True)
print(values, counts, counts[1] + counts[0])
print(counts[0] / np.sum(counts), counts[1] / np.sum(counts))

labels = []
for x, y, Y, X in val_dataloader:
    labels.extend(list(y.detach().cpu().numpy()))

values, counts = np.unique(labels, return_counts=True)
print(values, counts, counts[1] + counts[0])
print(counts[0] / np.sum(counts), counts[1] / np.sum(counts))

labels = []
for x, y, Y, X in test_dataloader:
    labels.extend(list(y.detach().cpu().numpy()))

values, counts = np.unique(labels, return_counts=True)
print(values, counts, counts[1] + counts[0])
print(counts[0] / np.sum(counts), counts[1] / np.sum(counts))

# %% Sanity-check batch shapes
print("Train")
for x, y, Y, X in train_dataloader:
    print(x.shape)
    break

print("Validation")
for x, y, Y, X in val_dataloader:
    print(x.shape)
    break

print("Test")
for x, y, Y, X in test_dataloader:
    print(x.shape)
    break
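Because __len__ returns len(self.X) while __getitem__ multiplies the index by forward_step and then clamps it, every index past (len(X) - 1 - sequence_length) / forward_step maps onto the same final window, so each pass revisits the tail window many times. A quick sketch with toy shapes that mirrors the clamping arithmetic:

# Sketch: start indices produced by __getitem__ for a 20-row input.
ds = CustomDataset(np.random.rand(20, 5), np.zeros((20, 1), dtype=int),
                   sequence_length=10, forward_step=2)
starts = [min(i * 2, 20 - 1 - 10) for i in range(len(ds))]
print(starts)  # [0, 2, 4, 6, 8, 9, 9, 9, ...] – the final start index repeats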
break"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"JqejHbBWIvRP","executionInfo":{"status":"aborted","timestamp":1699023727587,"user_tz":-60,"elapsed":10361,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["#df11"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ftxexraXnGPd","executionInfo":{"status":"aborted","timestamp":1699023727587,"user_tz":-60,"elapsed":10360,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["def dessiner_graphe(tab1, tab2):\n"," indices1 = range(len(tab1))\n"," indices2 = range(len(tab2))\n","\n"," valeurs1 = tab1\n"," valeurs2 = tab2\n","\n"," plt.figure(figsize=(25, 6))\n"," plt.plot(indices1, valeurs1, 'bo-', label='True', color='blue') # 'bo-' représente des points bleus reliés par des lignes\n"," plt.plot(indices2, valeurs2, 'bo-', label='Predicted', color='red')\n"," plt.xlabel('Indices')\n"," plt.ylabel('Valeurs')\n"," plt.title('Graphe des valeurs du tableau')\n"," plt.grid(True)\n"," plt.show()"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"V3_k-ql2ZNyj","executionInfo":{"status":"aborted","timestamp":1699023727588,"user_tz":-60,"elapsed":10361,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["def compare_elements(table1, table2):\n"," different_indices = []\n","\n"," for i in range(len(table1)):\n"," if table1[i] != table2[i]:\n"," different_indices.append(i)\n","\n"," return different_indices"]},{"cell_type":"markdown","metadata":{"id":"YR999bf-4t-B"},"source":["GRU"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"I60JwCB5ZXC0","executionInfo":{"status":"aborted","timestamp":1699023727588,"user_tz":-60,"elapsed":10356,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["# Create empty lists to store accuracy and loss\n","train_acc = []\n","train_loss = []\n","val_acc_list = []\n","val_loss_list = []\n","\n","# Define the hyperparameters\n","Nbr_row, Nbr_col = X_train.shape\n","\n","input_size = Nbr_col\n","hidden_size = pow(2, 4) # Number of hidden units\n","output_size = 2 # Number of output classes\n","num_layers = sequence_length # Number of hidden layers\n","learning_rate = 0.0001\n","num_epochs = 10\n","drop_out = 0.10\n","\n","# Define early stopping parameters\n","patience = 3 #3\n","best_loss = float('inf')\n","early_stop = False\n","epochs_no_improve = 0\n","\n","# Move the model to the GPU\n","device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n","model = GRUModel(input_size, hidden_size, output_size, num_layers, drop_out).to(device)\n","\n","model = nn.DataParallel(model).to(device)\n","\n","# Define the loss function and optimizer\n","criterion = nn.CrossEntropyLoss()\n","optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n","\n","since = time.time()\n","\n","m = 'GRU'\n","\n","# Train the model\n","for epoch in range(num_epochs):\n"," total_loss = 0.0\n"," total_correct = 0\n"," total = 0\n","\n"," total_val_loss = 0.0\n"," total_val_correct = 0\n"," total_val = 0\n","\n"," if early_stop:\n"," # torch.save(model.state_dict(), '/home/ubuntu/Bureau/collab/model_Seq/'+ dt_string +'/'+ m +'_Epoch-'+ str(epoch) +'_best_model.pt')\n"," break\n","\n"," pbar = tqdm(train_dataloader)\n","\n"," # Training loop\n"," model.train()\n"," for x, y, Y, X in pbar:\n"," # Forward pass\n"," x = x.to(device)\n"," y = y.to(device)\n"," optimizer.zero_grad()\n"," outputs = 
# %% [markdown]
# GRU

# %% Train the GRU with early stopping
# Create empty lists to store accuracy and loss
train_acc = []
train_loss = []
val_acc_list = []
val_loss_list = []

# Define the hyperparameters
Nbr_row, Nbr_col = X_train.shape

input_size = Nbr_col
hidden_size = pow(2, 4)  # Number of hidden units
output_size = 2  # Number of output classes
num_layers = sequence_length  # Number of hidden layers
learning_rate = 0.0001
num_epochs = 10
drop_out = 0.10

# Define early stopping parameters
patience = 3
best_loss = float('inf')
early_stop = False
epochs_no_improve = 0

# Move the model to the GPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = GRUModel(input_size, hidden_size, output_size, num_layers, drop_out).to(device)

model = nn.DataParallel(model).to(device)

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

since = time.time()

m = 'GRU'

# Train the model
for epoch in range(num_epochs):
    total_loss = 0.0
    total_correct = 0
    total = 0

    total_val_loss = 0.0
    total_val_correct = 0
    total_val = 0

    if early_stop:
        # torch.save(model.state_dict(), '/home/ubuntu/Bureau/collab/model_Seq/' + dt_string + '/' + m + '_Epoch-' + str(epoch) + '_best_model.pt')
        break

    pbar = tqdm(train_dataloader)

    # Training loop
    model.train()
    for x, y, Y, X in pbar:
        # Forward pass
        x = x.to(device)
        y = y.to(device)
        optimizer.zero_grad()
        outputs = model(x)
        # loss = criterion(outputs, y[:, 0].to(device))
        loss = criterion(outputs, y)
        total_loss += loss / batch_size

        # Backward pass and optimization
        loss.backward()
        optimizer.step()

        # Track running accuracy
        _, predicted = torch.max(outputs.data, 1)
        total_correct += (predicted == y).sum().item()
        total += y.size(0)

        # Calculate accuracy and loss for the epoch
        acc = total_correct / total
        loss = total_loss / len(train_dataloader)

        pbar.set_description('Epoch [{}/{}], Loss: {:.5f}, Acc: {:.5f}'.format(epoch + 1, num_epochs, total_loss.item(), acc))

    # Append accuracy and loss to the lists
    train_acc.append(acc)
    train_loss.append(loss.item())

    # Validation loop
    model.eval()
    with torch.no_grad():
        for x, y, Y, X in val_dataloader:
            x = x.to(device)
            y = y.to(device)
            outputs = model(x)
            loss = criterion(outputs, y)
            total_val_loss += loss / batch_size

            _, predicted = torch.max(outputs.data, 1)
            total_val_correct += (predicted == y).sum().item()
            total_val += y.size(0)

    val_acc = total_val_correct / total_val
    val_loss = total_val_loss / len(val_dataloader)

    print('Epoch [{}/{}], Validation Loss: {:.5f}, Validation Acc: {:.5f}'.format(epoch + 1, num_epochs, val_loss.item(), val_acc))

    val_acc_list.append(val_acc)
    val_loss_list.append(val_loss.item())

    # Early stopping check
    if val_loss < best_loss:
        best_loss = val_loss
        epochs_no_improve = 0
    else:
        epochs_no_improve += 1
        if epochs_no_improve == patience:
            early_stop = True

time_elapsed = time.time() - since

print('============== END ==============')
print('\n\nTraining complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('=== Train Loss: {:.5f}, Train Acc: {:.5f}'.format(loss, acc))
print('=== Validation Loss: {:.5f}, Validation Acc: {:.5f}'.format(val_loss, val_acc))

# %% Inspect misclassified windows on the test set
total_correct = 0
total_loss = 0
total_samples = 0

true_labels = []
predicted_labels = []

le_y_1 = LabelEncoder()
X11 = df.loc[:, ['Source']].apply(le_y_1.fit_transform)

i = 0

L = []
# Evaluate the model on the test dataset
model.eval()
j = 0
with torch.no_grad():
    for x, y, Y, X in test_dataloader:
        x = x.to(device)
        y = y.to(device)
        outputs = model(x)

        loss = criterion(outputs, y)
        total_loss += loss / batch_size

        _, predicted = torch.max(outputs.data, 1)
        total_correct += (predicted == y).sum().item()
        total_samples += y.size(0)

        # Convert tensors to numpy arrays
        true_labels.extend(y.cpu().numpy())
        predicted_labels.extend(predicted.cpu().numpy())

        comparison_result = y.cpu().numpy() == predicted.cpu().numpy()
        if False in comparison_result:
            i = 0
            print(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))
            for i in compare_elements(y.cpu().numpy(), predicted.cpu().numpy()):
                # Convert tensor to matrix
                matrix = np.matrix(X[i])

                # Extract the second column (the encoded Source field)
                second_column = matrix[:, 1]

                # Convert the second column to a NumPy array
                second_column_array = np.array(second_column)
                # print(le_y_1.inverse_transform(second_column_array))
                print(Y[i])
                L.append(np.array(Y[i].T).flatten())

            j += len(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))

            print(j)

# %% Count the distinct per-message label patterns among the errors
array_counts = Counter(map(tuple, L))
sorted_results = dict(sorted(array_counts.items(), key=lambda item: item[1], reverse=True))
len(array_counts), array_counts, sorted_results

# %% Same misclassification inspection on the rewritten-subnet test set
# (Identical to the two cells above, except that X11 is encoded from df11 and
# the loop runs over test_dataloader_Bis; the Counter cell is then repeated.)
# %% Full evaluation on the test set: accuracy, IoU, report, confusion matrix
total_correct = 0
total_loss = 0
total_samples = 0

true_labels = []
predicted_labels = []

le_y_1 = LabelEncoder()
X11 = df.loc[:, ['Source']].apply(le_y_1.fit_transform)

i = 0

since = time.time()
Seq_Time = []

# Evaluate the model on the test dataset
model.eval()
j = 0
with torch.no_grad():
    for x, y, Y, X in test_dataloader:
        x = x.to(device)
        y = y.to(device)
        outputs = model(x)

        loss = criterion(outputs, y)
        total_loss += loss / batch_size

        _, predicted = torch.max(outputs.data, 1)
        total_correct += (predicted == y).sum().item()
        total_samples += y.size(0)

        # Convert tensors to numpy arrays
        true_labels.extend(y.cpu().numpy())
        predicted_labels.extend(predicted.cpu().numpy())

        comparison_result = y.cpu().numpy() == predicted.cpu().numpy()
        if False in comparison_result:
            i = 0
            print(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))
            for i in compare_elements(y.cpu().numpy(), predicted.cpu().numpy()):
                # Convert tensor to matrix
                matrix = np.matrix(X[i])

                # Extract the second column (the encoded Source field)
                second_column = matrix[:, 1]

                # Convert the second column to a NumPy array
                second_column_array = np.array(second_column)
                print(le_y_1.inverse_transform(second_column_array))
                print(Y[i])

            dessiner_graphe(y.cpu().numpy(), predicted.cpu().numpy())
            j += len(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))

            print(j)

# Convert lists to numpy arrays
true_labels = np.array(true_labels)
predicted_labels = np.array(predicted_labels)

# Calculate the test accuracy and loss
test_accuracy = total_correct / total_samples
test_loss = total_loss / total_samples

print('=== Test Loss: {:.5f}, Test Acc: {:.5f}'.format(test_loss, test_accuracy))

# Decode the predicted and actual labels
label_decoder = {0: 'Good', 1: 'Attack'}

y_pred = [label_decoder[i] for i in predicted_labels]
y_true = [label_decoder[i] for i in true_labels]

# Generate the IoU. Pass labels= explicitly: without it sklearn sorts the
# classes alphabetically ('Attack' first), which would flip the matrix
# relative to the heatmap tick labels below and to the ravel() indices here.
cm = metrics.confusion_matrix(y_true, y_pred, labels=['Good', 'Attack'], normalize='true')
print('=== IoU: {:.5f}'.format(cm.ravel()[3] / (cm.ravel()[3] + cm.ravel()[2] + cm.ravel()[1])))

# Generate the classification report
report = classification_report(y_true, y_pred, digits=5)
print("Classification Report:")
print(report)

# Generate the confusion matrix
print("Confusion Matrix:")
print(cm)

sns.heatmap(cm, annot=True, xticklabels=['Good', 'Attack'], yticklabels=['Good', 'Attack'], fmt='.5f')
plt.title('GRU')
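The "IoU" printed above is the Jaccard index of the Attack class, TP / (TP + FP + FN), but computed from a row-normalized confusion matrix, so it blends per-class rates rather than raw counts. A count-based check with toy labels (sklearn's jaccard_score computes exactly this ratio from counts):

from sklearn.metrics import jaccard_score

y_true_demo = [1, 1, 0, 0, 1, 0]
y_pred_demo = [1, 0, 0, 1, 1, 0]
# TP = 2, FN = 1, FP = 1 -> IoU = 2 / (2 + 1 + 1) = 0.5
print(jaccard_score(y_true_demo, y_pred_demo))  # 0.5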
# %% Same full evaluation on the rewritten-subnet test set
# (Identical to the cell above, except that X11 is encoded from df11 and the
# loop runs over test_dataloader_Bis; the heatmap keeps the 'GRU' title.)

# %% Mean per-sequence inference time, batch size 1
test_dataset_Inf = CustomDataset(X_test, y_test, sequence_length, forward_step_inf)
test_dataloader_Inf = DataLoader(test_dataset_Inf, batch_size=1, shuffle=False)

Seq_Time = []

# Evaluate the model on the test dataset
model.eval()
j = 0
with torch.no_grad():
    for x, y, Y, X in test_dataloader_Inf:
        x = x.to(device)
        y = y.to(device)

        since_Test_Seq = time.time()
        outputs = model(x)
        Seq_Time.append(time.time() - since_Test_Seq)

execution_time_ms = mean(Seq_Time) * 1000
execution_time = datetime.timedelta(milliseconds=execution_time_ms)
minutes, seconds = divmod(execution_time.seconds, 60)
milliseconds = execution_time.microseconds // 1000
microseconds = execution_time.microseconds % 1000
print('mean Time Seq_Test (Seq = {:1.0f} msg) in {:.2f}m {:.2f}s {:.4f}ms {:.4f}µs'.format(sequence_length, minutes, seconds, milliseconds, microseconds))
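One caveat on these timings: CUDA kernels launch asynchronously, so time.time() around model(x) can return before the GPU has actually finished. A hedged variant of the loop body (not in the notebook) that synchronizes before reading the clock:

with torch.no_grad():
    for x, y, Y, X in test_dataloader_Inf:
        x = x.to(device)
        if torch.cuda.is_available():
            torch.cuda.synchronize()  # drain pending kernels before starting the clock
        since_Test_Seq = time.time()
        outputs = model(x)
        if torch.cuda.is_available():
            torch.cuda.synchronize()  # wait for the forward pass before stopping it
        Seq_Time.append(time.time() - since_Test_Seq)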
# %% Timing variants
# (Three further cells repeat the timing loop above: once with
# batch_size=batch_size on X_test/y_test, then with batch_size=1 and
# batch_size=batch_size on the rewritten-subnet X1_test/y1_test.)

# %% Save the trained weights to Drive
%cd /content/drive/My Drive/Colab Notebooks/step 2 Bis
PATH = m + "_Bis_W" + str(sequence_length) + "-step" + str(forward_step) + '.pt'
torch.save(model.state_dict(), PATH)
SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]}],"metadata":{"accelerator":"GPU","colab":{"provenance":[],"gpuType":"T4"},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.10.9"}},"nbformat":4,"nbformat_minor":0}
LSTM_W10-step2_Rgl-2 (1).ipynb (new file, mode 0 → 100644)
{"cells":[{"cell_type":"code","execution_count":1,"metadata":{"id":"pL8kU9d45Zwn","executionInfo":{"status":"ok","timestamp":1699023668619,"user_tz":-60,"elapsed":7656,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["import pandas as pd\n","import numpy as np\n","import seaborn as sns\n","import matplotlib.pyplot as plt\n","from sklearn.preprocessing import LabelEncoder,OneHotEncoder\n","from sklearn.metrics import classification_report\n","import sklearn.metrics as metrics\n","import os\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from torch.utils.data import Dataset\n","from torch.utils.data import DataLoader\n","#from pytorchtools import EarlyStopping\n","from tqdm.notebook import tqdm\n","import time\n","import datetime\n","import io\n","import random\n","import statistics\n","from sklearn.model_selection import KFold\n","from sklearn.model_selection import StratifiedKFold\n","from sklearn.model_selection import train_test_split\n","from statistics import mean\n","from collections import Counter\n","\n","def seed_everything(seed=0):\n"," os.environ['PYTHONHASHSEED'] = str(seed)\n"," np.random.seed(seed)\n"," torch.manual_seed(seed)\n"," random.seed(seed)\n"," torch.cuda.manual_seed(seed)\n"," torch.cuda.manual_seed_all(seed)\n"," torch.backends.cudnn.deterministic = True\n"," torch.backends.cudnn.benchmark = False\n","\n","seed_everything(230)"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')\n","%cd /content/drive/My Drive/Colab Notebooks/Data"],"metadata":{"id":"ihDTXpi7s3fb","executionInfo":{"status":"aborted","timestamp":1699023675954,"user_tz":-60,"elapsed":10,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"executionInfo":{"elapsed":9,"status":"aborted","timestamp":1699023675954,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"},"user_tz":-60},"id":"d0ClOl-YYoM-"},"outputs":[],"source":["df1 = pd.read_csv('sample_2_1_F.csv')\n","df2 = pd.read_csv('sample_2_2_F.csv')\n","df3 = pd.read_csv('sample_2_3_F.csv')\n","df4 = pd.read_csv('sample_2_4_F.csv')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"r02nNgBGTRlb","executionInfo":{"status":"aborted","timestamp":1699023675955,"user_tz":-60,"elapsed":10,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["# Define the custom dataset\n","class CustomDataset(Dataset):\n"," def __init__(self, X, Y, sequence_length, forward_step):\n"," self.X = X\n"," self.Y = Y\n"," self.sequence_length = sequence_length\n"," self.forward_step = forward_step\n","\n"," def __len__(self):\n"," return len(self.X)\n","\n"," def pad_sequence(self, x, seq_length):\n"," diff = seq_length - x.shape[0]\n"," x = F.pad(x, (0, 0, 0, diff))\n"," return x\n","\n"," def __getitem__(self, idx):\n"," idx = min(idx*self.forward_step, len(self.X)-1 - self.sequence_length)\n"," x = torch.from_numpy(self.X[idx : idx + self.sequence_length]).type(torch.float32)\n"," x = self.pad_sequence(x, self.sequence_length)\n","\n"," y = torch.from_numpy(self.Y[idx : idx + self.sequence_length]).type(torch.long)\n","\n"," if (y == 1).sum().item() >= 2:\n"," y = y.any().type(torch.long)\n"," else:\n"," y = torch.tensor(0, dtype=torch.long)\n","\n"," return x, y, torch.from_numpy(self.Y[idx : idx + self.sequence_length]).type(torch.long), self.X[idx : idx + 
self.sequence_length]\n","\n","# Define the RNN model\n","class DeepRNNClassifier(nn.Module):\n"," def __init__(self, input_size, hidden_size, output_size, num_layers, dropout_prob):\n"," super(DeepRNNClassifier, self).__init__()\n"," self.hidden_size = hidden_size\n"," self.num_layers = num_layers\n"," self.dropout_prob = dropout_prob\n"," self.rnn = nn.RNN(input_size, hidden_size, num_layers=num_layers, batch_first=True)\n"," self.dropout = nn.Dropout(dropout_prob)\n"," self.fc = nn.Linear(hidden_size, output_size)\n","\n"," def forward(self, x):\n"," hidden = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)\n"," out, hidden = self.rnn(x.to(device), hidden.to(device))\n"," out = self.dropout(out)\n"," out = out[:, -1, :]\n"," out = self.fc(out)\n"," return out\n","\n","# Define the LSTM model\n","class LSTMClassifier(nn.Module):\n"," def __init__(self, input_size, hidden_size, output_size, num_layers, dropout_prob):\n"," super(LSTMClassifier, self).__init__()\n"," self.hidden_size = hidden_size\n"," self.num_layers = num_layers\n"," self.dropout_prob = dropout_prob\n"," self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)\n"," self.dropout = nn.Dropout(dropout_prob)\n"," self.fc = nn.Linear(hidden_size, output_size)\n","\n"," def forward(self, x):\n"," h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) # Initial hidden state\n"," c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) # Initial cell state\n"," out, _ = self.lstm(x, (h0, c0))\n"," out = self.dropout(out)\n"," out = out[:, -1, :]\n"," out = self.fc(out) # Use only the last time step's output for classification\n"," return out\n","\n","# Define the GRU model\n","class GRUModel(nn.Module):\n"," def __init__(self, input_size, hidden_size, output_size, num_layers, dropout_prob):\n"," super(GRUModel, self).__init__()\n"," self.hidden_size = hidden_size\n"," self.num_layers = num_layers\n"," self.dropout_prob = dropout_prob\n"," self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)\n"," self.dropout = nn.Dropout(dropout_prob)\n"," self.fc = nn.Linear(hidden_size, output_size)\n","\n"," def forward(self, x):\n"," h = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) # Initial hidden state\n"," # Forward pass through GRU layer\n"," out, h = self.gru(x, h)\n"," out = self.dropout(out)\n"," out = out[:, -1, :]\n"," # Forward pass through the linear layer\n"," out = self.fc(out)\n"," return out"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"3QO8fTXkae3_","executionInfo":{"status":"aborted","timestamp":1699023675956,"user_tz":-60,"elapsed":10,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["df = pd.concat([df1, df3, df2, df4], ignore_index=True)"]},{"cell_type":"code","execution_count":null,"metadata":{"executionInfo":{"elapsed":10,"status":"aborted","timestamp":1699023675956,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"},"user_tz":-60},"id":"_kdgW9BJbcm5"},"outputs":[],"source":["# Encode data\n","le_y = LabelEncoder()\n","\n","X = df.loc[:, ['Temps','Source', 'From', 'Call-ID', 'Contact']].apply(le_y.fit_transform)\n","\n","def label_encoder(x):\n"," return x == 'Attack'\n","\n","Y = df.loc[:, ['Label']].apply(label_encoder).astype('int')\n","\n","# Split data\n","X_train = X.loc[len(df1)+1+len(df3) : ,:].values\n","y_train = Y.loc[len(df1)+1+len(df3) : ,:].values\n","\n","X_val = X.loc[:len(df1) ,:].values\n","y_val = 
Y.loc[:len(df1) ,:].values\n","\n","X_test = X.loc[ : len(df1)+len(df3) ,:].values\n","y_test = Y.loc[ : len(df1)+len(df3),:].values\n","\n","df11 = pd.concat([df1, df3], ignore_index=True)\n","\n","df11['Source'] = df11['Source'].str.replace('192.168.56', '199.162.59')\n","df11['From'] = df11['From'].str.replace('192.168.56', '199.162.59')\n","df11['Call-ID'] = df11['Call-ID'].str.replace('192.168.56', '199.162.59')\n","df11['Contact'] = df11['Contact'].str.replace('192.168.56', '199.162.59')\n","\n","# Encode data\n","le_y = LabelEncoder()\n","\n","X1 = df11.loc[:, ['Temps','Source', 'From', 'Call-ID', 'Contact']].apply(le_y.fit_transform)\n","\n","Y1 = df11.loc[:, ['Label']].apply(label_encoder).astype('int')\n","\n","X1_test = X1.values\n","y1_test = Y1.values"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"XVxJBVnW9q-W","executionInfo":{"status":"aborted","timestamp":1699023675956,"user_tz":-60,"elapsed":10,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["# Window size and forword step\n","sequence_length = 10\n","forward_step = 2\n","forward_step_inf = 2\n","\n","# Create the training-validation dataset and dataloader\n","batch_size = 240\n","training_dataset = CustomDataset(X_train, y_train, sequence_length, forward_step)\n","train_dataloader = DataLoader(training_dataset, batch_size=batch_size, shuffle=True)\n","\n","validation_dataset = CustomDataset(X_val, y_val, sequence_length, forward_step)\n","val_dataloader = DataLoader(validation_dataset, batch_size=batch_size, shuffle=False)\n","\n","test_dataset = CustomDataset(X_test, y_test, sequence_length, forward_step_inf)\n","test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n","\n","test_dataset_Bis = CustomDataset(X1_test, y1_test, sequence_length, forward_step_inf)\n","test_dataloader_Bis = DataLoader(test_dataset_Bis, batch_size=batch_size, shuffle=False)"]},{"cell_type":"code","execution_count":null,"metadata":{"executionInfo":{"elapsed":11,"status":"aborted","timestamp":1699023675957,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"},"user_tz":-60},"id":"QjXSdmNUaftB"},"outputs":[],"source":["labels = []\n","for x, y, Y, X in train_dataloader:\n"," labels.extend(list(y.detach().cpu().numpy()))\n"," if np.unique(labels, return_counts=True)[1][1]==92315:\n"," break\n","\n","values, counts = np.unique(labels, return_counts=True)\n","print(values, counts, counts[1]+counts[0])\n","print(counts[0] / np.sum(counts), counts[1] / np.sum(counts))\n","\n","labels = []\n","for x, y, Y, X in val_dataloader:\n"," labels.extend(list(y.detach().cpu().numpy()))\n","\n","values, counts = np.unique(labels, return_counts=True)\n","print(values, counts, counts[1]+counts[0])\n","print(counts[0] / np.sum(counts), counts[1] / np.sum(counts))\n","\n","labels = []\n","for x, y, Y, X in test_dataloader:\n"," labels.extend(list(y.detach().cpu().numpy()))\n","\n","values, counts = np.unique(labels, return_counts=True)\n","print(values, counts, counts[1]+counts[0])\n","print(counts[0] / np.sum(counts), counts[1] / np.sum(counts))"]},{"cell_type":"code","execution_count":null,"metadata":{"executionInfo":{"elapsed":11,"status":"aborted","timestamp":1699023675957,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"},"user_tz":-60},"id":"o_zSQF4vfGSo"},"outputs":[],"source":["print (\"Train\")\n","for x, y, Y, X in train_dataloader:\n"," print(x.shape)\n"," break\n","\n","print (\"Validation\")\n","for x, y, Y, X in 
val_dataloader:\n"," print(x.shape)\n"," break\n","\n","print (\"Test\")\n","for x, y, Y, X in test_dataloader:\n"," print(x.shape)\n"," break"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"JqejHbBWIvRP","executionInfo":{"status":"aborted","timestamp":1699023675958,"user_tz":-60,"elapsed":11,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["#df11"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ftxexraXnGPd","executionInfo":{"status":"aborted","timestamp":1699023675958,"user_tz":-60,"elapsed":11,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["def dessiner_graphe(tab1, tab2):\n"," indices1 = range(len(tab1))\n"," indices2 = range(len(tab2))\n","\n"," valeurs1 = tab1\n"," valeurs2 = tab2\n","\n"," plt.figure(figsize=(25, 6))\n"," plt.plot(indices1, valeurs1, 'bo-', label='True', color='blue') # 'bo-' représente des points bleus reliés par des lignes\n"," plt.plot(indices2, valeurs2, 'bo-', label='Predicted', color='red')\n"," plt.xlabel('Indices')\n"," plt.ylabel('Valeurs')\n"," plt.title('Graphe des valeurs du tableau')\n"," plt.grid(True)\n"," plt.show()"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"V3_k-ql2ZNyj","executionInfo":{"status":"aborted","timestamp":1699023675959,"user_tz":-60,"elapsed":12,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["def compare_elements(table1, table2):\n"," different_indices = []\n","\n"," for i in range(len(table1)):\n"," if table1[i] != table2[i]:\n"," different_indices.append(i)\n","\n"," return different_indices"]},{"cell_type":"markdown","metadata":{"id":"YR999bf-4t-B"},"source":["LSTM"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"I60JwCB5ZXC0","executionInfo":{"status":"aborted","timestamp":1699023675960,"user_tz":-60,"elapsed":16203,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["# Create empty lists to store accuracy and loss\n","train_acc = []\n","train_loss = []\n","val_acc_list = []\n","val_loss_list = []\n","\n","# Define the hyperparameters\n","Nbr_row, Nbr_col = X_train.shape\n","\n","input_size = Nbr_col\n","hidden_size = pow(2, 4) # Number of hidden units\n","output_size = 2 # Number of output classes\n","num_layers = sequence_length # Number of hidden layers\n","learning_rate = 0.0001\n","num_epochs = 10\n","drop_out = 0.10\n","\n","# Define early stopping parameters\n","patience = 3 #3\n","best_loss = float('inf')\n","early_stop = False\n","epochs_no_improve = 0\n","\n","# Move the model to the GPU\n","device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n","model = LSTMClassifier(input_size, hidden_size, output_size, num_layers, drop_out).to(device)\n","\n","model = nn.DataParallel(model).to(device)\n","\n","# Define the loss function and optimizer\n","criterion = nn.CrossEntropyLoss()\n","optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n","\n","since = time.time()\n","\n","m = 'LSTM'\n","\n","# Train the model\n","for epoch in range(num_epochs):\n"," total_loss = 0.0\n"," total_correct = 0\n"," total = 0\n","\n"," total_val_loss = 0.0\n"," total_val_correct = 0\n"," total_val = 0\n","\n"," if early_stop:\n"," # torch.save(model.state_dict(), '/home/ubuntu/Bureau/collab/model_Seq/'+ dt_string +'/'+ m +'_Epoch-'+ str(epoch) +'_best_model.pt')\n"," break\n","\n"," pbar = tqdm(train_dataloader)\n","\n"," # Training loop\n"," model.train()\n"," for 
x, y, Y, X in pbar:\n"," # Forward pass\n"," x = x.to(device)\n"," y = y.to(device)\n"," optimizer.zero_grad()\n"," outputs = model(x)\n"," # loss = criterion(outputs, y[:, 0].to(device))\n"," loss = criterion(outputs, y)\n"," total_loss += loss / batch_size\n","\n"," # Backward pass and optimization\n"," loss.backward()\n"," optimizer.step()\n","\n"," # Forward pass\n"," _, predicted = torch.max(outputs.data, 1)\n"," total_correct += (predicted == y).sum().item()\n"," total += y.size(0)\n","\n"," # Calculate accuracy and loss for the epoch\n"," acc = total_correct / total\n"," loss = total_loss / len(train_dataloader)\n","\n"," pbar.set_description('Epoch [{}/{}], Loss: {:.5f}, Acc: {:.5f}'.format(epoch+1, num_epochs, total_loss.item(), acc))\n","\n"," # Append accuracy and loss to the lists\n"," train_acc.append(acc)\n"," train_loss.append(loss.item())\n","\n"," # Validation loop\n"," model.eval()\n"," with torch.no_grad():\n"," for x, y, Y, X in val_dataloader:\n"," x = x.to(device)\n"," y = y.to(device)\n"," outputs = model(x)\n"," loss = criterion(outputs, y)\n"," total_val_loss += loss / batch_size\n","\n"," _, predicted = torch.max(outputs.data, 1)\n"," total_val_correct += (predicted == y).sum().item()\n"," total_val += y.size(0)\n","\n"," val_acc = total_val_correct / total_val\n"," val_loss = total_val_loss / len(val_dataloader)\n","\n"," print('Epoch [{}/{}], Validation Loss: {:.5f}, Validation Acc: {:.5f}'.format(epoch+1, num_epochs, val_loss.item(), val_acc ))\n","\n"," val_acc_list.append(val_acc)\n"," val_loss_list.append(val_loss.item())\n","\n"," # Early stopping check\n"," if val_loss < best_loss:\n"," best_loss = val_loss\n"," epochs_no_improve = 0\n"," else:\n"," epochs_no_improve += 1\n"," if epochs_no_improve == patience:\n"," early_stop = True\n","\n","time_elapsed = time.time() - since\n","\n","print('============== FIN ==============')\n","print('\\n\\nTraining complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n","print('=== Train Loss: {:.5f}, Train Acc: {:.5f}'.format( loss, acc ))\n","print('=== Validation Loss: {:.5f}, Validation Acc: {:.5f}'.format( val_loss, val_acc ))"]},{"cell_type":"code","source":["total_correct = 0\n","total_loss = 0\n","total_samples = 0\n","\n","true_labels = []\n","predicted_labels = []\n","\n","le_y_1 = LabelEncoder()\n","X11 = df.loc[:, ['Source']].apply(le_y_1.fit_transform)\n","\n","i = 0\n","\n","L = []\n","# Evaluate the model on the test dataset\n","model.eval()\n","j=0\n","with torch.no_grad():\n"," for x, y, Y, X in test_dataloader:\n","\n"," x = x.to(device)\n"," y = y.to(device)\n"," outputs = model(x)\n","\n"," loss = criterion(outputs, y)\n"," total_loss += loss / batch_size\n","\n"," _, predicted = torch.max(outputs.data, 1)\n"," total_correct += (predicted == y).sum().item()\n"," total_samples += y.size(0)\n","\n"," # Convert tensors to numpy arrays\n"," true_labels.extend(y.cpu().numpy())\n"," predicted_labels.extend(predicted.cpu().numpy())\n","\n"," comparison_result = y.cpu().numpy() == predicted.cpu().numpy()\n"," if False in comparison_result:\n"," i = 0\n"," print(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n"," for i in compare_elements(y.cpu().numpy(), predicted.cpu().numpy()):\n"," # Convert tensor to matrix\n"," matrix = np.matrix(X[i])\n","\n"," # Extract the second column\n"," second_column = matrix[:, 1]\n","\n"," # Convert the second column to a NumPy array\n"," second_column_array = np.array(second_column)\n"," 
#print(le_y_1.inverse_transform(second_column_array))\n"," print(Y[i])\n"," L.append(np.array(Y[i].T).flatten())\n","\n"," j += len(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n","\n"," print(j)\n"],"metadata":{"id":"BhZ_xaWqpg2v","executionInfo":{"status":"aborted","timestamp":1699023675960,"user_tz":-60,"elapsed":16199,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["array_counts = Counter(map(tuple, L))\n","sorted_results = dict(sorted(array_counts.items(), key=lambda item: item[1], reverse=True))\n","len(array_counts), array_counts, sorted_results"],"metadata":{"id":"76IksnscpZlj","executionInfo":{"status":"aborted","timestamp":1699023675960,"user_tz":-60,"elapsed":16197,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["total_correct = 0\n","total_loss = 0\n","total_samples = 0\n","\n","true_labels = []\n","predicted_labels = []\n","\n","le_y_1 = LabelEncoder()\n","X11 = df11.loc[:, ['Source']].apply(le_y_1.fit_transform)\n","\n","i = 0\n","\n","L = []\n","# Evaluate the model on the test dataset\n","model.eval()\n","j=0\n","with torch.no_grad():\n"," for x, y, Y, X in test_dataloader_Bis:\n","\n"," x = x.to(device)\n"," y = y.to(device)\n"," outputs = model(x)\n","\n"," loss = criterion(outputs, y)\n"," total_loss += loss / batch_size\n","\n"," _, predicted = torch.max(outputs.data, 1)\n"," total_correct += (predicted == y).sum().item()\n"," total_samples += y.size(0)\n","\n"," # Convert tensors to numpy arrays\n"," true_labels.extend(y.cpu().numpy())\n"," predicted_labels.extend(predicted.cpu().numpy())\n","\n"," comparison_result = y.cpu().numpy() == predicted.cpu().numpy()\n"," if False in comparison_result:\n"," i = 0\n"," print(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n"," for i in compare_elements(y.cpu().numpy(), predicted.cpu().numpy()):\n"," # Convert tensor to matrix\n"," matrix = np.matrix(X[i])\n","\n"," # Extract the second column\n"," second_column = matrix[:, 1]\n","\n"," # Convert the second column to a NumPy array\n"," second_column_array = np.array(second_column)\n"," #print(le_y_1.inverse_transform(second_column_array))\n"," print(Y[i])\n"," L.append(np.array(Y[i].T).flatten())\n","\n"," j += len(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n","\n"," print(j)\n"],"metadata":{"id":"AlCUaE0CpgAf","executionInfo":{"status":"aborted","timestamp":1699023675960,"user_tz":-60,"elapsed":16194,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["array_counts = Counter(map(tuple, L))\n","sorted_results = dict(sorted(array_counts.items(), key=lambda item: item[1], reverse=True))\n","len(array_counts), array_counts, sorted_results"],"metadata":{"id":"8-QV3OZ0pWnE","executionInfo":{"status":"aborted","timestamp":1699023675960,"user_tz":-60,"elapsed":16192,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"gyWl2K1RZj8l","executionInfo":{"status":"aborted","timestamp":1699023675961,"user_tz":-60,"elapsed":16189,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["total_correct = 0\n","total_loss = 0\n","total_samples = 0\n","\n","true_labels = []\n","predicted_labels = []\n","\n","le_y_1 = 
LabelEncoder()\n","X11 = df.loc[:, ['Source']].apply(le_y_1.fit_transform)\n","\n","i = 0\n","\n","since = time.time()\n","Seq_Time = []\n","\n","# Evaluate the model on the test dataset\n","model.eval()\n","j=0\n","with torch.no_grad():\n"," for x, y, Y, X in test_dataloader:\n"," x = x.to(device)\n"," y = y.to(device)\n"," outputs = model(x)\n","\n"," loss = criterion(outputs, y)\n"," total_loss += loss / batch_size\n","\n"," _, predicted = torch.max(outputs.data, 1)\n"," total_correct += (predicted == y).sum().item()\n"," total_samples += y.size(0)\n","\n"," # Convert tensors to numpy arrays\n"," true_labels.extend(y.cpu().numpy())\n"," predicted_labels.extend(predicted.cpu().numpy())\n","\n"," comparison_result = y.cpu().numpy() == predicted.cpu().numpy()\n"," if False in comparison_result:\n"," i = 0\n"," print(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n"," for i in compare_elements(y.cpu().numpy(), predicted.cpu().numpy()):\n"," # Convert tensor to matrix\n"," matrix = np.matrix(X[i])\n","\n"," # Extract the second column\n"," second_column = matrix[:, 1]\n","\n"," # Convert the second column to a NumPy array\n"," second_column_array = np.array(second_column)\n"," print(le_y_1.inverse_transform(second_column_array))\n"," print(Y[i])\n","\n"," dessiner_graphe(y.cpu().numpy(), predicted.cpu().numpy())\n"," j += len(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n","\n"," print(j)\n","\n","# Convert lists to numpy arrays\n","true_labels = np.array(true_labels)\n","predicted_labels = np.array(predicted_labels)\n","\n","# Calculate the test accuracy and loss\n","test_accuracy = total_correct / total_samples\n","test_loss = total_loss / total_samples\n","\n","print('=== Test Loss: {:.5f}, Test Acc: {:.5f}'.format(test_loss, test_accuracy ))\n","\n","# Decode the predicted and actual labels\n","label_decoder = {0: 'Good', 1: 'Attack'}\n","\n","y_pred = [label_decoder[i] for i in predicted_labels]\n","y_true = [label_decoder[i] for i in true_labels]\n","\n","# Generate the IoU\n","cm = metrics.confusion_matrix(y_true, y_pred, normalize='true')\n","print('=== IoU: {:.5f}'.format(cm.ravel()[3]/(cm.ravel()[3]+cm.ravel()[2]+cm.ravel()[1])))\n","\n","# Generate the classification report\n","report = classification_report(y_true, y_pred, digits=5)\n","print(\"Classification Report:\")\n","print(report)\n","\n","# Generate the confusion matrix\n","print(\"Confusion Matrix:\")\n","print(cm)\n","\n","sns.heatmap(cm, annot=True, xticklabels=['Good', 'Attack'], yticklabels=['Good', 'Attack'], fmt='.5f')\n","plt.title('LSTM')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"9TtFErxC764G","executionInfo":{"status":"aborted","timestamp":1699023675961,"user_tz":-60,"elapsed":16185,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["total_correct = 0\n","total_loss = 0\n","total_samples = 0\n","\n","true_labels = []\n","predicted_labels = []\n","\n","le_y_1 = LabelEncoder()\n","X11 = df11.loc[:, ['Source']].apply(le_y_1.fit_transform)\n","\n","i = 0\n","\n","# Evaluate the model on the test dataset\n","model.eval()\n","j=0\n","with torch.no_grad():\n"," for x, y, Y, X in test_dataloader_Bis:\n","\n"," x = x.to(device)\n"," y = y.to(device)\n"," outputs = model(x)\n","\n"," loss = criterion(outputs, y)\n"," total_loss += loss / batch_size\n","\n"," _, predicted = torch.max(outputs.data, 1)\n"," total_correct += (predicted == y).sum().item()\n"," total_samples += y.size(0)\n","\n"," # Convert tensors to numpy arrays\n"," 
true_labels.extend(y.cpu().numpy())\n"," predicted_labels.extend(predicted.cpu().numpy())\n","\n"," comparison_result = y.cpu().numpy() == predicted.cpu().numpy()\n"," if False in comparison_result:\n"," i = 0\n"," print(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n"," for i in compare_elements(y.cpu().numpy(), predicted.cpu().numpy()):\n"," # Convert tensor to matrix\n"," matrix = np.matrix(X[i])\n","\n"," # Extract the second column\n"," second_column = matrix[:, 1]\n","\n"," # Convert the second column to a NumPy array\n"," second_column_array = np.array(second_column)\n"," print(le_y_1.inverse_transform(second_column_array))\n"," print(Y[i])\n","\n"," dessiner_graphe(y.cpu().numpy(), predicted.cpu().numpy())\n"," j += len(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n","\n"," print(j)\n","\n","# Convert lists to numpy arrays\n","true_labels = np.array(true_labels)\n","predicted_labels = np.array(predicted_labels)\n","\n","# Calculate the test accuracy and loss\n","test_accuracy = total_correct / total_samples\n","test_loss = total_loss / total_samples\n","\n","print('=== Test Loss: {:.5f}, Test Acc: {:.5f}'.format(test_loss, test_accuracy ))\n","\n","# Decode the predicted and actual labels\n","label_decoder = {0: 'Good', 1: 'Attack'}\n","\n","y_pred = [label_decoder[i] for i in predicted_labels]\n","y_true = [label_decoder[i] for i in true_labels]\n","\n","# Generate the IoU\n","cm = metrics.confusion_matrix(y_true, y_pred, normalize='true')\n","print('=== IoU: {:.5f}'.format(cm.ravel()[3]/(cm.ravel()[3]+cm.ravel()[2]+cm.ravel()[1])))\n","\n","# Generate the classification report\n","report = classification_report(y_true, y_pred, digits=5)\n","print(\"Classification Report:\")\n","print(report)\n","\n","# Generate the confusion matrix\n","print(\"Confusion Matrix:\")\n","print(cm)\n","\n","sns.heatmap(cm, annot=True, xticklabels=['Good', 'Attack'], yticklabels=['Good', 'Attack'], fmt='.5f')\n","plt.title('LSTM')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"KU-j5HZ27p4X","executionInfo":{"status":"aborted","timestamp":1699023675961,"user_tz":-60,"elapsed":16182,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["test_dataset_Inf = CustomDataset(X_test, y_test, sequence_length, forward_step_inf)\n","test_dataloader_Inf = DataLoader(test_dataset_Inf, batch_size=1, shuffle=False)\n","\n","Seq_Time = []\n","\n","# Evaluate the model on the test dataset\n","model.eval()\n","j=0\n","with torch.no_grad():\n"," for x, y, Y, X in test_dataloader_Inf:\n"," x = x.to(device)\n"," y = y.to(device)\n","\n"," since_Test_Seq = time.time()\n"," outputs = model(x)\n"," Seq_Time.append(time.time() - since_Test_Seq)\n","\n","execution_time_ms = mean(Seq_Time) * 1000\n","execution_time = datetime.timedelta(milliseconds=execution_time_ms)\n","minutes, seconds = divmod(execution_time.seconds, 60)\n","milliseconds = execution_time.microseconds // 1000\n","microseconds = execution_time.microseconds % 1000\n","print('mean Time Seq_Test (Seq = {:1.0f} msg) in {:.2f}m {:.2f}s {:.4f}ms {:.4f}µs'.format(sequence_length, minutes, seconds, milliseconds, microseconds))"]},{"cell_type":"code","source":["test_dataset_Inf = CustomDataset(X_test, y_test, sequence_length, forward_step_inf)\n","test_dataloader_Inf = DataLoader(test_dataset_Inf, batch_size=batch_size, shuffle=False)\n","\n","Seq_Time = []\n","\n","# Evaluate the model on the test dataset\n","model.eval()\n","j=0\n","with torch.no_grad():\n"," for x, y, Y, X in 
test_dataloader_Inf:\n"," x = x.to(device)\n"," y = y.to(device)\n","\n"," since_Test_Seq = time.time()\n"," outputs = model(x)\n"," Seq_Time.append(time.time() - since_Test_Seq)\n","\n","execution_time_ms = mean(Seq_Time) * 1000\n","execution_time = datetime.timedelta(milliseconds=execution_time_ms)\n","minutes, seconds = divmod(execution_time.seconds, 60)\n","milliseconds = execution_time.microseconds // 1000\n","microseconds = execution_time.microseconds % 1000\n","print('mean Time Seq_Test (Seq = {:1.0f} msg) in {:.2f}m {:.2f}s {:.4f}ms {:.4f}µs'.format(sequence_length, minutes, seconds, milliseconds, microseconds))"],"metadata":{"id":"scpg1yI7XX_L","executionInfo":{"status":"aborted","timestamp":1699023675962,"user_tz":-60,"elapsed":16180,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"05rK_I3N4oCj","executionInfo":{"status":"aborted","timestamp":1699023676220,"user_tz":-60,"elapsed":2,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["test_dataset_Inf = CustomDataset(X1_test, y1_test, sequence_length, forward_step_inf)\n","test_dataloader_Inf = DataLoader(test_dataset_Inf, batch_size=1, shuffle=False)\n","\n","Seq_Time = []\n","\n","# Evaluate the model on the test dataset\n","model.eval()\n","j=0\n","with torch.no_grad():\n"," for x, y, Y, X in test_dataloader_Inf:\n"," x = x.to(device)\n"," y = y.to(device)\n","\n"," since_Test_Seq = time.time()\n"," outputs = model(x)\n"," Seq_Time.append(time.time() - since_Test_Seq)\n","\n","execution_time_ms = mean(Seq_Time) * 1000\n","execution_time = datetime.timedelta(milliseconds=execution_time_ms)\n","minutes, seconds = divmod(execution_time.seconds, 60)\n","milliseconds = execution_time.microseconds // 1000\n","microseconds = execution_time.microseconds % 1000\n","print('mean Time Seq_Test (Seq = {:1.0f} msg) in {:.2f}m {:.2f}s {:.4f}ms {:.4f}µs'.format(sequence_length, minutes, seconds, milliseconds, microseconds))"]},{"cell_type":"code","source":["test_dataset_Inf = CustomDataset(X1_test, y1_test, sequence_length, forward_step_inf)\n","test_dataloader_Inf = DataLoader(test_dataset_Inf, batch_size=batch_size, shuffle=False)\n","\n","Seq_Time = []\n","\n","# Evaluate the model on the test dataset\n","model.eval()\n","j=0\n","with torch.no_grad():\n"," for x, y, Y, X in test_dataloader_Inf:\n"," x = x.to(device)\n"," y = y.to(device)\n","\n"," since_Test_Seq = time.time()\n"," outputs = model(x)\n"," Seq_Time.append(time.time() - since_Test_Seq)\n","\n","execution_time_ms = mean(Seq_Time) * 1000\n","execution_time = datetime.timedelta(milliseconds=execution_time_ms)\n","minutes, seconds = divmod(execution_time.seconds, 60)\n","milliseconds = execution_time.microseconds // 1000\n","microseconds = execution_time.microseconds % 1000\n","print('mean Time Seq_Test (Seq = {:1.0f} msg) in {:.2f}m {:.2f}s {:.4f}ms {:.4f}µs'.format(sequence_length, minutes, seconds, milliseconds, microseconds))"],"metadata":{"id":"zwq8hqETXbCl","executionInfo":{"status":"aborted","timestamp":1699023676220,"user_tz":-60,"elapsed":2,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["%cd /content/drive/My Drive/Colab Notebooks/step 2 Bis\n","PATH = m + \"_Bis_W\"+str(sequence_length) +\"-step\"+ str(forward_step)+'.pt'\n","torch.save(model.state_dict(), 
PATH)"],"metadata":{"id":"aRuD-GsCXV0_","executionInfo":{"status":"aborted","timestamp":1699023676220,"user_tz":-60,"elapsed":2,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]}],"metadata":{"accelerator":"GPU","colab":{"provenance":[],"gpuType":"T4"},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.10.9"}},"nbformat":4,"nbformat_minor":0}
README.md
# SIP-DDoS

Dataset: https://www.dropbox.com/scl/fi/6fidlecyoijng71wsrzsn/SIP-Dataset.zip?rlkey=bkctnqrvukv97n0ztdnxklcj8&dl=0
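The notebooks in this commit read four CSV samples from this archive. A minimal sketch of fetching the dataset and loading them (file names taken from the notebooks; the archive layout and local paths are assumptions) might be:

import io
import zipfile

import pandas as pd
import requests

# Hypothetical download of the archive linked above; dl=1 asks Dropbox for
# the raw file instead of the preview page.
URL = ("https://www.dropbox.com/scl/fi/6fidlecyoijng71wsrzsn/"
       "SIP-Dataset.zip?rlkey=bkctnqrvukv97n0ztdnxklcj8&dl=1")

archive = zipfile.ZipFile(io.BytesIO(requests.get(URL).content))
archive.extractall("data")  # assumes the CSVs sit at the archive root

# The notebooks read these four sample files.
frames = [pd.read_csv(f"data/sample_2_{i}_F.csv") for i in range(1, 5)]
df = pd.concat(frames, ignore_index=True)
print(df.shape)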
RNN_W10-step2_Rgl-2 (1).ipynb
{"cells":[{"cell_type":"code","execution_count":1,"metadata":{"id":"pL8kU9d45Zwn","executionInfo":{"status":"ok","timestamp":1699023521672,"user_tz":-60,"elapsed":5342,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["import pandas as pd\n","import numpy as np\n","import seaborn as sns\n","import matplotlib.pyplot as plt\n","from sklearn.preprocessing import LabelEncoder,OneHotEncoder\n","from sklearn.metrics import classification_report\n","import sklearn.metrics as metrics\n","import os\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from torch.utils.data import Dataset\n","from torch.utils.data import DataLoader\n","#from pytorchtools import EarlyStopping\n","from tqdm.notebook import tqdm\n","import time\n","import datetime\n","import io\n","import random\n","import statistics\n","from sklearn.model_selection import KFold\n","from sklearn.model_selection import StratifiedKFold\n","from sklearn.model_selection import train_test_split\n","from statistics import mean\n","from collections import Counter\n","\n","def seed_everything(seed=0):\n"," os.environ['PYTHONHASHSEED'] = str(seed)\n"," np.random.seed(seed)\n"," torch.manual_seed(seed)\n"," random.seed(seed)\n"," torch.cuda.manual_seed(seed)\n"," torch.cuda.manual_seed_all(seed)\n"," torch.backends.cudnn.deterministic = True\n"," torch.backends.cudnn.benchmark = False\n","\n","seed_everything(230)"]},{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')\n","%cd /content/drive/My Drive/Colab Notebooks/Data"],"metadata":{"id":"gXcmDmebsgGd"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"d0ClOl-YYoM-","executionInfo":{"status":"aborted","timestamp":1699023526113,"user_tz":-60,"elapsed":17,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["df1 = pd.read_csv('sample_2_1_F.csv')\n","df2 = pd.read_csv('sample_2_2_F.csv')\n","df3 = pd.read_csv('sample_2_3_F.csv')\n","df4 = pd.read_csv('sample_2_4_F.csv')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"r02nNgBGTRlb","executionInfo":{"status":"aborted","timestamp":1699023526114,"user_tz":-60,"elapsed":17,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["# Define the custom dataset\n","class CustomDataset(Dataset):\n"," def __init__(self, X, Y, sequence_length, forward_step):\n"," self.X = X\n"," self.Y = Y\n"," self.sequence_length = sequence_length\n"," self.forward_step = forward_step\n","\n"," def __len__(self):\n"," return len(self.X)\n","\n"," def pad_sequence(self, x, seq_length):\n"," diff = seq_length - x.shape[0]\n"," x = F.pad(x, (0, 0, 0, diff))\n"," return x\n","\n"," def __getitem__(self, idx):\n"," idx = min(idx*self.forward_step, len(self.X)-1 - self.sequence_length)\n"," x = torch.from_numpy(self.X[idx : idx + self.sequence_length]).type(torch.float32)\n"," x = self.pad_sequence(x, self.sequence_length)\n","\n"," y = torch.from_numpy(self.Y[idx : idx + self.sequence_length]).type(torch.long)\n","\n"," if (y == 1).sum().item() >= 2:\n"," y = y.any().type(torch.long)\n"," else:\n"," y = torch.tensor(0, dtype=torch.long)\n","\n"," return x, y, torch.from_numpy(self.Y[idx : idx + self.sequence_length]).type(torch.long), self.X[idx : idx + self.sequence_length]\n","\n","# Define the RNN model\n","class DeepRNNClassifier(nn.Module):\n"," def __init__(self, input_size, hidden_size, output_size, num_layers, 
        super(DeepRNNClassifier, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = nn.RNN(input_size, hidden_size, num_layers=num_layers, batch_first=True)
        self.dropout = nn.Dropout(dropout_prob)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        hidden = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
        out, hidden = self.rnn(x, hidden)
        out = self.dropout(out)
        out = out[:, -1, :]      # keep only the last time step
        return self.fc(out)

# %% LSTM classifier.
class LSTMClassifier(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers, dropout_prob):
        super(LSTMClassifier, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.dropout = nn.Dropout(dropout_prob)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)  # initial hidden state
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)  # initial cell state
        out, _ = self.lstm(x, (h0, c0))
        out = self.dropout(out)
        out = out[:, -1, :]
        return self.fc(out)      # classify from the last time step only

# %% GRU classifier.
class GRUModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers, dropout_prob):
        super(GRUModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
        self.dropout = nn.Dropout(dropout_prob)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        h = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)  # initial hidden state
        out, h = self.gru(x, h)
        out = self.dropout(out)
        out = out[:, -1, :]
        return self.fc(out)

# %% Concatenate the samples: df1 and df3 first, then df2 and df4.
df = pd.concat([df1, df3, df2, df4], ignore_index=True)

# %% Encode the features and split train / validation / test.
le_y = LabelEncoder()

# apply() refits the encoder per column, so each feature is encoded independently.
X = df.loc[:, ['Temps', 'Source', 'From', 'Call-ID', 'Contact']].apply(le_y.fit_transform)

def label_encoder(x):
    return x == 'Attack'

Y = df.loc[:, ['Label']].apply(label_encoder).astype('int')

# Given the concat order [df1, df3, df2, df4]:
#   train = df2 + df4, validation = df1, test = df1 + df3.
X_train = X.loc[len(df1) + 1 + len(df3):, :].values
y_train = Y.loc[len(df1) + 1 + len(df3):, :].values

X_val = X.loc[:len(df1), :].values
y_val = Y.loc[:len(df1), :].values

X_test = X.loc[:len(df1) + len(df3), :].values
y_test = Y.loc[:len(df1) + len(df3), :].values

# Second test set: the same traffic with the source network rewritten, so the
# model sees addresses it was never trained on.
df11 = pd.concat([df1, df3], ignore_index=True)
for col in ['Source', 'From', 'Call-ID', 'Contact']:
    df11[col] = df11[col].str.replace('192.168.56', '199.162.59', regex=False)

le_y = LabelEncoder()
X1 = df11.loc[:, ['Temps', 'Source', 'From', 'Call-ID', 'Contact']].apply(le_y.fit_transform)
Y1 = df11.loc[:, ['Label']].apply(label_encoder).astype('int')

X1_test = X1.values
y1_test = Y1.values

# %% Window size and forward step.
sequence_length = 10
forward_step = 2
forward_step_inf = 2

# %% Build the datasets and dataloaders.
batch_size = 240

training_dataset = CustomDataset(X_train, y_train, sequence_length, forward_step)
train_dataloader = DataLoader(training_dataset, batch_size=batch_size, shuffle=True)

validation_dataset = CustomDataset(X_val, y_val, sequence_length, forward_step)
val_dataloader = DataLoader(validation_dataset, batch_size=batch_size, shuffle=False)

test_dataset = CustomDataset(X_test, y_test, sequence_length, forward_step_inf)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

test_dataset_Bis = CustomDataset(X1_test, y1_test, sequence_length, forward_step_inf)
test_dataloader_Bis = DataLoader(test_dataset_Bis, batch_size=batch_size, shuffle=False)

# %% Class balance of each split (the three original cells were identical).
for name, loader in [('train', train_dataloader),
                     ('validation', val_dataloader),
                     ('test', test_dataloader)]:
    labels = []
    for x, y, Y_w, X_w in loader:
        labels.extend(list(y.detach().cpu().numpy()))
    values, counts = np.unique(labels, return_counts=True)
    print(name, values, counts, counts.sum())
    print(counts[0] / counts.sum(), counts[1] / counts.sum())

# %% Sanity-check the batch shapes.
for name, loader in [('Train', train_dataloader),
                     ('Validation', val_dataloader),
                     ('Test', test_dataloader)]:
    print(name)
    for x, y, Y_w, X_w in loader:
        print(x.shape)
        break
break"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"JJ1rC7WSTq2F","executionInfo":{"status":"aborted","timestamp":1699023526116,"user_tz":-60,"elapsed":17,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["# Create empty lists to store accuracy and loss\n","train_acc = []\n","train_loss = []\n","val_acc_list = []\n","val_loss_list = []\n","\n","# Define the hyperparameters\n","Nbr_row, Nbr_col = X_train.shape\n","\n","input_size = Nbr_col\n","hidden_size = pow(2, 4) # Number of hidden units\n","output_size = 2 # Number of output classes\n","num_layers = sequence_length # Number of hidden layers\n","learning_rate = 0.0001\n","num_epochs = 10\n","drop_out = 0.10\n","\n","# Define early stopping parameters\n","patience = 3 #3\n","best_loss = float('inf')\n","early_stop = False\n","epochs_no_improve = 0\n","\n","# Move the model to the GPU\n","device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n","model = DeepRNNClassifier(input_size, hidden_size, output_size, num_layers, drop_out).to(device)\n","\n","model = nn.DataParallel(model).to(device)\n","\n","# Define the loss function and optimizer\n","criterion = nn.CrossEntropyLoss()#weight=torch.from_numpy(np.array([0.6, 0.4])).to(device).float())\n","optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n","\n","since = time.time()\n","\n","# datetime object containing current date and time\n","# now = datetime.now()\n","# dt_string = now.strftime(\"%d-%m-%Y %H:%M:%S\") # dd/mm/YY H:M:S\n","\n","m = 'RNN'\n","# m = 'LSTM'\n","# m = 'GRU'\n","\n","# Train the model\n","for epoch in range(num_epochs):\n"," total_loss = 0.0\n"," total_correct = 0\n"," total = 0\n","\n"," total_val_loss = 0.0\n"," total_val_correct = 0\n"," total_val = 0\n","\n"," if early_stop:\n"," # torch.save(model.state_dict(), '/home/ubuntu/Bureau/collab/model_Seq/'+ dt_string +'/'+ m + '_Epoch-'+ str(epoch) +'_best_model.pt')\n"," break\n","\n"," pbar = tqdm(train_dataloader)\n","\n"," # Training loop\n"," model.train()\n"," for x, y, Y, X in pbar:\n"," # Forward pass\n"," x = x.to(device)\n"," y = y.to(device)\n"," optimizer.zero_grad()\n"," outputs = model(x)\n","\n"," loss = criterion(outputs, y)\n"," total_loss += loss / batch_size\n","\n"," # Backward pass and optimization\n"," loss.backward()\n"," optimizer.step()\n","\n"," # Forward pass\n"," _, predicted = torch.max(outputs.data, 1)\n"," total_correct += (predicted == y).sum().item()\n"," total += y.size(0)\n","\n"," # Calculate accuracy and loss for the epoch\n"," acc = total_correct / total\n"," loss = total_loss / len(train_dataloader)\n","\n"," pbar.set_description('Epoch [{}/{}], Loss: {:.5f}, Acc: {:.5f}'.format(epoch+1, num_epochs, total_loss.item(), acc))\n","\n"," # Validation loop\n"," model.eval()\n"," with torch.no_grad():\n"," for x, y, Y, X in val_dataloader:\n"," x = x.to(device)\n"," y = y.to(device)\n"," outputs = model(x)\n"," loss = criterion(outputs, y)\n"," total_val_loss += loss / batch_size\n","\n"," _, predicted = torch.max(outputs.data, 1)\n"," total_val_correct += (predicted == y).sum().item()\n"," total_val += y.size(0)\n","\n"," val_acc = total_val_correct / total_val\n"," val_loss = total_val_loss / len(val_dataloader)\n","\n"," print('Epoch [{}/{}], Validation Loss: {:.5f}, Validation Acc: {:.5f}'.format(epoch+1, num_epochs, val_loss.item(), val_acc ))\n","\n"," # Early stopping check\n"," if val_loss < best_loss:\n"," best_loss = val_loss\n"," epochs_no_improve = 0\n"," else:\n"," 
        epochs_no_improve += 1
        if epochs_no_improve == patience:
            early_stop = True

time_elapsed = time.time() - since

print('============== END ==============')
print('\n\nTraining complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('=== Train Loss: {:.5f}, Train Acc: {:.5f}'.format(loss_avg, acc))
print('=== Validation Loss: {:.5f}, Validation Acc: {:.5f}'.format(val_loss, val_acc))

# %% Plot true vs. predicted labels for one evaluation batch.
def dessiner_graphe(tab1, tab2):
    plt.figure(figsize=(25, 6))
    plt.plot(range(len(tab1)), tab1, 'o-', color='blue', label='True')
    plt.plot(range(len(tab2)), tab2, 'o-', color='red', label='Predicted')
    plt.xlabel('Index')
    plt.ylabel('Value')
    plt.title('True vs. predicted window labels')
    plt.legend()
    plt.grid(True)
    plt.show()

# %% Indices where two label arrays disagree.
def compare_elements(table1, table2):
    return [i for i in range(len(table1)) if table1[i] != table2[i]]

# %% Collect the raw label pattern of every misclassified window.
def collect_misclassified(dataloader):
    L = []
    n_errors = 0
    model.eval()
    with torch.no_grad():
        for x, y, Y_w, X_w in dataloader:
            x, y = x.to(device), y.to(device)
            outputs = model(x)
            _, predicted = torch.max(outputs, 1)

            wrong = compare_elements(y.cpu().numpy(), predicted.cpu().numpy())
            if len(wrong) > 0:
                print(wrong)
                for i in wrong:
                    print(Y_w[i])
                    L.append(np.array(Y_w[i]).flatten())
                n_errors += len(wrong)
                print(n_errors)
    return L

L = collect_misclassified(test_dataloader)
SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["array_counts = Counter(map(tuple, L))\n","sorted_results = dict(sorted(array_counts.items(), key=lambda item: item[1], reverse=True))\n","len(array_counts), array_counts, sorted_results"],"metadata":{"id":"TMxBkkK8oRlc","executionInfo":{"status":"aborted","timestamp":1699023526118,"user_tz":-60,"elapsed":18,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["total_correct = 0\n","total_loss = 0\n","total_samples = 0\n","\n","true_labels = []\n","predicted_labels = []\n","\n","le_y_1 = LabelEncoder()\n","X11 = df11.loc[:, ['Source']].apply(le_y_1.fit_transform)\n","\n","i = 0\n","\n","L = []\n","# Evaluate the model on the test dataset\n","model.eval()\n","j=0\n","with torch.no_grad():\n"," for x, y, Y, X in test_dataloader_Bis:\n","\n"," x = x.to(device)\n"," y = y.to(device)\n"," outputs = model(x)\n","\n"," loss = criterion(outputs, y)\n"," total_loss += loss / batch_size\n","\n"," _, predicted = torch.max(outputs.data, 1)\n"," total_correct += (predicted == y).sum().item()\n"," total_samples += y.size(0)\n","\n"," # Convert tensors to numpy arrays\n"," true_labels.extend(y.cpu().numpy())\n"," predicted_labels.extend(predicted.cpu().numpy())\n","\n"," comparison_result = y.cpu().numpy() == predicted.cpu().numpy()\n"," if False in comparison_result:\n"," i = 0\n"," print(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n"," for i in compare_elements(y.cpu().numpy(), predicted.cpu().numpy()):\n"," # Convert tensor to matrix\n"," matrix = np.matrix(X[i])\n","\n"," # Extract the second column\n"," second_column = matrix[:, 1]\n","\n"," # Convert the second column to a NumPy array\n"," second_column_array = np.array(second_column)\n"," #print(le_y_1.inverse_transform(second_column_array))\n"," print(Y[i])\n"," L.append(np.array(Y[i].T).flatten())\n","\n"," j += len(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n","\n"," print(j)\n"],"metadata":{"id":"ukeXaL4NlFcl","executionInfo":{"status":"aborted","timestamp":1699023526118,"user_tz":-60,"elapsed":18,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["array_counts = Counter(map(tuple, L))\n","sorted_results = dict(sorted(array_counts.items(), key=lambda item: item[1], reverse=True))\n","len(array_counts), array_counts, sorted_results"],"metadata":{"id":"nY7_kDuNoG61","executionInfo":{"status":"aborted","timestamp":1699023526119,"user_tz":-60,"elapsed":19,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"UEtMvPnqUY_P","executionInfo":{"status":"aborted","timestamp":1699023526119,"user_tz":-60,"elapsed":19,"user":{"displayName":"Oussama SBAI","userId":"04208739488405682316"}}},"outputs":[],"source":["total_correct = 0\n","total_loss = 0\n","total_samples = 0\n","\n","true_labels = []\n","predicted_labels = []\n","\n","le_y_1 = LabelEncoder()\n","X11 = df.loc[:, ['Source']].apply(le_y_1.fit_transform)\n","\n","i = 0\n","\n","# Evaluate the model on the test dataset\n","model.eval()\n","j=0\n","with torch.no_grad():\n"," for x, y, Y, X in test_dataloader:\n","\n"," x = x.to(device)\n"," y = y.to(device)\n"," outputs = model(x)\n","\n"," loss = criterion(outputs, y)\n"," total_loss += loss / batch_size\n","\n"," _, predicted 
= torch.max(outputs.data, 1)\n"," total_correct += (predicted == y).sum().item()\n"," total_samples += y.size(0)\n","\n"," # Convert tensors to numpy arrays\n"," true_labels.extend(y.cpu().numpy())\n"," predicted_labels.extend(predicted.cpu().numpy())\n","\n"," comparison_result = y.cpu().numpy() == predicted.cpu().numpy()\n"," if False in comparison_result:\n"," i = 0\n"," print(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n"," for i in compare_elements(y.cpu().numpy(), predicted.cpu().numpy()):\n"," # Convert tensor to matrix\n"," matrix = np.matrix(X[i])\n","\n"," # Extract the second column\n"," second_column = matrix[:, 1]\n","\n"," # Convert the second column to a NumPy array\n"," second_column_array = np.array(second_column)\n"," print(le_y_1.inverse_transform(second_column_array))\n"," print(Y[i])\n","\n"," dessiner_graphe(y.cpu().numpy(), predicted.cpu().numpy())\n"," j += len(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n","\n"," print(j)\n","\n","# Convert lists to numpy arrays\n","true_labels = np.array(true_labels)\n","predicted_labels = np.array(predicted_labels)\n","\n","# Calculate the test accuracy and loss\n","test_accuracy = total_correct / total_samples\n","test_loss = total_loss / total_samples\n","\n","print('=== Test Loss: {:.5f}, Test Acc: {:.5f}'.format(test_loss, test_accuracy ))\n","\n","# Decode the predicted and actual labels\n","label_decoder = {0: 'Good', 1: 'Attack'}\n","\n","y_pred = [label_decoder[i] for i in predicted_labels]\n","y_true = [label_decoder[i] for i in true_labels]\n","\n","# Generate the IoU\n","cm = metrics.confusion_matrix(y_true, y_pred, normalize='true')\n","print('=== IoU: {:.5f}'.format(cm.ravel()[3]/(cm.ravel()[3]+cm.ravel()[2]+cm.ravel()[1])))\n","\n","# Generate the classification report\n","report = classification_report(y_true, y_pred, digits=5)\n","print(\"Classification Report:\")\n","print(report)\n","\n","# Generate the confusion matrix\n","print(\"Confusion Matrix:\")\n","print(cm)\n","\n","sns.heatmap(cm, annot=True, xticklabels=['Good', 'Attack'], yticklabels=['Good', 'Attack'], fmt='.5f')\n","plt.title('RNN')"]},{"cell_type":"code","source":["total_correct = 0\n","total_loss = 0\n","total_samples = 0\n","\n","true_labels = []\n","predicted_labels = []\n","\n","le_y_1 = LabelEncoder()\n","X11 = df11.loc[:, ['Source']].apply(le_y_1.fit_transform)\n","\n","i = 0\n","\n","# Evaluate the model on the test dataset\n","model.eval()\n","j=0\n","with torch.no_grad():\n"," for x, y, Y, X in test_dataloader_Bis:\n","\n"," x = x.to(device)\n"," y = y.to(device)\n"," outputs = model(x)\n","\n"," loss = criterion(outputs, y)\n"," total_loss += loss / batch_size\n","\n"," _, predicted = torch.max(outputs.data, 1)\n"," total_correct += (predicted == y).sum().item()\n"," total_samples += y.size(0)\n","\n"," # Convert tensors to numpy arrays\n"," true_labels.extend(y.cpu().numpy())\n"," predicted_labels.extend(predicted.cpu().numpy())\n","\n"," comparison_result = y.cpu().numpy() == predicted.cpu().numpy()\n"," if False in comparison_result:\n"," i = 0\n"," print(compare_elements(y.cpu().numpy(), predicted.cpu().numpy()))\n"," for i in compare_elements(y.cpu().numpy(), predicted.cpu().numpy()):\n"," # Convert tensor to matrix\n"," matrix = np.matrix(X[i])\n","\n"," # Extract the second column\n"," second_column = matrix[:, 1]\n","\n"," # Convert the second column to a NumPy array\n"," second_column_array = np.array(second_column)\n"," print(le_y_1.inverse_transform(second_column_array))\n"," 
# %% Test set with the rewritten (unseen) source addresses.
evaluate(test_dataloader_Bis, df11, 'RNN')

# %% Mean inference latency (the four original cells varied only in the
# feature set and the batch size).
def mean_latency(X_arr, y_arr, batch):
    ds = CustomDataset(X_arr, y_arr, sequence_length, forward_step_inf)
    dl = DataLoader(ds, batch_size=batch, shuffle=False)

    times = []
    model.eval()
    with torch.no_grad():
        for x, y, Y_w, X_w in dl:
            x = x.to(device)
            if torch.cuda.is_available():
                torch.cuda.synchronize()  # flush pending GPU work before timing
            start = time.time()
            outputs = model(x)
            if torch.cuda.is_available():
                torch.cuda.synchronize()
            times.append(time.time() - start)

    print('mean Time Seq_Test (Seq = {:d} msg, batch = {:d}): {:.4f} ms'.format(
        sequence_length, batch, mean(times) * 1000))

mean_latency(X_test, y_test, 1)             # one window per forward pass
mean_latency(X_test, y_test, batch_size)    # full batches
mean_latency(X1_test, y1_test, 1)           # rewritten addresses
mean_latency(X1_test, y1_test, batch_size)

# %% Model summary.
print(model)

# %% Save the trained weights.
%cd /content/drive/My Drive/Colab Notebooks/step 2 Bis
PATH = m + "_Bis_W" + str(sequence_length) + "-step" + str(forward_step) + '.pt'
torch.save(model.state_dict(), PATH)
sipp-3.3.zip
File added
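This archive presumably packages SIPp 3.3, the open-source SIP traffic generator commonly used to produce benign call loads and flooding attacks for datasets like this one. A representative invocation (illustrative only, not taken from this repository; the target address is hypothetical) is:

# Built-in UAC scenario, 50 calls per second, stop after 1000 calls.
# ./sipp -sn uac -r 50 -m 1000 192.168.56.101:5060

Here -sn uac selects the embedded user-agent-client scenario, -r sets the call rate per second, and -m stops the run after the given number of calls.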