Spaces: Runtime error
Upload 4 files
- Meso_4.ipynb +525 -0
- first.ipynb +265 -0
- gradio_api.py +10 -0
- inference_2.py +153 -0
Meso_4.ipynb
ADDED
@@ -0,0 +1,525 @@
# In[1]:

from tensorflow.keras import Model
from tensorflow.keras import Input
from tensorflow.keras.layers import (
    Conv2D, ReLU, ELU, LeakyReLU, Dropout, Dense,
    MaxPooling2D, Flatten, BatchNormalization,
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import load_model
from sklearn.metrics import classification_report

import pickle  # added: save_model_history() below uses pickle, but the notebook never imported it
import numpy as np
import matplotlib.pyplot as plt

# In[2]:

IMG_WIDTH = 256

# In[3]:

def get_datagen(use_default_augmentation=True, **kwargs):
    kwargs.update({'rescale': 1. / 255})
    if use_default_augmentation:
        kwargs.update({
            'rotation_range': 15,
            'zoom_range': 0.2,
            'brightness_range': (0.8, 1.2),
            'channel_shift_range': 30,
            'horizontal_flip': True,
        })
    return ImageDataGenerator(**kwargs)

# In[4]:

def get_train_data_generator(
    train_data_dir,
    batch_size,
    validation_split=None,
    use_default_augmentation=True,
    augmentations=None
):
    if not augmentations:
        augmentations = {}

    train_datagen = get_datagen(
        use_default_augmentation=use_default_augmentation,
        validation_split=validation_split if validation_split else 0.0,
        **augmentations
    )

    train_generator = train_datagen.flow_from_directory(
        directory=train_data_dir,
        target_size=(IMG_WIDTH, IMG_WIDTH),
        batch_size=batch_size,
        class_mode='binary',
        subset='training',
    )

    validation_generator = None

    if validation_split:
        validation_generator = train_datagen.flow_from_directory(
            directory=train_data_dir,
            target_size=(IMG_WIDTH, IMG_WIDTH),
            batch_size=batch_size,
            class_mode='binary',
            subset='validation'
        )

    return train_generator, validation_generator

# In[5]:

def get_test_data_generator(test_data_dir, batch_size, shuffle=False):
    test_datagen = get_datagen(use_default_augmentation=False)
    return test_datagen.flow_from_directory(
        directory=test_data_dir,
        target_size=(IMG_WIDTH, IMG_WIDTH),
        batch_size=batch_size,
        class_mode='binary',
        shuffle=shuffle
    )
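A minimal usage sketch for the two generator helpers above. The directory layout (class subfolders such as data/train/real and data/train/fake) is an assumption; flow_from_directory infers the binary labels from whatever subfolders exist:

    train_gen, val_gen = get_train_data_generator(
        train_data_dir='data/train',   # hypothetical path
        batch_size=32,
        validation_split=0.2,          # carves 20% of the same tree out for validation
    )
    test_gen = get_test_data_generator('data/test', batch_size=64)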
# In[6]:

def activation_layer(ip, activation, *args):
    # Note: this builds all three activation layers and then indexes the dict;
    # only the selected one ends up connected in the graph.
    return {'relu': ReLU(*args)(ip),
            'elu': ELU(*args)(ip),
            'lrelu': LeakyReLU(*args)(ip)}[activation]

# In[7]:

def conv2D(ip,
           filters,
           kernel_size,
           activation,
           padding='same',
           pool_size=(2, 2)):
    layer = Conv2D(filters,
                   kernel_size=kernel_size,
                   padding=padding)(ip)

    layer = activation_layer(layer, activation=activation)

    layer = BatchNormalization()(layer)

    return MaxPooling2D(pool_size=pool_size, padding=padding)(layer)

# In[8]:

def fully_connected_layer(ip,
                          hidden_activation,
                          dropout):
    layer = Dense(16)(ip)
    layer = activation_layer(layer, hidden_activation, *[0.1])
    return Dropout(rate=dropout)(layer)

# In[9]:

# Caveat: the Input(...) default argument is created once, at definition time,
# so every call to build_model() without an explicit `ip` reuses the same
# input tensor (and therefore the same graph).
def build_model(ip=Input(shape=(IMG_WIDTH, IMG_WIDTH, 3)),
                activation='relu',
                dropout=0.5,
                hidden_activation='lrelu'):

    layer = conv2D(ip, filters=8, kernel_size=(3, 3), activation=activation)
    layer = conv2D(layer, filters=8, kernel_size=(5, 5), activation=activation)
    layer = conv2D(layer, filters=16, kernel_size=(5, 5), activation=activation)
    layer = conv2D(layer, filters=16, kernel_size=(5, 5), activation=activation, pool_size=(4, 4))

    layer = Flatten()(layer)
    layer = Dropout(rate=dropout)(layer)

    layer = fully_connected_layer(layer, hidden_activation=hidden_activation, dropout=dropout)

    op_layer = Dense(1, activation='sigmoid')(layer)

    model = Model(ip, op_layer)

    return model
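A short sketch of building and compiling the Meso-4 network defined above. The compile step is an assumption (this notebook never shows one explicitly); the Adam learning rate mirrors train_model's default of 1e-3:

    model = build_model()
    model.compile(optimizer=Adam(learning_rate=1e-3),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.summary()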
# In[10]:

def evaluate_model(model, test_data_dir, batch_size):
    data = get_test_data_generator(test_data_dir, batch_size)
    return model.evaluate(data)


def predict(model, data, steps=None, threshold=0.5):
    predictions = model.predict(data, steps=steps, verbose=1)
    return predictions, np.where(predictions >= threshold, 1, 0)


def save_model_history(history, filename):
    with open(filename, 'wb') as f:
        pickle.dump(history.history, f)

# In[11]:

def get_activation_model(model, conv_idx):
    conv_layers = [layer for layer in model.layers if 'conv' in layer.name]
    selected_layers = [layer for index, layer in enumerate(conv_layers) if index in conv_idx]
    activation_model = Model(
        inputs=model.inputs,
        outputs=[layer.output for layer in selected_layers]
    )
    return activation_model

# In[15]:

def train_model(model,
                train_data_dir,
                validation_split=None,
                batch_size=32,
                use_default_augmentation=True,
                augmentations=None,
                epochs=30,
                lr=1e-3,
                loss='binary_crossentropy',
                compile=True,
                lr_decay=True,
                decay_rate=0.10,
                decay_limit=1e-6,
                checkpoint=True,
                stop_early=True,
                monitor='val_accuracy',
                mode='max',
                patience=20,
                tensorboard=True,
                loss_curve=True):

    train_generator, validation_generator = get_train_data_generator(
        train_data_dir=train_data_dir,
        batch_size=batch_size,
        validation_split=validation_split,
        use_default_augmentation=use_default_augmentation,
        augmentations=augmentations
    )

    # Fix: the original cell accepted compile/lr/loss but never used them, so
    # model.fit() would fail on an uncompiled model.
    if compile:
        model.compile(optimizer=Adam(learning_rate=lr), loss=loss, metrics=['accuracy'])

    callbacks = []
    if checkpoint:
        filepath = 'mesonet_trained.hdf5'  # plain string; the f-prefix was unnecessary
        model_checkpoint = ModelCheckpoint(
            filepath, monitor='val_accuracy', verbose=1,
            save_best_only=True
        )
        callbacks.append(model_checkpoint)

    if stop_early:
        callbacks.append(
            EarlyStopping(
                monitor=monitor,
                mode=mode,
                patience=patience,
                verbose=1
            )
        )

    # Note: lr_decay, decay_rate, decay_limit, tensorboard, and loss_curve are
    # accepted but unused in this cell.
    history = model.fit(
        train_generator,
        epochs=epochs,
        verbose=1,
        callbacks=callbacks,
        validation_data=validation_generator,
        steps_per_epoch=train_generator.samples // batch_size,
        validation_steps=validation_generator.samples // batch_size if validation_generator else None,
    )

    return history
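Tying the pieces together, a hedged end-to-end sketch (the paths, the 0.2 split, and the history filename are assumptions, not values from the notebook):

    model = build_model()
    history = train_model(model, train_data_dir='data/train', validation_split=0.2, epochs=30)
    save_model_history(history, 'mesonet_history.pkl')  # hypothetical filename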
# In[14]:

model_exp = load_model('/Users/jarvis/pymycod/Deepfakes/Meso_4.ipynb')

# Output (error): OSError: Unable to open file (file signature not found)
# h5py rejects the file because the path points at this notebook (.ipynb),
# not at an HDF5 weights file such as the mesonet_trained.hdf5 written by
# the checkpoint callback above.

# In[ ]:

evaluate_model(model_exp, 'data/test', 64)

# Output (apparently recorded in an earlier Colab session; the stored
# execution count is 124 and predates the failed load above):
# Found 1945 images belonging to 2 classes.
# 31/31 [==============================] - 7s 235ms/step - loss: 0.0998 - accuracy: 0.9625
# [0.09982584416866302, 0.9624678492546082]

# In[ ]:

print(get_classification_report(model_exp, 'data/test'))

# Note: get_classification_report is never defined in this notebook; it
# presumably wraps sklearn's classification_report (imported in cell 1).
# Output:
#               precision    recall  f1-score   support
#
#            0       0.96      0.94      0.95       773
#            1       0.96      0.97      0.97      1172
#
#     accuracy                           0.96      1945
#    macro avg       0.96      0.96      0.96      1945
# weighted avg       0.96      0.96      0.96      1945

# In[ ]:

# (empty cell)

# In[ ]:

evaluate_model(model_exp, 'data/test', 64)

# Output (a later Colab run, execution count 129, with different weights):
# Found 1945 images belonging to 2 classes.
# 31/31 [==============================] - 8s 241ms/step - loss: 0.2321 - accuracy: 0.9080
# [0.23209184408187866, 0.9079691767692566]

# In[ ]:

print(get_classification_report(model_exp, 'data/test'))

# Output:
#               precision    recall  f1-score   support
#
#            0       0.90      0.87      0.88       773
#            1       0.91      0.93      0.92      1172
#
#     accuracy                           0.91      1945
#    macro avg       0.91      0.90      0.90      1945
# weighted avg       0.91      0.91      0.91      1945

# Notebook metadata: Colab (GPU accelerator), kernel Python 3, Python 3.8.13.
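Since the notebook calls get_classification_report without ever defining it, here is a minimal sketch of what such a helper might look like, assuming it pairs the predict() utility with sklearn's classification_report. The body and the batch-size default are assumptions, not the author's actual code:

    def get_classification_report(model, test_data_dir, batch_size=64):
        # Hypothetical reconstruction, not the author's helper.
        # shuffle=False in get_test_data_generator keeps data.classes aligned
        # with the prediction order.
        data = get_test_data_generator(test_data_dir, batch_size)
        _, predicted_classes = predict(model, data)
        return classification_report(data.classes, predicted_classes)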
first.ipynb
ADDED
@@ -0,0 +1,265 @@
# In[1]:

import cv2
import torch
from onnx2pytorch import ConvertModel
from keras.models import load_model
import onnx

# In[3]:

model1 = load_model('/Users/jarvis/pymycod/Deepfakes/DeepDetect/mesonet_trained.hdf5')

# Logged output (Apple Silicon TensorFlow-Metal plugin):
# Metal device set to: Apple M1
# systemMemory: 8.00 GB
# maxCacheSize: 2.67 GB
# (stderr: PluggableDevice created for GPU 0; the NUMA warnings are benign)

# In[ ]:

import numpy as np
import keras.utils as image

img_width, img_height = 256, 256
img = image.load_img('/Users/jarvis/Downloads/im6.jpeg', target_size=(img_width, img_height))
img = image.img_to_array(img)
img = img / 255.0  # fix: match the 1./255 rescaling used by the training generators
img = np.expand_dims(img, axis=0)
ans = model1.predict(img)
# Fix: the sigmoid output is a float in (0, 1); the original compared
# ans[0] == 0, which is almost never true. Threshold at 0.5 instead,
# keeping the original's mapping of low scores to "fake".
if ans[0][0] < 0.5:
    print("the image is fake afff")
else:
    print("the image is realllll boii")

# Output:
# 1/1 [==============================] - 0s 198ms/step
# the image is realllll boii

# In[ ]:

onnx_model = onnx.load('/Users/jarvis/pymycod/Deepfakes/multimodal_deepfake_detection/checkpoints/efficientnet.onnx')
# pytorch_model = ConvertModel(onnx_model)

# Output (error): google.protobuf DecodeError: Wrong wire type in tag.
# The file cannot be parsed as a protobuf, i.e. it is truncated, corrupt,
# or not actually an ONNX export.

# In[ ]:

def load_img_modality_model(args):
    '''Loads image modality model.'''
    rgb_encoder = pytorch_model  # undefined here: the ONNX conversion above failed

    ckpt = torch.load('checkpoints/model.pth', map_location=torch.device('cpu'))
    rgb_encoder.load_state_dict(ckpt['rgb_encoder'], strict=True)
    rgb_encoder.eval()
    return rgb_encoder

img_model = load_img_modality_model(args)  # `args` is never defined in this notebook

def preprocess_img(face):
    face = face / 255
    face = cv2.resize(face, (256, 256))
    # face = face.transpose(2, 0, 1)  # (W, H, C) -> (C, W, H)
    face_pt = torch.unsqueeze(torch.Tensor(face), dim=0)
    return face_pt

def preprocess_video(input_video, n_frames=3):
    v_cap = cv2.VideoCapture(input_video)
    v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Pick 'n_frames' evenly spaced frames to sample
    if n_frames is None:
        sample = np.arange(0, v_len)
    else:
        sample = np.linspace(0, v_len - 1, n_frames).astype(int)

    # Loop through frames.
    frames = []
    for j in range(v_len):
        success = v_cap.grab()
        if j in sample:
            # Load frame
            success, frame = v_cap.retrieve()
            if not success:
                continue
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = preprocess_img(frame)
            frames.append(frame)
    v_cap.release()
    return frames


def deepfakes_video_predict(input_video):
    '''Perform inference on a video.'''
    video_frames = preprocess_video(input_video)
    real_faces_list = []
    fake_faces_list = []

    for face in video_frames:
        img_grads = img_model.forward(face)
        img_grads = img_grads.cpu().detach().numpy()
        img_grads_np = np.squeeze(img_grads)
        real_faces_list.append(img_grads_np[0])
        fake_faces_list.append(img_grads_np[1])

    real_faces_mean = np.mean(real_faces_list)
    fake_faces_mean = np.mean(fake_faces_list)

    if real_faces_mean > 0.5:
        preds = round(real_faces_mean * 100, 3)
        text2 = f"The video is REAL. \nConfidence score is: {preds}%"
    else:
        preds = round(fake_faces_mean * 100, 3)
        text2 = f"The video is FAKE. \nConfidence score is: {preds}%"

    return text2

# In[ ]:

deepfakes_video_predict("/Users/jarvis/Documents/Ss/ras_df.mov")

# Output (error): AttributeError: 'Functional' object has no attribute 'forward'
# The traceback shows the loop actually ran model1.forward(face): a Keras
# Functional model has no .forward(); call the model directly or use .predict().

# In[ ]:

from onnx import checker

checker.check_model("/Users/jarvis/pymycod/Deepfakes/multimodal_deepfake_detection/checkpoints/efficientnet.onnx")

# Output (error): ValidationError: Unable to parse proto from file ... Please
# check if it is a valid protobuf file of proto.
# (Consistent with the DecodeError above: the file is not a parseable ONNX protobuf.)

# Notebook metadata: kernel "tensor" (Python 3), Python 3.8.13.
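Since model1 is a Keras Functional model, the .forward() call in the traceback above cannot work. A minimal sketch of the Keras-side equivalent for one preprocessed frame, as a hypothetical fix; it assumes the MesoNet HDF5 model from the first cell, which expects NHWC float input:

    import numpy as np

    def mesonet_frame_score(model, face_pt):
        # face_pt comes from preprocess_img(): shape (1, 256, 256, 3), values in [0, 1]
        face_np = face_pt.numpy().astype(np.float32)
        score = model.predict(face_np, verbose=0)  # sigmoid "real" probability, shape (1, 1)
        return float(score[0][0])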
gradio_api.py
ADDED
@@ -0,0 +1,10 @@
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")
# result = client.predict(
#     "C:\\Users\\hp\\Downloads\\im1.jpeg", api_name="/predict"
# )

result = client.predict(
    {"video": "C:\\Users\\hp\\Downloads\\ras_df.mp4", "subtitles": None},
    api_name="/predict_1",
)
print(result)
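If the endpoint names are uncertain, gradio_client can list them itself; the /predict and /predict_1 names above come from this file, but a quick check looks like:

    # Print every named endpoint with its expected parameters and return types.
    client.view_api(all_endpoints=True)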
inference_2.py
ADDED
@@ -0,0 +1,153 @@
import cv2
import onnx
import torch
import argparse
import numpy as np
from models import image

import warnings
from onnx2pytorch import ConvertModel

warnings.filterwarnings("ignore", message="The given NumPy array is not writable")
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", message="The given NumPy array is not writable")
    onnx_model = onnx.load('models/efficientnet.onnx')
    pytorch_model = ConvertModel(onnx_model)
torch.manual_seed(42)


audio_args = {
    'nb_samp': 64600,
    'first_conv': 1024,
    'in_channels': 1,
    'filts': [20, [20, 20], [20, 128], [128, 128]],
    'blocks': [2, 4],
    'nb_fc_node': 1024,
    'gru_node': 1024,
    'nb_gru_layer': 3,
    'nb_classes': 2,
}


def get_args(parser):
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--data_dir", type=str, default="datasets/train/fakeavceleb*")
    parser.add_argument("--LOAD_SIZE", type=int, default=256)
    parser.add_argument("--FINE_SIZE", type=int, default=224)
    parser.add_argument("--dropout", type=float, default=0.2)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--hidden", nargs="*", type=int, default=[])
    parser.add_argument("--hidden_sz", type=int, default=768)
    parser.add_argument("--img_embed_pool_type", type=str, default="avg", choices=["max", "avg"])
    parser.add_argument("--img_hidden_sz", type=int, default=1024)
    parser.add_argument("--include_bn", type=int, default=True)
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--lr_factor", type=float, default=0.3)
    parser.add_argument("--lr_patience", type=int, default=10)
    parser.add_argument("--max_epochs", type=int, default=500)
    parser.add_argument("--n_workers", type=int, default=12)
    parser.add_argument("--name", type=str, default="MMDF")
    parser.add_argument("--num_image_embeds", type=int, default=1)
    parser.add_argument("--patience", type=int, default=20)
    parser.add_argument("--savedir", type=str, default="./savepath/")
    parser.add_argument("--seed", type=int, default=1)
    parser.add_argument("--n_classes", type=int, default=2)
    parser.add_argument("--annealing_epoch", type=int, default=10)
    parser.add_argument("--device", type=str, default='cpu')
    # Note: argparse with type=bool does not parse "False" as False; these
    # flags are kept as in the original but are fragile on the command line.
    parser.add_argument("--pretrained_image_encoder", type=bool, default=False)
    parser.add_argument("--freeze_image_encoder", type=bool, default=False)
    parser.add_argument("--pretrained_audio_encoder", type=bool, default=False)
    parser.add_argument("--freeze_audio_encoder", type=bool, default=False)
    parser.add_argument("--augment_dataset", type=bool, default=True)

    for key, value in audio_args.items():
        parser.add_argument(f"--{key}", type=type(value), default=value)


def load_img_modality_model(args):
    '''Loads the image-modality encoder (EfficientNet converted from ONNX).'''
    rgb_encoder = pytorch_model
    ckpt = torch.load('models/model.pth', map_location=torch.device('cpu'))
    rgb_encoder.load_state_dict(ckpt['rgb_encoder'], strict=True)
    rgb_encoder.eval()
    return rgb_encoder


def load_spec_modality_model(args):
    '''Loads the spectrogram (audio) encoder.'''
    spec_encoder = image.RawNet(args)
    ckpt = torch.load('models/model.pth', map_location=torch.device('cpu'))
    spec_encoder.load_state_dict(ckpt['spec_encoder'], strict=True)
    spec_encoder.eval()
    return spec_encoder


parser = argparse.ArgumentParser(description="Inference models")
get_args(parser)
args, remaining_args = parser.parse_known_args()
assert remaining_args == [], remaining_args
spec_model = load_spec_modality_model(args)
img_model = load_img_modality_model(args)


def preprocess_img(face):
    face = face / 255
    face = cv2.resize(face, (256, 256))
    face_pt = torch.unsqueeze(torch.Tensor(face), dim=0)
    return face_pt


def preprocess_audio(audio_file):
    audio_pt = torch.unsqueeze(torch.Tensor(audio_file), dim=0)
    return audio_pt


def df_spec_pred(input_audio):
    x, _ = input_audio
    audio = preprocess_audio(x)
    spec_grads = spec_model.forward(audio)
    spec_grads_inv = np.exp(spec_grads.cpu().detach().numpy().squeeze())
    # np.argmax returns the predicted class index (0 or 1), so the test below
    # treats class 1 as REAL; `preds` is computed but not used in the message.
    max_value = np.argmax(spec_grads_inv)
    if max_value > 0.5:
        preds = round(100 - (max_value * 100), 3)
        text2 = "The audio is REAL."
    else:
        preds = round(max_value * 100, 3)
        text2 = "The audio is FAKE."
    return text2


def df_img_pred(input_image):
    face = preprocess_img(input_image)
    print(f"Face shape is: {face.shape}")
    img_grads = img_model.forward(face)
    img_grads = img_grads.cpu().detach().numpy()
    img_grads_np = np.squeeze(img_grads)
    if img_grads_np[0] > 0.5:
        preds = round(img_grads_np[0] * 100, 3)
        text2 = f"The image is REAL. \nConfidence score is: {preds}"
    else:
        preds = round(img_grads_np[1] * 100, 3)
        text2 = f"The image is FAKE. \nConfidence score is: {preds}"
    return text2


def preprocess_video(input_video, n_frames=3):
    v_cap = cv2.VideoCapture(input_video)
    v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Pick n_frames evenly spaced frames to sample.
    if n_frames is None:
        sample = np.arange(0, v_len)
    else:
        sample = np.linspace(0, v_len - 1, n_frames).astype(int)
    frames = []
    for j in range(v_len):
        success = v_cap.grab()
        if j in sample:
            success, frame = v_cap.retrieve()
            if not success:
                continue
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = preprocess_img(frame)
            frames.append(frame)
    v_cap.release()
    return frames


def df_video_pred(input_video):
    video_frames = preprocess_video(input_video)
    real_faces_list = []
    fake_faces_list = []
    for face in video_frames:
        img_grads = img_model.forward(face)
        img_grads = img_grads.cpu().detach().numpy()
        img_grads_np = np.squeeze(img_grads)
        real_faces_list.append(img_grads_np[0])
        fake_faces_list.append(img_grads_np[1])
    real_faces_mean = np.mean(real_faces_list)
    fake_faces_mean = np.mean(fake_faces_list)
    if real_faces_mean > 0.5:
        preds = round(real_faces_mean * 100, 3)
        text2 = f"The video is REAL. \nConfidence score is: {preds}%"
    else:
        preds = round(fake_faces_mean * 100, 3)
        text2 = f"The video is FAKE. \nConfidence score is: {preds}%"
    return text2
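A hedged usage sketch for the two prediction entry points above. The file paths are hypothetical; df_img_pred expects an RGB array, so the BGR frame from cv2.imread is converted first:

    if __name__ == "__main__":
        img_bgr = cv2.imread("samples/face.jpg")            # hypothetical path
        img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)   # OpenCV loads BGR
        print(df_img_pred(img_rgb))
        print(df_video_pred("samples/clip.mp4"))             # hypothetical path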