2023-10-14 16:49:50 -07:00

348 lines
13 KiB
Plaintext

{
"cells": [
{
"cell_type": "code",
"execution_count": 102,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The autoreload extension is already loaded. To reload it, use:\n",
" %reload_ext autoreload\n"
]
}
],
"source": [
"# Auto-reload edited modules (e.g. utils.py) on each cell execution, so the\n",
"# kernel does not need a restart after helper code changes.\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "code",
"execution_count": 103,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import os\n",
"import numpy as np\n",
"from utils import *\n",
"import math\n",
"import heapq\n",
"import random"
]
},
{
"cell_type": "code",
"execution_count": 104,
"metadata": {},
"outputs": [],
"source": [
"fd_collection = getCollection(\"team_5_mwdb_phase_2\", \"fd_collection\")\n",
"# Materialize the query result: later cells both index all_images[i]\n",
"# repeatedly and iterate over it. A Mongo cursor is single-pass (and\n",
"# cursor indexing was removed in PyMongo 4.x), so keep a list instead.\n",
"all_images = list(fd_collection.find())"
]
},
{
"cell_type": "code",
"execution_count": 105,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"label_sim-cm_fd-lda-10-model.joblib loaded\n"
]
}
],
"source": [
"selected_latent_space = valid_latent_spaces[\n",
"    str(input(\"Enter latent space - one of \" + str(list(valid_latent_spaces.keys()))))\n",
"]\n",
"\n",
"selected_feature_model = valid_feature_models[\n",
"    str(input(\"Enter feature model - one of \" + str(list(valid_feature_models.keys()))))\n",
"]\n",
"\n",
"k = int(input(\"Enter value of k: \"))\n",
"if k < 1:\n",
"    raise ValueError(\"k should be a positive integer\")\n",
"\n",
"selected_dim_reduction_method = str(\n",
"    input(\n",
"        \"Enter dimensionality reduction method - one of \"\n",
"        + str(list(valid_dim_reduction_methods.keys()))\n",
"    )\n",
")\n",
"\n",
"image_id = int(input(\"Enter image ID: \"))\n",
"# BUG FIX: the original condition used `and`, which can never hold (an id\n",
"# cannot be both < 0 and > 8676), so invalid ids were accepted silently.\n",
"# Any single violation must reject the input, hence `or`.\n",
"if image_id < 0 or image_id > 8676 or image_id % 2 != 0:\n",
"    raise ValueError(\"image id should be even number between 0 and 8676\")\n",
"\n",
"knum = int(input(\"Enter value of knum: \"))\n",
"if knum < 1:\n",
"    raise ValueError(\"knum should be a positive integer\")\n",
"\n",
"# Load the cached semantics (and, for lda, the fitted model) for the chosen\n",
"# latent space / feature model / k combination. Files are opened with `with`\n",
"# so handles are closed promptly (the original leaked open file objects).\n",
"match selected_latent_space:\n",
"    case \"\":\n",
"        if selected_dim_reduction_method == \"lda\":\n",
"            # lda needs both the serialized model and the semantics file.\n",
"            # (The original re-tested the .joblib path inside this branch even\n",
"            # though the guard already proved it exists; that redundant inner\n",
"            # check and its unreachable else have been removed.)\n",
"            if os.path.exists(f\"{selected_feature_model}-{selected_dim_reduction_method}-{k}-model.joblib\") and os.path.exists(f\"{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json\"):\n",
"                model = load(f\"{selected_feature_model}-{selected_dim_reduction_method}-{k}-model.joblib\")\n",
"                with open(f\"{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json\") as f:\n",
"                    data = json.load(f)\n",
"                print(f\"{selected_feature_model}-{selected_dim_reduction_method}-{k}-model.joblib and json loaded\")\n",
"            else:\n",
"                print(f\"{selected_feature_model}-{selected_dim_reduction_method}-{k}-model.joblib does not exist\")\n",
"        else:\n",
"            if os.path.exists(f\"{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json\"):\n",
"                with open(f\"{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json\") as f:\n",
"                    data = json.load(f)\n",
"                print(f\"{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json loaded\")\n",
"            else:\n",
"                print(f\"{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json does not exist\")\n",
"    case \"cp\":\n",
"        if os.path.exists(f\"{selected_feature_model}-cp-{k}-semantics.json\"):\n",
"            with open(f\"{selected_feature_model}-cp-{k}-semantics.json\") as f:\n",
"                data = json.load(f)\n",
"            print(f\"{selected_feature_model}-cp-{k}-semantics.json loaded\")\n",
"        else:\n",
"            print(f\"{selected_feature_model}-cp-{k}-semantics.json does not exist\")\n",
"    case _:\n",
"        if selected_dim_reduction_method == \"lda\":\n",
"            if os.path.exists(f\"{selected_latent_space}-{selected_feature_model}-{selected_dim_reduction_method}-{k}-model.joblib\") and os.path.exists(f\"{selected_latent_space}-{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json\"):\n",
"                model = load(f\"{selected_latent_space}-{selected_feature_model}-{selected_dim_reduction_method}-{k}-model.joblib\")\n",
"                with open(f\"{selected_latent_space}-{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json\") as f:\n",
"                    data = json.load(f)\n",
"                print(f\"{selected_latent_space}-{selected_feature_model}-{selected_dim_reduction_method}-{k}-model.joblib loaded\")\n",
"            else:\n",
"                print(f\"{selected_latent_space}-{selected_feature_model}-{selected_dim_reduction_method}-{k}-model.joblib does not exist\")\n",
"        else:\n",
"            if os.path.exists(f\"{selected_latent_space}-{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json\"):\n",
"                with open(f\"{selected_latent_space}-{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json\") as f:\n",
"                    data = json.load(f)\n",
"                print(f\"{selected_latent_space}-{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json loaded\")\n",
"            else:\n",
"                print(f\"{selected_latent_space}-{selected_feature_model}-{selected_dim_reduction_method}-{k}-semantics.json does not exist\")\n"
]
},
{
"cell_type": "code",
"execution_count": 106,
"metadata": {},
"outputs": [],
"source": [
"def extract_similarities_ls1_ls4(latent_space, dim_reduction, selected_feature_model, data, image_id):\n",
"    \"\"\"Print the knum images nearest to image_id in the chosen latent space.\n",
"\n",
"    latent_space: \"\" (plain feature space) or \"image_sim\" (image-image\n",
"        similarity space) - controls how the query vector is obtained.\n",
"    dim_reduction: one of 'svd', 'nmf', 'kmeans', 'lda'.\n",
"    selected_feature_model: document field holding the feature descriptor.\n",
"    data: decomposition artifacts loaded from the *-semantics.json file.\n",
"    image_id: external image id; collection row index is image_id / 2\n",
"        (ids are even - see the validation cell).\n",
"\n",
"    Uses notebook globals: all_images, knum, and (for lda) model.\n",
"    \"\"\"\n",
"\n",
"    # Flattened raw feature descriptor of the query image.\n",
"    image_fd = np.array(all_images[int(image_id / 2)][selected_feature_model]).flatten()\n",
"\n",
"    match dim_reduction:\n",
"\n",
"        case 'svd':\n",
"            U = np.array(data[\"image-semantic\"])\n",
"            S = np.array(data[\"semantics-core\"])\n",
"            # A 1-D core holds the singular values; lift to a diagonal matrix.\n",
"            if len(S.shape) == 1:\n",
"                S = np.diag(S)\n",
"            V = np.transpose(np.array(data[\"semantic-feature\"]))\n",
"\n",
"            # Images in latent space, scaled by the singular values.\n",
"            comparison_feature_space = np.matmul(U, S)\n",
"\n",
"            if latent_space == \"image_sim\":\n",
"                comparison_vector = comparison_feature_space[int(image_id / 2)]\n",
"            else:\n",
"                # Project the raw descriptor through V into the same scaled space.\n",
"                comparison_vector = np.matmul(np.matmul(image_fd, V), S)\n",
"\n",
"        case \"nmf\":\n",
"            H = np.array(data['semantic-feature'])\n",
"            comparison_feature_space = np.array(data['image-semantic'])\n",
"\n",
"            if latent_space == \"image_sim\":\n",
"                comparison_vector = comparison_feature_space[int(image_id / 2)]\n",
"            else:\n",
"                # Fold the raw descriptor into the latent space via H^T.\n",
"                comparison_vector = np.matmul(image_fd, np.transpose(H))\n",
"\n",
"        case \"kmeans\":\n",
"            # Query representation = vector of distances to each centroid.\n",
"            comparison_vector = []\n",
"            comparison_feature_space = np.array(data[\"image-semantic\"])\n",
"            S = np.array(data[\"semantic-feature\"])\n",
"\n",
"            for centroid in S:\n",
"                if latent_space == \"image_sim\":\n",
"                    sim_matrix = np.array(data[\"sim-matrix\"])\n",
"                    comparison_vector.append(math.dist(sim_matrix[int(image_id / 2)], centroid))\n",
"                else:\n",
"                    comparison_vector.append(math.dist(image_fd, centroid))\n",
"\n",
"        case \"lda\":\n",
"            comparison_feature_space = np.array(data[\"image-semantic\"])\n",
"            if latent_space == \"image_sim\":\n",
"                comparison_vector = comparison_feature_space[int(image_id / 2)]\n",
"            else:\n",
"                fd = np.array(all_images[int(image_id / 2)][selected_feature_model])\n",
"                # Shift by the minimum - presumably to satisfy LDA's\n",
"                # non-negative-input requirement; TODO confirm against the\n",
"                # training-side preprocessing.\n",
"                min_value = np.min(fd)\n",
"                feature_vectors_shifted = fd - min_value\n",
"                comparison_vector = model.transform(feature_vectors_shifted.flatten().reshape(1, -1)).flatten()\n",
"                # debug output: confirm query / space dimensionalities agree\n",
"                print(comparison_feature_space.shape)\n",
"                print(comparison_vector.shape)\n",
"            # print(retValue)\n",
"\n",
"    n = len(comparison_feature_space)\n",
"\n",
"    # Euclidean distance to every other image; row i maps to external\n",
"    # id i * 2, so the query itself is skipped.\n",
"    # NOTE(review): the result dict stores the row index i, while\n",
"    # extract_similarities_ls2 reports i * 2 - confirm which id convention\n",
"    # is intended here.\n",
"    distances = []\n",
"    for i in range(n):\n",
"        if (i * 2) != image_id:\n",
"            distances.append({\"image_id\": i, \"label\": all_images[i][\"true_label\"], \"distance\": math.dist(comparison_vector, comparison_feature_space[i])})\n",
"\n",
"    # knum nearest neighbours, closest first.\n",
"    distances = sorted(distances, key=lambda x: x[\"distance\"], reverse=False)[:knum]\n",
"\n",
"    for x in distances:\n",
"        print(x)"
]
},
{
"cell_type": "code",
"execution_count": 107,
"metadata": {},
"outputs": [],
"source": [
"def extract_similarities_ls2(data, image_id):\n",
"    \"\"\"Print the knum images closest to image_id in the CP-decomposition\n",
"    latent space (image-semantic matrix scaled by the core weights).\n",
"\n",
"    data: decomposition artifacts loaded from the cp semantics file.\n",
"    image_id: external image id; matrix row index is image_id / 2.\n",
"\n",
"    Uses notebook global: knum.\n",
"    \"\"\"\n",
"\n",
"    image_semantics = np.array(data[\"image-semantic\"])\n",
"    core = np.array(data[\"semantics-core\"])\n",
"\n",
"    # A 1-D core holds per-component weights; lift it to a diagonal matrix\n",
"    # before the product.\n",
"    if len(core.shape) == 1:\n",
"        core = np.diag(core)\n",
"\n",
"    weighted_space = np.matmul(image_semantics, core)\n",
"    query_vector = weighted_space[int(image_id / 2)]\n",
"\n",
"    # Distance from the query to every other image (row r <-> external id r * 2).\n",
"    candidates = [\n",
"        {\"image_id\": row * 2, \"distance\": math.dist(query_vector, weighted_space[row])}\n",
"        for row in range(len(weighted_space))\n",
"        if row != (image_id / 2)\n",
"    ]\n",
"\n",
"    # knum smallest distances, closest first (stable; equivalent to\n",
"    # sorted(candidates, key=...)[:knum]).\n",
"    for entry in heapq.nsmallest(knum, candidates, key=lambda c: c[\"distance\"]):\n",
"        print(entry)"
]
},
{
"cell_type": "code",
"execution_count": 108,
"metadata": {},
"outputs": [],
"source": [
"def extract_similarities_ls3(dim_reduction, data, image_id):\n",
"    \"\"\"Report the label most similar to the query image's true label and\n",
"    print knum randomly sampled images carrying that label.\n",
"\n",
"    dim_reduction: one of 'svd', 'nmf', 'kmeans', 'lda'.\n",
"    data: artifacts loaded from the label-similarity semantics file.\n",
"    image_id: external image id; collection row index is image_id / 2.\n",
"\n",
"    Uses notebook globals: all_images, knum.\n",
"    \"\"\"\n",
"\n",
"    img_label = all_images[int(image_id / 2)][\"true_label\"]\n",
"\n",
"    # For svd the latent label space is U @ S; the other methods store the\n",
"    # label-semantic matrix directly. (nmf / kmeans / lda were three\n",
"    # byte-identical branches in the original - merged here. The unused V\n",
"    # matrix the original built in the svd branch is dropped.)\n",
"    match dim_reduction:\n",
"\n",
"        case 'svd':\n",
"            U = np.array(data[\"image-semantic\"])\n",
"            S = np.array(data[\"semantics-core\"])\n",
"            # Consistency fix: like extract_similarities_ls2, lift a 1-D core\n",
"            # to a diagonal matrix so the product stays a 2-D space (a 1-D S\n",
"            # would collapse U @ S to a vector and break math.dist below).\n",
"            if len(S.shape) == 1:\n",
"                S = np.diag(S)\n",
"            comparison_feature_space = np.matmul(U, S)\n",
"\n",
"        case \"nmf\" | \"kmeans\" | \"lda\":\n",
"            comparison_feature_space = np.array(data[\"image-semantic\"])\n",
"\n",
"    comparison_vector = comparison_feature_space[img_label]\n",
"\n",
"    # Linear scan for the nearest other label in the latent space.\n",
"    n = len(comparison_feature_space)\n",
"    distance = float('inf')\n",
"    most_similar_label = img_label\n",
"    for i in range(n):\n",
"        if i != img_label:\n",
"            temp_distance = math.dist(comparison_vector, comparison_feature_space[i])\n",
"            if distance > temp_distance:\n",
"                distance = temp_distance\n",
"                most_similar_label = i\n",
"\n",
"    label_images = [x[\"image_id\"] for x in all_images if x[\"true_label\"] == most_similar_label]\n",
"    # Robustness: random.sample raises ValueError when the label has fewer\n",
"    # than knum images; sample whatever is available instead.\n",
"    similar_images = random.sample(label_images, min(knum, len(label_images)))\n",
"\n",
"    print(f\"Most similar label to {img_label} is {most_similar_label}\")\n",
"    for img in similar_images:\n",
"        print(img)"
]
},
{
"cell_type": "code",
"execution_count": 109,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Most similar label to 0 is 19\n",
"3634\n",
"3600\n",
"3562\n",
"3618\n",
"3574\n",
"3604\n",
"3550\n",
"3542\n",
"3564\n",
"3538\n"
]
}
],
"source": [
"# Route the query to the similarity routine for the selected latent space.\n",
"# Unrecognized values fall through silently, matching the original match\n",
"# statement, which had no wildcard case.\n",
"if selected_latent_space in (\"\", \"image_sim\"):\n",
"    extract_similarities_ls1_ls4(selected_latent_space, selected_dim_reduction_method, selected_feature_model, data, image_id)\n",
"elif selected_latent_space == \"label_sim\":\n",
"    extract_similarities_ls3(selected_dim_reduction_method, data, image_id)\n",
"elif selected_latent_space == \"cp\":\n",
"    extract_similarities_ls2(data, image_id)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}