Mirror of https://github.com/20kaushik02/CSE515_MWDB_Project.git, synced 2025-12-06 06:34:06 +00:00
refactored madhura's task 2 code, some more fixes
parent 3b741069e0
commit 2ca88df5be
@@ -137,9 +137,7 @@
" str(input(\"Enter feature model - one of \" + str(list(valid_feature_models.keys()))))\n",
"]\n",
"\n",
"selected_distance_measure = valid_distance_measures[\n",
" str(input(\"Enter distance measure - one of \" + str(list(valid_distance_measures.keys()))))\n",
"]\n",
"selected_distance_measure = feature_distance_matches[selected_feature_model]\n",
"\n",
"if selected_image_id == -1:\n",
" show_similar_images_for_image(\n",
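The refactored cell no longer prompts for a distance measure; it derives one from the chosen feature model through feature_distance_matches. That dictionary's definition is not part of this diff, so the sketch below only illustrates the lookup pattern, with stub distance functions and assumed pairings.

# Sketch of the lookup pattern only; the real feature_distance_matches and the
# distance functions are defined elsewhere in the notebook (pairings here are assumed).
def euclidean_distance_measure(a, b): ...
def pearson_distance_measure(a, b): ...

valid_feature_models = {"fc": "fc_fd", "avgpool": "avgpool_fd"}  # subset of the repo's mapping
feature_distance_matches = {
    "fc_fd": pearson_distance_measure,         # assumed pairing
    "avgpool_fd": euclidean_distance_measure,  # assumed pairing
}

selected_feature_model = valid_feature_models["fc"]  # e.g. the user typed "fc"
selected_distance_measure = feature_distance_matches[selected_feature_model]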
Phase 2/task_2a.ipynb (normal file, 151 lines)
File diff suppressed because one or more lines are too long
Phase 2/task_2b.ipynb (normal file, 149 lines)
File diff suppressed because one or more lines are too long
Phase 2/task_3.ipynb (1247 lines)
File diff suppressed because it is too large
@@ -291,7 +291,6 @@ def get_all_fd(image_id, given_image=None, given_label=None):
    else:
        img, label = dataset[image_id]
    img_shape = np.array(img).shape
    print(img_shape)
    if img_shape[0] >= 3:
        true_channels = 3
    else:
@@ -357,7 +356,7 @@ valid_feature_models = {
    "avgpool": "avgpool_fd",
    "layer3": "layer3_fd",
    "fc": "fc_fd",
    "resnet": "resnet_fd"
    "resnet": "resnet_fd",
}
valid_distance_measures = {
    "euclidean": euclidean_distance_measure,
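This hunk fixes the missing trailing comma on the "resnet" entry; valid_feature_models maps each user-facing name to the stored field name (with the _fd suffix), while valid_distance_measures maps names to callables. A minimal, self-contained sketch of that dispatch, with an assumed euclidean implementation:

import numpy as np

def euclidean_distance_measure(a, b):
    # Assumed implementation: L2 distance between flattened descriptors.
    return float(np.linalg.norm(np.asarray(a, dtype=float).ravel() - np.asarray(b, dtype=float).ravel()))

valid_feature_models = {"fc": "fc_fd", "resnet": "resnet_fd"}       # subset of the repo's mapping
valid_distance_measures = {"euclidean": euclidean_distance_measure}

field = valid_feature_models["fc"]               # -> "fc_fd", the database field to read
measure = valid_distance_measures["euclidean"]   # -> euclidean_distance_measure
print(field, measure(np.ones(4), np.zeros(4)))   # fc_fd 2.0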
@@ -380,7 +379,7 @@ def show_similar_images_for_image(
    target_image=None,
    target_label=None,
    k=10,
    feature_model="fc",
    feature_model="fc_fd",
    distance_measure=pearson_distance_measure,
    save_plots=False,
):
@@ -509,7 +508,7 @@ def show_similar_images_for_image(

    if save_plots:
        plt.savefig(
            f"Plots/Image_{target_image_id}_{feature_model}_{distance_measure.__name__}_k{k}.png"
            f"Plots/Image_{target_image_id}_{feature_model}_{distance_measure.__name__}_{k}_images.png"
        )
    plt.show()

@@ -534,7 +533,7 @@ def show_similar_images_for_label(
    fd_collection,
    target_label,
    k=10,
    feature_model="fc",
    feature_model="fc_fd",
    distance_measure=pearson_distance_measure,
    save_plots=False,
):
@@ -594,7 +593,7 @@ def show_similar_images_for_label(

    if save_plots:
        plt.savefig(
            f"Plots/Label_{target_label}_{feature_model}_{distance_measure.__name__}_k{k}.png"
            f"Plots/Label_{target_label}_{feature_model}_{distance_measure.__name__}_{k}_images.png"
        )
    plt.show()

@@ -605,10 +604,18 @@ def show_similar_labels_for_image(
    target_image=None,
    target_label=None,
    k=10,
    feature_model="fc",
    feature_model="fc_fd",
    distance_measure=pearson_distance_measure,
    save_plots=False,
):
    assert (
        feature_model in valid_feature_models.values()
    ), "feature_model should be one of " + str(valid_feature_models.keys())

    assert (
        distance_measure in valid_distance_measures.values()
    ), "distance_measure should be one of " + str(list(valid_distance_measures.keys()))

    # if target from dataset
    if target_image_id != -1:
        print(
@@ -619,14 +626,14 @@ def show_similar_labels_for_image(

    # store target_image itself
    min_dists = {target_image_id: 0}


    if target_image_id % 2 == 0:
        # Get target image's feature descriptors from database
        target_image = fd_collection.find_one({"image_id": target_image_id})
    else:
        # Calculate target image's feature descriptors
        target_image = get_all_fd(target_image_id)


    target_image_fd = target_image[feature_model]
    target_label = target_image["true_label"]

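In this hunk the target's descriptors come from the Mongo collection when the image id is even and are recomputed with get_all_fd otherwise, which suggests only even-numbered images have stored descriptors. A sketch of that branch, assuming fd_collection behaves like a pymongo collection and get_all_fd(image_id) returns a dict containing the *_fd fields and "true_label":

import numpy as np

def fetch_target_descriptor(fd_collection, get_all_fd, target_image_id, feature_model="fc_fd"):
    """Sketch of the even/odd retrieval branch (assumptions noted in the comments)."""
    if target_image_id % 2 == 0:
        # Even ids: descriptors were precomputed and stored in the database.
        target_image = fd_collection.find_one({"image_id": target_image_id})
    else:
        # Odd ids: descriptors are computed on the fly.
        target_image = get_all_fd(target_image_id)
    return np.array(target_image[feature_model]), target_image["true_label"]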
@@ -644,62 +651,58 @@ def show_similar_labels_for_image(
    target_image_fd = np.array(target_image_fds[feature_model])

    label_dict = {target_image_id: target_label}

    target_image_fd = np.array(target_image[feature_model + "_fd"])

    assert (
        feature_model in valid_feature_models
    ), "feature_model should be one of " + str(valid_feature_models)

    assert (
        distance_measure in valid_distance_measures.values()
    ), "distance_measure should be one of " + str(list(valid_distance_measures.keys()))

    # only RGB for non RGB images
    if feature_model != "hog":
        all_images = fd_collection.find({"true_channels": 3})
    else:
        all_images = fd_collection.find()
    target_image_fd = np.array(target_image[feature_model])

    all_images = fd_collection.find({})
    for cur_img in all_images:
        cur_img_id = cur_img["image_id"]
        # skip target itself
        if cur_img_id == target_image_id:
            continue
        cur_img_fd = np.array(cur_img[feature_model + "_fd"])
        cur_img_fd = np.array(cur_img[feature_model])
        cur_dist = distance_measure(
            cur_img_fd,
            target_image_fd,
        )
        cursor = fd_collection.find({"image_id": cur_img_id})
        label=cursor[0]["true_label"]
        cur_label = cur_img["true_label"]

        # store first k images irrespective of distance (so that we store no more than k minimum distances)
        if len(min_dists) < k + 1 and label not in label_dict.values():
        if len(min_dists) < k + 1 and cur_label not in label_dict.values():
            min_dists[cur_img_id] = cur_dist
            label_dict[cur_img_id] = label
            label_dict[cur_img_id] = cur_label

        # if lower distance:
        elif cur_dist < max(min_dists.values()) and label not in label_dict.values():
        elif (
            cur_dist < max(min_dists.values()) and cur_label not in label_dict.values()
        ):
            # add to min_dists
            min_dists.update({cur_img_id: cur_dist})
            label_dict.update({cur_img_id: label})
            # remove greatest distance by index
            pop_key=max(min_dists, key=min_dists.get)
            label_dict.update({cur_img_id: cur_label})
            # remove label with greatest distance by index
            pop_key = max(min_dists, key=min_dists.get)
            min_dists.pop(pop_key)
            label_dict.pop(pop_key)

    min_dists = dict(sorted(min_dists.items(), key=lambda item: item[1]))

    for image_id in min_dists.keys():
        if image_id==target_image_id:
    fig, axs = plt.subplots(1, k, figsize=(48, 12))
    for idx, image_id in enumerate(min_dists.keys()):
        if image_id == target_image_id:
            continue
        else:
            print("Label: ", label_dict[image_id], "; distance: ", min_dists[image_id])
        sample_image, sample_label = dataset[image_id]
        plt.imshow(transforms.ToPILImage()(sample_image))
        plt.show()

        axs[idx-1].imshow(transforms.ToPILImage()(sample_image))
        axs[idx-1].set_title(
            f"Label: {label_dict[image_id]}; Distance: {min_dists[image_id]}"
        )
        axs[idx-1].axis("off")

    if save_plots:
        plt.savefig(
            f"Plots/Image_{target_image_id}_{feature_model}_{distance_measure.__name__}_{k}_labels.png"
        )
    plt.show()


valid_dim_reduction_methods = {
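The rewritten loop keeps the k nearest images with pairwise-distinct labels: it holds at most k + 1 entries in min_dists (the target itself plus k candidates), evicts the current farthest entry whenever a closer image with an unseen label appears, and finally sorts by distance before plotting the results on a single subplot grid. A self-contained sketch of that selection logic, with the database cursor replaced by an in-memory list of (image_id, label, descriptor) tuples:

import numpy as np

def k_nearest_distinct_labels(target_fd, candidates, k=10,
                              distance=lambda a, b: float(np.linalg.norm(a - b))):
    """Return the k closest candidates whose labels are pairwise distinct.

    candidates: iterable of (image_id, label, descriptor) tuples; mirrors the
    dict-based bookkeeping in show_similar_labels_for_image.
    """
    min_dists, label_dict = {}, {}
    target_fd = np.asarray(target_fd, dtype=float)
    for cur_img_id, cur_label, cur_fd in candidates:
        cur_dist = distance(np.asarray(cur_fd, dtype=float), target_fd)
        if len(min_dists) < k and cur_label not in label_dict.values():
            min_dists[cur_img_id] = cur_dist
            label_dict[cur_img_id] = cur_label
        elif min_dists and cur_dist < max(min_dists.values()) and cur_label not in label_dict.values():
            min_dists[cur_img_id] = cur_dist
            label_dict[cur_img_id] = cur_label
            # Evict whichever retained entry is currently farthest away.
            pop_key = max(min_dists, key=min_dists.get)
            min_dists.pop(pop_key)
            label_dict.pop(pop_key)
    # Ascending by distance, as the notebook sorts before plotting.
    order = sorted(min_dists.items(), key=lambda item: item[1])
    return [(img_id, label_dict[img_id], dist) for img_id, dist in order]

As in the notebook's version, a closer image whose label is already represented is skipped rather than replacing that label's current, farther representative.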
@@ -829,7 +832,7 @@ def extract_latent_semantics(

    all_images = list(fd_collection.find())
    feature_ids = [img["image_id"] for img in all_images]


    top_img_str = ""
    if top_images is not None:
        top_img_str = f" (showing only top {top_images} image-weight pairs for each latent semantic)"
@@ -838,16 +841,16 @@
    if sim_matrix is not None:
        feature_vectors = sim_matrix
        print(
            "Applying {} on the {} space to get {} latent semantics{}...".format(
                dim_reduction_method, feature_model, k, top_img_str
            "Applying {} on the given similarity matrix to get {} latent semantics{}...".format(
                dim_reduction_method, k, top_img_str
            )
        )
    # else take feature space from database
    else:
        feature_vectors = np.array([img[feature_model] for img in all_images])
        feature_vectors = np.array([np.array(img[feature_model]).flatten() for img in all_images])
        print(
            "Applying {} on the given similarity matrix to get {} latent semantics{}...".format(
                dim_reduction_method, k, top_img_str
            "Applying {} on the {} space to get {} latent semantics{}...".format(
                dim_reduction_method, feature_model, k, top_img_str
            )
        )

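extract_latent_semantics now flattens each stored descriptor before stacking, so matrix-valued feature models also yield a 2-D data matrix, and the two print messages appear to be corrected so each describes its own branch (similarity matrix vs. feature space). A self-contained sketch of the overall flow, using plain SVD as a stand-in for the repo's configurable dim_reduction_method and keeping only the top image-weight pairs per latent semantic:

import numpy as np

def latent_semantics_sketch(descriptors_by_image, k=5, top_images=3):
    """Stand-in for extract_latent_semantics: flatten, stack, reduce.

    descriptors_by_image: dict of image_id -> array-like feature descriptor.
    SVD here is only an illustrative choice of reduction method.
    """
    image_ids = list(descriptors_by_image.keys())
    # Flatten every descriptor so matrix-valued features become row vectors.
    data = np.array([np.array(descriptors_by_image[i]).flatten() for i in image_ids])
    U, S, _ = np.linalg.svd(data, full_matrices=False)
    image_weights = U[:, :k] * S[:k]  # image-to-latent-semantic weights
    semantics = []
    for j in range(min(k, image_weights.shape[1])):
        pairs = sorted(zip(image_ids, image_weights[:, j]),
                       key=lambda p: abs(p[1]), reverse=True)
        semantics.append(pairs[:top_images])  # top image-weight pairs only
    return semantics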
File diff suppressed because one or more lines are too long