Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

clean deepfashion2 to coco script #64

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions deepfashion2_api/PythonAPI/deepfashion2_retrieval_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,13 @@
box = np.array(i['gallery_bbox'])
gallery_box = [box[:,0], box[:,1], box[:,2] - box[:,0], box[:,3] - box[:,1]]
gallery_box = np.transpose(gallery_box,(1,0)).tolist()

results_image_id_all.append(i['query_image_id'])
results_query_score_all.append(i['query_score'])
results_query_cls_all.append(i['query_cls'])
results_query_box_all.append(query_box)
results_gallery_id_all.append(i['gallery_image_id'])
results_gallery_box_all.append(gellery_box)
results_gallery_box_all.append(gallery_box)
f.close()

results_image_id_all = np.array(results_image_id_all)
Expand Down Expand Up @@ -103,7 +103,7 @@

for id in query_id_real:
results_id_ind = np.where(results_image_id_all==id)[0]
if len(results_id_ind) == 0: # in case no clothing item is detected
if len(results_id_ind) == 0: # in case no clothing item is detected
continue
query_id_ind = np.where(query_image_id_all==id)[0] # all query items in the given image
pair_id = query_pair_all[query_id_ind]
Expand All @@ -128,7 +128,7 @@
style = query_id_style[id_ind]
cls = query_id_cls[id_ind]
# For a given ground truth query item, select a detected item on behalf of it:
# First find out all detected items which are assigned the given ground truth label
# First find out all detected items which are assigned the given ground truth label
# and are classified correctly.
# Then select the detected item with the highest score among these detected items.
if style>0:
Expand Down
278 changes: 144 additions & 134 deletions evaluation/deepfashion2_to_coco.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,19 @@
import sys
import os
import json
from PIL import Image
import numpy as np


# Fail fast with a usage message instead of an IndexError when the
# script is invoked with the wrong number of arguments.
if len(sys.argv) != 4:
    raise SystemExit(
        'usage: deepfashion2_to_coco.py <annos_dir> <images_dir> <output_json>'
    )

# input deepfashion2 json file names directory
base_json = sys.argv[1]
# input deepfashion2 images file names directory
base_image = sys.argv[2]
# output COCO-style json annotation file
output_name = sys.argv[3]

# start script

dataset = {
"info": {},
"licenses": [],
Expand Down Expand Up @@ -103,139 +114,138 @@
'skeleton': []
})

# Offsets of each category's keypoints inside the shared 294-point layout:
# category_id -> (start, end) index range into the global keypoint space.
# Replaces the 70-line duplicated if/elif chain of the original.
KEYPOINT_RANGES = {
    1: (0, 25),
    2: (25, 58),
    3: (58, 89),
    4: (89, 128),
    5: (128, 143),
    6: (143, 158),
    7: (158, 168),
    8: (168, 182),
    9: (182, 190),
    10: (190, 219),
    11: (219, 256),
    12: (256, 275),
    13: (275, 294),
}

sub_index = 0  # the index of ground truth instance
for num in range(1, num_images + 1):
    # NOTE(review): hardcoded placeholder paths — '/.../' must be replaced
    # with the real DeepFashion2 validation-set location before running.
    json_name = '/.../val_annos/' + str(num).zfill(6) + '.json'
    image_name = '/.../val/' + str(num).zfill(6) + '.jpg'

    # (The original wrapped this body in an always-true `if (num>=0):`.)
    # The image is opened only to read its dimensions; the context manager
    # guarantees the file handle is closed (the original leaked it).
    with Image.open(image_name) as imag:
        width, height = imag.size
    with open(json_name, 'r') as f:
        temp = json.loads(f.read())
    pair_id = temp['pair_id']

    dataset['images'].append({
        'coco_url': '',
        'date_captured': '',
        'file_name': str(num).zfill(6) + '.jpg',
        'flickr_url': '',
        'id': num,
        'license': 0,
        'width': width,
        'height': height
    })

    for i in temp:
        # 'source' and 'pair_id' are scalar fields, not clothing items.
        if i == 'source' or i == 'pair_id':
            continue
        sub_index = sub_index + 1
        item = temp[i]

        # DeepFashion2 boxes are [x1, y1, x2, y2]; COCO wants [x, y, w, h].
        box = item['bounding_box']
        w = box[2] - box[0]
        h = box[3] - box[1]
        bbox = [box[0], box[1], w, h]
        cat = item['category_id']
        style = item['style']
        seg = item['segmentation']
        landmarks = item['landmarks']

        # landmarks is a flat [x1, y1, v1, x2, y2, v2, ...] list.
        points_x = np.array(landmarks[0::3])
        points_y = np.array(landmarks[1::3])
        points_v = np.array(landmarks[2::3])

        # Scatter this category's keypoints into the global 294-slot
        # layout; slots belonging to other categories stay zero.
        points = np.zeros(294 * 3)
        kp_range = KEYPOINT_RANGES.get(cat)
        if kp_range is not None:
            start, end = kp_range
            for n in range(start, end):
                points[3 * n] = points_x[n - start]
                points[3 * n + 1] = points_y[n - start]
                points[3 * n + 2] = points_v[n - start]
        # Count landmarks with visibility flag > 0 (visible or occluded).
        num_points = len(np.where(points_v > 0)[0])

        dataset['annotations'].append({
            'area': w*h,
            'bbox': bbox,
            'category_id': cat,
            'id': sub_index,
            'pair_id': pair_id,
            'image_id': num,
            'iscrowd': 0,
            'style': style,
            'num_keypoints': num_points,
            'keypoints': points.tolist(),
            'segmentation': seg,
        })


# NOTE(review): hardcoded output path — replace before running.
json_name = '/.../deepfashion2.json'
with open(json_name, 'w') as f:
    json.dump(dataset, f)



# Offsets of each category's keypoints inside the shared 294-point layout:
# category_id -> (start, end) index range into the global keypoint space.
# Replaces the 70-line duplicated if/elif chain.
KEYPOINT_RANGES = {
    1: (0, 25),
    2: (25, 58),
    3: (58, 89),
    4: (89, 128),
    5: (128, 143),
    6: (143, 158),
    7: (158, 168),
    8: (168, 182),
    9: (182, 190),
    10: (190, 219),
    11: (219, 256),
    12: (256, 275),
    13: (275, 294),
}

# Count only actual image files so stray entries (hidden files, README,
# thumbnails) do not inflate the expected contiguous 1..N numbering.
num_images = len([f for f in os.listdir(base_image) if f.endswith('.jpg')])

sub_index = 0  # the index of ground truth instance
for num in range(1, num_images + 1):
    # Files are named 000001.jpg / 000001.json, numbered from 1.
    zname = str(num).zfill(6)
    zname_json = zname + '.json'
    zname_img = zname + '.jpg'
    json_name = os.path.join(base_json, zname_json)
    image_name = os.path.join(base_image, zname_img)

    # The image is opened only to read its dimensions; the context
    # manager guarantees the file handle is released.
    with Image.open(image_name) as imag:
        width, height = imag.size
    with open(json_name, 'r') as f:
        temp = json.loads(f.read())

    dataset['images'].append({
        'coco_url': '',
        'date_captured': '',
        'file_name': zname_img,
        'flickr_url': '',
        'id': num,
        'license': 0,
        'width': width,
        'height': height
    })

    # After removing the two scalar fields, everything left in temp is a
    # clothing-item annotation ('item1', 'item2', ...).
    source = temp.pop('source')
    pair_id = temp.pop('pair_id')

    for i in temp:
        sub_index = sub_index + 1
        item = temp[i]

        # DeepFashion2 boxes are [x1, y1, x2, y2]; COCO wants [x, y, w, h].
        box = item['bounding_box']
        w = box[2] - box[0]
        h = box[3] - box[1]
        bbox = [box[0], box[1], w, h]
        cat = item['category_id']
        style = item['style']
        seg = item['segmentation']
        landmarks = item['landmarks']

        # landmarks is a flat [x1, y1, v1, x2, y2, v2, ...] list.
        points_x = np.array(landmarks[0::3])
        points_y = np.array(landmarks[1::3])
        points_v = np.array(landmarks[2::3])

        # Scatter this category's keypoints into the global 294-slot
        # layout; slots belonging to other categories stay zero.
        points = np.zeros(294 * 3)
        kp_range = KEYPOINT_RANGES.get(cat)
        if kp_range is not None:
            start, end = kp_range
            for n in range(start, end):
                points[3 * n] = points_x[n - start]
                points[3 * n + 1] = points_y[n - start]
                points[3 * n + 2] = points_v[n - start]
        # Count landmarks with visibility flag > 0 (visible or occluded).
        num_points = len(np.where(points_v > 0)[0])

        dataset['annotations'].append({
            'area': w*h,
            'bbox': bbox,
            'category_id': cat,
            'id': sub_index,
            'source': source,
            'pair_id': pair_id,
            'image_id': num,
            'iscrowd': 0,
            'style': style,
            'num_keypoints': num_points,
            'keypoints': points.tolist(),
            'segmentation': seg,
        })


with open(output_name, 'w') as f:
    json.dump(dataset, f)