TensorFlow Object Detection API Tutorial: Making Your Own Dataset
Preface
A while ago I ran some demos with the TensorFlow Object Detection API and then successfully trained a model on my own data, so I'm sharing my workflow here in the hope that it helps others. The API is open-sourced on GitHub at https://github.com/tensorflow/models/tree/master/research/object_detection.
1 Dataset preparation
I use VOC-format data here. Prepare the dataset ahead of time: I put the XML annotation files in a merged_xml folder and the images in an images folder.
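For orientation, the directory layout the scripts below assume looks roughly like this (the folder names are taken from the scripts; the data directory is created by hand in a later step):

```
project/
├── merged_xml/    # all VOC-style XML annotations
├── images/        # the corresponding .jpg images
├── annotations/   # filled by train_test_split.py (train/validation/test)
└── data/          # will hold the generated CSV and TFRecord files
```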
Here is what one of my XML files looks like:
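A minimal VOC-style sketch (the values are placeholders, and the child order inside `<size>` — depth, width, height — is an assumption inferred from the converter's indexing below):

```xml
<annotation>
    <filename>000001.jpg</filename>
    <size>
        <depth>3</depth>
        <width>1920</width>
        <height>1080</height>
    </size>
    <object>
        <name>car</name>
        <pose>Unspecified</pose>
        <truncated>0</truncated>
        <difficult>0</difficult>
        <bndbox>
            <xmin>100</xmin>
            <ymin>200</ymin>
            <xmax>300</xmax>
            <ymax>400</ymax>
        </bndbox>
    </object>
</annotation>
```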
The conversion code below parses exactly this structure.
Then I use the train_test_split.py below to split the XML files into train, test, and validation sets (90% of the samples go to train + validation, and 85% of those to train):
```python
import os
import random
import shutil
import time

xmlfilepath = r'merged_xml'        # source folder with all XML annotations
saveBasePath = r'./annotations'    # destination: annotations/{train,validation,test}

trainval_percent = 0.9   # share of samples used for train + validation
train_percent = 0.85     # share of trainval used for train

total_xml = os.listdir(xmlfilepath)
num = len(total_xml)
indices = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(indices, tv)
train = random.sample(trainval, tr)
print("train and val size", tv)
print("train size", tr)

start = time.time()
train_num = val_num = test_num = 0

for i in indices:
    name = total_xml[i]
    if i in trainval:
        if i in train:
            directory = "train"
            train_num += 1
        else:
            directory = "validation"
            val_num += 1
    else:
        directory = "test"
        test_num += 1
    target_dir = os.path.join(saveBasePath, directory)
    os.makedirs(target_dir, exist_ok=True)  # also creates ./annotations if missing
    shutil.copyfile(os.path.join(xmlfilepath, name),
                    os.path.join(target_dir, name))

seconds = time.time() - start
print("train total : " + str(train_num))
print("validation total : " + str(val_num))
print("test total : " + str(test_num))
print("total number : " + str(train_num + val_num + test_num))
print("Time taken : {0} seconds".format(seconds))
```
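Assuming the script sits in the project root next to merged_xml, running it is simply:

```
python3 train_test_split.py
```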
After the script finishes, the annotations folder holds the split XML files in three subdirectories: train, test, and validation.
Next, convert the XML files to CSV. My script is xml_to_csv.py; before running it, create a data directory to hold the generated CSV files. The code:
```python
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET


def xml_to_csv(path):
    xml_list = []
    for xml_file in glob.glob(path + '/*.xml'):
        tree = ET.parse(xml_file)
        root = tree.getroot()
        print(root.find('filename').text)
        for member in root.findall('object'):
            value = (root.find('filename').text,
                     int(root.find('size')[1].text),   # width
                     int(root.find('size')[2].text),   # height
                     member[0].text,                   # class name
                     int(member[4][0].text),           # xmin
                     int(float(member[4][1].text)),    # ymin
                     int(member[4][2].text),           # xmax
                     int(member[4][3].text))           # ymax
            xml_list.append(value)
    column_name = ['filename', 'width', 'height', 'class',
                   'xmin', 'ymin', 'xmax', 'ymax']
    return pd.DataFrame(xml_list, columns=column_name)


def main():
    for directory in ['train', 'test', 'validation']:
        xml_path = os.path.join(os.getcwd(), 'annotations/{}'.format(directory))
        xml_df = xml_to_csv(xml_path)
        xml_df.to_csv('data/whsyxt_{}_labels.csv'.format(directory), index=None)
        print('Successfully converted xml to csv.')


if __name__ == '__main__':
    main()
```
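One caveat: the index-based lookups above (size[1], member[4], and so on) silently depend on the exact child order in the XML. If your annotations follow the standard VOC layout, looking elements up by tag name is sturdier — a sketch, not what I ran:

```python
def parse_object(root, member):
    # Look elements up by tag name instead of position, so the child
    # order inside <size> and <object> no longer matters.
    size = root.find('size')
    bbox = member.find('bndbox')
    return (root.find('filename').text,
            int(size.find('width').text),
            int(size.find('height').text),
            member.find('name').text,
            int(float(bbox.find('xmin').text)),
            int(float(bbox.find('ymin').text)),
            int(float(bbox.find('xmax').text)),
            int(float(bbox.find('ymax').text)))
```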
With that done, we can generate the TFRecord files. My script is generate_tfrecord.py:
"""Usage: # From tensorflow/models/ # Create train data: python generate_tfrecord.py --csv_input=data/train_labels.csv --output_path=train.record # Create test data: python generate_tfrecord.py --csv_input=data/test_labels.csv --output_path=test.record"""from __future__ import divisionfrom __future__ import print_functionfrom __future__ import absolute_importimport osimport ioimport pandas as pdimport tensorflow as tffrom PIL import Imagefrom object_detection.utils import dataset_utilfrom collections import namedtuple, OrderedDictflags = tf.app.flagsflags.DEFINE_string('csv_input', '', 'Path to the CSV input')flags.DEFINE_string('output_path', '', 'Path to output TFRecord')FLAGS = flags.FLAGS# TO-DO replace this with label mapdef class_text_to_int(row_label): if row_label == 'car': return 1 elif row_label == 'person': return 2 else: Nonedef split(df, group): data = namedtuple('data', ['filename', 'object']) gb = df.groupby(group) return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]def create_tf_example(group, path): with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = Image.open(encoded_jpg_io) width, height = image.size filename = group.filename.encode('utf8') image_format = b'jpg' xmins = [] xmaxs = [] ymins = [] ymaxs = [] classes_text = [] classes = [] for index, row in group.object.iterrows(): xmins.append(row['xmin'] / width) xmaxs.append(row['xmax'] / width) ymins.append(row['ymin'] / height) ymaxs.append(row['ymax'] / height) classes_text.append(row['class'].encode('utf8')) classes.append(class_text_to_int(row['class'])) tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(filename), 'image/source_id': dataset_util.bytes_feature(filename), 'image/encoded': dataset_util.bytes_feature(encoded_jpg), 'image/format': dataset_util.bytes_feature(image_format), 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), 'image/object/class/label': dataset_util.int64_list_feature(classes), })) return tf_exampledef main(_): writer = tf.python_io.TFRecordWriter(FLAGS.output_path) path = os.path.join(os.getcwd(), 'images') examples = pd.read_csv(FLAGS.csv_input) grouped = split(examples, 'filename') num=0 for group in grouped: num+=1 tf_example = create_tf_example(group, path) writer.write(tf_example.SerializeToString()) if(num%100==0): #每完成100个转换,打印一次 print(num) writer.close() output_path = os.path.join(os.getcwd(), FLAGS.output_path) print('Successfully created the TFRecords: {}'.format(output_path))if __name__ == '__main__': tf.app.run()
The commands I ran:
```
python3 generate_tfrecord.py --csv_input=data/whsyxt_train_labels.csv --output_path=data/whsyxt_train.tfrecord
python3 generate_tfrecord.py --csv_input=data/whsyxt_test_labels.csv --output_path=data/whsyxt_test.tfrecord
python3 generate_tfrecord.py --csv_input=data/whsyxt_validation_labels.csv --output_path=data/whsyxt_validation.tfrecord
```
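A quick way to sanity-check the output is to count the records in each file with the same TF 1.x API the script already uses; the total should match the number of rows in the corresponding CSV split:

```python
import tensorflow as tf

# Iterate over the serialized examples and count them.
count = sum(1 for _ in tf.python_io.tf_record_iterator('data/whsyxt_train.tfrecord'))
print('train records:', count)
```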
And that's it: the three files needed for training are ready. The training procedure is covered in my next post.
References
[1] Introduction and Use - Tensorflow Object Detection API Tutorial. https://pythonprogramming.net/introduction-use-tensorflow-object-detection-api-tutorial/