Without further ado, let's go straight to the code.
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import sys
import gc
import time
import cv2
import random
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from random_eraser import get_random_eraser
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

datagen = ImageDataGenerator(
    rotation_range=20,          # rotation
    width_shift_range=0.1,      # horizontal shift
    # height_shift_range=0.2,   # vertical shift
    shear_range=0.5,            # shear: keep the x (or y) coordinates fixed while the y (or x) coordinates are shifted proportionally
    zoom_range=[0.9, 0.9],      # zoom; a single number zooms both axes equally, a list zooms width and height by different amounts; values between 0 and 1 enlarge the image, values above 1 shrink it
    channel_shift_range=40,     # shift channel values to change the image colour; larger values give a stronger shift
    horizontal_flip=True,       # horizontal flip (use vertical_flip for a vertical flip)
    fill_mode='nearest',        # how to fill pixels created by the transform: 'constant', 'nearest' (default), 'reflect' or 'wrap'
    preprocessing_function=get_random_eraser(p=0.7, v_l=0, v_h=255, s_l=0.01, s_h=0.03, r_1=1, r_2=1.5, pixel_level=True)
)

# train_generator = datagen.flow_from_directory(
#     'base/Images/',
#     save_to_dir='base/fake/',
#     batch_size=1
# )
# for i in range(5):
#     train_generator.next()

# df_train = pd.read_csv('base/Annotations/label.csv', header=None)
# df_train.columns = ['image_id', 'class', 'label']
# classes = ['collar_design_labels', 'neckline_design_labels', 'skirt_length_labels',
#            'sleeve_length_labels', 'neck_design_labels', 'coat_length_labels', 'lapel_design_labels',
#            'pant_length_labels']
# classes = ['collar_design_labels']

# for i in range(len(classes)):
#     gc.enable()
#     # one class at a time
#     cur_class = classes[i]
#     df_load = df_train[(df_train['class'] == cur_class)].copy()
#     df_load.reset_index(inplace=True)
#     del df_load['index']
#     # print(cur_class)
#     # load the data and labels
#     n = len(df_load)
#     # n_class = len(df_load['label'][0])
#     # width = 256
#     # X = np.zeros((n, width, width, 3), dtype=np.uint8)
#     # y = np.zeros((n, n_class), dtype=np.uint8)
#     print(f'starting load trainset {cur_class} {n}')
#     sys.stdout.flush()
#     for i in tqdm(range(n)):
#         # tmp_label = df_load['label'][i]
#         img = load_img('base/{0}'.format(df_load['image_id'][i]))
#         x = img_to_array(img)
#         x = x.reshape((1,) + x.shape)
#         m = 0
#         for batch in datagen.flow(x, batch_size=1):
#             # plt.imshow(array_to_img(batch[0]))
#             # print(batch)
#             array_to_img(batch[0]).save(f'base/fake/{format(df_load["image_id"][i])}-{m}.jpg')
#             m += 1
#             if m > 3:
#                 break
#     gc.collect()

img = load_img('base/Images/collar_design_labels/2f639f11de22076ead5fe1258eae024d.jpg')
plt.figure()
plt.imshow(img)
x = img_to_array(img)
x = x.reshape((1,) + x.shape)

i = 0
for batch in datagen.flow(x, batch_size=5):
    plt.figure()
    plt.imshow(array_to_img(batch[0]))
    # print(len(batch))
    i += 1
    if i > 0:
        break
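The get_random_eraser import above comes from a standalone random-erasing helper script, not from Keras itself. If you do not have that file, a rough, simplified sketch of such a preprocessing function (the name simple_random_eraser and the simplified rectangle sampling are my own illustration, not the original helper) can be passed to preprocessing_function in the same way:

import numpy as np

def simple_random_eraser(p=0.7, s_l=0.01, s_h=0.03, r_1=1.0, r_2=1.5, v_l=0, v_h=255):
    """Return a function usable as ImageDataGenerator(preprocessing_function=...)."""
    def eraser(img):
        # With probability 1 - p, leave the image untouched
        if np.random.rand() > p:
            return img
        h, w = img.shape[0], img.shape[1]
        # Sample the erased area (as a fraction of the image) and its aspect ratio
        s = np.random.uniform(s_l, s_h) * h * w
        r = np.random.uniform(r_1, r_2)
        eh, ew = int(np.sqrt(s / r)), int(np.sqrt(s * r))
        if eh == 0 or ew == 0 or eh >= h or ew >= w:
            return img
        top = np.random.randint(0, h - eh)
        left = np.random.randint(0, w - ew)
        # Pixel-level noise, similar in spirit to pixel_level=True in the original helper
        img[top:top + eh, left:left + ew, :] = np.random.uniform(v_l, v_h, (eh, ew, img.shape[2]))
        return img
    return eraser

# e.g. ImageDataGenerator(preprocessing_function=simple_random_eraser(p=0.7))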
# Multiple inputs: use the same random seed for both generators
# Define the image transformations here
gen = ImageDataGenerator(horizontal_flip=True,
                         vertical_flip=True,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         zoom_range=0.1,
                         rotation_range=40)

# Here is the function that merges our two generators
# We use the exact same generator with the same random seed for both the y and angle arrays
def gen_flow_for_two_inputs(X1, X2, y):
    genX1 = gen.flow(X1, y, batch_size=batch_size, seed=666)
    genX2 = gen.flow(X1, X2, batch_size=batch_size, seed=666)
    while True:
        X1i = genX1.next()
        X2i = genX2.next()
        # Assert arrays are equal - this was for peace of mind, but slows down training
        # np.testing.assert_array_equal(X1i[0], X2i[0])
        yield [X1i[0], X2i[1]], X1i[1]
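To see the merged generator in action end to end, here is a minimal, self-contained usage sketch. The model, the array names and the shapes (75x75 RGB images plus a one-dimensional side input such as an angle) are illustrative assumptions, not part of the original post:

import numpy as np
from keras.models import Model
from keras.layers import Input, Conv2D, GlobalAveragePooling2D, Dense, concatenate

# Dummy data with hypothetical shapes
X1_train = np.random.rand(64, 75, 75, 3)
X2_train = np.random.rand(64, 1)
y_train = np.random.randint(0, 2, size=(64, 1))

# A tiny two-input model: image branch plus the auxiliary scalar input
img_in = Input(shape=(75, 75, 3))
aux_in = Input(shape=(1,))
features = GlobalAveragePooling2D()(Conv2D(8, 3, activation='relu')(img_in))
merged = concatenate([features, aux_in])
out = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=[img_in, aux_in], outputs=out)
model.compile(optimizer='adam', loss='binary_crossentropy')

# gen_flow_for_two_inputs reads batch_size as a global, so define it before training starts
batch_size = 16
model.fit_generator(gen_flow_for_two_inputs(X1_train, X2_train, y_train),
                    steps_per_epoch=len(X1_train) // batch_size,
                    epochs=1)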
# Build the generator by hand so it yields multiple labels directly
generator = ImageDataGenerator(rotation_range=5.,
                               width_shift_range=0.1,
                               height_shift_range=0.1,
                               horizontal_flip=True,
                               vertical_flip=True)

def generate_data_generator(generator, X, Y1, Y2):
    genX = generator.flow(X, seed=7)
    genY1 = generator.flow(Y1, seed=7)   # same seed, so Y1 is transformed in step with X
    while True:
        Xi = genX.next()
        Yi1 = genY1.next()
        Yi2 = function(Y2)               # 'function' is a placeholder for whatever transform the second label needs
        yield Xi, [Yi1, Yi2]

model.fit_generator(generate_data_generator(generator, X, Y1, Y2), epochs=epochs)
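The list [Yi1, Yi2] yielded above is matched, in order, to the outputs of a two-output model. As a rough sketch (the layer names, shapes and losses below are illustrative assumptions, not from the original post), a compatible model could look like this, with the first output image-shaped so that Y1 can be augmented with the same seed as X, and the second output a plain vector produced by the user-supplied function(Y2):

from keras.models import Model
from keras.layers import Input, Conv2D, GlobalAveragePooling2D, Dense

inp = Input(shape=(64, 64, 3))
feat = Conv2D(8, 3, padding='same', activation='relu')(inp)
# First output: an image-shaped target (e.g. a mask), augmented in step with the input
mask_out = Conv2D(1, 1, padding='same', activation='sigmoid', name='mask')(feat)
# Second output: a plain vector target produced by function(Y2)
cls_out = Dense(5, activation='softmax', name='cls')(GlobalAveragePooling2D()(feat))

model = Model(inputs=inp, outputs=[mask_out, cls_out])
model.compile(optimizer='adam',
              loss=['binary_crossentropy', 'categorical_crossentropy'])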
from keras.callbacks import EarlyStopping, ReduceLROnPlateau

def batch_generator(generator, X, Y):
    # Note: flow(X) shuffles and yields only the images, while the full Y is repeated for
    # every batch; if each sample has its own label, use generator.flow(X, Y) instead so
    # that images and labels stay aligned.
    Xgen = generator.flow(X)
    while True:
        yield Xgen.next(), Y

# checkpointer is assumed to be a keras.callbacks.ModelCheckpoint instance defined elsewhere
h = model.fit_generator(batch_generator(datagen, X_all, y_all),
                        steps_per_epoch=len(X_all) // 32 + 1,
                        epochs=80,
                        workers=3,
                        callbacks=[EarlyStopping(patience=3),
                                   checkpointer,
                                   ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=1)],
                        validation_data=(X_val, y_val))
Extra background: reading an image into a NumPy array, resizing and saving it, plus data augmentation with ImageDataGenerator
Again, without further ado, here is the code.
from PIL import Image
import numpy as np
from keras.preprocessing import image
import matplotlib.pyplot as plt
import os
import cv2
# from scipy.misc import toimage
import matplotlib

# Build the image paths and the matching labels
file_dir = '../train/'
image_list = []
label_list = []
cate = [file_dir + x for x in os.listdir(file_dir) if os.path.isdir(file_dir + x)]
for name in cate:
    temp = name.split('/')
    path = '../train_new/' + temp[-1]
    isExists = os.path.exists(path)
    if not isExists:
        os.makedirs(path)                             # create the output directory if it does not exist
    class_path = name + "/"
    for file in os.listdir(class_path):
        print(file)
        img_obj = Image.open(class_path + file)       # read the image
        img_array = np.array(img_obj)
        resized = cv2.resize(img_array, (256, 256))   # resize
        resized = resized.astype('float32')
        resized /= 255.
        # plt.imshow(resized)
        # plt.show()
        save_path = path + '/' + file
        matplotlib.image.imsave(save_path, resized)   # save
Data augmentation in Keras
from PIL import Image
import numpy as np
from keras.preprocessing import image
import os
import cv2

# Build the image paths and the matching labels
file_dir = '../train/'
label_list = []
cate = [file_dir + x for x in os.listdir(file_dir) if os.path.isdir(file_dir + x)]
for name in cate:
    image_list = []
    class_path = name + "/"
    for file in os.listdir(class_path):
        image_list.append(class_path + file)
    batch_size = 64
    if len(image_list) < 10000:
        num = int(10000 / len(image_list))   # how many augmented copies to generate per image
    else:
        num = 0

    # Configure the generator
    datagen = image.ImageDataGenerator(fill_mode='wrap',          # fill mode
                                       rotation_range=40,         # rotation range
                                       width_shift_range=0.2,     # horizontal shift
                                       height_shift_range=0.2,    # vertical shift
                                       horizontal_flip=True,      # random horizontal flips
                                       vertical_flip=True,        # random vertical flips
                                       shear_range=0.2,
                                       rescale=1./255,            # rescaling
                                       data_format='channels_last')

    if num > 0:
        temp = name.split('/')
        path = '../train_datage/' + temp[-1]
        isExists = os.path.exists(path)
        if not isExists:
            os.makedirs(path)
        for image_path in image_list:
            i = 1
            img_obj = Image.open(image_path)                 # read the image
            img_array = np.array(img_obj)
            x = img_array.reshape((1,) + img_array.shape)    # flow() expects a 4-D array
            name_image = image_path.split('/')
            print(name_image)
            for batch in datagen.flow(x, batch_size=1,
                                      save_to_dir=path,
                                      save_prefix=name_image[-1][:-4] + '_',
                                      save_format='jpg'):
                i += 1
                if i > num:
                    break
That is the whole of this Keras ImageDataGenerator multi-input/multi-output example; I hope it gives you a useful reference.