From 081c7e53b564d2780876bca407726b3b9a0266e6 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Sat, 1 Sep 2018 16:35:01 +0900 Subject: [PATCH 01/49] #25 : copy origin articles --- .../deep_dream.py | 191 +++++++++++ .../neural_style_transfer.py | 296 ++++++++++++++++++ 2 files changed, 487 insertions(+) create mode 100644 25_Keras_examples_(3)_Generative_models_examples/deep_dream.py create mode 100644 25_Keras_examples_(3)_Generative_models_examples/neural_style_transfer.py diff --git a/25_Keras_examples_(3)_Generative_models_examples/deep_dream.py b/25_Keras_examples_(3)_Generative_models_examples/deep_dream.py new file mode 100644 index 0000000..aa9b435 --- /dev/null +++ b/25_Keras_examples_(3)_Generative_models_examples/deep_dream.py @@ -0,0 +1,191 @@ +'''Deep Dreaming in Keras. + +Run the script with: +``` +python deep_dream.py path_to_your_base_image.jpg prefix_for_results +``` +e.g.: +``` +python deep_dream.py img/mypic.jpg results/dream +``` +''' +from __future__ import print_function + +from keras.preprocessing.image import load_img, save_img, img_to_array +import numpy as np +import scipy +import argparse + +from keras.applications import inception_v3 +from keras import backend as K + +parser = argparse.ArgumentParser(description='Deep Dreams with Keras.') +parser.add_argument('base_image_path', metavar='base', type=str, + help='Path to the image to transform.') +parser.add_argument('result_prefix', metavar='res_prefix', type=str, + help='Prefix for the saved results.') + +args = parser.parse_args() +base_image_path = args.base_image_path +result_prefix = args.result_prefix + +# These are the names of the layers +# for which we try to maximize activation, +# as well as their weight in the final loss +# we try to maximize. +# You can tweak these setting to obtain new visual effects. +settings = { + 'features': { + 'mixed2': 0.2, + 'mixed3': 0.5, + 'mixed4': 2., + 'mixed5': 1.5, + }, +} + + +def preprocess_image(image_path): + # Util function to open, resize and format pictures + # into appropriate tensors. + img = load_img(image_path) + img = img_to_array(img) + img = np.expand_dims(img, axis=0) + img = inception_v3.preprocess_input(img) + return img + + +def deprocess_image(x): + # Util function to convert a tensor into a valid image. + if K.image_data_format() == 'channels_first': + x = x.reshape((3, x.shape[2], x.shape[3])) + x = x.transpose((1, 2, 0)) + else: + x = x.reshape((x.shape[1], x.shape[2], 3)) + x /= 2. + x += 0.5 + x *= 255. + x = np.clip(x, 0, 255).astype('uint8') + return x + +K.set_learning_phase(0) + +# Build the InceptionV3 network with our placeholder. +# The model will be loaded with pre-trained ImageNet weights. +model = inception_v3.InceptionV3(weights='imagenet', + include_top=False) +dream = model.input +print('Model loaded.') + +# Get the symbolic outputs of each "key" layer (we gave them unique names). +layer_dict = dict([(layer.name, layer) for layer in model.layers]) + +# Define the loss. +loss = K.variable(0.) +for layer_name in settings['features']: + # Add the L2 norm of the features of a layer to the loss. + assert (layer_name in layer_dict.keys(), + 'Layer ' + layer_name + ' not found in model.') + coeff = settings['features'][layer_name] + x = layer_dict[layer_name].output + # We avoid border artifacts by only involving non-border pixels in the loss. 
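+    # (Illustrative note, not in the upstream example) With 'channels_last' data,
+    # an activation of shape (1, 64, 64, 256) is cropped by x[:, 2:-2, 2:-2, :] to
+    # its inner 60x60 window, so border pixels never dominate the maximized term;
+    # `scaling` below is the element count of the full feature map, used to keep
+    # each layer's contribution roughly comparable.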
+ scaling = K.prod(K.cast(K.shape(x), 'float32')) + if K.image_data_format() == 'channels_first': + loss += coeff * K.sum(K.square(x[:, :, 2: -2, 2: -2])) / scaling + else: + loss += coeff * K.sum(K.square(x[:, 2: -2, 2: -2, :])) / scaling + +# Compute the gradients of the dream wrt the loss. +grads = K.gradients(loss, dream)[0] +# Normalize gradients. +grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon()) + +# Set up function to retrieve the value +# of the loss and gradients given an input image. +outputs = [loss, grads] +fetch_loss_and_grads = K.function([dream], outputs) + + +def eval_loss_and_grads(x): + outs = fetch_loss_and_grads([x]) + loss_value = outs[0] + grad_values = outs[1] + return loss_value, grad_values + + +def resize_img(img, size): + img = np.copy(img) + if K.image_data_format() == 'channels_first': + factors = (1, 1, + float(size[0]) / img.shape[2], + float(size[1]) / img.shape[3]) + else: + factors = (1, + float(size[0]) / img.shape[1], + float(size[1]) / img.shape[2], + 1) + return scipy.ndimage.zoom(img, factors, order=1) + + +def gradient_ascent(x, iterations, step, max_loss=None): + for i in range(iterations): + loss_value, grad_values = eval_loss_and_grads(x) + if max_loss is not None and loss_value > max_loss: + break + print('..Loss value at', i, ':', loss_value) + x += step * grad_values + return x + + +"""Process: + +- Load the original image. +- Define a number of processing scales (i.e. image shapes), + from smallest to largest. +- Resize the original image to the smallest scale. +- For every scale, starting with the smallest (i.e. current one): + - Run gradient ascent + - Upscale image to the next scale + - Reinject the detail that was lost at upscaling time +- Stop when we are back to the original size. + +To obtain the detail lost during upscaling, we simply +take the original image, shrink it down, upscale it, +and compare the result to the (resized) original image. +""" + + +# Playing with these hyperparameters will also allow you to achieve new effects +step = 0.01 # Gradient ascent step size +num_octave = 3 # Number of scales at which to run gradient ascent +octave_scale = 1.4 # Size ratio between scales +iterations = 20 # Number of ascent steps per scale +max_loss = 10. 
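+# (Worked example, assuming a 600-row by 800-column 'channels_last' input and the
+# defaults above) original_shape would be (600, 800) and the loop below would give
+# successive_shapes = [(306, 408), (428, 571), (600, 800)]: gradient ascent runs on
+# the smallest octave first, then the image is upscaled and the detail lost by the
+# earlier shrinking is re-injected before the next octave.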
+ +img = preprocess_image(base_image_path) +if K.image_data_format() == 'channels_first': + original_shape = img.shape[2:] +else: + original_shape = img.shape[1:3] +successive_shapes = [original_shape] +for i in range(1, num_octave): + shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape]) + successive_shapes.append(shape) +successive_shapes = successive_shapes[::-1] +original_img = np.copy(img) +shrunk_original_img = resize_img(img, successive_shapes[0]) + +for shape in successive_shapes: + print('Processing image shape', shape) + img = resize_img(img, shape) + img = gradient_ascent(img, + iterations=iterations, + step=step, + max_loss=max_loss) + upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape) + same_size_original = resize_img(original_img, shape) + lost_detail = same_size_original - upscaled_shrunk_original_img + + img += lost_detail + shrunk_original_img = resize_img(original_img, shape) + +save_img(result_prefix + '.png', deprocess_image(np.copy(img))) \ No newline at end of file diff --git a/25_Keras_examples_(3)_Generative_models_examples/neural_style_transfer.py b/25_Keras_examples_(3)_Generative_models_examples/neural_style_transfer.py new file mode 100644 index 0000000..d450ff4 --- /dev/null +++ b/25_Keras_examples_(3)_Generative_models_examples/neural_style_transfer.py @@ -0,0 +1,296 @@ +'''Neural style transfer with Keras. + +Run the script with: +``` +python neural_style_transfer.py path_to_your_base_image.jpg \ + path_to_your_reference.jpg prefix_for_results +``` +e.g.: +``` +python neural_style_transfer.py img/tuebingen.jpg \ + img/starry_night.jpg results/my_result +``` +Optional parameters: +``` +--iter, To specify the number of iterations \ + the style transfer takes place (Default is 10) +--content_weight, The weight given to the content loss (Default is 0.025) +--style_weight, The weight given to the style loss (Default is 1.0) +--tv_weight, The weight given to the total variation loss (Default is 1.0) +``` + +It is preferable to run this script on GPU, for speed. + +Example result: https://twitter.com/fchollet/status/686631033085677568 + +# Details + +Style transfer consists in generating an image +with the same "content" as a base image, but with the +"style" of a different picture (typically artistic). + +This is achieved through the optimization of a loss function +that has 3 components: "style loss", "content loss", +and "total variation loss": + +- The total variation loss imposes local spatial continuity between +the pixels of the combination image, giving it visual coherence. + +- The style loss is where the deep learning keeps in --that one is defined +using a deep convolutional neural network. Precisely, it consists in a sum of +L2 distances between the Gram matrices of the representations of +the base image and the style reference image, extracted from +different layers of a convnet (trained on ImageNet). The general idea +is to capture color/texture information at different spatial +scales (fairly large scales --defined by the depth of the layer considered). + + - The content loss is a L2 distance between the features of the base +image (extracted from a deep layer) and the features of the combination image, +keeping the generated image close enough to the original one. 
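+
+In pseudo-code, the total loss assembled further down in this script is roughly:
+    loss = content_weight * content_loss(base, combination)
+         + (style_weight / number of style layers) * sum of per-layer style losses
+         + total_variation_weight * total_variation_loss(combination)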
+ +# References + - [A Neural Algorithm of Artistic Style](http://arxiv.org/abs/1508.06576) +''' + +from __future__ import print_function +from keras.preprocessing.image import load_img, save_img, img_to_array +import numpy as np +from scipy.optimize import fmin_l_bfgs_b +import time +import argparse + +from keras.applications import vgg19 +from keras import backend as K + +parser = argparse.ArgumentParser(description='Neural style transfer with Keras.') +parser.add_argument('base_image_path', metavar='base', type=str, + help='Path to the image to transform.') +parser.add_argument('style_reference_image_path', metavar='ref', type=str, + help='Path to the style reference image.') +parser.add_argument('result_prefix', metavar='res_prefix', type=str, + help='Prefix for the saved results.') +parser.add_argument('--iter', type=int, default=10, required=False, + help='Number of iterations to run.') +parser.add_argument('--content_weight', type=float, default=0.025, required=False, + help='Content weight.') +parser.add_argument('--style_weight', type=float, default=1.0, required=False, + help='Style weight.') +parser.add_argument('--tv_weight', type=float, default=1.0, required=False, + help='Total Variation weight.') + +args = parser.parse_args() +base_image_path = args.base_image_path +style_reference_image_path = args.style_reference_image_path +result_prefix = args.result_prefix +iterations = args.iter + +# these are the weights of the different loss components +total_variation_weight = args.tv_weight +style_weight = args.style_weight +content_weight = args.content_weight + +# dimensions of the generated picture. +width, height = load_img(base_image_path).size +img_nrows = 400 +img_ncols = int(width * img_nrows / height) + +# util function to open, resize and format pictures into appropriate tensors + + +def preprocess_image(image_path): + img = load_img(image_path, target_size=(img_nrows, img_ncols)) + img = img_to_array(img) + img = np.expand_dims(img, axis=0) + img = vgg19.preprocess_input(img) + return img + +# util function to convert a tensor into a valid image + + +def deprocess_image(x): + if K.image_data_format() == 'channels_first': + x = x.reshape((3, img_nrows, img_ncols)) + x = x.transpose((1, 2, 0)) + else: + x = x.reshape((img_nrows, img_ncols, 3)) + # Remove zero-center by mean pixel + x[:, :, 0] += 103.939 + x[:, :, 1] += 116.779 + x[:, :, 2] += 123.68 + # 'BGR'->'RGB' + x = x[:, :, ::-1] + x = np.clip(x, 0, 255).astype('uint8') + return x + +# get tensor representations of our images +base_image = K.variable(preprocess_image(base_image_path)) +style_reference_image = K.variable(preprocess_image(style_reference_image_path)) + +# this will contain our generated image +if K.image_data_format() == 'channels_first': + combination_image = K.placeholder((1, 3, img_nrows, img_ncols)) +else: + combination_image = K.placeholder((1, img_nrows, img_ncols, 3)) + +# combine the 3 images into a single Keras tensor +input_tensor = K.concatenate([base_image, + style_reference_image, + combination_image], axis=0) + +# build the VGG16 network with our 3 images as input +# the model will be loaded with pre-trained ImageNet weights +model = vgg19.VGG19(input_tensor=input_tensor, + weights='imagenet', include_top=False) +print('Model loaded.') + +# get the symbolic outputs of each "key" layer (we gave them unique names). 
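+# (Added remark, not in the upstream example) input_tensor concatenates
+# [base_image, style_reference_image, combination_image] along axis 0, so for every
+# entry of outputs_dict, batch index 0 is the base image, 1 the style reference and
+# 2 the combination image; the loss terms below rely on exactly that ordering.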
+outputs_dict = dict([(layer.name, layer.output) for layer in model.layers]) + +# compute the neural style loss +# first we need to define 4 util functions + +# the gram matrix of an image tensor (feature-wise outer product) + + +def gram_matrix(x): + assert K.ndim(x) == 3 + if K.image_data_format() == 'channels_first': + features = K.batch_flatten(x) + else: + features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1))) + gram = K.dot(features, K.transpose(features)) + return gram + +# the "style loss" is designed to maintain +# the style of the reference image in the generated image. +# It is based on the gram matrices (which capture style) of +# feature maps from the style reference image +# and from the generated image + + +def style_loss(style, combination): + assert K.ndim(style) == 3 + assert K.ndim(combination) == 3 + S = gram_matrix(style) + C = gram_matrix(combination) + channels = 3 + size = img_nrows * img_ncols + return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2)) + +# an auxiliary loss function +# designed to maintain the "content" of the +# base image in the generated image + + +def content_loss(base, combination): + return K.sum(K.square(combination - base)) + +# the 3rd loss function, total variation loss, +# designed to keep the generated image locally coherent + + +def total_variation_loss(x): + assert K.ndim(x) == 4 + if K.image_data_format() == 'channels_first': + a = K.square( + x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, 1:, :img_ncols - 1]) + b = K.square( + x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, :img_nrows - 1, 1:]) + else: + a = K.square( + x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :]) + b = K.square( + x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :]) + return K.sum(K.pow(a + b, 1.25)) + +# combine these loss functions into a single scalar +loss = K.variable(0.) +layer_features = outputs_dict['block5_conv2'] +base_image_features = layer_features[0, :, :, :] +combination_features = layer_features[2, :, :, :] +loss += content_weight * content_loss(base_image_features, + combination_features) + +feature_layers = ['block1_conv1', 'block2_conv1', + 'block3_conv1', 'block4_conv1', + 'block5_conv1'] +for layer_name in feature_layers: + layer_features = outputs_dict[layer_name] + style_reference_features = layer_features[1, :, :, :] + combination_features = layer_features[2, :, :, :] + sl = style_loss(style_reference_features, combination_features) + loss += (style_weight / len(feature_layers)) * sl +loss += total_variation_weight * total_variation_loss(combination_image) + +# get the gradients of the generated image wrt the loss +grads = K.gradients(loss, combination_image) + +outputs = [loss] +if isinstance(grads, (list, tuple)): + outputs += grads +else: + outputs.append(grads) + +f_outputs = K.function([combination_image], outputs) + + +def eval_loss_and_grads(x): + if K.image_data_format() == 'channels_first': + x = x.reshape((1, 3, img_nrows, img_ncols)) + else: + x = x.reshape((1, img_nrows, img_ncols, 3)) + outs = f_outputs([x]) + loss_value = outs[0] + if len(outs[1:]) == 1: + grad_values = outs[1].flatten().astype('float64') + else: + grad_values = np.array(outs[1:]).flatten().astype('float64') + return loss_value, grad_values + +# this Evaluator class makes it possible +# to compute loss and gradients in one pass +# while retrieving them via two separate functions, +# "loss" and "grads". 
This is done because scipy.optimize +# requires separate functions for loss and gradients, +# but computing them separately would be inefficient. + + +class Evaluator(object): + + def __init__(self): + self.loss_value = None + self.grads_values = None + + def loss(self, x): + assert self.loss_value is None + loss_value, grad_values = eval_loss_and_grads(x) + self.loss_value = loss_value + self.grad_values = grad_values + return self.loss_value + + def grads(self, x): + assert self.loss_value is not None + grad_values = np.copy(self.grad_values) + self.loss_value = None + self.grad_values = None + return grad_values + +evaluator = Evaluator() + +# run scipy-based optimization (L-BFGS) over the pixels of the generated image +# so as to minimize the neural style loss +x = preprocess_image(base_image_path) + +for i in range(iterations): + print('Start of iteration', i) + start_time = time.time() + x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), + fprime=evaluator.grads, maxfun=20) + print('Current loss value:', min_val) + # save current generated image + img = deprocess_image(x.copy()) + fname = result_prefix + '_at_iteration_%d.png' % i + save_img(fname, img) + end_time = time.time() + print('Image saved as', fname) + print('Iteration %d completed in %ds' % (i, end_time - start_time)) \ No newline at end of file From 811f27e36fb2c6e9001e8e3dfaad7c49d7b122f6 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Mon, 3 Sep 2018 10:55:39 +0900 Subject: [PATCH 02/49] =?UTF-8?q?=20#25=20:=20=EA=B8=B0=EB=B3=B8=20?= =?UTF-8?q?=EC=A3=BC=EC=84=9D=20=ED=95=9C=EA=B8=80=ED=99=94=20=EC=99=84?= =?UTF-8?q?=EB=A3=8C,=20code=20line=EB=B3=84=20=EC=B2=A8=EA=B0=80=20?= =?UTF-8?q?=EC=A3=BC=EC=84=9D=EC=9D=84=20=EB=B6=99=EC=97=AC=EC=A4=98?= =?UTF-8?q?=EC=95=BC=20=ED=95=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../deep_dream.py | 82 ++++++++++--------- 1 file changed, 43 insertions(+), 39 deletions(-) diff --git a/25_Keras_examples_(3)_Generative_models_examples/deep_dream.py b/25_Keras_examples_(3)_Generative_models_examples/deep_dream.py index aa9b435..54d9087 100644 --- a/25_Keras_examples_(3)_Generative_models_examples/deep_dream.py +++ b/25_Keras_examples_(3)_Generative_models_examples/deep_dream.py @@ -1,10 +1,17 @@ -'''Deep Dreaming in Keras. +'''케라스로 Deep Dreaming 하기 +원문 : https://github.com/keras-team/keras/tree/master/examples/deep_dream.py +> 현 스크립트는 케라스를 이용해 입력 이미지의 특징들을 pareidolia 알고리즘으로 분석, 강화시켜 +> 마치 꿈, 환각같은 이미지 형태로 출력해주는 튜토리얼입니다. -Run the script with: +* deepdream +* keras +* CNN + +현 스크립트를 실행하기 위해선: ``` python deep_dream.py path_to_your_base_image.jpg prefix_for_results ``` -e.g.: +예 : ``` python deep_dream.py img/mypic.jpg results/dream ``` @@ -19,6 +26,7 @@ from keras.applications import inception_v3 from keras import backend as K +# 입출력 이미지 경로 설정 parser = argparse.ArgumentParser(description='Deep Dreams with Keras.') parser.add_argument('base_image_path', metavar='base', type=str, help='Path to the image to transform.') @@ -29,11 +37,9 @@ base_image_path = args.base_image_path result_prefix = args.result_prefix -# These are the names of the layers -# for which we try to maximize activation, -# as well as their weight in the final loss -# we try to maximize. -# You can tweak these setting to obtain new visual effects. +# 아래 코드는 마지막 손실에서 가중치와 활성화를 최대로 하는 계층들의 이름입니다 +# 이제 최대화를 시도해봅시다. +# 밑의 설정들을 수정해서 새로운 시각효과를 얻어봅시다. 
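+# (Added note) 'mixed2'..'mixed5' are progressively deeper InceptionV3 blocks;
+# shifting weight toward the deeper ones (e.g. raising 'mixed5', lowering 'mixed2')
+# tends to favour larger, more object-like patterns, while weighting the earlier
+# blocks favours fine texture. Layer names must match those in the loaded model.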
settings = { 'features': { 'mixed2': 0.2, @@ -45,8 +51,7 @@ def preprocess_image(image_path): - # Util function to open, resize and format pictures - # into appropriate tensors. + # 이미지들을 열어서 적절한 tensor에 resize, format 해주는 함수 img = load_img(image_path) img = img_to_array(img) img = np.expand_dims(img, axis=0) @@ -55,7 +60,7 @@ def preprocess_image(image_path): def deprocess_image(x): - # Util function to convert a tensor into a valid image. + # 하나의 tensor를 검증 이미지로 변환해주는 함수 if K.image_data_format() == 'channels_first': x = x.reshape((3, x.shape[2], x.shape[3])) x = x.transpose((1, 2, 0)) @@ -69,38 +74,37 @@ def deprocess_image(x): K.set_learning_phase(0) -# Build the InceptionV3 network with our placeholder. -# The model will be loaded with pre-trained ImageNet weights. +# 실험하실 placehorder(입력 데이터)기반으로 InceptionV3 network를 설계합니다. +# 해당 모델은 ImageNet으로 선행학습된 가중치를 가져올 겁니다. model = inception_v3.InceptionV3(weights='imagenet', include_top=False) dream = model.input print('Model loaded.') -# Get the symbolic outputs of each "key" layer (we gave them unique names). +# 각 핵심 계층의 상징적인 결과를 가져옵니다(고유한 이름을 부여해야 합니다.). layer_dict = dict([(layer.name, layer) for layer in model.layers]) -# Define the loss. +# 손실을 정의합니다. loss = K.variable(0.) for layer_name in settings['features']: - # Add the L2 norm of the features of a layer to the loss. + # 계층의 특징들에 대한 L2 norm을 손실에 추가합니다. assert (layer_name in layer_dict.keys(), 'Layer ' + layer_name + ' not found in model.') coeff = settings['features'][layer_name] x = layer_dict[layer_name].output - # We avoid border artifacts by only involving non-border pixels in the loss. + # 손실에서 경계부분을 제외한 픽셀만 포함시키도록 artifacts(예술작품?) 경계를 피해줍니다. scaling = K.prod(K.cast(K.shape(x), 'float32')) if K.image_data_format() == 'channels_first': loss += coeff * K.sum(K.square(x[:, :, 2: -2, 2: -2])) / scaling else: loss += coeff * K.sum(K.square(x[:, 2: -2, 2: -2, :])) / scaling -# Compute the gradients of the dream wrt the loss. +# 손실에 대해 실제 'dream' 모델의 기울기를 계산합니다. grads = K.gradients(loss, dream)[0] -# Normalize gradients. +# 기울기들을 표준화합니다 grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon()) -# Set up function to retrieve the value -# of the loss and gradients given an input image. +# 주어진 입력이미지의 기울기들과 손실 값을 검색하는 함수를 설정합니다. outputs = [loss, grads] fetch_loss_and_grads = K.function([dream], outputs) @@ -136,29 +140,29 @@ def gradient_ascent(x, iterations, step, max_loss=None): return x -"""Process: +"""진행과정 + +- 원본 이미지를 불러옵니다. +- 아주 작은것부터 가장 큰 것까지, +- 여러가지의 처리 구조를 정의합니다(예: 이미지 형태) +- 원본 이미지를 가장작은 규모로 크기 변경합니다. +- 모든 계층 구조를 위해, 가장 작은 단위에서 시작합니다.(예, 현재 척도): -- Load the original image. -- Define a number of processing scales (i.e. image shapes), - from smallest to largest. -- Resize the original image to the smallest scale. -- For every scale, starting with the smallest (i.e. current one): - - Run gradient ascent - - Upscale image to the next scale - - Reinject the detail that was lost at upscaling time -- Stop when we are back to the original size. + - 기울기 상승 진행 + - 이미지를 다음 규모로 업그레이드 + - 업그레이드시 손실된 세부정보를 재입력 +- 원래 크기로 돌아갔을 때, 정지합니다. -To obtain the detail lost during upscaling, we simply -take the original image, shrink it down, upscale it, -and compare the result to the (resized) original image. 
+업그레이드 동안 손실된 세부정보를 얻기 위해, 그저 원본 이미지를 가져와, +축소, 확장하고, 그 결과를 원래 (크기 변경된) 이미지와 비교합니다 """ -# Playing with these hyperparameters will also allow you to achieve new effects -step = 0.01 # Gradient ascent step size -num_octave = 3 # Number of scales at which to run gradient ascent -octave_scale = 1.4 # Size ratio between scales -iterations = 20 # Number of ascent steps per scale +# 아래 하이퍼파라미터들을 사용하면 새로운 효과들을 얻을 수 있습니다. +step = 0.01 # 기울기 상승 step의 크기 +num_octave = 3 # 기울기 상승을 실행할 때, 계층 구조의 수(?) +octave_scale = 1.4 # 계층들 간 비율 +iterations = 20 # 계층마다 (기울기) 상승 step의 횟수 max_loss = 10. img = preprocess_image(base_image_path) From f5b7887625da7880b49595a889f11c8020370a27 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Mon, 3 Sep 2018 11:47:58 +0900 Subject: [PATCH 03/49] =?UTF-8?q?#25=20:=20=EA=B8=B0=EB=B3=B8=20=EC=A3=BC?= =?UTF-8?q?=EC=84=9D=20=ED=95=9C=EA=B8=80=ED=99=94=20=EC=99=84=EB=A3=8C,?= =?UTF-8?q?=20code=20line=EB=B3=84=20=EC=B2=A8=EA=B0=80=20=EC=A3=BC?= =?UTF-8?q?=EC=84=9D=EC=9D=84=20=EB=B6=99=EC=97=AC=EC=A4=98=EC=95=BC?= =?UTF-8?q?=ED=95=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../neural_style_transfer.py | 126 ++++++++---------- 1 file changed, 55 insertions(+), 71 deletions(-) diff --git a/25_Keras_examples_(3)_Generative_models_examples/neural_style_transfer.py b/25_Keras_examples_(3)_Generative_models_examples/neural_style_transfer.py index d450ff4..836c82e 100644 --- a/25_Keras_examples_(3)_Generative_models_examples/neural_style_transfer.py +++ b/25_Keras_examples_(3)_Generative_models_examples/neural_style_transfer.py @@ -1,54 +1,48 @@ -'''Neural style transfer with Keras. +'''케라스로 신경 스타일로 바꾸기 -Run the script with: +현 스크립트를 실행하기 위해선: ``` python neural_style_transfer.py path_to_your_base_image.jpg \ path_to_your_reference.jpg prefix_for_results ``` -e.g.: +예 : ``` python neural_style_transfer.py img/tuebingen.jpg \ img/starry_night.jpg results/my_result ``` -Optional parameters: +추가 파라미터들은 : ``` ---iter, To specify the number of iterations \ - the style transfer takes place (Default is 10) ---content_weight, The weight given to the content loss (Default is 0.025) ---style_weight, The weight given to the style loss (Default is 1.0) ---tv_weight, The weight given to the total variation loss (Default is 1.0) +--iter, 스타일 변화를 수행하는 반복횟수를 지정함.(고정값은 10) +--content_weight, 컨텐츠 손실에 대한 가중치 (고정값은 0.025) +--style_weight, 스타일 손실에 대한 가중치 (고정값은 1.0) +--tv_weight, 전체 변화 손실에 대한 가중치 (고정값은 1.0) ``` -It is preferable to run this script on GPU, for speed. +속도를 위해, GPU에서 현 스크립트를 실행하길 권합니다. -Example result: https://twitter.com/fchollet/status/686631033085677568 +예시에 대한 결과 : https://twitter.com/fchollet/status/686631033085677568 -# Details +# 상세설명 -Style transfer consists in generating an image -with the same "content" as a base image, but with the -"style" of a different picture (typically artistic). +스타일 변형은 기존 이미지와 동일한 '콘텐츠'를 사용하여 +이미지를 생성하지만 다른 이미지의 '스타일'을 갖도록 생성합니다. -This is achieved through the optimization of a loss function -that has 3 components: "style loss", "content loss", -and "total variation loss": +손실 함수의 최적화를 통해 얻을 수 있으며, +이는 3가지 요소를 가집니다.: "style loss", "content loss", "total variation loss" -- The total variation loss imposes local spatial continuity between -the pixels of the combination image, giving it visual coherence. + - 전체 변화 손실(total variation loss)은 조합 이미지의 픽셀들 사이에 +local 공간 연속성을 부과하여 시각적 일관화를 제공합니다. -- The style loss is where the deep learning keeps in --that one is defined -using a deep convolutional neural network. 
Precisely, it consists in a sum of -L2 distances between the Gram matrices of the representations of -the base image and the style reference image, extracted from -different layers of a convnet (trained on ImageNet). The general idea -is to capture color/texture information at different spatial -scales (fairly large scales --defined by the depth of the layer considered). + - 스타일 손실(The style loss)은 딥러닝이 시행되는 구간으로, 이는 deep CNN을 사용하여 + 정의합니다. 정확히는, (ImageNet으로 훈련된) 다른 계층들에서 추출된, + 기본 이미지의 표현값들의 Gram matrix와 스타일 기준 이미지간의 L2 거리의 합으로 구성됩니다. + 일반적인 아이디어는 색상/질감 정보를 다양한 공간적 척도(scale) + (꽤나 큰 구조 -- 언급된 계층의 깊이로 정의)로 수집하는 겁니다. - - The content loss is a L2 distance between the features of the base -image (extracted from a deep layer) and the features of the combination image, -keeping the generated image close enough to the original one. + - 콘텐츠 손실(The content loss)은 기본 이미지 특징과 결합 이미지 간의 L2 거리로, + 생성된 이미지가 원본에 가까워 지도록 해줍니다. -# References +# 참고 자료 - [A Neural Algorithm of Artistic Style](http://arxiv.org/abs/1508.06576) ''' @@ -84,18 +78,17 @@ result_prefix = args.result_prefix iterations = args.iter -# these are the weights of the different loss components +# 서로 다른 손실 요소들의 가중치들을 가져옵니다. total_variation_weight = args.tv_weight style_weight = args.style_weight content_weight = args.content_weight -# dimensions of the generated picture. +# 생성할 이미지의 차원을 설정합니다. width, height = load_img(base_image_path).size img_nrows = 400 img_ncols = int(width * img_nrows / height) -# util function to open, resize and format pictures into appropriate tensors - +# 아래 함수는 이미지를 열어서 크기를 조정하고 적절한 tensor로 format시킵니다. def preprocess_image(image_path): img = load_img(image_path, target_size=(img_nrows, img_ncols)) @@ -104,8 +97,7 @@ def preprocess_image(image_path): img = vgg19.preprocess_input(img) return img -# util function to convert a tensor into a valid image - +# 아래 함수는 tensor를 검증 이미지로 전환시킵니다. def deprocess_image(x): if K.image_data_format() == 'channels_first': @@ -113,7 +105,7 @@ def deprocess_image(x): x = x.transpose((1, 2, 0)) else: x = x.reshape((img_nrows, img_ncols, 3)) - # Remove zero-center by mean pixel + # 평균 픽셀을 기준으로 zero중심 제거 x[:, :, 0] += 103.939 x[:, :, 1] += 116.779 x[:, :, 2] += 123.68 @@ -122,35 +114,35 @@ def deprocess_image(x): x = np.clip(x, 0, 255).astype('uint8') return x -# get tensor representations of our images + +# 주어진 이미지를 tensor형식으로 갖는다. base_image = K.variable(preprocess_image(base_image_path)) style_reference_image = K.variable(preprocess_image(style_reference_image_path)) -# this will contain our generated image +# 생성될 이미지를 갖도록 설정합니다. if K.image_data_format() == 'channels_first': combination_image = K.placeholder((1, 3, img_nrows, img_ncols)) else: combination_image = K.placeholder((1, img_nrows, img_ncols, 3)) -# combine the 3 images into a single Keras tensor +# 3가지 이미지를 1개의 keras tensor로 결합시킵니다. input_tensor = K.concatenate([base_image, style_reference_image, combination_image], axis=0) -# build the VGG16 network with our 3 images as input -# the model will be loaded with pre-trained ImageNet weights +# (갖고있는)3가지 이미지를 입력으로하는 VGG16 네트워크를 구성합니다. +# 해당 model은 미리 ImageNet으로 학습된 가중치를 갖습니다. model = vgg19.VGG19(input_tensor=input_tensor, weights='imagenet', include_top=False) print('Model loaded.') -# get the symbolic outputs of each "key" layer (we gave them unique names). +# 각 핵십 계층의 상징적 결과들을 갖습니다(그 결과들에 이름을 지정합니다.) 
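+# (Illustrative sketch, not in the original) For a 'channels_last' feature map f of
+# shape (h, w, c), the gram_matrix() defined below is equivalent to:
+#     F = f.reshape(h * w, c)   # one row per spatial position
+#     G = F.T @ F               # (c, c); G[i, j] sums f[..., i] * f[..., j] over positions
+# i.e. it records which channels co-activate while discarding where they activate,
+# which is why it captures texture/"style" rather than spatial "content".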
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers]) -# compute the neural style loss -# first we need to define 4 util functions - -# the gram matrix of an image tensor (feature-wise outer product) +# 신경 스타일 손실을 계산합니다. +# 먼저 4가지 함수들을 정의할 필요가 있습니다. +# 이미지 tensor의 gram matrix(feature-wise 외적연산) def gram_matrix(x): assert K.ndim(x) == 3 @@ -161,11 +153,8 @@ def gram_matrix(x): gram = K.dot(features, K.transpose(features)) return gram -# the "style loss" is designed to maintain -# the style of the reference image in the generated image. -# It is based on the gram matrices (which capture style) of -# feature maps from the style reference image -# and from the generated image +# 스타일 손실은 생성된 이미지에서 기존 이미지의 스타일을 유지하도록 설계됩니다. +# 스타일 기준 이미지와 생성된 이미지에서 특징에 대한 gram matrix(스타일 수집)를 기반으로 합니다. def style_loss(style, combination): @@ -177,17 +166,15 @@ def style_loss(style, combination): size = img_nrows * img_ncols return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2)) -# an auxiliary loss function -# designed to maintain the "content" of the -# base image in the generated image +# 보조 손실 함수는 생성된 이미지에서 +# 기존 이미지의 '콘텐츠'를 유지하도록 설계되었습니다. def content_loss(base, combination): return K.sum(K.square(combination - base)) -# the 3rd loss function, total variation loss, -# designed to keep the generated image locally coherent - +# 3번째 손실함수인 전체 변화 손실(total variation loss)은 +# 생성된 이미지를 지역적으로 일관성있게 유지되도록 설계되었습니다. def total_variation_loss(x): assert K.ndim(x) == 4 @@ -203,7 +190,7 @@ def total_variation_loss(x): x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :]) return K.sum(K.pow(a + b, 1.25)) -# combine these loss functions into a single scalar +# 이런 손실 함수들을 단일 숫자(스칼라)로 결합시킵니다. loss = K.variable(0.) layer_features = outputs_dict['block5_conv2'] base_image_features = layer_features[0, :, :, :] @@ -222,7 +209,7 @@ def total_variation_loss(x): loss += (style_weight / len(feature_layers)) * sl loss += total_variation_weight * total_variation_loss(combination_image) -# get the gradients of the generated image wrt the loss +# 손실에 의해 생성된 이미지의 기울기를 갖습니다. grads = K.gradients(loss, combination_image) outputs = [loss] @@ -247,13 +234,10 @@ def eval_loss_and_grads(x): grad_values = np.array(outs[1:]).flatten().astype('float64') return loss_value, grad_values -# this Evaluator class makes it possible -# to compute loss and gradients in one pass -# while retrieving them via two separate functions, -# "loss" and "grads". This is done because scipy.optimize -# requires separate functions for loss and gradients, -# but computing them separately would be inefficient. - +# Evaluator 클래스는 '손실'과 '기울기'라는 2가지 별도 함수들을 통해 +# 한번으로 손실과 기울기를 검색하는 동시에 계산을 할 수 있도록 합니다. +# 왜 그렇게 진행했냐면 scipy.optimize는 손실과 기울기에 대한 별도의 함수를 +# 요구하지만 따로 계산할 경우 비효율적일수 있기 때문입니다. class Evaluator(object): @@ -277,8 +261,8 @@ def grads(self, x): evaluator = Evaluator() -# run scipy-based optimization (L-BFGS) over the pixels of the generated image -# so as to minimize the neural style loss +# 신결 스타일 손실을 최소화하기 위해 +# 생성된 이미지 픽셀들을 scipy기반으로 최적화(L-BFGS)합니다. x = preprocess_image(base_image_path) for i in range(iterations): @@ -287,7 +271,7 @@ def grads(self, x): x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20) print('Current loss value:', min_val) - # save current generated image + # 현재 생성된 이미지를 저장합니다. 
img = deprocess_image(x.copy()) fname = result_prefix + '_at_iteration_%d.png' % i save_img(fname, img) From 13093b791f0112e1215075e81bc47a1325f1ce3a Mon Sep 17 00:00:00 2001 From: mike2ox Date: Wed, 19 Sep 2018 00:32:57 +0900 Subject: [PATCH 04/49] =?UTF-8?q?#25=20:=20=EA=B8=B0=EB=B3=B8=20=EC=A3=BC?= =?UTF-8?q?=EC=84=9D=20=EB=B2=88=EC=97=AD=20=EC=99=84=EB=A3=8C.=20?= =?UTF-8?q?=EC=A4=91=EA=B0=84=EB=A7=88=EB=8B=A4=20=ED=95=B5=EC=8B=AC=20lin?= =?UTF-8?q?e=20=EC=B6=94=EA=B0=80=ED=95=B4=EC=95=BC=ED=95=A8=20(=EC=B6=94?= =?UTF-8?q?=EA=B0=80=20=ED=99=95=EC=9D=B8=20=20:=20#L37)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../lstm_text_generation.py | 113 ++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 25_Keras_examples_(3)_Generative_models_examples/lstm_text_generation.py diff --git a/25_Keras_examples_(3)_Generative_models_examples/lstm_text_generation.py b/25_Keras_examples_(3)_Generative_models_examples/lstm_text_generation.py new file mode 100644 index 0000000..372e6bd --- /dev/null +++ b/25_Keras_examples_(3)_Generative_models_examples/lstm_text_generation.py @@ -0,0 +1,113 @@ +'''니체의 작품에서 텍스트를 만드는 예제 스크립트 + +생성된 텍스트가 일관성 있어지기까지 최소 20 에폭(epoch)이 요구됩니다. + +순환망(recurrent network)는 상당히 많은 계산량을 가졌기에 +GPU환경에서 이 스크립트를 돌리길 권장합니다. + +만약 새로운 데이터 기반으로 이 스크립트를 시도한다면, +데이터에 있는 말뭉치(corpus)가 적어도 100k자를 갖고 있어야 합니다.(1M 자가 좋습니다) +''' + +from __future__ import print_function +from keras.callbacks import LambdaCallback +from keras.models import Sequential +from keras.layers import Dense +from keras.layers import LSTM +from keras.optimizers import RMSprop +from keras.utils.data_utils import get_file +import numpy as np +import random +import sys +import io + +path = get_file( + 'nietzsche.txt', + origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt') +with io.open(path, encoding='utf-8') as f: + text = f.read().lower() +print('corpus length:', len(text)) + +chars = sorted(list(set(text))) +print('total chars:', len(chars)) +char_indices = dict((c, i) for i, c in enumerate(chars)) +indices_char = dict((i, c) for i, c in enumerate(chars)) + +# cut the text in semi-redundant sequences of maxlen characters +# 텍스트를 maxlen만큼의 문자들로 잘라냅니다.(?) +maxlen = 40 +step = 3 +sentences = [] +next_chars = [] +for i in range(0, len(text) - maxlen, step): + sentences.append(text[i: i + maxlen]) + next_chars.append(text[i + maxlen]) +print('nb sequences:', len(sentences)) + +print('Vectorization...') +x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool) +y = np.zeros((len(sentences), len(chars)), dtype=np.bool) +for i, sentence in enumerate(sentences): + for t, char in enumerate(sentence): + x[i, t, char_indices[char]] = 1 + y[i, char_indices[next_chars[i]]] = 1 + + +# 단일 LSTM 모델을 생성합니다. +print('Build model...') +model = Sequential() +model.add(LSTM(128, input_shape=(maxlen, len(chars)))) +model.add(Dense(len(chars), activation='softmax')) + +optimizer = RMSprop(lr=0.01) +model.compile(loss='categorical_crossentropy', optimizer=optimizer) + + +def sample(preds, temperature=1.0): + # 확률 배열에서 index를 뽑아주는 함수 + preds = np.asarray(preds).astype('float64') + preds = np.log(preds) / temperature + exp_preds = np.exp(preds) + preds = exp_preds / np.sum(exp_preds) + probas = np.random.multinomial(1, preds, 1) + return np.argmax(probas) + + +def on_epoch_end(epoch, _): + # 각 에폭의 마지막 단계에서 호출되는 함수입니다. + # 생성된 텍스트를 출력합니다. 
+ print() + print('----- Generating text after Epoch: %d' % epoch) + + start_index = random.randint(0, len(text) - maxlen - 1) + for diversity in [0.2, 0.5, 1.0, 1.2]: + print('----- diversity:', diversity) + + generated = '' + sentence = text[start_index: start_index + maxlen] + generated += sentence + print('----- Generating with seed: "' + sentence + '"') + sys.stdout.write(generated) + + for i in range(400): + x_pred = np.zeros((1, maxlen, len(chars))) + for t, char in enumerate(sentence): + x_pred[0, t, char_indices[char]] = 1. + + preds = model.predict(x_pred, verbose=0)[0] + next_index = sample(preds, diversity) + next_char = indices_char[next_index] + + generated += next_char + sentence = sentence[1:] + next_char + + sys.stdout.write(next_char) + sys.stdout.flush() + print() + +print_callback = LambdaCallback(on_epoch_end=on_epoch_end) + +model.fit(x, y, + batch_size=128, + epochs=60, + callbacks=[print_callback]) \ No newline at end of file From 97dfcbe05c575b3ee0b43081425081410e4c191e Mon Sep 17 00:00:00 2001 From: mike2ox Date: Sat, 22 Sep 2018 14:09:14 +0900 Subject: [PATCH 05/49] =?UTF-8?q?#25=20:=20=EC=9B=90=EB=AC=B8=20=EB=B3=B5?= =?UTF-8?q?=EC=82=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../conv_filter_visualization.py | 139 +++++++ .../neural_doodle.py | 377 ++++++++++++++++++ .../variational_autoencoder.py | 207 ++++++++++ .../variational_autoencoder_deconv.py | 230 +++++++++++ 4 files changed, 953 insertions(+) create mode 100644 25_Keras_examples_(3)_Generative_models_examples/conv_filter_visualization.py create mode 100644 25_Keras_examples_(3)_Generative_models_examples/neural_doodle.py create mode 100644 25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder.py create mode 100644 25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py diff --git a/25_Keras_examples_(3)_Generative_models_examples/conv_filter_visualization.py b/25_Keras_examples_(3)_Generative_models_examples/conv_filter_visualization.py new file mode 100644 index 0000000..da60c20 --- /dev/null +++ b/25_Keras_examples_(3)_Generative_models_examples/conv_filter_visualization.py @@ -0,0 +1,139 @@ +'''Visualization of the filters of VGG16, via gradient ascent in input space. + +This script can run on CPU in a few minutes. + +Results example: http://i.imgur.com/4nj4KjN.jpg +''' +from __future__ import print_function + +import numpy as np +import time +from keras.preprocessing.image import save_img +from keras.applications import vgg16 +from keras import backend as K + +# dimensions of the generated pictures for each filter. +img_width = 128 +img_height = 128 + +# the name of the layer we want to visualize +# (see model definition at keras/applications/vgg16.py) +layer_name = 'block5_conv1' + +# util function to convert a tensor into a valid image + + +def deprocess_image(x): + # normalize tensor: center on 0., ensure std is 0.1 + x -= x.mean() + x /= (x.std() + K.epsilon()) + x *= 0.1 + + # clip to [0, 1] + x += 0.5 + x = np.clip(x, 0, 1) + + # convert to RGB array + x *= 255 + if K.image_data_format() == 'channels_first': + x = x.transpose((1, 2, 0)) + x = np.clip(x, 0, 255).astype('uint8') + return x + + +# build the VGG16 network with ImageNet weights +model = vgg16.VGG16(weights='imagenet', include_top=False) +print('Model loaded.') + +model.summary() + +# this is the placeholder for the input images +input_img = model.input + +# get the symbolic outputs of each "key" layer (we gave them unique names). 
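+# (Added note) model.layers[1:] skips the InputLayer, so layer_dict is keyed by the
+# VGG16 layer names ('block1_conv1', ..., 'block5_conv3', 'block5_pool');
+# layer_name above must match one of these exactly or the lookup below fails.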
+layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]]) + + +def normalize(x): + # utility function to normalize a tensor by its L2 norm + return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon()) + + +kept_filters = [] +for filter_index in range(200): + # we only scan through the first 200 filters, + # but there are actually 512 of them + print('Processing filter %d' % filter_index) + start_time = time.time() + + # we build a loss function that maximizes the activation + # of the nth filter of the layer considered + layer_output = layer_dict[layer_name].output + if K.image_data_format() == 'channels_first': + loss = K.mean(layer_output[:, filter_index, :, :]) + else: + loss = K.mean(layer_output[:, :, :, filter_index]) + + # we compute the gradient of the input picture wrt this loss + grads = K.gradients(loss, input_img)[0] + + # normalization trick: we normalize the gradient + grads = normalize(grads) + + # this function returns the loss and grads given the input picture + iterate = K.function([input_img], [loss, grads]) + + # step size for gradient ascent + step = 1. + + # we start from a gray image with some random noise + if K.image_data_format() == 'channels_first': + input_img_data = np.random.random((1, 3, img_width, img_height)) + else: + input_img_data = np.random.random((1, img_width, img_height, 3)) + input_img_data = (input_img_data - 0.5) * 20 + 128 + + # we run gradient ascent for 20 steps + for i in range(20): + loss_value, grads_value = iterate([input_img_data]) + input_img_data += grads_value * step + + print('Current loss value:', loss_value) + if loss_value <= 0.: + # some filters get stuck to 0, we can skip them + break + + # decode the resulting input image + if loss_value > 0: + img = deprocess_image(input_img_data[0]) + kept_filters.append((img, loss_value)) + end_time = time.time() + print('Filter %d processed in %ds' % (filter_index, end_time - start_time)) + +# we will stich the best 64 filters on a 8 x 8 grid. +n = 8 + +# the filters that have the highest loss are assumed to be better-looking. +# we will only keep the top 64 filters. 
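+# (Added note) Each kept_filters entry is an (image, loss_value) tuple, so the sort
+# key x[1] orders filters by their final activation loss; if fewer than
+# n * n = 64 filters passed the loss_value > 0 check above, the grid-filling loop
+# below would run out of entries.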
+kept_filters.sort(key=lambda x: x[1], reverse=True) +kept_filters = kept_filters[:n * n] + +# build a black picture with enough space for +# our 8 x 8 filters of size 128 x 128, with a 5px margin in between +margin = 5 +width = n * img_width + (n - 1) * margin +height = n * img_height + (n - 1) * margin +stitched_filters = np.zeros((width, height, 3)) + +# fill the picture with our saved filters +for i in range(n): + for j in range(n): + img, loss = kept_filters[i * n + j] + width_margin = (img_width + margin) * i + height_margin = (img_height + margin) * j + stitched_filters[ + width_margin: width_margin + img_width, + height_margin: height_margin + img_height, :] = img + +# save the result to disk +save_img('stitched_filters_%dx%d.png' % (n, n), stitched_filters) \ No newline at end of file diff --git a/25_Keras_examples_(3)_Generative_models_examples/neural_doodle.py b/25_Keras_examples_(3)_Generative_models_examples/neural_doodle.py new file mode 100644 index 0000000..ad1f5db --- /dev/null +++ b/25_Keras_examples_(3)_Generative_models_examples/neural_doodle.py @@ -0,0 +1,377 @@ +'''Neural doodle with Keras + +# Script Usage + +## Arguments +``` +--nlabels: # of regions (colors) in mask images +--style-image: image to learn style from +--style-mask: semantic labels for style image +--target-mask: semantic labels for target image (your doodle) +--content-image: optional image to learn content from +--target-image-prefix: path prefix for generated target images +``` + +## Example 1: doodle using a style image, style mask +and target mask. +``` +python neural_doodle.py --nlabels 4 --style-image Monet/style.png \ +--style-mask Monet/style_mask.png --target-mask Monet/target_mask.png \ +--target-image-prefix generated/monet +``` + +## Example 2: doodle using a style image, style mask, +target mask and an optional content image. 
+``` +python neural_doodle.py --nlabels 4 --style-image Renoir/style.png \ +--style-mask Renoir/style_mask.png --target-mask Renoir/target_mask.png \ +--content-image Renoir/creek.jpg \ +--target-image-prefix generated/renoir +``` + +# References + +- [Dmitry Ulyanov's blog on fast-neural-doodle] + (http://dmitryulyanov.github.io/feed-forward-neural-doodle/) +- [Torch code for fast-neural-doodle] + (https://github.com/DmitryUlyanov/fast-neural-doodle) +- [Torch code for online-neural-doodle] + (https://github.com/DmitryUlyanov/online-neural-doodle) +- [Paper Texture Networks: Feed-forward Synthesis of Textures and Stylized Images] + (http://arxiv.org/abs/1603.03417) +- [Discussion on parameter tuning] + (https://github.com/keras-team/keras/issues/3705) + +# Resources + +Example images can be downloaded from +https://github.com/DmitryUlyanov/fast-neural-doodle/tree/master/data +''' +from __future__ import print_function +import time +import argparse +import numpy as np +from scipy.optimize import fmin_l_bfgs_b + +from keras import backend as K +from keras.layers import Input, AveragePooling2D +from keras.models import Model +from keras.preprocessing.image import load_img, save_img, img_to_array +from keras.applications import vgg19 + +# Command line arguments +parser = argparse.ArgumentParser(description='Keras neural doodle example') +parser.add_argument('--nlabels', type=int, + help='number of semantic labels' + ' (regions in differnet colors)' + ' in style_mask/target_mask') +parser.add_argument('--style-image', type=str, + help='path to image to learn style from') +parser.add_argument('--style-mask', type=str, + help='path to semantic mask of style image') +parser.add_argument('--target-mask', type=str, + help='path to semantic mask of target image') +parser.add_argument('--content-image', type=str, default=None, + help='path to optional content image') +parser.add_argument('--target-image-prefix', type=str, + help='path prefix for generated results') +args = parser.parse_args() + +style_img_path = args.style_image +style_mask_path = args.style_mask +target_mask_path = args.target_mask +content_img_path = args.content_image +target_img_prefix = args.target_image_prefix +use_content_img = content_img_path is not None + +num_labels = args.nlabels +num_colors = 3 # RGB +# determine image sizes based on target_mask +ref_img = img_to_array(load_img(target_mask_path)) +img_nrows, img_ncols = ref_img.shape[:2] + +total_variation_weight = 50. +style_weight = 1. 
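+# (Added note) the content term is disabled outright (weight 0) when no
+# --content-image is given, so the doodle is then driven by style and TV loss only.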
+content_weight = 0.1 if use_content_img else 0 + +content_feature_layers = ['block5_conv2'] +# To get better generation qualities, use more conv layers for style features +style_feature_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', + 'block4_conv1', 'block5_conv1'] + + +# helper functions for reading/processing images +def preprocess_image(image_path): + img = load_img(image_path, target_size=(img_nrows, img_ncols)) + img = img_to_array(img) + img = np.expand_dims(img, axis=0) + img = vgg19.preprocess_input(img) + return img + + +def deprocess_image(x): + if K.image_data_format() == 'channels_first': + x = x.reshape((3, img_nrows, img_ncols)) + x = x.transpose((1, 2, 0)) + else: + x = x.reshape((img_nrows, img_ncols, 3)) + # Remove zero-center by mean pixel + x[:, :, 0] += 103.939 + x[:, :, 1] += 116.779 + x[:, :, 2] += 123.68 + # 'BGR'->'RGB' + x = x[:, :, ::-1] + x = np.clip(x, 0, 255).astype('uint8') + return x + + +def kmeans(xs, k): + assert xs.ndim == 2 + try: + from sklearn.cluster import k_means + _, labels, _ = k_means(xs.astype('float64'), k) + except ImportError: + from scipy.cluster.vq import kmeans2 + _, labels = kmeans2(xs, k, missing='raise') + return labels + + +def load_mask_labels(): + '''Load both target and style masks. + A mask image (nr x nc) with m labels/colors will be loaded + as a 4D boolean tensor: + (1, m, nr, nc) for 'channels_first' or (1, nr, nc, m) for 'channels_last' + ''' + target_mask_img = load_img(target_mask_path, + target_size=(img_nrows, img_ncols)) + target_mask_img = img_to_array(target_mask_img) + style_mask_img = load_img(style_mask_path, + target_size=(img_nrows, img_ncols)) + style_mask_img = img_to_array(style_mask_img) + if K.image_data_format() == 'channels_first': + mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T, + target_mask_img.reshape((3, -1)).T]) + else: + mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)), + target_mask_img.reshape((-1, 3))]) + + labels = kmeans(mask_vecs, num_labels) + style_mask_label = labels[:img_nrows * + img_ncols].reshape((img_nrows, img_ncols)) + target_mask_label = labels[img_nrows * + img_ncols:].reshape((img_nrows, img_ncols)) + + stack_axis = 0 if K.image_data_format() == 'channels_first' else -1 + style_mask = np.stack([style_mask_label == r for r in range(num_labels)], + axis=stack_axis) + target_mask = np.stack([target_mask_label == r for r in range(num_labels)], + axis=stack_axis) + + return (np.expand_dims(style_mask, axis=0), + np.expand_dims(target_mask, axis=0)) + + +# Create tensor variables for images +if K.image_data_format() == 'channels_first': + shape = (1, num_colors, img_nrows, img_ncols) +else: + shape = (1, img_nrows, img_ncols, num_colors) + +style_image = K.variable(preprocess_image(style_img_path)) +target_image = K.placeholder(shape=shape) +if use_content_img: + content_image = K.variable(preprocess_image(content_img_path)) +else: + content_image = K.zeros(shape=shape) + +images = K.concatenate([style_image, target_image, content_image], axis=0) + +# Create tensor variables for masks +raw_style_mask, raw_target_mask = load_mask_labels() +style_mask = K.variable(raw_style_mask.astype('float32')) +target_mask = K.variable(raw_target_mask.astype('float32')) +masks = K.concatenate([style_mask, target_mask], axis=0) + +# index constants for images and tasks variables +STYLE, TARGET, CONTENT = 0, 1, 2 + +# Build image model, mask model and use layer outputs as features +# image model as VGG19 +image_model = vgg19.VGG19(include_top=False, input_tensor=images) + +# 
mask model as a series of pooling +mask_input = Input(tensor=masks, shape=(None, None, None), name='mask_input') +x = mask_input +for layer in image_model.layers[1:]: + name = 'mask_%s' % layer.name + if 'conv' in layer.name: + x = AveragePooling2D((3, 3), padding='same', strides=( + 1, 1), name=name)(x) + elif 'pool' in layer.name: + x = AveragePooling2D((2, 2), name=name)(x) +mask_model = Model(mask_input, x) + +# Collect features from image_model and task_model +image_features = {} +mask_features = {} +for img_layer, mask_layer in zip(image_model.layers, mask_model.layers): + if 'conv' in img_layer.name: + assert 'mask_' + img_layer.name == mask_layer.name + layer_name = img_layer.name + img_feat, mask_feat = img_layer.output, mask_layer.output + image_features[layer_name] = img_feat + mask_features[layer_name] = mask_feat + + +# Define loss functions +def gram_matrix(x): + assert K.ndim(x) == 3 + features = K.batch_flatten(x) + gram = K.dot(features, K.transpose(features)) + return gram + + +def region_style_loss(style_image, target_image, style_mask, target_mask): + '''Calculate style loss between style_image and target_image, + for one common region specified by their (boolean) masks + ''' + assert 3 == K.ndim(style_image) == K.ndim(target_image) + assert 2 == K.ndim(style_mask) == K.ndim(target_mask) + if K.image_data_format() == 'channels_first': + masked_style = style_image * style_mask + masked_target = target_image * target_mask + num_channels = K.shape(style_image)[0] + else: + masked_style = K.permute_dimensions( + style_image, (2, 0, 1)) * style_mask + masked_target = K.permute_dimensions( + target_image, (2, 0, 1)) * target_mask + num_channels = K.shape(style_image)[-1] + num_channels = K.cast(num_channels, dtype='float32') + s = gram_matrix(masked_style) / K.mean(style_mask) / num_channels + c = gram_matrix(masked_target) / K.mean(target_mask) / num_channels + return K.mean(K.square(s - c)) + + +def style_loss(style_image, target_image, style_masks, target_masks): + '''Calculate style loss between style_image and target_image, + in all regions. + ''' + assert 3 == K.ndim(style_image) == K.ndim(target_image) + assert 3 == K.ndim(style_masks) == K.ndim(target_masks) + loss = K.variable(0) + for i in range(num_labels): + if K.image_data_format() == 'channels_first': + style_mask = style_masks[i, :, :] + target_mask = target_masks[i, :, :] + else: + style_mask = style_masks[:, :, i] + target_mask = target_masks[:, :, i] + loss += region_style_loss(style_image, + target_image, style_mask, target_mask) + return loss + + +def content_loss(content_image, target_image): + return K.sum(K.square(target_image - content_image)) + + +def total_variation_loss(x): + assert 4 == K.ndim(x) + if K.image_data_format() == 'channels_first': + a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - + x[:, :, 1:, :img_ncols - 1]) + b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - + x[:, :, :img_nrows - 1, 1:]) + else: + a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - + x[:, 1:, :img_ncols - 1, :]) + b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - + x[:, :img_nrows - 1, 1:, :]) + return K.sum(K.pow(a + b, 1.25)) + + +# Overall loss is the weighted sum of content_loss, style_loss and tv_loss +# Each individual loss uses features from image/mask models. 
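+# (Added sketch of what the block below computes)
+#   loss = content_weight * content_loss(content_feat, target_feat)
+#        + (style_weight / len(style_feature_layers)) * sum of per-layer region style losses
+#        + total_variation_weight * total_variation_loss(target_image)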
+loss = K.variable(0) +for layer in content_feature_layers: + content_feat = image_features[layer][CONTENT, :, :, :] + target_feat = image_features[layer][TARGET, :, :, :] + loss += content_weight * content_loss(content_feat, target_feat) + +for layer in style_feature_layers: + style_feat = image_features[layer][STYLE, :, :, :] + target_feat = image_features[layer][TARGET, :, :, :] + style_masks = mask_features[layer][STYLE, :, :, :] + target_masks = mask_features[layer][TARGET, :, :, :] + sl = style_loss(style_feat, target_feat, style_masks, target_masks) + loss += (style_weight / len(style_feature_layers)) * sl + +loss += total_variation_weight * total_variation_loss(target_image) +loss_grads = K.gradients(loss, target_image) + +# Evaluator class for computing efficiency +outputs = [loss] +if isinstance(loss_grads, (list, tuple)): + outputs += loss_grads +else: + outputs.append(loss_grads) + +f_outputs = K.function([target_image], outputs) + + +def eval_loss_and_grads(x): + if K.image_data_format() == 'channels_first': + x = x.reshape((1, 3, img_nrows, img_ncols)) + else: + x = x.reshape((1, img_nrows, img_ncols, 3)) + outs = f_outputs([x]) + loss_value = outs[0] + if len(outs[1:]) == 1: + grad_values = outs[1].flatten().astype('float64') + else: + grad_values = np.array(outs[1:]).flatten().astype('float64') + return loss_value, grad_values + + +class Evaluator(object): + + def __init__(self): + self.loss_value = None + self.grads_values = None + + def loss(self, x): + assert self.loss_value is None + loss_value, grad_values = eval_loss_and_grads(x) + self.loss_value = loss_value + self.grad_values = grad_values + return self.loss_value + + def grads(self, x): + assert self.loss_value is not None + grad_values = np.copy(self.grad_values) + self.loss_value = None + self.grad_values = None + return grad_values + +evaluator = Evaluator() + +# Generate images by iterative optimization +if K.image_data_format() == 'channels_first': + x = np.random.uniform(0, 255, (1, 3, img_nrows, img_ncols)) - 128. +else: + x = np.random.uniform(0, 255, (1, img_nrows, img_ncols, 3)) - 128. + +for i in range(50): + print('Start of iteration', i) + start_time = time.time() + x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), + fprime=evaluator.grads, maxfun=20) + print('Current loss value:', min_val) + # save current generated image + img = deprocess_image(x.copy()) + fname = target_img_prefix + '_at_iteration_%d.png' % i + save_img(fname, img) + end_time = time.time() + print('Image saved as', fname) + print('Iteration %d completed in %ds' % (i, end_time - start_time)) \ No newline at end of file diff --git a/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder.py b/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder.py new file mode 100644 index 0000000..060cf71 --- /dev/null +++ b/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder.py @@ -0,0 +1,207 @@ +'''Example of VAE on MNIST dataset using MLP + +The VAE has a modular design. The encoder, decoder and VAE +are 3 models that share weights. After training the VAE model, +the encoder can be used to generate latent vectors. +The decoder can be used to generate MNIST digits by sampling the +latent vector from a Gaussian distribution with mean=0 and std=1. + +# Reference + +[1] Kingma, Diederik P., and Max Welling. +"Auto-encoding variational bayes." 
+https://arxiv.org/abs/1312.6114 +''' + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from keras.layers import Lambda, Input, Dense +from keras.models import Model +from keras.datasets import mnist +from keras.losses import mse, binary_crossentropy +from keras.utils import plot_model +from keras import backend as K + +import numpy as np +import matplotlib.pyplot as plt +import argparse +import os + + +# reparameterization trick +# instead of sampling from Q(z|X), sample eps = N(0,I) +# z = z_mean + sqrt(var)*eps +def sampling(args): + """Reparameterization trick by sampling fr an isotropic unit Gaussian. + + # Arguments: + args (tensor): mean and log of variance of Q(z|X) + + # Returns: + z (tensor): sampled latent vector + """ + + z_mean, z_log_var = args + batch = K.shape(z_mean)[0] + dim = K.int_shape(z_mean)[1] + # by default, random_normal has mean=0 and std=1.0 + epsilon = K.random_normal(shape=(batch, dim)) + return z_mean + K.exp(0.5 * z_log_var) * epsilon + + +def plot_results(models, + data, + batch_size=128, + model_name="vae_mnist"): + """Plots labels and MNIST digits as function of 2-dim latent vector + + # Arguments: + models (tuple): encoder and decoder models + data (tuple): test data and label + batch_size (int): prediction batch size + model_name (string): which model is using this function + """ + + encoder, decoder = models + x_test, y_test = data + os.makedirs(model_name, exist_ok=True) + + filename = os.path.join(model_name, "vae_mean.png") + # display a 2D plot of the digit classes in the latent space + z_mean, _, _ = encoder.predict(x_test, + batch_size=batch_size) + plt.figure(figsize=(12, 10)) + plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test) + plt.colorbar() + plt.xlabel("z[0]") + plt.ylabel("z[1]") + plt.savefig(filename) + plt.show() + + filename = os.path.join(model_name, "digits_over_latent.png") + # display a 30x30 2D manifold of digits + n = 30 + digit_size = 28 + figure = np.zeros((digit_size * n, digit_size * n)) + # linearly spaced coordinates corresponding to the 2D plot + # of digit classes in the latent space + grid_x = np.linspace(-4, 4, n) + grid_y = np.linspace(-4, 4, n)[::-1] + + for i, yi in enumerate(grid_y): + for j, xi in enumerate(grid_x): + z_sample = np.array([[xi, yi]]) + x_decoded = decoder.predict(z_sample) + digit = x_decoded[0].reshape(digit_size, digit_size) + figure[i * digit_size: (i + 1) * digit_size, + j * digit_size: (j + 1) * digit_size] = digit + + plt.figure(figsize=(10, 10)) + start_range = digit_size // 2 + end_range = n * digit_size + start_range + 1 + pixel_range = np.arange(start_range, end_range, digit_size) + sample_range_x = np.round(grid_x, 1) + sample_range_y = np.round(grid_y, 1) + plt.xticks(pixel_range, sample_range_x) + plt.yticks(pixel_range, sample_range_y) + plt.xlabel("z[0]") + plt.ylabel("z[1]") + plt.imshow(figure, cmap='Greys_r') + plt.savefig(filename) + plt.show() + + +# MNIST dataset +(x_train, y_train), (x_test, y_test) = mnist.load_data() + +image_size = x_train.shape[1] +original_dim = image_size * image_size +x_train = np.reshape(x_train, [-1, original_dim]) +x_test = np.reshape(x_test, [-1, original_dim]) +x_train = x_train.astype('float32') / 255 +x_test = x_test.astype('float32') / 255 + +# network parameters +input_shape = (original_dim, ) +intermediate_dim = 512 +batch_size = 128 +latent_dim = 2 +epochs = 50 + +# VAE model = encoder + decoder +# build encoder model +inputs = Input(shape=input_shape, name='encoder_input') +x 
= Dense(intermediate_dim, activation='relu')(inputs) +z_mean = Dense(latent_dim, name='z_mean')(x) +z_log_var = Dense(latent_dim, name='z_log_var')(x) + +# use reparameterization trick to push the sampling out as input +# note that "output_shape" isn't necessary with the TensorFlow backend +z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var]) + +# instantiate encoder model +encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder') +encoder.summary() +plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True) + +# build decoder model +latent_inputs = Input(shape=(latent_dim,), name='z_sampling') +x = Dense(intermediate_dim, activation='relu')(latent_inputs) +outputs = Dense(original_dim, activation='sigmoid')(x) + +# instantiate decoder model +decoder = Model(latent_inputs, outputs, name='decoder') +decoder.summary() +plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True) + +# instantiate VAE model +outputs = decoder(encoder(inputs)[2]) +vae = Model(inputs, outputs, name='vae_mlp') + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + help_ = "Load h5 model trained weights" + parser.add_argument("-w", "--weights", help=help_) + help_ = "Use mse loss instead of binary cross entropy (default)" + parser.add_argument("-m", + "--mse", + help=help_, action='store_true') + args = parser.parse_args() + models = (encoder, decoder) + data = (x_test, y_test) + + # VAE loss = mse_loss or xent_loss + kl_loss + if args.mse: + reconstruction_loss = mse(inputs, outputs) + else: + reconstruction_loss = binary_crossentropy(inputs, + outputs) + + reconstruction_loss *= original_dim + kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var) + kl_loss = K.sum(kl_loss, axis=-1) + kl_loss *= -0.5 + vae_loss = K.mean(reconstruction_loss + kl_loss) + vae.add_loss(vae_loss) + vae.compile(optimizer='adam') + vae.summary() + plot_model(vae, + to_file='vae_mlp.png', + show_shapes=True) + + if args.weights: + vae.load_weights(args.weights) + else: + # train the autoencoder + vae.fit(x_train, + epochs=epochs, + batch_size=batch_size, + validation_data=(x_test, None)) + vae.save_weights('vae_mlp_mnist.h5') + + plot_results(models, + data, + batch_size=batch_size, + model_name="vae_mlp") \ No newline at end of file diff --git a/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py b/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py new file mode 100644 index 0000000..7cd83c4 --- /dev/null +++ b/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py @@ -0,0 +1,230 @@ +'''Example of VAE on MNIST dataset using CNN + +The VAE has a modular design. The encoder, decoder and VAE +are 3 models that share weights. After training the VAE model, +the encoder can be used to generate latent vectors. +The decoder can be used to generate MNIST digits by sampling the +latent vector from a Gaussian distribution with mean=0 and std=1. + +# Reference + +[1] Kingma, Diederik P., and Max Welling. +"Auto-encoding variational bayes." 
+https://arxiv.org/abs/1312.6114 +''' + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from keras.layers import Dense, Input +from keras.layers import Conv2D, Flatten, Lambda +from keras.layers import Reshape, Conv2DTranspose +from keras.models import Model +from keras.datasets import mnist +from keras.losses import mse, binary_crossentropy +from keras.utils import plot_model +from keras import backend as K + +import numpy as np +import matplotlib.pyplot as plt +import argparse +import os + + +# reparameterization trick +# instead of sampling from Q(z|X), sample eps = N(0,I) +# then z = z_mean + sqrt(var)*eps +def sampling(args): + """Reparameterization trick by sampling fr an isotropic unit Gaussian. + + # Arguments: + args (tensor): mean and log of variance of Q(z|X) + + # Returns: + z (tensor): sampled latent vector + """ + + z_mean, z_log_var = args + batch = K.shape(z_mean)[0] + dim = K.int_shape(z_mean)[1] + # by default, random_normal has mean=0 and std=1.0 + epsilon = K.random_normal(shape=(batch, dim)) + return z_mean + K.exp(0.5 * z_log_var) * epsilon + + +def plot_results(models, + data, + batch_size=128, + model_name="vae_mnist"): + """Plots labels and MNIST digits as function of 2-dim latent vector + + # Arguments: + models (tuple): encoder and decoder models + data (tuple): test data and label + batch_size (int): prediction batch size + model_name (string): which model is using this function + """ + + encoder, decoder = models + x_test, y_test = data + os.makedirs(model_name, exist_ok=True) + + filename = os.path.join(model_name, "vae_mean.png") + # display a 2D plot of the digit classes in the latent space + z_mean, _, _ = encoder.predict(x_test, + batch_size=batch_size) + plt.figure(figsize=(12, 10)) + plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test) + plt.colorbar() + plt.xlabel("z[0]") + plt.ylabel("z[1]") + plt.savefig(filename) + plt.show() + + filename = os.path.join(model_name, "digits_over_latent.png") + # display a 30x30 2D manifold of digits + n = 30 + digit_size = 28 + figure = np.zeros((digit_size * n, digit_size * n)) + # linearly spaced coordinates corresponding to the 2D plot + # of digit classes in the latent space + grid_x = np.linspace(-4, 4, n) + grid_y = np.linspace(-4, 4, n)[::-1] + + for i, yi in enumerate(grid_y): + for j, xi in enumerate(grid_x): + z_sample = np.array([[xi, yi]]) + x_decoded = decoder.predict(z_sample) + digit = x_decoded[0].reshape(digit_size, digit_size) + figure[i * digit_size: (i + 1) * digit_size, + j * digit_size: (j + 1) * digit_size] = digit + + plt.figure(figsize=(10, 10)) + start_range = digit_size // 2 + end_range = n * digit_size + start_range + 1 + pixel_range = np.arange(start_range, end_range, digit_size) + sample_range_x = np.round(grid_x, 1) + sample_range_y = np.round(grid_y, 1) + plt.xticks(pixel_range, sample_range_x) + plt.yticks(pixel_range, sample_range_y) + plt.xlabel("z[0]") + plt.ylabel("z[1]") + plt.imshow(figure, cmap='Greys_r') + plt.savefig(filename) + plt.show() + + +# MNIST dataset +(x_train, y_train), (x_test, y_test) = mnist.load_data() + +image_size = x_train.shape[1] +x_train = np.reshape(x_train, [-1, image_size, image_size, 1]) +x_test = np.reshape(x_test, [-1, image_size, image_size, 1]) +x_train = x_train.astype('float32') / 255 +x_test = x_test.astype('float32') / 255 + +# network parameters +input_shape = (image_size, image_size, 1) +batch_size = 128 +kernel_size = 3 +filters = 16 +latent_dim = 2 +epochs = 30 + +# VAE model = 
encoder + decoder +# build encoder model +inputs = Input(shape=input_shape, name='encoder_input') +x = inputs +for i in range(2): + filters *= 2 + x = Conv2D(filters=filters, + kernel_size=kernel_size, + activation='relu', + strides=2, + padding='same')(x) + +# shape info needed to build decoder model +shape = K.int_shape(x) + +# generate latent vector Q(z|X) +x = Flatten()(x) +x = Dense(16, activation='relu')(x) +z_mean = Dense(latent_dim, name='z_mean')(x) +z_log_var = Dense(latent_dim, name='z_log_var')(x) + +# use reparameterization trick to push the sampling out as input +# note that "output_shape" isn't necessary with the TensorFlow backend +z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var]) + +# instantiate encoder model +encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder') +encoder.summary() +plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True) + +# build decoder model +latent_inputs = Input(shape=(latent_dim,), name='z_sampling') +x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs) +x = Reshape((shape[1], shape[2], shape[3]))(x) + +for i in range(2): + x = Conv2DTranspose(filters=filters, + kernel_size=kernel_size, + activation='relu', + strides=2, + padding='same')(x) + filters //= 2 + +outputs = Conv2DTranspose(filters=1, + kernel_size=kernel_size, + activation='sigmoid', + padding='same', + name='decoder_output')(x) + +# instantiate decoder model +decoder = Model(latent_inputs, outputs, name='decoder') +decoder.summary() +plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True) + +# instantiate VAE model +outputs = decoder(encoder(inputs)[2]) +vae = Model(inputs, outputs, name='vae') + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + help_ = "Load h5 model trained weights" + parser.add_argument("-w", "--weights", help=help_) + help_ = "Use mse loss instead of binary cross entropy (default)" + parser.add_argument("-m", "--mse", help=help_, action='store_true') + args = parser.parse_args() + models = (encoder, decoder) + data = (x_test, y_test) + + # VAE loss = mse_loss or xent_loss + kl_loss + if args.mse: + reconstruction_loss = mse(K.flatten(inputs), K.flatten(outputs)) + else: + reconstruction_loss = binary_crossentropy(K.flatten(inputs), + K.flatten(outputs)) + + reconstruction_loss *= image_size * image_size + kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var) + kl_loss = K.sum(kl_loss, axis=-1) + kl_loss *= -0.5 + vae_loss = K.mean(reconstruction_loss + kl_loss) + vae.add_loss(vae_loss) + vae.compile(optimizer='rmsprop') + vae.summary() + plot_model(vae, to_file='vae_cnn.png', show_shapes=True) + + if args.weights: + vae.load_weights(args.weights) + else: + # train the autoencoder + vae.fit(x_train, + epochs=epochs, + batch_size=batch_size, + validation_data=(x_test, None)) + vae.save_weights('vae_cnn_mnist.h5') + + plot_results(models, data, batch_size=batch_size, model_name="vae_cnn") \ No newline at end of file From a778ddf06b509c32f844c3608fd53cc701f60fd3 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Sun, 23 Sep 2018 00:21:28 +0900 Subject: [PATCH 06/49] =?UTF-8?q?#25=20:=20=EC=B5=9C=EC=B4=88=20=EB=B2=88?= =?UTF-8?q?=EC=97=AD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../conv_filter_visualization.py | 41 ++++++++++++++----- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/25_Keras_examples_(3)_Generative_models_examples/conv_filter_visualization.py 
b/25_Keras_examples_(3)_Generative_models_examples/conv_filter_visualization.py index da60c20..99fefe2 100644 --- a/25_Keras_examples_(3)_Generative_models_examples/conv_filter_visualization.py +++ b/25_Keras_examples_(3)_Generative_models_examples/conv_filter_visualization.py @@ -1,8 +1,8 @@ '''Visualization of the filters of VGG16, via gradient ascent in input space. +VGG16의 필터들의 시각화, -This script can run on CPU in a few minutes. - -Results example: http://i.imgur.com/4nj4KjN.jpg +이 스크립트는 cpu기반에서 몇분안으로 작동할 수 있습니다. +결과 예시: http://i.imgur.com/4nj4KjN.jpg ''' from __future__ import print_function @@ -13,18 +13,20 @@ from keras import backend as K # dimensions of the generated pictures for each filter. +# 각각의 필터에 맞게 생성되는 이미지의 차원들 img_width = 128 img_height = 128 -# the name of the layer we want to visualize -# (see model definition at keras/applications/vgg16.py) +# 우리가 시각화 하고 싶은 레이어의 이름을 지정합니다. +# 모델 정의는 keras/applications/vgg16.py에서 보실 수 있으십니다. layer_name = 'block5_conv1' # util function to convert a tensor into a valid image - +# 텐서(tensor)가 검증된 이미지로 변환되도록 해주는 활용 함수입니다. def deprocess_image(x): # normalize tensor: center on 0., ensure std is 0.1 + # 텐서를 일반화 해줍니다. : 중심을 0.으로, 편차를 0.1로 x -= x.mean() x /= (x.std() + K.epsilon()) x *= 0.1 @@ -33,7 +35,7 @@ def deprocess_image(x): x += 0.5 x = np.clip(x, 0, 1) - # convert to RGB array + # RGB 배열로 변환해 줍니다. x *= 255 if K.image_data_format() == 'channels_first': x = x.transpose((1, 2, 0)) @@ -41,33 +43,39 @@ def deprocess_image(x): return x -# build the VGG16 network with ImageNet weights +# ImageNet 가중치를 가지고 있는 VGG16 네트워크를 설계합니다. model = vgg16.VGG16(weights='imagenet', include_top=False) print('Model loaded.') model.summary() # this is the placeholder for the input images +# 입력 이미지를 위한 placeholder입니다. input_img = model.input # get the symbolic outputs of each "key" layer (we gave them unique names). +# 각 핵심 레이어(layer)의 중요 결과물들을 가져옵니다.(그것들에게 고유 이름을 줄 수 있습니다.) layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]]) def normalize(x): # utility function to normalize a tensor by its L2 norm + # L2 norm을 통해 텐서를 일반화 해주는 활용 함수 입니다. return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon()) kept_filters = [] for filter_index in range(200): # we only scan through the first 200 filters, + # 우리는 오직 200개의 필터들로 훑어 봅니다. # but there are actually 512 of them + # 하지만 실제론 512개 입니다. print('Processing filter %d' % filter_index) start_time = time.time() # we build a loss function that maximizes the activation # of the nth filter of the layer considered + # 관심있는 계층의 n번째 필터의 활성화를 최대로 해주는 손실 함수를 설계합니다. layer_output = layer_dict[layer_name].output if K.image_data_format() == 'channels_first': loss = K.mean(layer_output[:, filter_index, :, :]) @@ -75,18 +83,23 @@ def normalize(x): loss = K.mean(layer_output[:, :, :, filter_index]) # we compute the gradient of the input picture wrt this loss + # TODO : 앞의 손실 함수를 통해 입력 이미지의 기울기(*)를 계산합니다. grads = K.gradients(loss, input_img)[0] # normalization trick: we normalize the gradient + # 기울기를 우리는 일반화 해줍니다. grads = normalize(grads) # this function returns the loss and grads given the input picture + # 해당 함수는 주어진 입력 이미지로 손실과 기울기를 반환합니다. iterate = K.function([input_img], [loss, grads]) - # step size for gradient ascent + # step size for gradient ascent(*) + # TODO : 기울기 감소에서 step의 크기 step = 1. # we start from a gray image with some random noise + # 우리는 임의의 노이즈가 있는 회색 이미디부터 시작합니다. 
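+    # (the constants below give pixel values centred on 128 with a spread of roughly
+    #  +/-10, i.e. an almost flat gray image plus a small amount of random noise)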
if K.image_data_format() == 'channels_first': input_img_data = np.random.random((1, 3, img_width, img_height)) else: @@ -94,6 +107,7 @@ def normalize(x): input_img_data = (input_img_data - 0.5) * 20 + 128 # we run gradient ascent for 20 steps + # 20 스텝동안 우리는 기울기 감소를 진행합니다. for i in range(20): loss_value, grads_value = iterate([input_img_data]) input_img_data += grads_value * step @@ -101,9 +115,11 @@ def normalize(x): print('Current loss value:', loss_value) if loss_value <= 0.: # some filters get stuck to 0, we can skip them + # TODO : 약간의 필터들은 0으로 break # decode the resulting input image + # 입력이미지의 결과를 디코딩(decode)한다. if loss_value > 0: img = deprocess_image(input_img_data[0]) kept_filters.append((img, loss_value)) @@ -111,21 +127,26 @@ def normalize(x): print('Filter %d processed in %ds' % (filter_index, end_time - start_time)) # we will stich the best 64 filters on a 8 x 8 grid. +# TODO : 우리는 8 X 8 격자인 64개의 최적의 필터들을 stich 할수 있습니다. n = 8 # the filters that have the highest loss are assumed to be better-looking. +# 가장 높은 손실을 가지고 있는 필터들은 더 잘 보이는 거로 추정됩니다. # we will only keep the top 64 filters. +# 우리는 64개의 필터들을 유지할 수 있습니다. kept_filters.sort(key=lambda x: x[1], reverse=True) kept_filters = kept_filters[:n * n] # build a black picture with enough space for # our 8 x 8 filters of size 128 x 128, with a 5px margin in between +# TODO : 2번째 줄이 이해가 안됨. margin = 5 width = n * img_width + (n - 1) * margin height = n * img_height + (n - 1) * margin stitched_filters = np.zeros((width, height, 3)) # fill the picture with our saved filters +# 저장된 필터들과 같이 이미지들을 채웁니다. for i in range(n): for j in range(n): img, loss = kept_filters[i * n + j] @@ -135,5 +156,5 @@ def normalize(x): width_margin: width_margin + img_width, height_margin: height_margin + img_height, :] = img -# save the result to disk +# 결과를 로컬에 저장합니다. save_img('stitched_filters_%dx%d.png' % (n, n), stitched_filters) \ No newline at end of file From c8646dddec94b57446bcc5787388d054604968dc Mon Sep 17 00:00:00 2001 From: mike2ox Date: Sun, 23 Sep 2018 04:09:43 +0900 Subject: [PATCH 07/49] =?UTF-8?q?#25=20:=20=EC=B4=88=EA=B8=B0=20=EB=B2=88?= =?UTF-8?q?=EC=97=AD=20ing.=202/3=20=EC=A0=95=EB=8F=84=20=EB=82=A8?= =?UTF-8?q?=EC=9D=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../variational_autoencoder_deconv.py | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py b/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py index 7cd83c4..e1eb718 100644 --- a/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py +++ b/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py @@ -1,4 +1,11 @@ '''Example of VAE on MNIST dataset using CNN +CNN을 활용하여 MNIST 데이터 세트기반의 VAE 예제 + +VAE는 모듈러(modular) 설계를 가집니다. 인코더(encoder), 디코더(decoder) 그리고 VAE(다양한 오토인코더(auto encoder))는 +가중치를 공유하는 3가지 모델입니다. VAE 모델을 학습시키고, +인코더는 latent 벡터들을 생성하는데 사용됩니다. +디코더는 평균이 0이고 편차가 1인 가우시안 분포로 부터 latent vector를 샘플링함으로서 +MNIST digits를 생성하는데 사용됩니다. The VAE has a modular design. The encoder, decoder and VAE are 3 models that share weights. After training the VAE model, @@ -6,7 +13,8 @@ The decoder can be used to generate MNIST digits by sampling the latent vector from a Gaussian distribution with mean=0 and std=1. -# Reference + +# 참고 문헌 [1] Kingma, Diederik P., and Max Welling. "Auto-encoding variational bayes." 
@@ -33,15 +41,22 @@ # reparameterization trick +# 재(re)-매개변수화 기법 +# Q(z|X)에서 샘플링하지 않고 N(0,I)에서 샘플링(eps)합니다. +# 그때 Z는 z_mean + sqrt(var)*eps # instead of sampling from Q(z|X), sample eps = N(0,I) # then z = z_mean + sqrt(var)*eps def sampling(args): """Reparameterization trick by sampling fr an isotropic unit Gaussian. - + # TODO : 재(re)-매개변수화 기법 : isotropic 단일 가우시안에서 샘플링합니다. # Arguments: + # 설명: + args (텐서) : Q(z|X)의 분산의 log취한 값과 평균 args (tensor): mean and log of variance of Q(z|X) # Returns: + # 반환: + z (텐서) : 샘플링된 latent 벡터 z (tensor): sampled latent vector """ @@ -49,6 +64,7 @@ def sampling(args): batch = K.shape(z_mean)[0] dim = K.int_shape(z_mean)[1] # by default, random_normal has mean=0 and std=1.0 + # 고정적으로, random_normal은 평균 0, 표준편차 1.0을 가진다. epsilon = K.random_normal(shape=(batch, dim)) return z_mean + K.exp(0.5 * z_log_var) * epsilon From 3387651dbe9edd65240a2f9976ae495299a2ed69 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Tue, 25 Sep 2018 01:41:31 +0900 Subject: [PATCH 08/49] =?UTF-8?q?#25=20:=20=20=EB=A7=A4=EB=81=84=EB=9F=AC?= =?UTF-8?q?=EC=9A=B4=20=ED=95=B4=EC=84=9D=EC=9D=84=20=EC=9C=84=ED=95=B4=20?= =?UTF-8?q?VAE=20=EC=9D=B4=EB=A1=A0=20review=EA=B0=80=20=ED=95=84=EC=9A=94?= =?UTF-8?q?.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../variational_autoencoder_deconv.py | 30 +++++++++++++------ 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py b/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py index e1eb718..9fe4cfe 100644 --- a/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py +++ b/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py @@ -74,12 +74,13 @@ def plot_results(models, batch_size=128, model_name="vae_mnist"): """Plots labels and MNIST digits as function of 2-dim latent vector + 2차원 축소 벡터의 함수로써 레이블과 MNIST 자릿수들을 표기한다. - # Arguments: - models (tuple): encoder and decoder models - data (tuple): test data and label - batch_size (int): prediction batch size - model_name (string): which model is using this function + # 입력값들: + models (튜플(tuple)): 인코더, 디코더 모델들 + data (튜플): 테스트 데이터와 레이블 + batch_size (정수): 예측 배치 사이즈 + model_name (문자열): 이 함수에서 쓸 모델 이름 """ encoder, decoder = models @@ -88,6 +89,7 @@ def plot_results(models, filename = os.path.join(model_name, "vae_mean.png") # display a 2D plot of the digit classes in the latent space + # 잠재 공간에 있는 자릿수 클래스들의 2차원 표시를 보여준다. z_mean, _, _ = encoder.predict(x_test, batch_size=batch_size) plt.figure(figsize=(12, 10)) @@ -100,11 +102,13 @@ def plot_results(models, filename = os.path.join(model_name, "digits_over_latent.png") # display a 30x30 2D manifold of digits + # 30x30 크기의 2차원 자릿수들을 보여준다. n = 30 digit_size = 28 figure = np.zeros((digit_size * n, digit_size * n)) # linearly spaced coordinates corresponding to the 2D plot # of digit classes in the latent space + # TODO : 잠재 공간에 grid_x = np.linspace(-4, 4, n) grid_y = np.linspace(-4, 4, n)[::-1] @@ -132,6 +136,7 @@ def plot_results(models, # MNIST dataset +# MNIST 데이터세트를 불러옵니다. 
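+# mnist.load_data() returns uint8 arrays (x_train: 60000x28x28, x_test: 10000x28x28);
+# they are reshaped to (N, 28, 28, 1) and scaled to float32 values in [0, 1] below.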
(x_train, y_train), (x_test, y_test) = mnist.load_data() image_size = x_train.shape[1] @@ -141,6 +146,7 @@ def plot_results(models, x_test = x_test.astype('float32') / 255 # network parameters +# 네트워크를 구성하는 매개변수들 input_shape = (image_size, image_size, 1) batch_size = 128 kernel_size = 3 @@ -148,8 +154,8 @@ def plot_results(models, latent_dim = 2 epochs = 30 -# VAE model = encoder + decoder -# build encoder model +# VAE 모델은 인코더와 디코더를 결합한 구조입니다. +# 아래는 인코더 모델을 구성하는 과정입니다. inputs = Input(shape=input_shape, name='encoder_input') x = inputs for i in range(2): @@ -161,9 +167,11 @@ def plot_results(models, padding='same')(x) # shape info needed to build decoder model +# 디코더 모델을 구성하는데 필요한 데이터(x) 형태를 추출합니다. shape = K.int_shape(x) # generate latent vector Q(z|X) +# 잠재 벡터 Q(z|X)를 생성합니다. x = Flatten()(x) x = Dense(16, activation='relu')(x) z_mean = Dense(latent_dim, name='z_mean')(x) @@ -174,11 +182,12 @@ def plot_results(models, z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var]) # instantiate encoder model +# 인코더 모델을 인스턴스(instantiate)합니다. encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder') encoder.summary() plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True) -# build decoder model +# 디코더 모델을 생성합니다. latent_inputs = Input(shape=(latent_dim,), name='z_sampling') x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs) x = Reshape((shape[1], shape[2], shape[3]))(x) @@ -198,11 +207,13 @@ def plot_results(models, name='decoder_output')(x) # instantiate decoder model +# 디코더 모델을 인스턴스 합니다. decoder = Model(latent_inputs, outputs, name='decoder') decoder.summary() plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True) # instantiate VAE model +# VAE 모델을 인스턴스 합니다. outputs = decoder(encoder(inputs)[2]) vae = Model(inputs, outputs, name='vae') @@ -217,6 +228,7 @@ def plot_results(models, data = (x_test, y_test) # VAE loss = mse_loss or xent_loss + kl_loss + # VAE 손실은 mes 손실 혹은 xent 손실과 kl 손실 간의 합과 같습니다. if args.mse: reconstruction_loss = mse(K.flatten(inputs), K.flatten(outputs)) else: @@ -236,7 +248,7 @@ def plot_results(models, if args.weights: vae.load_weights(args.weights) else: - # train the autoencoder + # 오토인코더를 학습시킵니다. 
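+        # vae_loss was registered via add_loss(), so fit() takes no explicit targets here;
+        # validation_data=(x_test, None) passes None as the validation target for the same reason.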
vae.fit(x_train, epochs=epochs, batch_size=batch_size, From eb43a6f87dbdb8d4e00b5e3f240951d8b872c14d Mon Sep 17 00:00:00 2001 From: mike2ox Date: Fri, 28 Sep 2018 14:41:35 +0900 Subject: [PATCH 09/49] =?UTF-8?q?#25=20:=20=EC=95=9E=EC=AA=BD=20=EC=84=A4?= =?UTF-8?q?=EB=AA=85=20=EB=B2=88=EC=97=AD=EC=99=84=EB=A3=8C.=20=EB=B3=B8?= =?UTF-8?q?=EB=AC=B8=20=EC=8B=9C=EC=9E=91=ED=95=B4=EC=95=BC=ED=95=A8.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../neural_doodle.py | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/25_Keras_examples_(3)_Generative_models_examples/neural_doodle.py b/25_Keras_examples_(3)_Generative_models_examples/neural_doodle.py index ad1f5db..428c924 100644 --- a/25_Keras_examples_(3)_Generative_models_examples/neural_doodle.py +++ b/25_Keras_examples_(3)_Generative_models_examples/neural_doodle.py @@ -1,17 +1,25 @@ -'''Neural doodle with Keras +'''케라스로 뉴럴 낙서하기 -# Script Usage +# 스크립트 사용법 ## Arguments +## 입력값들 ``` --nlabels: # of regions (colors) in mask images ---style-image: image to learn style from + 마스크(mask) 이미지에서 색상의 수(구분을 위한) +--style-image: image to learn style from + --style-mask: semantic labels for style image + 스타일 이미지의 의미있는 레이블 --target-mask: semantic labels for target image (your doodle) + 당신의 낙서인 목표 이미지의 의미있는 레이블 --content-image: optional image to learn content from + 학습시키고 싶은 컨텐츠가 있는 이미지(선택사항) --target-image-prefix: path prefix for generated target images + 생성된 목표 이미지를 저장하기 위한 경로 설정 ``` - +## 예제 1: 스타일용 이미지, 마스크 그리고 목표 이미지의 마스크를 사용한 낙서 +## 아래는 예제 1을 실행하기 위한 실행 코드입니다. ## Example 1: doodle using a style image, style mask and target mask. ``` @@ -22,6 +30,9 @@ ## Example 2: doodle using a style image, style mask, target mask and an optional content image. +## 예제 2: 스타일용 이미지, 마스크, 목표 이미지의 마스크 +## 그리고 선택사항인 컨텐츠 이미지를 사용한 낙서 +## 아래는 예제 2을 실행하기 위한 실행 코드입니다. 
``` python neural_doodle.py --nlabels 4 --style-image Renoir/style.png \ --style-mask Renoir/style_mask.png --target-mask Renoir/target_mask.png \ @@ -29,7 +40,7 @@ --target-image-prefix generated/renoir ``` -# References +# 참고사항 - [Dmitry Ulyanov's blog on fast-neural-doodle] (http://dmitryulyanov.github.io/feed-forward-neural-doodle/) From 7c2975b0681448f9705504373eb29e1832488b90 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Mon, 1 Oct 2018 22:59:49 +0900 Subject: [PATCH 10/49] =?UTF-8?q?#25=20:=20code=20=EC=8B=A4=ED=96=89?= =?UTF-8?q?=EC=9D=84=20=EC=9C=84=ED=95=B4=20=ED=8F=B4=EB=8D=94=20=EC=9D=B4?= =?UTF-8?q?=EB=A6=84=20=EB=B3=80=EA=B2=BD=EA=B2=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../conv_filter_visualization.py | 41 ++++----------- .../deep_dream.py | 0 .../lstm_text_generation.py | 0 .../neural_doodle.py | 21 ++------ .../neural_style_transfer.py | 0 .../variational_autoencoder.py | 0 .../variational_autoencoder_deconv.py | 50 ++++--------------- 7 files changed, 26 insertions(+), 86 deletions(-) rename {25_Keras_examples_(3)_Generative_models_examples => 25_Keras_examples_3_Generative_models_examples}/conv_filter_visualization.py (65%) rename {25_Keras_examples_(3)_Generative_models_examples => 25_Keras_examples_3_Generative_models_examples}/deep_dream.py (100%) rename {25_Keras_examples_(3)_Generative_models_examples => 25_Keras_examples_3_Generative_models_examples}/lstm_text_generation.py (100%) rename {25_Keras_examples_(3)_Generative_models_examples => 25_Keras_examples_3_Generative_models_examples}/neural_doodle.py (93%) rename {25_Keras_examples_(3)_Generative_models_examples => 25_Keras_examples_3_Generative_models_examples}/neural_style_transfer.py (100%) rename {25_Keras_examples_(3)_Generative_models_examples => 25_Keras_examples_3_Generative_models_examples}/variational_autoencoder.py (100%) rename {25_Keras_examples_(3)_Generative_models_examples => 25_Keras_examples_3_Generative_models_examples}/variational_autoencoder_deconv.py (76%) diff --git a/25_Keras_examples_(3)_Generative_models_examples/conv_filter_visualization.py b/25_Keras_examples_3_Generative_models_examples/conv_filter_visualization.py similarity index 65% rename from 25_Keras_examples_(3)_Generative_models_examples/conv_filter_visualization.py rename to 25_Keras_examples_3_Generative_models_examples/conv_filter_visualization.py index 99fefe2..da60c20 100644 --- a/25_Keras_examples_(3)_Generative_models_examples/conv_filter_visualization.py +++ b/25_Keras_examples_3_Generative_models_examples/conv_filter_visualization.py @@ -1,8 +1,8 @@ '''Visualization of the filters of VGG16, via gradient ascent in input space. -VGG16의 필터들의 시각화, -이 스크립트는 cpu기반에서 몇분안으로 작동할 수 있습니다. -결과 예시: http://i.imgur.com/4nj4KjN.jpg +This script can run on CPU in a few minutes. + +Results example: http://i.imgur.com/4nj4KjN.jpg ''' from __future__ import print_function @@ -13,20 +13,18 @@ from keras import backend as K # dimensions of the generated pictures for each filter. -# 각각의 필터에 맞게 생성되는 이미지의 차원들 img_width = 128 img_height = 128 -# 우리가 시각화 하고 싶은 레이어의 이름을 지정합니다. -# 모델 정의는 keras/applications/vgg16.py에서 보실 수 있으십니다. +# the name of the layer we want to visualize +# (see model definition at keras/applications/vgg16.py) layer_name = 'block5_conv1' # util function to convert a tensor into a valid image -# 텐서(tensor)가 검증된 이미지로 변환되도록 해주는 활용 함수입니다. + def deprocess_image(x): # normalize tensor: center on 0., ensure std is 0.1 - # 텐서를 일반화 해줍니다. 
: 중심을 0.으로, 편차를 0.1로 x -= x.mean() x /= (x.std() + K.epsilon()) x *= 0.1 @@ -35,7 +33,7 @@ def deprocess_image(x): x += 0.5 x = np.clip(x, 0, 1) - # RGB 배열로 변환해 줍니다. + # convert to RGB array x *= 255 if K.image_data_format() == 'channels_first': x = x.transpose((1, 2, 0)) @@ -43,39 +41,33 @@ def deprocess_image(x): return x -# ImageNet 가중치를 가지고 있는 VGG16 네트워크를 설계합니다. +# build the VGG16 network with ImageNet weights model = vgg16.VGG16(weights='imagenet', include_top=False) print('Model loaded.') model.summary() # this is the placeholder for the input images -# 입력 이미지를 위한 placeholder입니다. input_img = model.input # get the symbolic outputs of each "key" layer (we gave them unique names). -# 각 핵심 레이어(layer)의 중요 결과물들을 가져옵니다.(그것들에게 고유 이름을 줄 수 있습니다.) layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]]) def normalize(x): # utility function to normalize a tensor by its L2 norm - # L2 norm을 통해 텐서를 일반화 해주는 활용 함수 입니다. return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon()) kept_filters = [] for filter_index in range(200): # we only scan through the first 200 filters, - # 우리는 오직 200개의 필터들로 훑어 봅니다. # but there are actually 512 of them - # 하지만 실제론 512개 입니다. print('Processing filter %d' % filter_index) start_time = time.time() # we build a loss function that maximizes the activation # of the nth filter of the layer considered - # 관심있는 계층의 n번째 필터의 활성화를 최대로 해주는 손실 함수를 설계합니다. layer_output = layer_dict[layer_name].output if K.image_data_format() == 'channels_first': loss = K.mean(layer_output[:, filter_index, :, :]) @@ -83,23 +75,18 @@ def normalize(x): loss = K.mean(layer_output[:, :, :, filter_index]) # we compute the gradient of the input picture wrt this loss - # TODO : 앞의 손실 함수를 통해 입력 이미지의 기울기(*)를 계산합니다. grads = K.gradients(loss, input_img)[0] # normalization trick: we normalize the gradient - # 기울기를 우리는 일반화 해줍니다. grads = normalize(grads) # this function returns the loss and grads given the input picture - # 해당 함수는 주어진 입력 이미지로 손실과 기울기를 반환합니다. iterate = K.function([input_img], [loss, grads]) - # step size for gradient ascent(*) - # TODO : 기울기 감소에서 step의 크기 + # step size for gradient ascent step = 1. # we start from a gray image with some random noise - # 우리는 임의의 노이즈가 있는 회색 이미디부터 시작합니다. if K.image_data_format() == 'channels_first': input_img_data = np.random.random((1, 3, img_width, img_height)) else: @@ -107,7 +94,6 @@ def normalize(x): input_img_data = (input_img_data - 0.5) * 20 + 128 # we run gradient ascent for 20 steps - # 20 스텝동안 우리는 기울기 감소를 진행합니다. for i in range(20): loss_value, grads_value = iterate([input_img_data]) input_img_data += grads_value * step @@ -115,11 +101,9 @@ def normalize(x): print('Current loss value:', loss_value) if loss_value <= 0.: # some filters get stuck to 0, we can skip them - # TODO : 약간의 필터들은 0으로 break # decode the resulting input image - # 입력이미지의 결과를 디코딩(decode)한다. if loss_value > 0: img = deprocess_image(input_img_data[0]) kept_filters.append((img, loss_value)) @@ -127,26 +111,21 @@ def normalize(x): print('Filter %d processed in %ds' % (filter_index, end_time - start_time)) # we will stich the best 64 filters on a 8 x 8 grid. -# TODO : 우리는 8 X 8 격자인 64개의 최적의 필터들을 stich 할수 있습니다. n = 8 # the filters that have the highest loss are assumed to be better-looking. -# 가장 높은 손실을 가지고 있는 필터들은 더 잘 보이는 거로 추정됩니다. # we will only keep the top 64 filters. -# 우리는 64개의 필터들을 유지할 수 있습니다. 
kept_filters.sort(key=lambda x: x[1], reverse=True) kept_filters = kept_filters[:n * n] # build a black picture with enough space for # our 8 x 8 filters of size 128 x 128, with a 5px margin in between -# TODO : 2번째 줄이 이해가 안됨. margin = 5 width = n * img_width + (n - 1) * margin height = n * img_height + (n - 1) * margin stitched_filters = np.zeros((width, height, 3)) # fill the picture with our saved filters -# 저장된 필터들과 같이 이미지들을 채웁니다. for i in range(n): for j in range(n): img, loss = kept_filters[i * n + j] @@ -156,5 +135,5 @@ def normalize(x): width_margin: width_margin + img_width, height_margin: height_margin + img_height, :] = img -# 결과를 로컬에 저장합니다. +# save the result to disk save_img('stitched_filters_%dx%d.png' % (n, n), stitched_filters) \ No newline at end of file diff --git a/25_Keras_examples_(3)_Generative_models_examples/deep_dream.py b/25_Keras_examples_3_Generative_models_examples/deep_dream.py similarity index 100% rename from 25_Keras_examples_(3)_Generative_models_examples/deep_dream.py rename to 25_Keras_examples_3_Generative_models_examples/deep_dream.py diff --git a/25_Keras_examples_(3)_Generative_models_examples/lstm_text_generation.py b/25_Keras_examples_3_Generative_models_examples/lstm_text_generation.py similarity index 100% rename from 25_Keras_examples_(3)_Generative_models_examples/lstm_text_generation.py rename to 25_Keras_examples_3_Generative_models_examples/lstm_text_generation.py diff --git a/25_Keras_examples_(3)_Generative_models_examples/neural_doodle.py b/25_Keras_examples_3_Generative_models_examples/neural_doodle.py similarity index 93% rename from 25_Keras_examples_(3)_Generative_models_examples/neural_doodle.py rename to 25_Keras_examples_3_Generative_models_examples/neural_doodle.py index 428c924..ad1f5db 100644 --- a/25_Keras_examples_(3)_Generative_models_examples/neural_doodle.py +++ b/25_Keras_examples_3_Generative_models_examples/neural_doodle.py @@ -1,25 +1,17 @@ -'''케라스로 뉴럴 낙서하기 +'''Neural doodle with Keras -# 스크립트 사용법 +# Script Usage ## Arguments -## 입력값들 ``` --nlabels: # of regions (colors) in mask images - 마스크(mask) 이미지에서 색상의 수(구분을 위한) ---style-image: image to learn style from - +--style-image: image to learn style from --style-mask: semantic labels for style image - 스타일 이미지의 의미있는 레이블 --target-mask: semantic labels for target image (your doodle) - 당신의 낙서인 목표 이미지의 의미있는 레이블 --content-image: optional image to learn content from - 학습시키고 싶은 컨텐츠가 있는 이미지(선택사항) --target-image-prefix: path prefix for generated target images - 생성된 목표 이미지를 저장하기 위한 경로 설정 ``` -## 예제 1: 스타일용 이미지, 마스크 그리고 목표 이미지의 마스크를 사용한 낙서 -## 아래는 예제 1을 실행하기 위한 실행 코드입니다. + ## Example 1: doodle using a style image, style mask and target mask. ``` @@ -30,9 +22,6 @@ ## Example 2: doodle using a style image, style mask, target mask and an optional content image. -## 예제 2: 스타일용 이미지, 마스크, 목표 이미지의 마스크 -## 그리고 선택사항인 컨텐츠 이미지를 사용한 낙서 -## 아래는 예제 2을 실행하기 위한 실행 코드입니다. 
``` python neural_doodle.py --nlabels 4 --style-image Renoir/style.png \ --style-mask Renoir/style_mask.png --target-mask Renoir/target_mask.png \ @@ -40,7 +29,7 @@ --target-image-prefix generated/renoir ``` -# 참고사항 +# References - [Dmitry Ulyanov's blog on fast-neural-doodle] (http://dmitryulyanov.github.io/feed-forward-neural-doodle/) diff --git a/25_Keras_examples_(3)_Generative_models_examples/neural_style_transfer.py b/25_Keras_examples_3_Generative_models_examples/neural_style_transfer.py similarity index 100% rename from 25_Keras_examples_(3)_Generative_models_examples/neural_style_transfer.py rename to 25_Keras_examples_3_Generative_models_examples/neural_style_transfer.py diff --git a/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder.py b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder.py similarity index 100% rename from 25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder.py rename to 25_Keras_examples_3_Generative_models_examples/variational_autoencoder.py diff --git a/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py similarity index 76% rename from 25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py rename to 25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py index 9fe4cfe..7cd83c4 100644 --- a/25_Keras_examples_(3)_Generative_models_examples/variational_autoencoder_deconv.py +++ b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py @@ -1,11 +1,4 @@ '''Example of VAE on MNIST dataset using CNN -CNN을 활용하여 MNIST 데이터 세트기반의 VAE 예제 - -VAE는 모듈러(modular) 설계를 가집니다. 인코더(encoder), 디코더(decoder) 그리고 VAE(다양한 오토인코더(auto encoder))는 -가중치를 공유하는 3가지 모델입니다. VAE 모델을 학습시키고, -인코더는 latent 벡터들을 생성하는데 사용됩니다. -디코더는 평균이 0이고 편차가 1인 가우시안 분포로 부터 latent vector를 샘플링함으로서 -MNIST digits를 생성하는데 사용됩니다. The VAE has a modular design. The encoder, decoder and VAE are 3 models that share weights. After training the VAE model, @@ -13,8 +6,7 @@ The decoder can be used to generate MNIST digits by sampling the latent vector from a Gaussian distribution with mean=0 and std=1. - -# 참고 문헌 +# Reference [1] Kingma, Diederik P., and Max Welling. "Auto-encoding variational bayes." @@ -41,22 +33,15 @@ # reparameterization trick -# 재(re)-매개변수화 기법 -# Q(z|X)에서 샘플링하지 않고 N(0,I)에서 샘플링(eps)합니다. -# 그때 Z는 z_mean + sqrt(var)*eps # instead of sampling from Q(z|X), sample eps = N(0,I) # then z = z_mean + sqrt(var)*eps def sampling(args): """Reparameterization trick by sampling fr an isotropic unit Gaussian. - # TODO : 재(re)-매개변수화 기법 : isotropic 단일 가우시안에서 샘플링합니다. + # Arguments: - # 설명: - args (텐서) : Q(z|X)의 분산의 log취한 값과 평균 args (tensor): mean and log of variance of Q(z|X) # Returns: - # 반환: - z (텐서) : 샘플링된 latent 벡터 z (tensor): sampled latent vector """ @@ -64,7 +49,6 @@ def sampling(args): batch = K.shape(z_mean)[0] dim = K.int_shape(z_mean)[1] # by default, random_normal has mean=0 and std=1.0 - # 고정적으로, random_normal은 평균 0, 표준편차 1.0을 가진다. epsilon = K.random_normal(shape=(batch, dim)) return z_mean + K.exp(0.5 * z_log_var) * epsilon @@ -74,13 +58,12 @@ def plot_results(models, batch_size=128, model_name="vae_mnist"): """Plots labels and MNIST digits as function of 2-dim latent vector - 2차원 축소 벡터의 함수로써 레이블과 MNIST 자릿수들을 표기한다. 
- # 입력값들: - models (튜플(tuple)): 인코더, 디코더 모델들 - data (튜플): 테스트 데이터와 레이블 - batch_size (정수): 예측 배치 사이즈 - model_name (문자열): 이 함수에서 쓸 모델 이름 + # Arguments: + models (tuple): encoder and decoder models + data (tuple): test data and label + batch_size (int): prediction batch size + model_name (string): which model is using this function """ encoder, decoder = models @@ -89,7 +72,6 @@ def plot_results(models, filename = os.path.join(model_name, "vae_mean.png") # display a 2D plot of the digit classes in the latent space - # 잠재 공간에 있는 자릿수 클래스들의 2차원 표시를 보여준다. z_mean, _, _ = encoder.predict(x_test, batch_size=batch_size) plt.figure(figsize=(12, 10)) @@ -102,13 +84,11 @@ def plot_results(models, filename = os.path.join(model_name, "digits_over_latent.png") # display a 30x30 2D manifold of digits - # 30x30 크기의 2차원 자릿수들을 보여준다. n = 30 digit_size = 28 figure = np.zeros((digit_size * n, digit_size * n)) # linearly spaced coordinates corresponding to the 2D plot # of digit classes in the latent space - # TODO : 잠재 공간에 grid_x = np.linspace(-4, 4, n) grid_y = np.linspace(-4, 4, n)[::-1] @@ -136,7 +116,6 @@ def plot_results(models, # MNIST dataset -# MNIST 데이터세트를 불러옵니다. (x_train, y_train), (x_test, y_test) = mnist.load_data() image_size = x_train.shape[1] @@ -146,7 +125,6 @@ def plot_results(models, x_test = x_test.astype('float32') / 255 # network parameters -# 네트워크를 구성하는 매개변수들 input_shape = (image_size, image_size, 1) batch_size = 128 kernel_size = 3 @@ -154,8 +132,8 @@ def plot_results(models, latent_dim = 2 epochs = 30 -# VAE 모델은 인코더와 디코더를 결합한 구조입니다. -# 아래는 인코더 모델을 구성하는 과정입니다. +# VAE model = encoder + decoder +# build encoder model inputs = Input(shape=input_shape, name='encoder_input') x = inputs for i in range(2): @@ -167,11 +145,9 @@ def plot_results(models, padding='same')(x) # shape info needed to build decoder model -# 디코더 모델을 구성하는데 필요한 데이터(x) 형태를 추출합니다. shape = K.int_shape(x) # generate latent vector Q(z|X) -# 잠재 벡터 Q(z|X)를 생성합니다. x = Flatten()(x) x = Dense(16, activation='relu')(x) z_mean = Dense(latent_dim, name='z_mean')(x) @@ -182,12 +158,11 @@ def plot_results(models, z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var]) # instantiate encoder model -# 인코더 모델을 인스턴스(instantiate)합니다. encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder') encoder.summary() plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True) -# 디코더 모델을 생성합니다. +# build decoder model latent_inputs = Input(shape=(latent_dim,), name='z_sampling') x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs) x = Reshape((shape[1], shape[2], shape[3]))(x) @@ -207,13 +182,11 @@ def plot_results(models, name='decoder_output')(x) # instantiate decoder model -# 디코더 모델을 인스턴스 합니다. decoder = Model(latent_inputs, outputs, name='decoder') decoder.summary() plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True) # instantiate VAE model -# VAE 모델을 인스턴스 합니다. outputs = decoder(encoder(inputs)[2]) vae = Model(inputs, outputs, name='vae') @@ -228,7 +201,6 @@ def plot_results(models, data = (x_test, y_test) # VAE loss = mse_loss or xent_loss + kl_loss - # VAE 손실은 mes 손실 혹은 xent 손실과 kl 손실 간의 합과 같습니다. if args.mse: reconstruction_loss = mse(K.flatten(inputs), K.flatten(outputs)) else: @@ -248,7 +220,7 @@ def plot_results(models, if args.weights: vae.load_weights(args.weights) else: - # 오토인코더를 학습시킵니다. 
+ # train the autoencoder vae.fit(x_train, epochs=epochs, batch_size=batch_size, From b4e452f8b24a859be178ee453b2271d11d74c8fa Mon Sep 17 00:00:00 2001 From: mike2ox Date: Thu, 18 Oct 2018 15:14:08 +0900 Subject: [PATCH 11/49] =?UTF-8?q?#25=20:=20=EC=84=A4=EB=AA=85=20=EC=99=84?= =?UTF-8?q?=EB=A3=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../25_keras_examples_3.md | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md diff --git a/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md b/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md new file mode 100644 index 0000000..34e8d6a --- /dev/null +++ b/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md @@ -0,0 +1,37 @@ +## 케라스 예제 3(Keras examples) +[원문 링크](https://github.com/keras-team/keras/tree/master/examples) +> 현 디렉토리에는 케라스를 활용한 예제들 중 Generative 모델을 다루는 코드들이 있습니다. style transfer, auto encoder 등 generator 이론을 알고있는 전제하에 실제 구현에 초점을 맞춰져 있으므로 코드에 첨가된 주석 외 정보는 직접 찾아보셔야 합니다. + +* keras +* neural style transfer +* auto encoder +* lstm +* visualization + +### Generative models examples + +[lstm_text_generation.py](lstm_text_generation.py) +니체풍의 문장을 생성하기 + +[conv_filter_visualization.py](conv_filter_visualization.py) +입력공간의 기울기를 통해 VGG16 필터들을 시각화 + +[deep_dream.py](deep_dream.py) +케라스로 Deep Dream. + +[neural_doodle.py](neural_doodle.py) +신경망의 낙서. + +[neural_style_transfer.py](neural_style_transfer.py) +Neural style transfer. + +[variational_autoencoder.py](variational_autoencoder.py) +변종 Autoencoder를 만드는 방법을 보여줍니다. + +[variational_autoencoder_deconv.py](variational_autoencoder_deconv.py) +Deconvolution 레이어와 케라스를 사용해 변종 Autoencoder를 만드는 방법을 보여줍니다. + + +> 이 글은 2018 컨트리뷰톤에서 [`Contributue to Keras`](https://github.com/KerasKorea/KEKOxTutorial) 프로젝트로 진행했습니다. +> Translator : [mike2ox](https://github.com/mike2ox) (Moonhyeok Song) +> Translator Email : \ No newline at end of file From a8eb14bd24a74849de8358394f937d2b795aaf24 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Thu, 18 Oct 2018 15:48:33 +0900 Subject: [PATCH 12/49] =?UTF-8?q?#25=20:=20conv=5Ffilter=5Fvisualize=20?= =?UTF-8?q?=EB=B2=88=EC=97=AD=20=EC=99=84=EB=A3=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../conv_filter_visualization.py | 63 ++++++++++--------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/25_Keras_examples_3_Generative_models_examples/conv_filter_visualization.py b/25_Keras_examples_3_Generative_models_examples/conv_filter_visualization.py index da60c20..c84c048 100644 --- a/25_Keras_examples_3_Generative_models_examples/conv_filter_visualization.py +++ b/25_Keras_examples_3_Generative_models_examples/conv_filter_visualization.py @@ -1,8 +1,8 @@ -'''Visualization of the filters of VGG16, via gradient ascent in input space. +'''입력공간의 기울기를 통해 VGG16 필터들을 시각화 -This script can run on CPU in a few minutes. +이 글은 CPU환경에서 몇분이면 실행할 수 있습니다. -Results example: http://i.imgur.com/4nj4KjN.jpg +결과 예시 : http://i.imgur.com/4nj4KjN.jpg ''' from __future__ import print_function @@ -12,19 +12,19 @@ from keras.applications import vgg16 from keras import backend as K -# dimensions of the generated pictures for each filter. 
+# 각 필터들을 위한 생성 이미지의 차원 설정 img_width = 128 img_height = 128 -# the name of the layer we want to visualize -# (see model definition at keras/applications/vgg16.py) +# 시각화하고 싶은 레이어의 이름 설정 +# (모델에 대한 정의는 keras/applications/vgg16.py에서 볼 수 있습니다.) layer_name = 'block5_conv1' -# util function to convert a tensor into a valid image - +# 텐서(tensor)를 확인된 이미지로 변환해주는 함수 def deprocess_image(x): - # normalize tensor: center on 0., ensure std is 0.1 + + # 텐서를 정규화한다 : 중심은 0, 편차는 0.1 x -= x.mean() x /= (x.std() + K.epsilon()) x *= 0.1 @@ -33,7 +33,7 @@ def deprocess_image(x): x += 0.5 x = np.clip(x, 0, 1) - # convert to RGB array + # RGB 배열로 변환 x *= 255 if K.image_data_format() == 'channels_first': x = x.transpose((1, 2, 0)) @@ -41,91 +41,92 @@ def deprocess_image(x): return x -# build the VGG16 network with ImageNet weights +# ImageNet의 가중치를 VGG16에 적용, 설계한다. model = vgg16.VGG16(weights='imagenet', include_top=False) print('Model loaded.') model.summary() -# this is the placeholder for the input images +# 이미지를 입력받기 위한 placeholder 설정 input_img = model.input -# get the symbolic outputs of each "key" layer (we gave them unique names). +# (앞서 이름을 지정한)각 핵심 레이어의 출력들을 가져옴. layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]]) def normalize(x): - # utility function to normalize a tensor by its L2 norm + # L2 norm으로 텐서를 정규화 해주는 함수 return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon()) kept_filters = [] for filter_index in range(200): - # we only scan through the first 200 filters, - # but there are actually 512 of them + + # 실제론 512개의 필터가 있지만 처음 200개의 필터만 스캔합니다. print('Processing filter %d' % filter_index) start_time = time.time() - # we build a loss function that maximizes the activation - # of the nth filter of the layer considered + # 관심을 두고 있는 레이어의 n번째 필터의 활성화를 최대치로 하는 손실 함수를 설계합니다. layer_output = layer_dict[layer_name].output if K.image_data_format() == 'channels_first': loss = K.mean(layer_output[:, filter_index, :, :]) else: loss = K.mean(layer_output[:, :, :, filter_index]) - # we compute the gradient of the input picture wrt this loss + # 손실 함수를 통해 입력 이미지의 기울기를 계산합니다 grads = K.gradients(loss, input_img)[0] - # normalization trick: we normalize the gradient + # 정규화 기법 : 기울기를 정규화 합니다. grads = normalize(grads) - # this function returns the loss and grads given the input picture + # 입력 이미지의 손실과 기울기를 반환합니다. iterate = K.function([input_img], [loss, grads]) - # step size for gradient ascent + # 기울기 상승을 위해 스탭 크기 지정. step = 1. - # we start from a gray image with some random noise + # 몇 개의 임의의 노이즈와 같이 회색 이미지부터 시작합니다. if K.image_data_format() == 'channels_first': input_img_data = np.random.random((1, 3, img_width, img_height)) else: input_img_data = np.random.random((1, img_width, img_height, 3)) input_img_data = (input_img_data - 0.5) * 20 + 128 - # we run gradient ascent for 20 steps + # 20 스텝동안 기울기 상승을 시도합니다. for i in range(20): loss_value, grads_value = iterate([input_img_data]) input_img_data += grads_value * step print('Current loss value:', loss_value) if loss_value <= 0.: - # some filters get stuck to 0, we can skip them + # 몇가지 필터가 0을 가질 때는 넘어갑니다. break - # decode the resulting input image + # 입력 이미지의 결과물을 디코드(decode)합니다. if loss_value > 0: img = deprocess_image(input_img_data[0]) kept_filters.append((img, loss_value)) end_time = time.time() print('Filter %d processed in %ds' % (filter_index, end_time - start_time)) -# we will stich the best 64 filters on a 8 x 8 grid. +# 8 X 8 격자인 64개의 필터들을 사용할 겁니다. n = 8 -# the filters that have the highest loss are assumed to be better-looking. 
-# we will only keep the top 64 filters. +# 가장 큰 손실값을 가진 필터는 더 잘보일 것입니다. +# 상위 64개의 필터는 유지시킬 겁니다. kept_filters.sort(key=lambda x: x[1], reverse=True) kept_filters = kept_filters[:n * n] # build a black picture with enough space for # our 8 x 8 filters of size 128 x 128, with a 5px margin in between +# 128 x 128 크기의 8 x 8 필터를 저장할 수 있는 충분한 공간이 있는 검정 이미지를 만듭니다. +# 5px의 여유공간도 둬야합니다. margin = 5 width = n * img_width + (n - 1) * margin height = n * img_height + (n - 1) * margin stitched_filters = np.zeros((width, height, 3)) -# fill the picture with our saved filters +# 필터와 이미지를 저장합니다. for i in range(n): for j in range(n): img, loss = kept_filters[i * n + j] @@ -135,5 +136,5 @@ def normalize(x): width_margin: width_margin + img_width, height_margin: height_margin + img_height, :] = img -# save the result to disk +# 결과를 디스크에 저장합니다. save_img('stitched_filters_%dx%d.png' % (n, n), stitched_filters) \ No newline at end of file From 894e8a2a2e54f1cdb65720b70bc412f1321b8c07 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Thu, 18 Oct 2018 16:27:23 +0900 Subject: [PATCH 13/49] =?UTF-8?q?#25=20:=20variational=20=EC=98=A4?= =?UTF-8?q?=ED=86=A0=EC=9D=B8=EC=BD=94=EB=8D=94=20with=20deconv=201?= =?UTF-8?q?=EC=B0=A8=EB=B2=88=EC=97=AD=20=EC=99=84=EB=A3=8C,=20=EC=9D=98?= =?UTF-8?q?=EC=97=AD=EC=9D=B4=20=EB=8B=A4=EB=B6=84=ED=95=98=EB=8B=88=20?= =?UTF-8?q?=EB=A6=AC=EB=B7=B0=20=ED=95=84=EC=88=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../variational_autoencoder_deconv.py | 63 +++++++++---------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py index 7cd83c4..17b2e09 100644 --- a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py +++ b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py @@ -1,12 +1,11 @@ -'''Example of VAE on MNIST dataset using CNN +'''CNN과 MNIST를 사용한 VAE 예제 -The VAE has a modular design. The encoder, decoder and VAE -are 3 models that share weights. After training the VAE model, -the encoder can be used to generate latent vectors. -The decoder can be used to generate MNIST digits by sampling the -latent vector from a Gaussian distribution with mean=0 and std=1. +VAE는 모듈러(modular) 구조를 갖고 있습니다. 인코더(encoder), 디코더(decoder) +그리고 VAE는 가중치를 서로 공유하고 있습니다. VAE 모델을 학습한 후, +인코더는 은닉 벡터(latent vectors)를 생성하는데, 디코더는 가우시안 분포도에서 은닉 벡터를 +샘플링함으로써 MNIST 숫자를 생성하는데 사용될 수 있습니다. -# Reference +# 참고 자료 [1] Kingma, Diederik P., and Max Welling. "Auto-encoding variational bayes." @@ -32,23 +31,23 @@ import os -# reparameterization trick -# instead of sampling from Q(z|X), sample eps = N(0,I) -# then z = z_mean + sqrt(var)*eps +# 재매개변수화(reparameterization) 기법 +# Q(z|X)에서 샘플링하는 대신에 eps = N(0,I)에서 샘플링을 실행 +# 그때 z = z_mean + sqrt(var)*eps def sampling(args): - """Reparameterization trick by sampling fr an isotropic unit Gaussian. + """등방성 단일 가우시안에서 샘플을 채취하는 재매개변수화 기법 # Arguments: - args (tensor): mean and log of variance of Q(z|X) + args (tensor): Q(z|X)의 분산의 로그값과 평균값 # Returns: - z (tensor): sampled latent vector + z (tensor): 샘플링된 은닉 벡터들 """ z_mean, z_log_var = args batch = K.shape(z_mean)[0] dim = K.int_shape(z_mean)[1] - # by default, random_normal has mean=0 and std=1.0 + # 기본설정으로, random_normal는 mean=0, std=1.0로 지정되있음. 
epsilon = K.random_normal(shape=(batch, dim)) return z_mean + K.exp(0.5 * z_log_var) * epsilon @@ -57,13 +56,13 @@ def plot_results(models, data, batch_size=128, model_name="vae_mnist"): - """Plots labels and MNIST digits as function of 2-dim latent vector + """2차원 은닉 벡터의 함수로서 라벨과 MNIST 숫자를 표시 # Arguments: - models (tuple): encoder and decoder models - data (tuple): test data and label - batch_size (int): prediction batch size - model_name (string): which model is using this function + models (tuple): 인코더와 디코더 모델 + data (tuple): 테스트 데이터와 라벨 + batch_size (int): 배치 사이즈 + model_name (string): 사용하려는 모델 이름 """ encoder, decoder = models @@ -71,7 +70,7 @@ def plot_results(models, os.makedirs(model_name, exist_ok=True) filename = os.path.join(model_name, "vae_mean.png") - # display a 2D plot of the digit classes in the latent space + # 은닉공간의 숫자 클래스의 2D 이미지를 표시합니다. z_mean, _, _ = encoder.predict(x_test, batch_size=batch_size) plt.figure(figsize=(12, 10)) @@ -83,7 +82,7 @@ def plot_results(models, plt.show() filename = os.path.join(model_name, "digits_over_latent.png") - # display a 30x30 2D manifold of digits + # 30X30 2D형태의 숫자들을 표시. n = 30 digit_size = 28 figure = np.zeros((digit_size * n, digit_size * n)) @@ -124,7 +123,7 @@ def plot_results(models, x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 -# network parameters +# 신경망 매개변수들 input_shape = (image_size, image_size, 1) batch_size = 128 kernel_size = 3 @@ -133,7 +132,7 @@ def plot_results(models, epochs = 30 # VAE model = encoder + decoder -# build encoder model +# 인코더 모델 설계 inputs = Input(shape=input_shape, name='encoder_input') x = inputs for i in range(2): @@ -144,25 +143,25 @@ def plot_results(models, strides=2, padding='same')(x) -# shape info needed to build decoder model +# 디코더 모델을 설계하기 위해 입력값의 형태를 가져오기. shape = K.int_shape(x) -# generate latent vector Q(z|X) +# Q(z|X)에서 은닉 벡터 생성하기 x = Flatten()(x) x = Dense(16, activation='relu')(x) z_mean = Dense(latent_dim, name='z_mean')(x) z_log_var = Dense(latent_dim, name='z_log_var')(x) -# use reparameterization trick to push the sampling out as input -# note that "output_shape" isn't necessary with the TensorFlow backend +# 재매개변수 기법을 이용해 샘플링을 입력으로 푸쉬합니다 +# Tensorflow 백엔드에서는 "output_shape"이 필요하지 않습니다. 
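+# the Lambda layer wraps sampling() as a layer in the graph: it takes [z_mean, z_log_var]
+# and outputs a sampled latent vector z of shape (batch_size, latent_dim)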
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var]) -# instantiate encoder model +# 인코더 모델을 인스턴스화(instantiate) encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder') encoder.summary() plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True) -# build decoder model +# 디코더 모델을 설계 latent_inputs = Input(shape=(latent_dim,), name='z_sampling') x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs) x = Reshape((shape[1], shape[2], shape[3]))(x) @@ -181,12 +180,12 @@ def plot_results(models, padding='same', name='decoder_output')(x) -# instantiate decoder model +# 디코더 모델을 인스턴스화 decoder = Model(latent_inputs, outputs, name='decoder') decoder.summary() plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True) -# instantiate VAE model +# VAE 모델을 인스턴스화 outputs = decoder(encoder(inputs)[2]) vae = Model(inputs, outputs, name='vae') @@ -220,7 +219,7 @@ def plot_results(models, if args.weights: vae.load_weights(args.weights) else: - # train the autoencoder + # 오토인코더 학습 vae.fit(x_train, epochs=epochs, batch_size=batch_size, From db7a6fd45105aab7cef771dcc542be95a612284d Mon Sep 17 00:00:00 2001 From: mike2ox Date: Thu, 18 Oct 2018 16:29:51 +0900 Subject: [PATCH 14/49] #25 : refresh --- .../25_keras_examples_3.md | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md b/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md index 34e8d6a..097914b 100644 --- a/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md +++ b/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md @@ -1,37 +1,37 @@ -## 케라스 예제 3(Keras examples) -[원문 링크](https://github.com/keras-team/keras/tree/master/examples) -> 현 디렉토리에는 케라스를 활용한 예제들 중 Generative 모델을 다루는 코드들이 있습니다. style transfer, auto encoder 등 generator 이론을 알고있는 전제하에 실제 구현에 초점을 맞춰져 있으므로 코드에 첨가된 주석 외 정보는 직접 찾아보셔야 합니다. - -* keras -* neural style transfer -* auto encoder -* lstm -* visualization - -### Generative models examples - -[lstm_text_generation.py](lstm_text_generation.py) -니체풍의 문장을 생성하기 - -[conv_filter_visualization.py](conv_filter_visualization.py) -입력공간의 기울기를 통해 VGG16 필터들을 시각화 - -[deep_dream.py](deep_dream.py) -케라스로 Deep Dream. - -[neural_doodle.py](neural_doodle.py) -신경망의 낙서. - -[neural_style_transfer.py](neural_style_transfer.py) -Neural style transfer. - -[variational_autoencoder.py](variational_autoencoder.py) -변종 Autoencoder를 만드는 방법을 보여줍니다. - -[variational_autoencoder_deconv.py](variational_autoencoder_deconv.py) -Deconvolution 레이어와 케라스를 사용해 변종 Autoencoder를 만드는 방법을 보여줍니다. - - -> 이 글은 2018 컨트리뷰톤에서 [`Contributue to Keras`](https://github.com/KerasKorea/KEKOxTutorial) 프로젝트로 진행했습니다. -> Translator : [mike2ox](https://github.com/mike2ox) (Moonhyeok Song) +## 케라스 예제 3(Keras examples) +[원문 링크](https://github.com/keras-team/keras/tree/master/examples) +> 현 디렉토리에는 케라스를 활용한 예제들 중 Generative 모델을 다루는 코드들이 있습니다. style transfer, auto encoder 등 generator 이론을 알고있는 전제하에 실제 구현에 초점을 맞춰져 있으므로 코드에 첨가된 주석 외 정보는 직접 찾아보셔야 합니다. + +* keras +* neural style transfer +* auto encoder +* lstm +* visualization + +### Generative models examples + +[lstm_text_generation.py](lstm_text_generation.py) +니체풍의 문장을 생성하기 + +[conv_filter_visualization.py](conv_filter_visualization.py) +입력공간의 기울기를 통해 VGG16 필터들을 시각화 + +[deep_dream.py](deep_dream.py) +케라스로 Deep Dream. + +[neural_doodle.py](neural_doodle.py) +신경망의 낙서. 
+ +[neural_style_transfer.py](neural_style_transfer.py) +Neural style transfer. + +[variational_autoencoder.py](variational_autoencoder.py) +변종 Autoencoder를 만드는 방법을 보여줍니다. + +[variational_autoencoder_deconv.py](variational_autoencoder_deconv.py) +Deconvolution 레이어와 케라스를 사용해 변종 Autoencoder를 만드는 방법을 보여줍니다. + + +> 이 글은 2018 컨트리뷰톤에서 [`Contributue to Keras`](https://github.com/KerasKorea/KEKOxTutorial) 프로젝트로 진행했습니다. +> Translator : [mike2ox](https://github.com/mike2ox) (Moonhyeok Song) > Translator Email : \ No newline at end of file From 464f7fdc2259b6573581c851e482f2d9632af9f7 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Sun, 21 Oct 2018 03:06:03 +0900 Subject: [PATCH 15/49] =?UTF-8?q?#32=20:=20=ED=85=8C=EC=8A=A4=ED=8A=B8=20?= =?UTF-8?q?=EB=8B=A4=EC=9D=8C=EB=B6=80=ED=84=B0=20=EC=A7=84=ED=96=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...g_a_simple_keras_deep_learning_rest_api.md | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/32_building_a_simple_keras_deep_learning_rest_api.md b/32_building_a_simple_keras_deep_learning_rest_api.md index 327f151..8caafb4 100644 --- a/32_building_a_simple_keras_deep_learning_rest_api.md +++ b/32_building_a_simple_keras_deep_learning_rest_api.md @@ -152,6 +152,12 @@ def predict(): - 결과를 반복하고 그 결과들을 각각 `data["predictions"]`에 추가합니다. - JSON 형태으로 클라이언트에게 응답을 반환합니다. +만약 이미지가 아닌 데이터로 작업한다면, `request.files` 코드를 삭제하고 원본 입력 데이터를 직접 구문 분석하거나 `request.get_json()`로 입력 데이터를 python 딕셔너리/객체에 자동으로 구문 분석되도록 해야합니다. + +추가로, [참조한 튜토리얼](https://scotch.io/bar-talk/processing-incoming-request-data-in-flask)은 Flask의 요청 객체의 기본 요소를 설명하는 내용을 읽어 보세요. + +이제 서비스를 시작하겠습니다. + ```python # if this is the main thread of execution first load the model and # then start the server @@ -162,8 +168,20 @@ if __name__ == "__main__": app.run() ``` +우선 `load_model`로 디스크에서 Keras 모델을 불러옵니다. + +`load_model` 호출은 차단 작업이며 모델을 완전히 불러올 때까지 웹서비스가 시작되지 않도록 합니다. 웹 서비스를 시작하기 전에 모델을 메모리로 완전히 불러오고 인퍼런스를 할 준비가 되지 않았다면 아래와 같은 상황이 발생할 수 있습니다: + +1. POST형식으로 서버에 요청합니다. +2. 서버가 요청을 받고 데이터를 사전 처리하고 그 데이터를 모델을 통해 전달하도록 시도합니다. +3. *만약 모델을 완전히 불러오지 않았다면, 에러가 발생할 겁니다.* + +당신만의 Keras REST API를 설계할 때, 요청들을 승인하기 전에 인퍼런스를 위한 준비가 되었는지 모델이 불러와졌는지를 보장하는 논리가 들어가 있는지 확인하셔야 합니다. + #### REST API에서 Keras 모델을 불러오지 않는 방법 +예측 함수에 있는 당신의 모델을 불러오는 걸 시도할 수 있습니다. 아래 코드를 참조하세요: + ```python # ensure an image was properly uploaded to our endpoint if request.method == "POST": @@ -184,10 +202,22 @@ if request.method == "POST": results = imagenet_utils.decode_predictions(preds) data["predictions"] = [] ``` +위 코드는 새로운 요청이 들어올 때마다 모델을 불러온다는 의미를 가집니다. 이는 믿기 힘들 정도로 비효율적이고 여러분 시스템의 메모리가 부족해질 수도 있습니다. + +만약 위 코드를 실행하려고 하면 API 실행속도가 상당히 느릴겁니다.(특히 모델이 큰 경우) 이는 각 모델을 불러오는데 사용된 I/O 및 CPU 작업의 상당한 오버헤드(overhead)때문에 발생합니다. +어떻게 당신 서버의 메모리를 쉽게 압도하는지 알아보기 위해, 동시에 서버로 N개의 입력 요청이 있다고 가정해 봅시다. 이는 N개의 모델을 메모리로 불러오는 것을 의미합니다. 만약 ResNet처럼 큰 모델일 경우, 모델의 N개 사본을 RAM에 저장하면 시스템 메모리가 쉽게 소진될 수 있습니다. + +이를 해결하기 위해, 매우 구체적이고 정당한 이유가 없는 한 새로 들어오는 요청에 대해 새 모델 인스턴스를 로드하지 않도록 시도하십시오. + +**경고** : 단일 스레드인 기본 Flask 서버를 사용한다고 가정합니다. 멀티 스레드 서버에서 배포할 경우, 이 글에서 앞서 설명한 "정확한" 방법을 사용하더라도 여러 모델을 메모리에 로드하는 상황에 놓일 수 있습니다. 만약 Apache나 nginx같은 서버를 사용하려면, [이곳](https://www.pyimagesearch.com/2018/01/29/scalable-keras-deep-learning-rest-api/)에서 설명하는 대로 파이프라인을 더 확장하는게 좋습니다. #### Keras REST API를 시작하기 +Keras REST API 서비스를 시작하는건 쉽습니다. + +터미널을 열어서 아래를 실행해보세요: + ```bash $ python run_keras_server.py Using TensorFlow backend. @@ -196,14 +226,26 @@ Using TensorFlow backend. 
* Running on http://127.0.0.1:5000 ``` +결과물에서 볼 수 있듯이, 모델을 먼저 불러옵니다. 그런 수, Flask 서버를 시작할 수 있습니다. +이제 http://127.0.0.1:5000을 통해 서버에 엑세스할 수 있습니다. + +그러나, IP 주소 + 포트를 복사하여 브라우저에 붙여넣으려면 다음 이미지가 표시됩니다. ![Not Found](https://raw.githubusercontent.com/KerasKorea/KEKOxTutorial/master/media/32_0.png) +그 이유는 Flask URL 경로에 색인/홈페이지 세트가 없기 때문입니다. + +대신에, 브라우저를 통해 `/predict` 엔트포인트에 액세스해 보세요. ![Method Not Allowed](https://raw.githubusercontent.com/KerasKorea/KEKOxTutorial/master/media/32_1.png) +그리고 "Method Not Allowed"(방법 허용되지 않음) 오류가 표시됩니다. 해당 오류는 브라우저에서 GET 요청을 수행하지만 `/predict`는 POST만 허용하기 때문에 발생합니다. (다음 섹션에서 수행하는 방법을 보여드리려 합니다.) + #### cURL을 사용해서 Keras REST API 테스트하기 +Keras REST API를 테스트하고 디버깅할 때는 [cURL](https://curl.haxx.se/)을 사용하는 것을 고려하세요.(사용법을 배우기에 좋은 툴입니다.) + +아래에서 분류하고 싶은 이미지(ex. 개)보다 구체적으로 비글을 보실 수 있을 겁니다. ![beagle](https://raw.githubusercontent.com/KerasKorea/KEKOxTutorial/master/media/32_2.jpg) From b26b82cee0ff2b9a689736e20e21be632cd44ae8 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Sun, 21 Oct 2018 03:58:19 +0900 Subject: [PATCH 16/49] =?UTF-8?q?#32=20:=201=EC=B0=A8=20=EB=B2=88=EC=97=AD?= =?UTF-8?q?=20=EC=99=84=EB=A3=8C.=20=EC=98=A4=ED=83=88=EC=9E=90=20?= =?UTF-8?q?=EB=B0=8F=20=EB=B2=88=EC=97=AD=20=EB=A6=AC=EB=B7=B0=20=ED=95=84?= =?UTF-8?q?=EC=9A=94?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...g_a_simple_keras_deep_learning_rest_api.md | 104 ++++++++++++------ 1 file changed, 73 insertions(+), 31 deletions(-) diff --git a/32_building_a_simple_keras_deep_learning_rest_api.md b/32_building_a_simple_keras_deep_learning_rest_api.md index 8caafb4..29c6a87 100644 --- a/32_building_a_simple_keras_deep_learning_rest_api.md +++ b/32_building_a_simple_keras_deep_learning_rest_api.md @@ -106,36 +106,33 @@ def prepare_image(image, target): ```python @app.route("/predict", methods=["POST"]) def predict(): - # initialize the data dictionary that will be returned from the - # view + # view로부터 반환될 데이터 딕셔너리를 초기화합니다. data = {"success": False} - # ensure an image was properly uploaded to our endpoint + # 이미지가 엔트포인트에 올바르게 업로드 되었는디 확인하세요 if flask.request.method == "POST": if flask.request.files.get("image"): - # read the image in PIL format + # PIL 형식으로 이미지를 읽어옵니다. image = flask.request.files["image"].read() image = Image.open(io.BytesIO(image)) - # preprocess the image and prepare it for classification + # 분류를 위해 이미지를 사전 처리합니다. image = prepare_image(image, target=(224, 224)) - # classify the input image and then initialize the list - # of predictions to return to the client + # 입력 이미지를 분류하고 클라이언트로부터 반환되는 예측치들의 리스트를 초기화 합니다. preds = model.predict(image) results = imagenet_utils.decode_predictions(preds) data["predictions"] = [] - # loop over the results and add them to the list of - # returned predictions + # 결과를 반복하여 반환된 예측 목록에 추가합니다. for (imagenetID, label, prob) in results[0]: r = {"label": label, "probability": float(prob)} data["predictions"].append(r) - # indicate that the request was a success + # 요청이 성공했음을 나타냅니다. data["success"] = True - # return the data dictionary as a JSON response + # JSON 형식으로 데이터 딕셔너리를 반환합니다. return flask.jsonify(data) ``` `data` 딕셔너리는 클라이언트에게 반환하길 희망하는 데이터를 저장하는데 사용합니다. 이 함수엔 예측의 성공 여부를 나타내는 부울을 가지고 있습니다. 또한, 이 딕셔너리를 사용하여 들어오는 데이터에 대한 예측 결과를 저장합니다. @@ -159,8 +156,7 @@ def predict(): 이제 서비스를 시작하겠습니다. ```python -# if this is the main thread of execution first load the model and -# then start the server +# 실행에서 메인 쓰레드인 경우, 먼저 모델을 불러온 뒤 서버를 시작합니다. 
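+# load_model()을 서버 시작 전에 한 번만 호출해 두므로, 이후 모든 요청이 메모리에 올라간 같은 모델을 재사용합니다.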
if __name__ == "__main__": print(("* Loading Keras model and Flask starting server..." "please wait until server has fully started")) @@ -183,21 +179,20 @@ if __name__ == "__main__": 예측 함수에 있는 당신의 모델을 불러오는 걸 시도할 수 있습니다. 아래 코드를 참조하세요: ```python -# ensure an image was properly uploaded to our endpoint +# 이미지가 엔트포인트에 올바르게 업로드되었는지 확인하세요 if request.method == "POST": if request.files.get("image"): - # read the image in PIL format + # PIL 형태로 이미지를 읽어옵니다. image = request.files["image"].read() image = Image.open(io.BytesIO(image)) - # preprocess the image and prepare it for classification + # 분류를 위해 이미지를 사전 처리합니다. image = prepare_image(image, target=(224, 224)) - # load the model + # 모델을 불러옵니다. model = ResNet50(weights="imagenet") - # classify the input image and then initialize the list - # of predictions to return to the client + # 입력 이미지를 분류하고 클라이언트로부터 반환되는 예측치들의 리스트를 초기화 합니다. preds = model.predict(image) results = imagenet_utils.decode_predictions(preds) data["predictions"] = [] @@ -232,23 +227,25 @@ Using TensorFlow backend. 그러나, IP 주소 + 포트를 복사하여 브라우저에 붙여넣으려면 다음 이미지가 표시됩니다. -![Not Found](https://raw.githubusercontent.com/KerasKorea/KEKOxTutorial/master/media/32_0.png) +![Not Found](https://raw.githubusercontent.com/KerasKorea/KEKOxTutorial/master/media/32_0.png) 그 이유는 Flask URL 경로에 색인/홈페이지 세트가 없기 때문입니다. 대신에, 브라우저를 통해 `/predict` 엔트포인트에 액세스해 보세요. -![Method Not Allowed](https://raw.githubusercontent.com/KerasKorea/KEKOxTutorial/master/media/32_1.png) +![Method Not Allowed](https://raw.githubusercontent.com/KerasKorea/KEKOxTutorial/master/media/32_1.png) 그리고 "Method Not Allowed"(방법 허용되지 않음) 오류가 표시됩니다. 해당 오류는 브라우저에서 GET 요청을 수행하지만 `/predict`는 POST만 허용하기 때문에 발생합니다. (다음 섹션에서 수행하는 방법을 보여드리려 합니다.) #### cURL을 사용해서 Keras REST API 테스트하기 Keras REST API를 테스트하고 디버깅할 때는 [cURL](https://curl.haxx.se/)을 사용하는 것을 고려하세요.(사용법을 배우기에 좋은 툴입니다.) -아래에서 분류하고 싶은 이미지(ex. 개)보다 구체적으로 비글을 보실 수 있을 겁니다. +아래에서 분류하고 싶은 이미지(ex. *개*)보다 구체적으로 *비글*을 보실 수 있을 겁니다. -![beagle](https://raw.githubusercontent.com/KerasKorea/KEKOxTutorial/master/media/32_2.jpg) +![beagle](https://raw.githubusercontent.com/KerasKorea/KEKOxTutorial/master/media/32_2.jpg) + +*curl*을 사용해서 API로 이미지를 전달할 수 있고 ResNet이 생각하는 이미지 내용을 확인할 수 있습니다. ```bash $ curl -X POST -F image=@dog.jpg 'http://localhost:5000/predict' @@ -279,36 +276,60 @@ $ curl -X POST -F image=@dog.jpg 'http://localhost:5000/predict' } ``` +`-X` 플래그와 `POST` 값은 POST 요청을 수행하고 있음을 말해줍니다. + +`-F image=@dog.jpg`를 제공하여 인코딩된 데이터를 제출합니다. 그 후, `image`는 `dog.jpg`파일로 설정됩니다. `dog.jpg`보다 먼저 `@`를 제공하는 것은 cURL이 이미지의 내용을 로드하고 요청에 데이터를 전달하기를 원한다는 것을 의미합니다. + +끝으로, 엔트포인트를 얻게 됩니다 : `http://localhost:5000/predict` + +어떻게 입력 이미지가 99.01%의 신뢰도로 *"비글"*을 올바르게 분류하는지 보세요. 나머지 상위 5개 예측치 및 관련 확률도 Keras API의 응답에 포함됩니다. + #### Keras REST API 프로그래밍 방식 사용 +아마도, Keras REST API에 데이터를 *제출*한 다음 반환된 예측치를 출력합니다. 이렇게 하려면 서버로부터 응답을 프로그래밍 방식으로 처리해야 합니다. + +이는 python 패키지인 [`requests`](http://docs.python-requests.org/en/master/)를 이용하면 간단한 프로세스 입니다. + ```python -# import the necessary packages +# 필수 패키지를 불어옵니다. import requests -# initialize the Keras REST API endpoint URL along with the input -# image path +# Keras REST API 엔드포인트의 URL를 입력 이미지 경로와 같이 초기화 합니다. KERAS_REST_API_URL = "http://localhost:5000/predict" IMAGE_PATH = "dog.jpg" -# load the input image and construct the payload for the request +# 입력 이미지를 불러오고 요청에 맞게 페이로드(payload)를 구성합니다. image = open(IMAGE_PATH, "rb").read() payload = {"image": image} -# submit the request +# 요청을 보냅니다. 
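+# files= 인자는 이미지를 multipart/form-data 형식으로 전송하고, .json()은 응답 본문을 파이썬 딕셔너리로 변환합니다.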
r = requests.post(KERAS_REST_API_URL, files=payload).json() -# ensure the request was successful +# 요청이 성공했는지 확인합니다. if r["success"]: - # loop over the predictions and display them + # 예측을 반복하고 이를 표시합니다. for (i, result) in enumerate(r["predictions"]): print("{}. {}: {:.4f}".format(i + 1, result["label"], result["probability"])) -# otherwise, the request failed +# 그렇지 않다면 요청은 실패합니다. else: print("Request failed") ``` +`KERAS_REST_API_URL`은 엔드포인트를 지정하는 반면 `IMAGE_PATH`는 디스크에 상주하는 입력 이미지의 경로입니다. + +`IMAGE_PATH`를 사용하여 이미지를 불러온 다음 요청에 대한 `페이로드`를 구성합니다. + +`페이로드`가 있을 경우 `requests.post`를 호출하여 데이터를 엔드포인트에 POST할 수 있습니다. 호출 끝에 `.json()`을 추가하면 다음과 같은 `requests`가 표시됩니다. + +1. 서버로부터 온 응답이 JSON에 있어야 합니다. +2. JSON 객체가 자동으로 구문 분석 및 추상화되기를 원합니다. + +요청 결과가 `r`이면 분류가 성공(혹은 실패)인지 확인한 다음 `r["predictions"]`을 반복할 것입니다. + +`simple_request.py`를 실행하기 위해, 먼저 `run_keras_server.py`(즉 Flask 웹서버)가 현재 실행되고 있는지 확인하세요. 여기서, 별도의 셸에서 다음 명령을 실행합니다. + ```bash $ python simple_request.py 1. beagle: 0.9901 @@ -318,8 +339,29 @@ $ python simple_request.py 5. bluetick: 0.0011 ``` +성공적으로 Keras REST API를 불러왔고 python을 통해 모델의 예측치들도 얻었습니다. + --- +이 글에서 아래 항목들을 수행하는 방법을 배웠습니다: + +- Keras 모델을 Flask 웹 프레임워크를 사용해 REST API로 감싸는 법 +- cURL을 활용하여 데이터를 API로 전송하는 법 +- python과 `requests`패키지를 사용하여 엔드포인트로 데이터를 보내고 결과를 출력하는 법. + +이번 튜토리얼에 쓰인 코드는 [여기](https://github.com/jrosebr1/simple-keras-rest-api)에서 보실 수 있고, 고유한 Keras REST API용 템플릿으로 사용하실 수 있습니다.(원하는 대로 수정할 수 있습니다.) + +명심하세요. 이 글에 있는 코드는 교육용입니다. 대량의 호출 및 수신 요청에 따라 규모를 확장할 수 있는 생산 수준이 아닙니다. + +이 방법은 다음 경우에 가장 적합합니다: + +1. Keras 딥러닝 모델을 위한 REST API를 빠르게 설정할 필요가 있을 때 +2. 엔드포인트가 크게 타격을 받지 않을 때 + +메세지 큐 및 배치 기능을 활용하는 고급 Keras REST API에 관심이 있을 경우, [이 글](https://www.pyimagesearch.com/2018/01/29/scalable-keras-deep-learning-rest-api/)을 참조하세요. + +만약 질문이나 의견이 있다면 [PyImageSearch](https://www.pyimagesearch.com/)에서 Adrian에게 연락하세요. 앞으로 다뤄야할 주제에 대한 제안일 경우 Twitter에서 [Francois](https://twitter.com/fchollet)를 찾아보세요. 
+ ### 참고 * [PyImageSearch](https://www.pyimagesearch.com/) * [Flask 웹 프레임워크](http://docs.python-requests.org/en/master/) From 022e97c446a3ed0c7e2ea9820e7499009266e8aa Mon Sep 17 00:00:00 2001 From: mike2ox Date: Mon, 22 Oct 2018 20:37:08 +0900 Subject: [PATCH 17/49] =?UTF-8?q?#25=20:=20=EB=82=99=EC=84=9C=ED=95=98?= =?UTF-8?q?=EA=B8=B0=20code=20=EB=B2=88=EC=97=AD=20=EC=99=84=EB=A3=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../neural_doodle.py | 79 +++++++++---------- 1 file changed, 39 insertions(+), 40 deletions(-) diff --git a/25_Keras_examples_3_Generative_models_examples/neural_doodle.py b/25_Keras_examples_3_Generative_models_examples/neural_doodle.py index ad1f5db..b9bba49 100644 --- a/25_Keras_examples_3_Generative_models_examples/neural_doodle.py +++ b/25_Keras_examples_3_Generative_models_examples/neural_doodle.py @@ -1,35 +1,33 @@ -'''Neural doodle with Keras +'''Keras를 이용해 신경망으로 낙서하기 -# Script Usage +# 본 스크립트 사용법 ## Arguments ``` ---nlabels: # of regions (colors) in mask images ---style-image: image to learn style from ---style-mask: semantic labels for style image ---target-mask: semantic labels for target image (your doodle) ---content-image: optional image to learn content from ---target-image-prefix: path prefix for generated target images +--nlabels: mask 이미지안에 영역(색깔)의 수 +--style-image: 학습하고 싶은 스타일이 있는 이미지 +--style-mask: 스타일 이미지를 위한 시맨틱 라벨(semantic labels) +--target-mask: 목표 이미지를 위한 시맨틱 라벨(사용자가 만든 낙서) +--content-image: (선택) 학습하고 싶은 컨텐츠가 있는 이미지 +--target-image-prefix: 생성된 목표 이미지를 위한 저장 경로 ``` -## Example 1: doodle using a style image, style mask -and target mask. +## Example 1: 스타일 이미지, 스타일 라벨 그리고 목표 시맨틱 라벨을 사용해 낙서하기 ``` python neural_doodle.py --nlabels 4 --style-image Monet/style.png \ --style-mask Monet/style_mask.png --target-mask Monet/target_mask.png \ --target-image-prefix generated/monet ``` -## Example 2: doodle using a style image, style mask, -target mask and an optional content image. 
+## Example 2: 스타일 이미지, 스타일 라벨, 목표 시맨틱 라벨 그리고 옵션인 컨텐츠 이미지를 사용해 낙서하기 ``` -python neural_doodle.py --nlabels 4 --style-image Renoir/style.png \ +python neural_doodle.py --nlabels 4 --st를yle-image Renoir/style.png \ --style-mask Renoir/style_mask.png --target-mask Renoir/target_mask.png \ --content-image Renoir/creek.jpg \ --target-image-prefix generated/renoir ``` -# References +# 참고자료 - [Dmitry Ulyanov's blog on fast-neural-doodle] (http://dmitryulyanov.github.io/feed-forward-neural-doodle/) @@ -42,7 +40,7 @@ - [Discussion on parameter tuning] (https://github.com/keras-team/keras/issues/3705) -# Resources +# 소스코드 자료 Example images can be downloaded from https://github.com/DmitryUlyanov/fast-neural-doodle/tree/master/data @@ -59,7 +57,7 @@ from keras.preprocessing.image import load_img, save_img, img_to_array from keras.applications import vgg19 -# Command line arguments +# 커맨드 라인에 입력할 설정들 parser = argparse.ArgumentParser(description='Keras neural doodle example') parser.add_argument('--nlabels', type=int, help='number of semantic labels' @@ -86,7 +84,7 @@ num_labels = args.nlabels num_colors = 3 # RGB -# determine image sizes based on target_mask +# 목표 시맨틱 라벨을 기반으로 이미지 사이즈를 결정 ref_img = img_to_array(load_img(target_mask_path)) img_nrows, img_ncols = ref_img.shape[:2] @@ -95,12 +93,12 @@ content_weight = 0.1 if use_content_img else 0 content_feature_layers = ['block5_conv2'] -# To get better generation qualities, use more conv layers for style features +# 생성이 더 잘되도록, 스타일 피쳐(style feature)용 컨볼루션 레이어를 더 많이 사용 style_feature_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1'] -# helper functions for reading/processing images +# 이미지를 읽고 사전처리를 해주는 함수 def preprocess_image(image_path): img = load_img(image_path, target_size=(img_nrows, img_ncols)) img = img_to_array(img) @@ -137,10 +135,10 @@ def kmeans(xs, k): def load_mask_labels(): - '''Load both target and style masks. - A mask image (nr x nc) with m labels/colors will be loaded - as a 4D boolean tensor: - (1, m, nr, nc) for 'channels_first' or (1, nr, nc, m) for 'channels_last' + '''목표 및 스타일 시맨틱 라벨을 불러오기 + m개의 라벨/색을 지닌 시맨틱 이미지(nr x nc)를 4D boolean 텐서로 불러오기. 
+ 주의 : + (1, m, nr, nc)는 'channels_first'에, (1, nr, nc, m)는 'channels_last'에 사용되는 형태 ''' target_mask_img = load_img(target_mask_path, target_size=(img_nrows, img_ncols)) @@ -171,7 +169,7 @@ def load_mask_labels(): np.expand_dims(target_mask, axis=0)) -# Create tensor variables for images +# 이미지를 위해 텐서형 변수들을 생성 if K.image_data_format() == 'channels_first': shape = (1, num_colors, img_nrows, img_ncols) else: @@ -186,20 +184,20 @@ def load_mask_labels(): images = K.concatenate([style_image, target_image, content_image], axis=0) -# Create tensor variables for masks +# 시맨틱 라벨을 위해 텐서형 변수들을 생성 raw_style_mask, raw_target_mask = load_mask_labels() style_mask = K.variable(raw_style_mask.astype('float32')) target_mask = K.variable(raw_target_mask.astype('float32')) masks = K.concatenate([style_mask, target_mask], axis=0) -# index constants for images and tasks variables +# 이미지와 작업용 변수인 인덱스 상수들 STYLE, TARGET, CONTENT = 0, 1, 2 -# Build image model, mask model and use layer outputs as features -# image model as VGG19 +# 이미지 모델(image_model), 시맨틱 모델(mask_input)을 생성하고 피쳐용으로 레이어 출력값을 사용 +# 이미지 모델은 VGG19 image_model = vgg19.VGG19(include_top=False, input_tensor=images) -# mask model as a series of pooling +# 나열된 풀링 레이어으로 시맨틱 모델 표현 mask_input = Input(tensor=masks, shape=(None, None, None), name='mask_input') x = mask_input for layer in image_model.layers[1:]: @@ -211,7 +209,7 @@ def load_mask_labels(): x = AveragePooling2D((2, 2), name=name)(x) mask_model = Model(mask_input, x) -# Collect features from image_model and task_model +# 이미지 모델과 시맨틱 모델에서 피쳐들을 수집 image_features = {} mask_features = {} for img_layer, mask_layer in zip(image_model.layers, mask_model.layers): @@ -223,7 +221,7 @@ def load_mask_labels(): mask_features[layer_name] = mask_feat -# Define loss functions +# 손실 함수를 정의 def gram_matrix(x): assert K.ndim(x) == 3 features = K.batch_flatten(x) @@ -232,8 +230,9 @@ def gram_matrix(x): def region_style_loss(style_image, target_image, style_mask, target_mask): - '''Calculate style loss between style_image and target_image, - for one common region specified by their (boolean) masks + ''' + (boolean형) 마스크로 지정된 하나의 공통 영역에 대해 + 스타일 이미지와 목표 이미지 사이의 스타일 손실값을 계산 ''' assert 3 == K.ndim(style_image) == K.ndim(target_image) assert 2 == K.ndim(style_mask) == K.ndim(target_mask) @@ -254,8 +253,8 @@ def region_style_loss(style_image, target_image, style_mask, target_mask): def style_loss(style_image, target_image, style_masks, target_masks): - '''Calculate style loss between style_image and target_image, - in all regions. + ''' + 모든 영역에서 스타일 이미지와 목표 이미지 사이의 스타일 손실값을 계산 ''' assert 3 == K.ndim(style_image) == K.ndim(target_image) assert 3 == K.ndim(style_masks) == K.ndim(target_masks) @@ -291,8 +290,8 @@ def total_variation_loss(x): return K.sum(K.pow(a + b, 1.25)) -# Overall loss is the weighted sum of content_loss, style_loss and tv_loss -# Each individual loss uses features from image/mask models. +# 전체 손실값은 컨텐츠, 스타일, 전체 변화 손실의 가중치를 계산한 합산. +# 각 개별 손실 함수는 이미지/시맨틱 모델로부터 추출한 피쳐들을 사용. 
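+# content_weight, style_weight, total_variation_weight 를 곱한 항들을 아래에서 loss에 차례로 더합니다.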
loss = K.variable(0) for layer in content_feature_layers: content_feat = image_features[layer][CONTENT, :, :, :] @@ -310,7 +309,7 @@ def total_variation_loss(x): loss += total_variation_weight * total_variation_loss(target_image) loss_grads = K.gradients(loss, target_image) -# Evaluator class for computing efficiency +# 효율성 검사를 위한 평가 클래스 outputs = [loss] if isinstance(loss_grads, (list, tuple)): outputs += loss_grads @@ -356,7 +355,7 @@ def grads(self, x): evaluator = Evaluator() -# Generate images by iterative optimization +# 반복 최적화로 이미지를 생성 if K.image_data_format() == 'channels_first': x = np.random.uniform(0, 255, (1, 3, img_nrows, img_ncols)) - 128. else: @@ -368,7 +367,7 @@ def grads(self, x): x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20) print('Current loss value:', min_val) - # save current generated image + # 현재 생성된 이미지를 저장 img = deprocess_image(x.copy()) fname = target_img_prefix + '_at_iteration_%d.png' % i save_img(fname, img) From 05631192c45e36c9099381a38540af3c287c8495 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Mon, 22 Oct 2018 20:44:16 +0900 Subject: [PATCH 18/49] =?UTF-8?q?#25=20:=20VAE=20=EC=98=88=EC=A0=9C=20?= =?UTF-8?q?=EB=B2=88=EC=97=AD=20=EC=99=84=EB=A3=8C=20=EB=B0=8F=20=EC=98=A4?= =?UTF-8?q?=ED=83=80=20=EC=88=98=EC=A0=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../variational_autoencoder.py | 64 +++++++++---------- .../variational_autoencoder_deconv.py | 11 ++-- 2 files changed, 36 insertions(+), 39 deletions(-) diff --git a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder.py b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder.py index 060cf71..a5b0853 100644 --- a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder.py +++ b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder.py @@ -1,12 +1,11 @@ -'''Example of VAE on MNIST dataset using MLP +'''MLP를 사용하고 MNIST 데이터 세트 기반의 VAE 예제 -The VAE has a modular design. The encoder, decoder and VAE -are 3 models that share weights. After training the VAE model, -the encoder can be used to generate latent vectors. -The decoder can be used to generate MNIST digits by sampling the -latent vector from a Gaussian distribution with mean=0 and std=1. +VAE는 모듈러(modular) 구조를 띄고 있습니다. 인코더(encoder), 디코더(decoder) +그리고 VAE는 가중치를 서로 공유하고 있습니다. VAE 모델을 학습한 후, +인코더는 은닉 벡터(latent vectors)를 생성하는데, 디코더는 가우시안 분포도(mean=0, std=1)에서 은닉 벡터를 +샘플링함으로써 MNIST 숫자를 생성하는데 사용될 수 있습니다. -# Reference +# 참고 자료 [1] Kingma, Diederik P., and Max Welling. "Auto-encoding variational bayes." @@ -30,23 +29,23 @@ import os -# reparameterization trick -# instead of sampling from Q(z|X), sample eps = N(0,I) -# z = z_mean + sqrt(var)*eps +# 재매개변수화(reparameterization) 기법 +# Q(z|X)에서 샘플링하는 대신에 eps = N(0,I)에서 샘플링을 실행 +# 그때 z = z_mean + sqrt(var)*eps def sampling(args): - """Reparameterization trick by sampling fr an isotropic unit Gaussian. + """등방성 단일 가우시안에서 샘플을 채취하는 재매개변수화 기법 # Arguments: - args (tensor): mean and log of variance of Q(z|X) + args (tensor): Q(z|X)의 분산의 로그값과 평균값 # Returns: - z (tensor): sampled latent vector + z (tensor): 샘플링된 은닉 벡터들 """ z_mean, z_log_var = args batch = K.shape(z_mean)[0] dim = K.int_shape(z_mean)[1] - # by default, random_normal has mean=0 and std=1.0 + # 기본설정으로, random_normal는 mean=0, std=1.0로 지정되있음. 
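+    # exp(0.5 * z_log_var)가 표준편차 sqrt(var)에 해당합니다.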
epsilon = K.random_normal(shape=(batch, dim)) return z_mean + K.exp(0.5 * z_log_var) * epsilon @@ -55,13 +54,13 @@ def plot_results(models, data, batch_size=128, model_name="vae_mnist"): - """Plots labels and MNIST digits as function of 2-dim latent vector + """2차원 은닉 벡터의 함수로서 라벨과 MNIST 숫자를 표시 # Arguments: - models (tuple): encoder and decoder models - data (tuple): test data and label - batch_size (int): prediction batch size - model_name (string): which model is using this function + models (tuple): 인코더와 디코더 모델 + data (tuple): 테스트 데이터와 라벨 + batch_size (int): 배치 사이즈 + model_name (string): 사용하려는 모델 이름 """ encoder, decoder = models @@ -69,7 +68,7 @@ def plot_results(models, os.makedirs(model_name, exist_ok=True) filename = os.path.join(model_name, "vae_mean.png") - # display a 2D plot of the digit classes in the latent space + # 은닉공간의 숫자 클래스의 2D 이미지를 표시합니다. z_mean, _, _ = encoder.predict(x_test, batch_size=batch_size) plt.figure(figsize=(12, 10)) @@ -81,12 +80,11 @@ def plot_results(models, plt.show() filename = os.path.join(model_name, "digits_over_latent.png") - # display a 30x30 2D manifold of digits + # 30X30 2D형태의 숫자들을 표시. n = 30 digit_size = 28 figure = np.zeros((digit_size * n, digit_size * n)) - # linearly spaced coordinates corresponding to the 2D plot - # of digit classes in the latent space + # 은닉공간의 숫자 클래스의 2D 그림에 해당하는 선형 간격 좌표 grid_x = np.linspace(-4, 4, n) grid_y = np.linspace(-4, 4, n)[::-1] @@ -123,7 +121,7 @@ def plot_results(models, x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 -# network parameters +# 신경망 매개변수들 input_shape = (original_dim, ) intermediate_dim = 512 batch_size = 128 @@ -131,32 +129,32 @@ def plot_results(models, epochs = 50 # VAE model = encoder + decoder -# build encoder model +# 인코더 모델 설계 inputs = Input(shape=input_shape, name='encoder_input') -x = Dense(intermediate_dim, activation='relu')(inputs) +x = Dense(intermediate_dim, activation='relu')(inputs)계 z_mean = Dense(latent_dim, name='z_mean')(x) z_log_var = Dense(latent_dim, name='z_log_var')(x) -# use reparameterization trick to push the sampling out as input -# note that "output_shape" isn't necessary with the TensorFlow backend +# 재매개변수 기법을 이용해 샘플링을 입력으로 푸쉬합니다 +# Tensorflow 백엔드에서는 "output_shape"이 필요하지 않습니다. 
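+# (출력 형태를 추론하지 못하는 다른 백엔드에서는 Lambda의 output_shape 인자가 필요할 수 있습니다.)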
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var]) -# instantiate encoder model +# 인코더 모델을 인스턴스화(instantiate) encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder') encoder.summary() plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True) -# build decoder model +# 디코더 모델 설계 latent_inputs = Input(shape=(latent_dim,), name='z_sampling') x = Dense(intermediate_dim, activation='relu')(latent_inputs) outputs = Dense(original_dim, activation='sigmoid')(x) -# instantiate decoder model +# 디코더 모델 인스턴스화 decoder = Model(latent_inputs, outputs, name='decoder') decoder.summary() plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True) -# instantiate VAE model +# VAE 모델 인스턴스화 outputs = decoder(encoder(inputs)[2]) vae = Model(inputs, outputs, name='vae_mlp') @@ -194,7 +192,7 @@ def plot_results(models, if args.weights: vae.load_weights(args.weights) else: - # train the autoencoder + # 오토인코더 학습 vae.fit(x_train, epochs=epochs, batch_size=batch_size, diff --git a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py index 17b2e09..5b3a9b7 100644 --- a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py +++ b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py @@ -2,7 +2,7 @@ VAE는 모듈러(modular) 구조를 갖고 있습니다. 인코더(encoder), 디코더(decoder) 그리고 VAE는 가중치를 서로 공유하고 있습니다. VAE 모델을 학습한 후, -인코더는 은닉 벡터(latent vectors)를 생성하는데, 디코더는 가우시안 분포도에서 은닉 벡터를 +인코더는 은닉 벡터(latent vectors)를 생성하는데, 디코더는 가우시안 분포도(mean=0, std=1)에서 은닉 벡터를 샘플링함으로써 MNIST 숫자를 생성하는데 사용될 수 있습니다. # 참고 자료 @@ -86,8 +86,7 @@ def plot_results(models, n = 30 digit_size = 28 figure = np.zeros((digit_size * n, digit_size * n)) - # linearly spaced coordinates corresponding to the 2D plot - # of digit classes in the latent space + # 은닉공간의 숫자 클래스의 2D 그림에 해당하는 선형 간격 좌표 grid_x = np.linspace(-4, 4, n) grid_y = np.linspace(-4, 4, n)[::-1] @@ -161,7 +160,7 @@ def plot_results(models, encoder.summary() plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True) -# 디코더 모델을 설계 +# 디코더 모델 설계 latent_inputs = Input(shape=(latent_dim,), name='z_sampling') x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs) x = Reshape((shape[1], shape[2], shape[3]))(x) @@ -180,12 +179,12 @@ def plot_results(models, padding='same', name='decoder_output')(x) -# 디코더 모델을 인스턴스화 +# 디코더 모델 인스턴스화 decoder = Model(latent_inputs, outputs, name='decoder') decoder.summary() plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True) -# VAE 모델을 인스턴스화 +# VAE 모델 인스턴스화 outputs = decoder(encoder(inputs)[2]) vae = Model(inputs, outputs, name='vae') From d38ca65e1e56902cdd20855af3ac251e1ee0daf4 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Wed, 24 Oct 2018 19:01:50 +0900 Subject: [PATCH 19/49] =?UTF-8?q?#42=20:=20=EC=9B=90=EB=AC=B8=20=EB=82=B4?= =?UTF-8?q?=20=EC=9D=B4=EB=AF=B8=EC=A7=80=20=EC=A0=80=EC=9E=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- media/42_0.png | Bin 0 -> 15281 bytes media/42_1.png | Bin 0 -> 87608 bytes media/42_2.png | Bin 0 -> 13839 bytes media/42_3.png | Bin 0 -> 14054 bytes 4 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 media/42_0.png create mode 100644 media/42_1.png create mode 100644 media/42_2.png create mode 100644 media/42_3.png diff --git a/media/42_0.png b/media/42_0.png new file mode 100644 index 
0000000000000000000000000000000000000000..3eeda8eb88073191a7e680e4fbff4adeb8096f5b
zoz4HlfO>QP;IMf|)O|wJ&eUfBideyKLNW<!v)_zvoskv{I9 zsrSc5qKe{;D0H)Pgpg~x;qf8wI3mO43IT0Q@_Ntgz>xD<{YUn?dWVb;EUD|8W+6cmDR(?9O+d9n`0|LjUomdVgF*U^%~L z4UNVD!qMb7JoWW~ICg=$s>;Fq9nIE+>NP`NhQz1`U?XJLBL^yQ4}9fxQI}dPsov}3 zF$3DDw$!o(wUOw-ucF?o6ketOcwODa%k&R9rdrJx|6Y_!PpixXBQfKKjO<{!o9Z9( zQmwU8$%RGB%BI!L%|b16w~}g#3_24@W>(Ak12YyB#D!EUC@cxTJ*=Gk`r?D;;Y%d!LB1^h!9_bVv9 zql1G(V=h}PT9qciO|{y24enBPkBAiuQ>e8%1EiY2e7XPHP{goI7*@Bo(>e-es!-gh z40aZNryT>&>p#vz_V~|#fAtsCf1I7#zC!=;rh0$0El}v19iH|Nx}8eJU;)0GOnI9#lE_XfjR&scYG{IQFsQQz=KR5+|uD#O#!GV0sY3XQ6Vi;J|!mUaAK z$NzXcpO`l8ERNgQjBPwN9uG4Hk{J^J|33^K9NQ78vE6kH1?mXf2qH(@0FIVO*@U1- zVbMxXkdPuDXw^uow2D?u)i#oSKqAQ@QbduKmIE8@X)k;1VWo1n`rEe+aLtR?C8mMwl7uKa%yq%1^FWst>7uC_mMh zam=XqY;Jbq4L^BCc}A(1zlG`J9e9Oc!l?Fm17N)K`04lvDPI%(nYZV!HtOG+X7lw> zD%G-{rC-+0gui;d+DOXd#or_KP4h=lO_Hn+4CdFlSuO4CD)H5ooh>jrjYp}`*B@7_ zHSp~4F4bf;nVUPio$!YHNk2XeFAk4-l(Nhw+|zh-mCgE(r#0X@{`r%R)Mpw^heu<4 zkN)GK_5Q3bCg$YIyVYllwd^P>;is3d8DFn*bSPx7j*hs)Ha7}JOrELh4!(&JI5*FX z1rxzwCg$=La_cKAH?iY-&cgH$2wc=1ko`8|O?aRfdyoF(L3J13p?_$`evPbyr4xxS z6XIZ0q|qv3F||C7H}dcY)jy=eDe1zeV|`jNS}Yb*939FGwUUN*Sed~+=6IeCN`9hX z4_MnW8DVt$>BBoOzJ2_3%}$2~hWay^i_$*xr>Gv?yRHWwX__g_&eLjbUB(ksHw;r} z0#CD@0Q>k`f2hX%F0rDrTq*_#F-{?{@Qr zIO<>CmFvn@%kgLmR&-GuBT$|Z0y2ezHM6aLiD>LcHS0g3evjsxzZyIKAP#yujh#nb zU+DAo^mG;1*H=of=F>iy7178;Z15NinnryiTv);kiE1vOplt0%xTLRn6b&vCPxOSo z^A^l(o+)7>c03u24G1XY-rS$+y?Rmy)nj(2_vaF+L?WS5NfP|S zaqxtJKjirY5w7QlV?=p z^K5$5do|YUV7)MC!GFPV(%>%U^VLP{v4?8aD=V0ET`m^g{K#-`Z^V~8VQX_bT20;X z%(`xq3nCp;^L{vPhyU>tqxe~iX3%r$$m?ButX*fzSfMcUDw}OhB=+*i_-6F)s$f1_T&>h<`SNBVfR;(6f|#SNPe*DM@+SIT z9&sc5l;)d{KmHRe*BzRpwvP(lU|E>0hgCwawqe&$(a!Ohva^pxBt zy}k))5J`!qgM>I3$8!2|yk#L^X>~dV1VNFsS{293V=|B)_FcF*x3PS0;@ z7mRjHeM*-F>v=I-vz*IUfWd|N^6Ki+v&Gr!&P-`>Hh-22oeP;;dw9a8?gB1NPAB8o zV;^xhyhHO*3ohIT(0@E6kAzn&l(DuMbLVmpLN@crNetstnk1Uy7f>NFE~Yp=VXL2` z;GVcH<&ju3IFhLxo(=?<5is*E2MeJZcmv1Iqdj|NG;{y?^2EgDiHQf-9D*cyy#VBg z@(2zT=BdkiF}-NJC`ur)uO}=UKKKGWk`xc|^-yLged!kzczki^*R1z?$exZkLLvH) zJQ6aFQBJ}=ZZm0am`4Rc?ccw!ek@_8I8G!5N6bMO>Shg#jjEhTwt&k{%)ndO< z&egzR$o--M@deM>(JNPmv-4n0S61X&huKOhGceFK7$^ipWtV}_0*}r;Jf21%Tk79< zj-E$~wiRlPYWdniD%H#*+|s!irAgbWCGY@eHaD0O_3g-G{hXcw9w`%(6iiI! 
z5%U>ENMxlwARi(1LAp z!mwWFKh=5Mb3MS*lr@XcvCvZr#vu{bKzy~dQ(HJSDYm0z7Drg@_V~hXtu`y?E!oqf z;0<^17hpo8h3)xQm{554=E`Da!P+0QK}-{OdnQku;CZ1;V_ z9?j<@OM}P7@}O-`JVPYGk7krkmzJd`bCuFeWiT4mpRf@P?G%&4obre!#uq5VG68|i zd4UA0k!&NVx}L}}^T>r0iAVok{%Qh&ocQYJ>5CV=l0>Vh-^HVzkmT}lMovBw{E~{r ztRakqv0#XY05-Y3w)XPj%e5QX@k_(oS$imCvS+iw!}BPM^Mlco*GkNH`i;kZuE!f5 zc`oV^A^4H#ZdSc-P_elx!{iHa^+myJ59$A4EC=xYMQ4auhqs7eHsruA=jR8_&>ND72_8k48=l zd{t9-CsAPz0gk?;_Xeb4xNCfE3sep0Z4lfGHzD(ybk~*$JR}ZBTaWwEuLpaklu9MV zRoVVF(6_uBkGpM{pv(5=^KzVrYGbKVAZr*r=XY`Jl%l>0k95&uWmLWAyq)RjxOyG^ zAM_EXxV+$tnxDDfj*p+I$NfByXln(s9p5RTC}M1kuEZ8;(*}HlSpaZlJ2g9>%>jWv zUjpz!4eUPu>ODNto^B6Im#c9%+|7C6 zrw!qdb*M+E(Q5T7o0~5w)tqS8ST4MK`1z=w4Dd z9k?%9Ei5cgPDardbpQ{Cx^rjYlAZ9I;_74IvCzLH@u&`rLiQUihm}>Ow?HCtfM0Q` zLXHpp$8N%1_d{)~4macEM%G{?JXV_Nz{aIXlg4Nv_w_^LLLP{`~cbW+=HcKRcZr zObo-j&>4w_L?Sbs40n1uX)2`Cm9;f~$!UUZZB0hxkyop`W7VgWZX>4<#lbU$c-Z1 zjiFHZK}grw!@0)Bez@8-+Ty+xYhGOLzb3U;_QgFOqbX2%OgzFphYzJl^QErqHy6er zUdkZq5C{5)m*TncWo69}%NKULVk4+F6$0<#1C-V5hMag{s;jJ1u2mc@j}Y2R{P{cq zbgodTc5xo3qlwC;B|$y2IS9d#6Yw=fG96|1C34x>+4;4Vbb5Ow*U)xFTPWPbRE((w zA^UkAVMXTbo>Pt{zs=<4m8|iM$(P6sCXzkjFxuS7sety}^RHh#eDve{|%9P3@<`Lbf zal;M6gjFDw+>jV{I2-21FC&lBa}bz(0)Bm}t3&U^|5_jo#+ymCbg2+CO2 zQ^B$%Zwj~%<1v2UC9vpeK!6zXYyI(DR*Hbb4Kx-E+Cj%$_{WP!k6v{AK9HN+oRMye z)cpL)%KPl*<_cVor?U6smuxCV6SH*$M~nd%CD8CG6R;$IJRj=Qd=#Mxuk^r~B<;mQ zIp5&*+FAT)^8OSyl`8+Bh>^~4*~g=5Y*b~pw@mg)2CQ4rc-&N}QRv6=_WoBpf`lDb z^v4_GgL@kxtQGasp0IPnX+3-PtcR|uEeZ9+vBv$oYXe=Kolb|ta18xi|L3nq)OPjb z(bQkKIHIYkx`aQ7M_FkpEZOqrMJY4s>mQ40?8U{j!o=IHr{n#reG4rt@TfcL_~_SS zfxeyc!slcK9=su^qgza@%^(Nk(}O+`9cG$T5Smk`i_IKG4RO94L@oIC_;kYQkRgvC zjw@JFDYyczLwGDPVLhVND~e91!xHQ;-OmEsZpg=})w~i^h=uSrSoiUWdx;z+6jS)T5H%Dw zpX2b$D%3p6H@{>M5Ajj?YkkNk9;}I=h?kzc*(B{{_VY@?tS%PpOkj90l@D8k_nc&*?Y{zKvtSme zLwJN#)8vkSfzD-4XSG%zpGza#7gL)%Ih6C6A8$W-_2l+{|M;i*+1d1r1SH_PHn+C6 zxxT(VBSr1;?RiLRVI-3Y$Ye9VfU8BCd|DLu$H!_Jg+&{{V~O%4@8;Ev%3`quBL*XF z^?vdnf@u@5MQ!hT-Q27zrGiKU5W zFX4ZB+}rz7x_k;iZftD)^2^XSA3m)9=fgLVAW;_*>0|46m39@F=E>E*F}><{z$1Ey z9mu29qEk~EUmnOG*1Hgmn`H`H1RL3!`)_WIsT6XTFH|o(S{@0=aS1!(NP)gI8ayJ) zRAzE3sp*yN>G^D8SSnbxvuqh%A?QkYU43VDqboNnh0pX{lFgsLNpV(Az(X_)#3B3P zmWYaRe}%pTqUiPda%)nedUlV3*$W6#BtTV&@Ee07OO9+BiJ(p)D6(K|5`D4P@IG~I?aEVL<`>gBB}O5fGhAIz;}5ht@1L4Dk- zLyu!S6@+`#P1NAgP0*A{Vxj_#Hb+J%?&s&K`Z%`%{1*!U`gUgf_U+GRZvFP~zd|OB zhs2%b<=Gu@4Qa-#HghWlNd3z@mC8oGkj*Z?7I86`(9)!oaroTsAMyzD&s6VifoTjw z5JEUYs^XZ8V+pDP8U|3kS;YPrk2XHRE4whP;jFU>kN;BI+JaVnHdB;J(l28}eCXn6 zcQX0u-T7kx?|fS2@heU55 z89RgN(KV;G%YS-*DrszC^E%o(zS> z&Tu4x)w=-4Y&j*OWnkRc7}PcbJLXX?Z>E~{dWh|{b%q?pMVm&2Y%%Jow;zqhz zN>R=5e+zlwk7LYS@vwX_hgNwkXH%lWpwZe}WCU@t-vcpAsX#qe1L{Cp!dfR>c{Zp_ zIF}QJTq%{~TV+ax(O@tP={wNG;S)Tvi!9q{G=i7!@Y-5QG@MF_1+0Gm`JhS7huCU) zt`c|mXe*gqT!c4{?jFBfWqC&?nS2W0p1=FF+j>D2G)vzQk4DVc07=b%@9M|zdnnHt zqEiVKP_3@PBWCnZjDt9?E!-I2+}xa4Sey9xqiY|n`7PBnWLL{ioku0`h~pFpb2vt} zxWv>(p;J?0^+t#VY>nxs@SB~>aLDQmAwEaplT_`&^LoHwVP32jGPTbJ7% zsilwL@NCVou+B>LXHqML?7=?#mD&7GN|X&h|K~qm?d{FX%shN}gw=#Vo0J$ zBpf`}WiUD!5q11`#B|*0)L>m2L)V}LBTn`o;e1&oh8gsOqwftFH8HKj5rW^g>ip+D zao#t$yR{A#`RESP#`UdTzBg_j3<03vA0H>X`%S&M%;Ng|JiM`<@W*tg-+x4Xe)v+- zR#$Bsj7&`481tx2XF59-P$i_&TKgAjdq>v3|I6=&qt@vgXuZhzbtIP)({v|tqaM#` z_j}MtX^7TRWUM4)uXwWr#kwdm4*h62LNZL9g|0Ry6^fq$k8){!JqT4xo6pyg7Lm<7 zgsPs#0};?xP!Tp%qaW_bWI|JqreZDu-GMBx6vbSsm@A6G5AQ#@b?eFf&p-I~#fukz z_<&=iVDvDXG7>#M=4qw^IIbVqu-a_?w#!{78)SX1_Nv!%-dXI9@J@DH_lI9P!v3pwG(D z^U3{T(!>>4@`V+sd1h)L+}CG8o0-($5zl1OPICv)c#xglIONRQL5nUNjXr;J>*168 zmxirlf4_3&*S}#hctEGq1r&7=9h{X~Ue7p`-p)?1uV12Of=;uRvIlRLc4p_$k=-Z? 
z4ja)r`BJi`fuaQrzZauyOY2^;6AV^|*aWypb-q0>dV z>!axKH(xw>@V_sKZcI-2K_<-SNE)zI6P6l0)@f8dy>BsQ zr-m@lq#x~~g{C==FStOU&@`V@<`Igo)W)(H5z&#sC86?NAzu+xj|OcHhI`9i=k+S9 z^-lVlM`s`!wS*QI9cJ}ApTx}xTV}sp_^tv(cBq|BFjomh4IbOMl|0zK<++`b=#mdw zE`9ZC`?K5Id(S@~jz;8Rg~MX;5Gc-OXP2wRK3gvDmjKBe zCqgFU82ki}4J1w3%}UTbP`%_Xd$F6{O(ww-&O>Bgf1CudP5Af*SreR5109V;K75hzZ@QQqyEDi;xRyxrl4sW?Un8g@e0ruOv%_SwRpt( zEJh7{;*9zKv|T}H+gKX5t5}hY9m`TJ*N!Dmt=LLzS&n1Xb==sS$OcC-E860aVH}#k znyncdOuMi?EbL)QPDvRyrGb)`B~Wm>WwvDwnTwke1~Qw5&`l3zVB5_h3`>}SIqj*J z{hyrF?aVA?5Bp*gIgz9Htl!`NfB*a6_rlz0d@%mmpRS_)#D6<|OURfT6S-@z_OiV0=7(Tg^raEMq9=^BsG%p!KTWR4O&%w}lx`y`E^b0a4qn zR;5;v6&`rU2mG~sf7Qq#F0?CHz)!p9zCkOt{&HvTQZzbnK2u*#)Fs6sxZOFV^%paV zl%y)89x*w^NHHKMC(IlpYuR*pxI7k1G*+Z!a-{;yXl-MsrDBD`PxC#ju@WL+(i*jU z0Eu@OjpXI}#?H=Ga}hom3k%J))s3Cn{TkS0rO{x1#%J(&)RWoVI{?B0^m%)JbMNti zkaxhi8K3A%ihllm*ix+Re9O1EB6+N29(*K)1 zp2#{;G0cV&l;~nRI_XXt=@V1b$(G2J84d|A#P>?r=@LygJMSL-1j$7GH6NVPV&=gp zvd0hNBb6$q(h~TddU9|y&M~8(0o}g(#dIFoBa$|X(KZ^Q&`t@Ip;PjKdNWkf2s|-{ zF)>O+C)3W(b>a5fNP;pk9LKxe6fLW_RXh-N13rR1;%cK-*+A%cfsX|PQDl$T5|ILB z*rTxzwPBB{D!2=UR4OPjiV&WX!NtR?o};{UP4?)8NN2vve7wIcdcz1(L1TacFo{a5 zeLT0*lJyeAjoOnm#VT#@3=5Yq%S}JFk41OID#YjPv<@_mG zhUhk3E)J*svklc8RphK%xxe0A+p4Jo4&I$}dPORL_GLn8C>d1(%4)4;upg97f4S{T@SK&Sb?o4iBY<&!yS0#E>JysVOi>B%o235*oU1p^&kYy335DEcHrC@VM>40tOv8Xp{= z{(5?R0_~*6QY1-Zti%RKTF>)9Nb+5)RJ*K&5}8{-64fz zR*Ix)n#4&v={1{3s|jNm93_cdUK{PeXciwGg;%2N_+oRbqFyc*DKBp4`pwa(-JYW& zkzxc4O_RbSmqS*k+-Pldtm5uMO&$31%kOR&>#LvM_-3vU0@sR?*I}Cnh6wK;+9#c~ z4-Um4hKsm?lZ4o8Hf&?USvDOEr9v(eV>okvHXHNu6O|)Wc4Tn%jN`JR;qTt zQZeGiV{80)vE2d~oXupz>aDMD-AbyeTz`GE24D&3+1yfbi@*<#&!p!!AIxjx)=?7u59e4h? zb@bl&OHsMfM$^ow%%O3yEc$W98RyJA5B(xUFbp3yn~(%5Bq}P(I4&Ts*=*ix%sfMD zwZ8h{>C+oG?tD9^2QwK$kHX9PaG^WCc)TahyJy^R4DMUg6q`a@=pY;uhLlhbr^LjF zj(P!%eSLLx6$rnYyfC3Q8Z8JE*SB_dj@DP#ns?DeIhxL?HQ;W>#>#$SaA*Jsh#nc4 zsG9^~JZg{kA3k~n{#&~MpF2)% z0H}vTk(@nwA%F4u3&R|t?Y=pzma5g#w=V%M;i%|S7|e>lr~lY{8oo~T_MSR(rg!L9 zyKAOkl++6rR(8FtJx`gXf=w@ zs6;p$W?5TjR+z=?!g4*K^@)X|08FNd$j`m+zyJNGzx(G8e+Fxudr|u84sh#I!IjNs z?E?b=++lM#yZU9|YA|$N3ONh-e+-NtUDuGxMEAjtI|2dSJ-JeYh|rkp*#O_Wv$F%3 z2#^Q14-w+}(d+xI9R!gJ_f{CYkHbaL7gld?pL?*JPM4KNtKBIjT5MGkT6*l<=AIEE zK76=j%sn4GL^Cc+`}5~eR&iu(yOVI7;(>jSa@2@blVadx+`3+ch;g@LjV~aEEL8Pp z*GtvXT&YymeU3aQ7@Oq0qyKpN3|e9cIC&c3WAE7${YS#oNBT(%K>z}ZrWoLoqQvt! 
zn+}vpi5s8j4#Gv>k!5_RA4cQ(sF@ey0;4FMD(0W*A1Z>A=-4CS6qKHbNGP(bC?1?Y ze?IpSFpNMy7f0Hyw1qI+L+}hHAMw_SkIvazBvtF?77Ehe_Cs zkr84j+^BU@d3Rg)^&+E&5sXPOR0keY6joclR;jG4G*(_8HP<%Uw+54+{)e@*i)kaf z;<(4L#{~Qd_yc2)V~;=Z*Ej=%0|W#9Xuz=J0ZL0FM_k(vf@LRG2qg>L5DI8(Dlb&3p zH`FEPjYdo%U_fY56sf7G(d}MWYjCv$Fih7FsuanSRIS2VOOdLw-88`pRj?ynTj|#K zf(u-fO4lB*eFF`*w~I0E3x!;zA(R^`Ok}{pOuAk5o~E82cULao9pczvu&*zeJpJko zwx|Dk`gD02gL1?z>6y{d#>Uo0+@ux<#7M5(frW%@gM)#_#;*7oXMMlEH?`%QFGH0uIr~LiA-&1cfS8Raoq%5&V z901DY2j!TNE(Qz7WfWP2}N6W~<82o4a(w zw@!U_p;?D~W^&IS<)E+4lG1t)1caa+s7Cd0ujux@8!ZiuP!9I-)nSnT(H2<`Ri?n&dQOkZ5Kl+Db!9kFA&zoqXl$mF^C$L#@6a(1X8H*~~z1Oq3*99i72hi9kJ?cE>P zcEA9nT>$HjrI$&rDP7f(>XY;TXec=(4Wug813s$$Fm zm~6`>$}$@Xy21+PB&TV*)PHQ3B5?1g$3C|!ye339@AAVX;J=&2{IWRaR&?;`V3ajF zdwQH5Uz{8}`w0edCnJVpmGH1VY}HU;k4tbv|5i~t1J(R*_-%gST(C+samR7SgcYaP|hIBB_pl= z)R}^)$)?w{h5Et4Nbbh^G^UKNXMGmXEr-G}n&MM^>0k|X*y>DaBcu5b8wW9+W){Xj zQ8LfK{QT-E=1{(RvvPjs^6C=pOf*NsKAN@>`f`prVFQJg2!~lguz6^b_0bl0f9E(% z(hpyQHU9Hw<2XHo3oP14?b^4 z{-boCN2->o1sTFDkx6OV*w}cvv0*Y*n>Jp4Rm`{j&7+qUf^w%x+r1UY=Ook>(tvW3 z7nJswV`DD~^$vh^w;||KDaeF9>Lr|CC=xMunxKB`u-K1%N+vH8kdLXvt3ea63;PgF z8D44!HY&sGDrG#c4A`YSpT9c=Bi__z!Box5kn%}X6Yx#Cu_0CG3v*Lz)6;XC1ux8+e_`BC_n})k)rNhn~UDsqF!!5?P(dTvaO76(!e2G^rr6SOY~^s6dzqHBoE` z9Yx4!1$Kewt2YXpkEb4Q7V>!k&I>mLe`1Tz3gQv3I22F{aJosJ$mauIf(YD$DfnJK zpI>^giUGAaKcAn0!_#-yz#^Z*IQeQRZ}Nf>w2RN45HDOrss>E>QG~0)*^3K24_|Nd zYT{7wJwcOtz21!qVdod{1_U17G~8aPJt|C6#j#_@KKfBYVYmPHdbp3XxNOD_EIl%I z@~h*=Z=D>wb4RQp^$8Z$N`NqPyFwwK5^;sKFg|)T;aIqnzQe32001BWNklZ$tM?Edy^X{z5~R`n5N}WEtMqr0OqlcZPAnET zLKZGKdmbz`%7f`TCFhHtNYvGZH|MU;XS0QD_ymJc1*;KsV))8NLS_7+dU5aUSAljS zMUEVi2p4XAbA1leV`-<84+fny4C@mMn~%kHmZ`-XLx%qGk?$_vzkl=Q{foD^I_lsF zrh|RKAi#CRk~2fQc2P;!XzNv+N;qlRG;bgFmtGK`2T ztIx$K?&Z~GTv1$E9&L3eW7*AOz6M4DHz%y+9$S5Df4~rTZuL0~4Vd@OM2R$fY?$qq z3#suZqJsJT_Q2t8a68~`NFwfVU@C~#b5O!naRG6Hi);Pbck>GmDXQ%VOZni}ild;f z-{tM;d|b$_cZx-O&Ym6n?A#p$wLHR-gIIUs_^`@S?-~TYYsR+EP(sf{ZpLr`~7&_ znHmEIyZS0EIxH!r)8nkWh$!za5M4xhH`c{|eEQy-M=SU5|Lu<}FCW7Iwt*l#h*F;s zR2n#I2sEte1dl-8wOXv#nTq%Ha7~>yK`?=go7HO!4iem%-r><`V#BC5L}W-og+h88 zXXENPn*siCHYNJ}o}StkHX03uvV1X&cj@@-RADQ+|4e4kf4y_(**0R(URzfVie2s2;>o^MQ3FXCCOo*94cQpB0}9NAv}7$ zf=ypujE3XTpHj(q)M#L{)W>+NsH~_sTiNjSZ$=xyKm6Rsc=Vc+L9BaZ0xSsr^yi7$ z>x&BuBRiL`{2qo1o-7UrJdL=&E0_e!53WXy@Msmpb|&tT%|laP43Fq_=uj19WtcdV zu!Dt(QJ%W`gkOqY^fvY}n1bbVnApTc9>f`E+-VTS<iRNz9~BY7F_X?5fWO7w!Tjp&S2;=f z`2NbHH?M~#C#UL$5mNq{{y~87p@Yo(;;;G@q2bV`Y_!o- zchjro+V!mKLThUf$7EE(d>uiUJ`<0VlYX4SdM)v~7GClSX@tO|QaW8+M!8e=mMDSD zYdI$h9gkl=TKVZ-ERpPTOM;P9+bJKS6Fi8s!Z6+fqd3@mJjzE>p-!@TlU37E3pykd zMM+UNp|jSI{r`i>&xb0>f>$&Xcv4Ess*m5@fV=cpxn;4G22YOM)D|WGkEnQ3rc(87 zw5xMw@@-kb^Y`kebf3+|EQ<_O)}@1FcYx(|Pr~yFc$-pPgMm03~U>O>gE99;o6H4C~OGg{|gG{DaDuGVn zU!}s-e==gzcRek!d*9!>^>WbP)fZFivyvL8g0OMsJsv?AhJC=JT5GMcS7D*8@#3K` z>Y7m->WnBth7OC8BoR9M2_7*eI;TR5(LkEbVG8f-(}!7#0<+T#C}^-@H)oDU3k8%` zb39Io zvLw#PYMb6(3#mk5f=ToxhTcB>$MYXwJbdwTI>wCF$TpTZ2K^~{Argg-IC}q@@@)Zr zYM-XOO;fhDZ|FJcU;p3p$8-)`&&Xe{T!ST#mq#LPt$16boo>4jw|Bz^yxGOO!>L}J zMtJ0mH8c*o8jd5BS9h*V%=J4hhA)LjtpW^wyqQh7!}YevuB~SNlm2EX{VDwZ=oHGy z2b-JAnWgm6F5(hPqBksQl^P+7;JCWS(=n!fsVKVqlv%!i3{#ZCs24sPj|vLMdr1(| zgiTmuJ?-$OVJc>0<2D?IqjWYDnksPJgM)*wS604%{n(YH8a<Z|kIB{KF61$LJHK=oV;dw|xV2vW>9nW{C5;Y79m$B(v6Hv84~@ z*XEBvim`XFnqQM!S@Y}Z29SRtqS9;B*VNL9*wFL04_`cge(BQmP+!8IRGH9MgU#w! 
z*vY~;7DheZ?RN!j$}jjeSD&k{tgb*zyYm#k#&De@FuZX6%9CqXCKkH~f|A3iv7)CX zxwDZbl(!$uE{=3t=%gM!1*4UA0Uk+?)q{uX7-ji_cm#hRMGn{F%;bbfdJyj4BWhb zcD@$Oyj;4O&b!)Nv{tYMX3~2bYvrlw&2)Q*i%B>Kho&$6v~ugsrJvo{_V@)V8z*D2 z-nfTx)WfhgORyRvSxIuF2WB0FFzkC(%6dV#MKzt`*AN@aYloYgzX3dI6dIciC3!u1 zy;vgFGq!-j;Lgs^cjp+Nk7#KT1Z!B5++LAOIfoZ#VKb|v{wt%v308UDm?cR{Id}QW z<;Z~AV=$_~s8eq=mxY6@N%r+Guc20$&4gOpi^0?zYm&XJ6|#S}oCHv+GNEXdE9S~A z6H!c;UgdAx$b&JR>|)wTj5ux= z*yAIVwO!kX@vcw!V~xKS+l#@*c42MFUR!p+0-OLMJU9u`y0UnP&?(_sB}J+fqUa9A zRfMAxs!tAyLQ$kRc>p35$yY}rls;5_Xv)K>Iz?&ko~k}nl{&NLsx&(F(w-5LkpSE4 zpJ(R3JM$mb!WLda5ENa>RLriA@|At`B(u@1gBKy4)^i+)NG;|1f!mah4dEiJh}z!k zbjtmN;(tBgqmpnWWOBwpYE(k^D5&>;cig(BwFc9~xj)>#eeK%kkIzj!cV^?Wu4!wZ zn}AdnNSa1QoLZ;EBgB530f#r6>u|d%g6QJ7CSCmC=ISaW)xFID45y#e4vvm)ZqFwm zj6+nW2&K81bdSJ)G1-UgBn3R4A`~boD9UZ)ATr*<3w~$6{QBr7&iDb1t4EsyGef}{ zJ*Q_ww|vo2L%dC<)Eo1z4>*TXX-LPqDM%M#8|UxK4#(`FkVhBI)g^gzeQCcg=6IKa z)P%jac$Wci-2MHza|<5M=NM!dBW^DNod{1s?)O{qrO_B4Nm(=5c)M`Q{X6?{>48xnRFt%c)d` zWNK@9d23F=s5?0ur;R8bQrHj_?QxCE<%@P0ll==xkAio3*>sF?lMEtFH9b@sHF*-(NNye_^_6!U~m z7m3(On|stP;M|5@!S8H95L5*FQ&FFdY;D5P&bwmq!&`vkMqPASGD&z~mxDv2y-1@6 zSq7-TxozMb&-X|rAEDKroOe-;`U_&iy<%ynJj+q};pdO9-MjbM-~KX@Wavv-1)G2b zjm4E2y)7abL*$A!RvNlh$kai#``O%_HA>l{HX$5pG;;NlsTJytPRh-(4vj{|b-T0Kp0NIBpb-^9{DQ2Lk4R+1#e+Y?XCT zZj7^%;gm{+ttyfoA4D;wl2FLe@jEOeH2GrXq+?`h>9AVK6;z%)M$c}Dj{jJIThea$ zVH=~RwR*SDh5ou)xf7lJ)U*tDhFH|i8r&%@Tkz>z7b6%_I&H(Czl`H z@T=u&*_-Pi8pq!~x%FUq`SR3NIB1jBu3-NV$%ey**pD7uygW5Wk!B_CrDu>wmCdGt zI@yy)TbhD55gbwzKHfFk9%1B?BLpYYsnzh}y~txaX^Kv}G>eUqxoUmCibEcyMoYl; z{jaRfbbrueGdfe*o^DR#$v_#E6S2YXPSF*K$I=WJb-Ou7IH8Nrme*IR6&uqZX!i3i zb2_Fk)DO|wwpd?BGerSPZd{_`;M8{VJ9YkluOB5%Oe2ppflJigqdd0zV54EBV2s;_ zv(iMuy{ESyGCFS5Ihe_05)K`Q-m0Vqn^$PMgTs7*!Glz1W_tR7N28)UpI_YEJGpKm zi4fGptKSUXC(Z^0uYiMmiSBIH2#14Hbar_XjzpLh43X2lnVMv)vnm06P#eo2o^ZD| zh9U!A9z3$VlP2ESG|N(tDbA-tNgGtI2TBh)AFz$4i|MEhmRs6(kHY4nPQj6CP> zl&j4ajw0TQ2lS=Mk1szt|Kj}l3pcL%QOJctVd~Pe%NH+9%|V&%I7M$P z8B3@692CTdcXk%NMp|pV;#>5lQ{&@WsCuGqt+tmWK-X$Ou=)6CS8#FVOyla>#>VTr zulB2l(w>dd;o(l1R~d~iT{2~c+Q!vtkJ(SQ!j$%N!|9}5rxVPKGsS95QLze=x)>H@ zbTcRol1caYa%cm-u~b@uk2f#ik|2r4HwWPVFCLXRFRyI)HEZdfmd@!YpSyF2<>Q?- z)1b#Ne(p~g9zT2f=|gowAQ`enuWveSq;UNaT2avwk4-8GN8mANAyeseGC7pU<5lHV zbRq$Yj<=7GkB`3Tx`C2%8(U-dwvRDdZid;@nZTf)FbXw?Ah2T~mIGs(d(xN-4(88q zh63^0CKinIiG(1SHM+JIU{QY3mqFl{RJgv)qpa6yN}8O$@~idr_5F%iF29t9!IoaX z{^{R$cfVNZ@wG6_Y#Gv7&daj4(N5YD2#|E2->(XXKu_T5NGlv20^OyxCR5~Hccbz}L-jmay+eo9W7EsD2!l+75dQ;%>d_0IvwNvAy+3<-R6@~;=qKU(wg zI11NmhfE~zC0WSKv7w1%0Y6)4LQ~|83ik(mxmQ zHXOW4hxM0b6nFcjGo?~#Wd#Ily`1BDp>}I=u~38$qpKj+8}P1FrnNCflL!r{fyc0s z-(R}B6uUnaJ1mQIN%Y);K_Wsw5#P?c#N3F;pX)Fiej!J=AcBI73wHcu=Zd*X_3%1KMsGWJg#sScGOEw-Q)$d*^!iy85$Uym+uRHJPOen{^5heX%uX^YVOCizy9{o5G1r zi#a4Pv*Ly{xN%sA!ei0vEsD~qtgn0!&KP}*-L7QiEiOJ+u-Y@3lQuBW0u}U7l74yP z?#p5s3P3=5Cs)Xc6|qpvmFto_Vq-vQNAwwma99W=?5UXq)`&YiI}-}`_)n>De^F0F zZQ)Ov1Mi9jevEZ9GHjj2Ed{7!!ZQ7T5TXC zY;HpR_>qKXZF~Fp=;)t~^nHMns9|a@8Wn`pNLCq|KY*g~U#wkkNLz~)k680;UY%F( zOY#~`!ppy#<>sHsz1Ji)AzlN?O-O2`G0!GgKcpH^7UJXKR@X0uLcz7=#a4JrZBat4 z$m`prrmsWGT#K74ZPzg#`|x}Y9;LXhD`VyD!x-$Gn+RQB8L}>dAgILT=Q-zh&iVaL z)z=BYySV7mX|(=m)YKU_5Rk2-s)k0qUkG?~U{A1iBI%8Pek*Ab=zza zvQTjUaBc0Y`)mJrHZgVP3?%VK;y&GMo+LW$c9oo_fY(dWWD>F-E+=~HG}Y;$oY;du zt`urcU)m|D3kH+tCchYdo=SD) zp6J#ivGjuKZ6Q!M5__Vn>yCb?Iift8aLu{^Q3xU9xTS3R= znbk8Bn|X^VZB%IV?=+hnA+CwY#q`w)*wkE^x67y)B@yx1BzaprN+ea{XeAlMqf*w{ zuC$#_-W?37BjZQEescBdt>d>o{q)nnEdN#(^`}h{y_CVt%IXW-L*Puhj#4VkW}En) zMLM`kg^eMkE*G{%u?pna4S#RiX9+=ecp%c5V!+4x7w>%guiXas_RsD>))oSbi&hqU zJIG)jpyogfuKtKpBWdk(^aS4vHu=yyuz{%JvEG4(TK@%)1Z5($`&Eq+SFbfH)9z1= 
z+YAvRw}uN8U#-nn;7zI7q~hLXRv6=Xg&dMk6_2#0qeTj#O;gt+mjlS4wA+LKBWEWa zKGsRqC6hp{AD#>9Xl0v5x=-5iT|6>&yZ+-Fr_SOud*sxqFGkLy!TCx2@{=R9r_oY? zM=M3g!!#A)QP@`9{;Qc5&LNCMc$P-pY1SVVNNJd^!&4DV!qZ-^0Dmi$u0trbx0+~L z)@eC5eYe{j3KR-6*}qoirbHFo6beG1s9so~n8JY|u)fVQpW!$)9aohrmn)+od6$uB z)9c@nnp7R$4kDM+<*Rsnmd`L06{dTnYWo4^6+hVj4UZ0SXoNEsQe4t8*b-XcD8YD0 z8l`eyKl#n?mXE{7`0|k>sb;{Vyp!omvxixylXZ&m!r?Ib0FSny*Ne&9$cn;yMC0QJ z*sAGbt$l$^CgPHtz0+JTp!3cUt2G#6eUQ*anbqe@IR9H(+6E4ePENS6^Y{lF8~9rw zhC+E6)wi|DI(s5|^Y=q~MQZ~DDcSypdZy(7;Boiy=6tOH2%%T2zpw9b+UJPY|2TEX zY)PzD6*5Q1@)o{~N^Y^}udG5%HWq-2x*iY78Xb0?qjNxadg^G}l&6boYmvUW-zH@AspKao&Z3?9%F6KY!xMO|5YzYMM~RrW*^Oo*qVAW2 znhp5x+@nWTkD02+Yo)eDKd}%j{c>0UntH0HL?Vqc~It8fSGlB$_sylTa=#)m|4%gpdC5>eVO9 z;h3E29S1mehXj?2()!u?Yk#^n?_^nsj6R>k!Lo6u&gS)+@wq^%ef;?G_BOgS4GA(? zBcK=XMoa zF;9XPvI`{h&Hl`rg22j7CbC>>r*SIW@d;BiWxk{i}~^>3j0DO{di<_3oK`wmbubYZW)H zceVhPbJw2%Jt<(DyJiUc;h$HdiG+^N>tk~H_tY6AU=ylA?o)* z=rtI01oi;%rnZ`~x_9SKxc>uqWOmOhG)wi(qL%%&d@UEM8aeYqYVIWGs1rjdPrg=rmTE#pdPZ_0c>T3wwNS-V1nCwCW5rp=ePb z11bb05oak9aTp~1zu>V)r_@5U3dduV%OMB>L1@*+V!83-P&)ql*7A7DYcCXv);`MB z$wU|pH861P;=Ox6y?yh=Q^>djy)63z!{vaJ6(2*}H*a1%|I5DvL2+BSP^dlajI*rY z=V#Nt^was5=kMJEM9x1QfNvOL;m0pQ`li}c+K3MjDT?+#zW}x5hQgtuu*Dw?+7ORM z#AB1g>R0sq2Of9tj!RL7hoaEe@ks0N28~o>NC55cQQHH4XYQZ(zx?JG_rH7*bJ=II z1*5CJy#DAi%7Yd*vC5w~0Bt~$zf+Zh$iDuyHuj)xC2I`>6HLp+h=ilOd32c^*+2u3&mofL1{4jsGFaCHdjKsxy|Xpk5e(Kg*ohqSa7$#vO42{*8%8e zZS_hDRfuVyQ9|_S5}JdB?6-d_U4}w*w#{MmF2wcYaWD;7!_4l6|}?qI9A>^uOOUC)(@`?bPu zAzxn4<*TeDu^cPymdoqe`E2DpdmiD*N!3ceQa~lYgZ*N@oV`DPe;!~iVTasL=|tDCwAZa_7n8iRo#VAT}6+9#6n*Zpxr;v?;fH zDwBD-uu@kD5AS}ryu7?Lxv?SlS*6Z8ty*o{8+2^MT>t)j!`~r6-+1MzFZu#4smGag(?0%ts%7_`^q^dw3h+n1~3kkB(kO zL0&rT?j=2E-v#E~f*ms&-y3R6Pt7hZqiG6jOFfBZg11>Z)cnS^%WKQL0fRfY%2gp1 z+Q+8|fS=IN#~?0q_JVKt*8*B@32KL_pI>@uMKRVxz0uk8V8L9$#JE8W~?5UA=u3 zRb-xzPX|PLdL)4lq))f$`do2|lXi=x+&VyYfB)d1k}s9=rF<6nTrcleD#d(hr&P`X zaoJqC_^P(ISFIMZ0B#OYgkR0?*D6_rV@3Ykot>RLm|Eoe`_p4?i1E&Uh33kC_Wk*| z-ioq@GLIgcy&pWcT|t6=k9z-~$2Q$-8Wr6w-O8iNmhPXM|EN&tNGoc25742}$4~CO znwUNooE`f_Xzxq(_<%=qC;2`upm}^QPE6G&6n!8UH5OOa)*hmHZWk7oY@LBX!yE8u zhyU5&ryrvYV1Kw}^*a0*?!-~Yt&eOmVTX96XX@rVOTiajt^L-QW>O0R=!K|b4yFJIR6yI z9RnJ=4aAj&!R2lY)Lx(og`!q#PtenA=%7u)Y29+($x#I3dxy8lYnm}|1uD!OHEaLNBbs^@cwH}Sz+5hqcp^@T*rELj zC@o45I35fhC*i6Sfa4QyI(Y8$d!a}~=kt*`*bJIxEWNg|-e5SM^fQ@yFYl$bg_Ua% zv(@vyXqC^HU2T5@9toT@{syJE|M>Bz7p)isTR2Vtj~<_sKFni_5&$EA%FuB1)M2A#qXO~7-eR}yx+%RY<~tpjdmjDNaz zVe#%#YI1TG^*gLBPbJ|KdOX2a1}#FAal}w+9kgeVRCJq6uI{c*-#b?AmNzMa3z9`o z)JprQ4ozq}L{8Sv8!t}UBC$vdiQ6d(H4LEaDrFyF^s!Ks4g-+?xi;O{a_p^n90v3F z_}d5Qkk-u1gBw?G+_+IcK=|^(_(&fei9{Unq$V0RCt0bsQz`<7g-WToC;1J*CbYu5 zf`FT^eEof)w!go(TUnox=OJFw7L$MF!4^0C@@oIAmREjq*_ ziK{7!B5<3>qM{>!`1>h<~#MsW8Ff{=cnG_;3> zu@o9xxBvnfV#lYMD}KtPx9Uh;BQ5vdqRrrDy>+k^jRMeWX~N#sZPf-XWLL}^GpW&1 z6}3jw$~c`t9L#nvpF%S4`t`NRx!$k~O)eMh z0vpCT3Sa@V<%9Z4tD85gIV7R0Qqtw+BI0gw9hu9bl+-k9+YJmn{HHlq>@XPO9LKV< zHO*CO;Jv|{7bKJps#GO0+~#az{1ip&8~uJvp+j>9)P%i>X?2pQp+_b22x>>GtFN)r zXgJZ46dkB?e27O9B&-)xk)9)p(CFWPyz}GZiCEx$1Kp@s>RqD1GiFe8O*YFI2Ev1i z`{R9{%s;Lzt}O7K2Gv~3mRXjoH((!^dIN7%pTYTagSUS8E!qnE8P>0!`2gw(5T_{;loRpRu)n= zL1i2<8g<=vyM@xw5IisfN|uh<-hEGJP>Mo&ZewL}@$MpMpSu|o=dgp|(YEOt(Ta@* zH7dY^mLEhd&dqwz$mxOhgpr>W8cgPxUS9r<6NJUq(-Um!zzk#QOiE5irZT*3fRK5d zLOen^55Aoc@Xn!AJg;a_ARKWuI!W82P@zM67$N68R5Fj~JCk2GA4P3;Xj+i`=IeZ{ ze69cj*O4eH?2e$#mos&PxOw&Q_-HKR2#vm&dHM3?gWDqsh1lZZhuOV+zFaHr?9`-) zB#EYQ&#ALL20g8D_+3eft(CLo&%Tfsteu~_!h-euH*HrF+D3YY<=C27ifqkjGO`tG zq$J}RDY7h$q#6Brq*(TVB^gVyP-{kG5+B?|5QB-AWuZwg&83iCElZR@lC^?G8g?ly zD0oR-imN5T+f6B?L6kt7?V&6_G{pxym-N#28+jLELvz{sV6exd@ytBm`@GNly+5sn 
zGCemgJBp&zr~$V(8zOFZ3@}5UtK|Gmt2Nz?;gSfaHk!2>9OZdX-gR&>TeVuX)@*|< zrD>nX&rF|Zls1sJsU?yaiC+I1gJOO&KQW+5@Zq9?aoer`b?udm^40#Ekp31TQ(}CsH z4?ewq{e#uTC3{fsJj~a}(J1yol$bl#!J}_E{Qf7u`02%;UHRnaG34N47M#@?KzteW z*3g8{M^X-h(;hyDDM@w5TVO}Uc~O#zq~OzRiP;O&_1NE@$dUjIQmSa--3W* zu~>AI80i%-)O{3{8xj`WVj^MfoAm~R;6XFgZ!IF20+c&-mdC&d0OXLpMOmW9XkQqu zHyTa*qu3sTrb!l6u~_JED$wn<5XA6sju%zGCsoFJir}yMXpg|qfuXcLA8|NvYc^5j za8$-gv@6NsT6OZnd_% z&5M9AwMfLy$m4UvIcI@gTG&LLNl;LGQ;f6OBq}ZF=lLiF@&g_<`(5zd+0kHD3xS5Z zVOTF01cL3vig;9uCGzc^Jx!ZeTi|IH&9cnI;j-fWAY4Zy(>l{PrD4N)D*IV0sjWX`$LRYA(S!Uu;OeI9Gv6qi`1 zV2!HLsH+zfyn?>h9&u6}8qZTsdXwbPKzgLWNByD2)qj1q_Vs-q@RZeh2#;e&kDTok z(DVN2?GR*YJo6p|x(>-2xfP03Ju8 zGeAu4J~F%b_1fK+i?U;eqT&fF0cZa2dWDJK?{6#Lh>c7|aXNs#gD2lruW)c4d#6t= zT(?t2L7*l%0c`{(g(E!rz&8Or2IO+tYBCCrxzQ0-O^_s$FZN6Et4X~sL)r&W&!v^p zY%Yf;|CK;NgH1#ZBiffPJuIOim|wj(KRYLd!fP!u* zQOa(!s+7(Qcytdqg3{L3=L?(rb@-d&^zi6VGUI>y{wbc%^StUR>~~Lr8%8&R5ow&G z;u5ds6*!D{dt{r)E1I(jbpK#?Tj7gR0l{@AA5Yt`@>ndb=bVG7Sil`Ix}1JN+-Yu9 zL7elvqHOQ98}%AGx7BWMZ*MCK->6mLlNypba=Nb^8<;9pRzS-9=RD6D1X=cF()l9G zMeGDsh`KPdK=zyMIKwb@PG63tuwb5by2ny_6;wtd>s!8c^y8%3gfxZ$xAMpd@@w@kk>kSBxy`6cd zG(sfaw39-f<FzldF z!Y5l)w-!D}dnzbq_^Xc>L<$Y-OpxP(BYg~j2C7kUgCnJ z92Quy@L+udFxEnWq-JZk@dgDm*WZXzyf;!dbDR_$l=~=E<)gTT759+yS*>oY1LfB{ zq0J{C^TAA_)6vg&Y7fK$7bts3X9W#O_&fNH$QWY4`6-jl%s6`qS#}tSvYZ<9uyNKH zlNqDkZgf)l!inSTXU#<5Bm0z5x6efJa=bw8k_Osq1lZSnM2)s`}`w#vq^P zFj)FNa%pKRtGQD<{4_7A(PFNIY7PK@uU>rn?Ag6*uR9LowdYSqW=EM!BEy^^^9MrKxVz>Xn>3mhs3jhREL3@);L4|M9UliBjuRk^r&R z2hZ&7`^R~yzo-@>cI*HijZSAUNEM6uqATjc$v{fZf}E*0*PHc*!nb$#5X7pQ$pbd$ zz;Xpul{JOIYVrzbA*BKK@X3>Eb+5Izx3|0dW&`Q`Y6~R~}&L5ia`s_|->^_=1vAmp0rMn1iFx0@nJL(I4i^$vk zbZvE+MAf=*o&OUaJJ-duZlsjXp|x7pGg27Q7z10P~+K6)JEsS5C6Q7m>eD;ghP0wqCKpu zmthX#k?A$SU2(xKgV6{ki}uKN5Z;|dq^K@VQnufq$orjYa~tJu*l0Z~74z0O%UHpO zXaL|gp1?wRpa49P>Tj>t(6%}cKLCR(Efg2MvtHlb78kbE{f;PqeF{$52`&!E4jv1b z$83{KT9Ct%(%4&XHKcs8s4{^*vzf4aZ63iBai+OBhr?=2djCt?)y1@tU177pnApb5 zV8AwJ491wTjsM2+fHA|5@$6#a7!TfPt+5fHD!WS?R1~U|hmEvPm8w;jstQEftm+3V zqRCT3WHqI2R<06JWh2@qvR0K+ZIJSSRBgortSo^ldd?Wy#F0WDx}!V<&1kH9=eyte zx#xbTm!+8I9{ub6xwJ}USZVqG`L}v(8a{((l_iWzNB+wZAEs*na7qS4E>OL*sz~!|1~Nc!&@3eyi1%L0b$>K zYG|-V+ZF9vE2U~}Bdks#s^U1{DPH2E;r{7qqfaWxyWQEcHwhjB$7>*-vT-}5>^d$J zfMj+Tf)L88^7EK0S}RIP!AhcIT3m;_%lp}F++2^%loe6#&vn@CXPAtS=aZV&_GqoD zIphkAbGbZFYkn7am*>0dT5K6EA#t`ea5B?{E?(3HK}e)$tS1twjAwutmD(SFqNINh zz;wv=`qTH{|Fp~6*jClaw2oKeAOe9b>M5eQ4TYW68VOm48C)?1)6GQO{b$cMkIls= zMzQh5t(yzuQ_i$?KucrJ*?cM<33}?%uP$A>c8!gMLN?OvHh&j*M3*-p$1^NjKDYuP z4B2XcR}haF&05{rviNsknJ=9uFFZYIe2#~~6*HVH*E?dWZJg%2)w#X;z+9-h*NWh?0;zeWF z)Ov;jXB6+|IIal(M&aO4E-*WKJU3yci6lHKaW42Nra8FD!JGDRlXGRiD8X8mB+-{E zz(VBOglS`4m!7eO23<}Q;4#?ad%Yi069hE0RvUg-JM8F?^F1H@^v_oq=XfAtCdDNA zDh{xts<7EX@Gqq;S=bJX9&RVCt!KRxu|(9BO!`TEm)q>rjaTB4z}5gp zny$p7oE8mGv_&u(@zAZc!71qa0xdQEB<5U9CaYf(%jt0`SAs#T?e1U-XKihJ9f0FA zrrUU5LX>=dCTg+^VH8-`1gg{EznBTO*W(4U<;D3HtmN1@g2Dgp=;-KRE{0;)xMsJr z+sC1K3Ag0I7X|kQex+2}&F$rJS96yW1GU{fJ@~v!;_%ojlL=Pl!wnru3&b+v^B*gf z!wx0tNqzXyKjR6P%Y{0anhO0**Xk-tNun-h+~BatvYu61)TJD*DvMO2D5W|cw_Ja; z^!cUh4wNXZs%lanONgRq=9~0L;S~wjTLxL#R3oczo~8qOiXQ1pcQl&BOTwry(qZ`1 zftOOP)z*lbgv)>;nX;;;hz=vL32@17G8T^)Av9gi7UMIAkGHq8*$psWz$P%@CWGU_ z14Bnud=V8o)yH!K^N>hX!JP;2t~I2KjkU#iS?6@?=XgeNYwOM(ROUw2MHHgFz3S2) zOLtbq%L=2)vL5Nn<5z36YKt0%a-!~99{+zYN0Ti1ijS&MCsx^nZ$j-*m^Bz%0{&?e z(oSg`Py;S;emy58G$MWhE!;qjQ)PKdEC#jVE%}qF)dm9$1Lx17UR=CzcvQYN3dNlr zpCl&4o%I!%F1f4&L5Q2|4_69%`)gS&^4QpreVrW%egK9i7SR)ki*Z@3_BB0HAjJ;$ z?T^8YL_Mitt@5|O_>tQLVWpO}208LoePSJ^mu(POyW>|z`qR!pMBmxgW-?P%ruz1j zULR}jCOo4zuU-GcAD8~|=we4|Y+@n;{Ai&)w5+ypie59>(^IR6>-Al-{)|GeKTRi@ 
zIz_MfgdY{H$RQf^yJ*Zg+693&s;Ow$j=L2E$@oKlO^dVI8{*NBfscpr<-?7l6wE{i zI+CJnb1uvrK6`ff;$UwX8BZ=ejKzXUEQJt7i6?2RZIFrR-StzeQ#vOrq=j>wv;%>7~3-IdbE&4jEVY9@44#rT2OEmpdL zEz=`$GX92g83eGVg>7ghZJ8iC#VG7<>9^Zib9;9;V^cVLr!66V$YT_Fh){|$<8S08 z&N!di>SX6}fT)84wM|dUhFe zSg?8vdj~s`1i?8h-aQD6fG~?Wxu9-i8A5%eOYp^EC1in3Rq1puQK8oi2vjQV@BR=B zcZb7aknQ!A$q_riPHm#P8S+*A_G*u=#TN92MW2q7N(!?cmGCP;rWNvezugtt4inbCmnGv$RTJSeQM$O(=c%MkDIL-PWCH8!*O z^y$6Np8fT2PZxdC3Z~;80=wi3d%2xCDObp1X@oPZU>KDuFu$<6urOe(4T{o1ex*zo z`#w?RlefRvI_V{}wY9m)ox@Z5SQN%(#`M(qWsff4bm;;C-HF~&g`Ph_YgiodNnRhv zkEab3$AE(Z=epovx;C1 z*QQAb@37pIX{Jn7^-ObF)`sm3_X}0-t}BywzrK&NiM@&HAN z&m=`yyzBcpUW_I~VpQ_2iLOBl7YixXpAu#Ov?PM1dztR`0RZ^Eu7rj7Ir zn;K7S$Hrr`9@al&XBlRO9~+O&4A^GOVz7iU0-RAQ%bpeCz#&qQ5UHwMq8=Lgv=Na~ z6De(jgvCOg9AYAiD$#Q9Xs1BT7}`_U*P+` zci#W^c@$-NLBnSIbZc(WFojbFyb(P$U%ga+8sA5V3S|Oumcp+1A;ca!g=fR|e!h=) z{YUs9N-JuSa5$BQk2P~jHbVN-X@!+!&0mz!|A9SQ2Z!+yR#5~M$IQArue|*FuV3D| zgH5#)yezL{wr=U@_^7tBvQk<)0{vG4%NI|3+NOHg#I423tv)3`kuR^*YR6@;WlOc= z?OBbe{6<|pzS^$z4Yu=X35TxkS7)W@n3Gz(wYa!A6zJ2s>+D*0*9pE8!h;^N-PVvt zbSG`PI8{Z1E$S&E5U|{V9>6WMLg6W^5e_i6$?e z1JN}zqah+Vqk@w&x|-Y&+Bq@Bj4T?#$!E`|E`Ezc|FEu2@LwL4rCmAS2w*U?nU_>m z_O_;X^AJ#93(=j)R4O?wV^`ABqhp9|g#7EnLo82yQQOR`h5lsfB;Uu!niKhvmJt6C z0dg&8h6@I78hLm~gB1?<$NWD$&rkK3dl%+zU!I#Qj6R!ccX?V{$L9kYbWm@vYkqE{ zSf3)eJRj)+X+&Nky9F6TjK)yt6h0(S4C7QFkqC)+A?VyGJVf^Q^C5vUZSXAV?3%sS zNQB^WnIcYgT2(`0ceg7e%fi75Iy#SPn-B&cK$KVpF{_ozn6gMH!Sjd#_4xYZZ!b=` z#56Bx??Swpsci3q1Y@ds3C=@SbHhO%u^^Ub^Ap1U5&W{(k_5JXy}CVH)#gsCdm3iR z-tIU97G^rLJ3Cw1)|gQhb{uDb`U1W6wd;&N-$@=OxW!;Qe^@^zenE$lMqOiF7K7hl z2lI<8R`B)kpJq}9R%S^e))|X2e2YWuPr68WuE-E(!EI8OA?^j2g&|c5w5QP?i^c5V zEO89oLLjnF{c=u>UAc7O#dm-E>bqyt=>Zi&qoZZqyq_3FAB zD@*EpU*B#;%5T=teX6bC;0x^dcJgs$CNc`^b~}b{^|c19giT|YBiq_et{)VlP2-DuXZ>&a;_zsjNfQ!K z5vKYpbXSa~q9~8y&Q6bsYvM$Pc5@tgj!_$F7`A_ACS~>b+)lYgj+^M%OpNBZs5#u( zW#eQ@j}1OtG&(VL1;HJ^dK34FaR2}y07*naRB@?C?CsX8!n7pG8OCh(MpOwmYrUM! 
zUJDyJH&Y1HDGQ@_Gj@-ajM(fPCPL+GLDXlR5kbIo!=qYR@W*Bf;gfuEEeIg4DM#ez z(&FpXP(~4Yrr=&;kOcC_{E<9w509*EAtrnA$>n**>^3lcLn8~zb8~Zui1+zcu`!kZ zyN%z>XX0WCZ+2a?58@hwK@?Bn(-h$*b_5ckkS~|Z1q~tAaSD&6{r!BJmLHfo>-y2a zYl_3jM-&w+(t;$&a(*A|u9lHrFCrxW(m8v*TJFRKPr`K5Nqzd+Ji89uc{g7n>VX_vtaxC<}16E ziX9@15DcMWti-= z5{>Y-u+PXD-AW{D$tvB+R3YL_Pe-vZuzn!f3WEcJz@6mNJo7;dCVg5!CO>Y>bM=kO zLbTm;wwq`lwOT>5S!s~8<%N-6WW!jvyz%sTolNb?H**UMAc$*szhC{s-HnZJzW>MS z)nf7a?~$?e>A!B@9#4FtG2hsl5Jc7F(Cg9jK|jva@YmfW{6pZ@nN0ONJe>5IEgVI( z1rZC;$&z*6W8FLYfu>zLj^j*(o|OclMUdm1TO%iSu<^Jh${{4MH{w7S%hqHvSqTze zfWJ#dqLw&^7xwoF!U5JAUhluryzHIY68JbvJ9#4)b@HS;s|eZ2^asBl&Yj26K&_?m zv+H**%1T@AmxWszz2uE*MPb>5G&@@^VT$b-Bca-6sRmDe3#M?V0Ydhm4wARMT@j{y zu~hH;Cjljr_WBALDG?7DI&zbRL7&UP4CFe3?Oc1uWQUes0JTJpS_xP`X(q}4ub(iP zNn8a(~|55 zeXUFg@e_Q*KhcjnGTs-@=#hSf9-Hz9gXe>}{Q;--x1K!R zDq@Re@oI5(Rl|wUVDHxE5b;qjc8jn9&2_BN)>cHueB1Yh4@usHeTa5M2V^M+I_bt{ zh>U4pePB>W8k(?)o`FLEF|jg9aJpmZs4RqNQI#>SEenDlBNT6Fx1M%kGZYm!cNdZ= z*JN(em*zros+YDYd0|?R;g~^g^Ac9T!0oG*K&8Doz=o<)t+aB0hM~u7Yt3X>r71W3 z3kWFxe*Mp1zx?CB?|t_8`b(!%iFoHLT36VwZfH1+vuJ`3A&Nc*UHIzN(faykNt+m~ zm8;bQ_^Ce7#{3?DkI3UdfT#yWq_yceSP0xqb(}jbTJen{24s_OR7X1h$Jq6Sw3Vgt zWYsjab-fyHZjH$eV@$41jLA)I;>|Tzni$AGs%a21F(LYphk$|5!muy%HuzMr&}d<{ zjzdLi-4$iVSq?gIyNfm;JtTGo9{JJwE6$k;u8f^ZhyJ z_x(O(u~RA&^5c~(7q?ss?}yZo;;{cWa^_noHU3Pds5P!pI|h*Gi2~#l4dJ_51EHm% zf%KZE9jEn`6M(uC{o%L8c!fT5Aac zNBG(s6;X2;k3RILA@_{;mvyL8tTj8%+hjSxvLOgWZ7OZEx*ZRYItDE5I>8?HpWz`k zF;fNsJ}8yu>REViWR*rAL26^|Ms)9!z zw=+B)q{?dRKUM*2RFq9i@Mtj59C#g8(|jfn=;Bh@l+b5K?$*@Q^maCt2Ctj-r7(;+ z@C9Rua@r~gMl-E;m|0^g=ds#HtU)}7oma|84-VI-72em;_P$SwCfy?5*B%VH9nmOb zDVO86;Y*k9-yi+^&p$#=eSh@++S`9WU5m;-RC#!@dsiuKRvMD;?kX&?Nkuy6#lBKk zHRl_9vV<}vyOXH9c>B> z1qD2IKt!*1{f5U5^QL1f5RDItS2Ps~Z8G7Kj~B+Ng#|NuT(0FH4UiQ z2_lq(eN<`{I+bx$7OM#e{WKo73qnE$Cp{C+C*+(DoCqgcsg}SG1)h@JG)0cYQLp#o zAUZ}SmK;t3B4O1(ty`hoB8M%fG&aofC{450`p0w{!jP1u@O8?!j{G>F!)g$_3rXz#kfn4JI7}kf0F4>EYoA>T8BB6v38 zivZ8J8H`<_4vK0n004X1B&%K(?R40${j&9ZYiB{@aEnxK!QcP!quq{$3I8pQSd|DK9w%$;rT;@jKJ;oxe@87Lm=mUQf>hUfujV(QSM0&N{ zdx2**05L^y43Zn7WH(9B2zZ=_e*}Dgb|zfWp&}Gv@Qf|VC!L?iBR2TpmBql*ZxcK% zEvY^yLofO2X%6tnvnCb*sn?vq3K(VmaCEy|5G%Jd5hQ~@C-zYf*0RCClIGzmKxKzR zgp4H_&HV6{)5$Ph0hJ-f_F8i1lbtHP&f8Pij2^)tw zVIUrz874Z2y!de;_r@6sY-PzjO+NnZ*>mLbp!R+~+J1aZYu69#|=fn9h z^20-QW>chk96T0Lsv8X;Y2|&RCPId~O)npgk65&n)pZdoQ;PXIkLYOVxkdyi0IUo7 zQYlG`Ijhy)&DT-6oak_foB>x30EmaX*e^^4+gq)KhJZ%^0H;!Gc+xx9q_o&QrL|Et zbwEv^5CD&av6No#vNY)9a(6mr^klR9hyPrjL&rGvU>(4=KR>`JepN>Kv4h5Ln0Q(3FqlKUoZ(m@XLd98=@)ux5yS;)=Xh-|W z>ac-Xw@%POoTt|ddU(NfTJl*{jI%SEOeTX;e5S|7aW1UVNzu^@!w{e(J*bQUsZ`L7 ztyzb`G`@NBdT)3STUbBa8+O14VM(9%q#EM_2*y?I&dj+k(~P&jzs+v6<)lP-XXOq8 z@I4tjWLBFmRIAnZ1g-D=g5DJg9YZhW78Y&KDng)`tvp;@T-+cvVH*qnzNkYCO0r&e zs>{MSk~!K~SFS8T0&{(%STUC_4VoR0`to1!O$Le^U^;DFJI}Lpy+pJ49rm^cR`LW- zv|uU3qsVDaV9E#4yh8p$Jl5byX$pnN2`x?xt%6Y3+$bS8B$Yk*4;aM5{cQjoG6Fn} zlG~kf*W?;LtlwNlpFEBCbW?!+2rmF3>CL*qSH(hXa(;b!9daQ_F6YB}J_GjA z(c6&d_v$(~*WP`DjK;^0zrEnSx`aImUJz;8NfcHDGx-F5H?t&v9b=qrF2LJ8eD(Id zCkEDLjWsj`&qJV)b#pO35a~4-PC1iUsR7D1w(sbjv7r zhDBp(v9$O4A3v{ck_N#zz%0JTp25%l`)b&o9o>8O?DbwL%Aih?x8K!Dt7+JavbB{B zpk7Y(TBREHM>!{^FQST9NDlgEf3Z``*efa zrG|gJSRu!X-(d-D(chQJm^o3jnvH6U#b|cWF+1c>`S8NE%0e+L(3?r71CmJ7*+`(P zopEL|1F$$66KRf3EZS?mQd&@NqV_b7Wwj!~p`#iH3oZ&4*}0 zO(Xu}O3QxhcCb*l0qX^4_`64jDHz@;YT@b0r))x^(XMXD@B0OS=25 z%^&_-zYl%x<>ph}Fw);I5IOy0S~ZOR**D(-eK(-;EnL2DZf<%yI(n@-du=!|o*23I z`~ps9t;4#rGI*d9)BUd2)6_0cep%|&v!y(&^Supk?AN-YAq985N!VYfzr!u~_NPt+ z*nzCu9iSY(kbk_$*#xuB(zaq~)VC`=qY(B&{EC3D)a4yx^3Xi(s#Lzpq$rU*utglC zwu4*`1{rD_k2p61db+u|ost0fWQvhQWH^C#JCxZQ3A-^ENug|Lb2T-VwJC^4b@u5J 
zdWt^2U#;H9g$41?q%`)Ijm}Sv*{Rv;$Z%I8LVGa1OpFi2-TBTu9f>5mDjREyt62XI z9)#Mr4L`sfltEmJJ8)cF{y#cbYkg^TSAGCk5C8Pn_dfdhd%wJRxS(+XFV~I9wTGuO z-cTy$U`1eZoV`Kuu>or=$uTs`2KEM%M_h_*p+n&!NVL7(=s(;XO*AupAk%Jp9gh^^ z5$hv*W!E!kEI4;ckrD`i#1Ivx(JIubB)N@fF=N+K9MBx{`sNd)h_Dq2PJ^JC;->g%&m5C=!QDq3l5;SwGq+)@QOJbL=guE^lPXioFl zut!fMj^4a*=EBV?2@n(%@j+O*>m- ziUv}DG~u)9Dt6?wQQ}6!x-X~g*SZG+Y=`ck7{6ct*{C-GIADub2lu)>C^FeYat#Q7 z3{v?vye3TT#G_J(#~iiOK;Zp$$tf}9>j~dB18|9csh?*vfJX}ONL;_YMUxpyqY0Dt zVw|Pt;SwH3kC1A3_bF7urAO;a>+4Ho)gyx+R3nK*IM9Bi{X}M;7R@CR5w8i=yBHpb zi+pFNh(WmZiU@!^c@iME14rn|(404*KjTN2;Vsg=S9kE}Dyyn{7kI?NTR4m!c^79K zA+I;m6Dhh1ht3z;=|Q1=CP{%NGr3x4pc7z@l#`~z-jErZfzl!hf?Ko(Z0r#JEjXrF zWKB(?Y?Oxbyt1FuOBmo~^XYcV;jqxM%(rw$HHTp@r^{+Ki?h3ReGRgkVesyDiW_3> zB5hN~W4-0H`aO2!ug_k`bI`VQ-UBqh6}>YkQyxvv`TCu19{Lk4l<^4qi?e^hBRruP zjoPvxx3hz@cfCGFclo%;AWor%t%JioaYG%hPiWnKu|B`@@a~kFzIyL6T5gkczwfSo zQ@=I)-Q*lox94l{wI&P6WQ-y)+_XDy@5B_B5HeugIW^NFZH;Nc2E~CoVgO`=$%$cA zufd}%h_*cr##$fpp_;N~(ah|JZ%MYm!*mnZX!TemD_y7Gv}oszlEDkY_LQ1A5(?XB z!w`5{6usUe@JEJn6vN?!wLI}j(zaE z6CXN1L%eaQ2{2i7`As@$tq+z2+@1Vm;ge z;Og3RCRfQMwF(NF<+WNy13gozZGZCWMg~NcybC<$;1zg#XAU1aJkw{;Xjc_?dw?0Y zOF|s+i0ODHsTBi6nnOU4WB5m zHY;ao*+a1aT9!4qoMuU@Lr~)k&eo!~mECop5`4CsySsB*6JNHgX|-I+kEOr;p*~e~ z_EOrwNrJq`8O*dgd8f0<1U%50%DBi~45z?ndBYe|$+ahF>up&*#CdyqBKx`$!-pI56R)oa+IZvO*ER$sh$e!n`ECKBb!sVgMikBDY6RdD(0WIK9KQD`~R znv6}f_d0?0CZdT@Xb*12@YYl=NIY=NxZT=-c=Rx+Ytfqy8=dntcoYOnlbvd^?JyQu zu$hD1DL>%REs8~pY&06Yv&G|-6pKZq1=cQs3gh!`UKFho)jW{US&^4Zc)H^bN4(|+ zg;5k;*KJTN;{5}%;N-Ysya&qS@jo1U^`H6qc_bz)&t4rn2|!tX_r%*rj~*QvnR@s5 zu~)BFRw9+k>C?H${=&u88#n$w)8`5JLA~U(4-cK625nM_f_PZnfg{wvT1C?e07!V_ zL!TuVwB!X-u2%bZ?c-@ijzc-pwu|XnEeOAncNhFIfpKG^FwvSkazG-X#XN>?x4I;H zFiuDbZnidxw1nZ09*KC~G@7hL^WOJd=70Up^~OdG+sXqO06CxlZuI;S<2;nS%v4ors&1~{yDN;XShY$m$=@pvMV1z@MtGD+tE9nZnE@aqyn&btPRS2wuzmG!PB9h%k6cVvE6b(!jcZyE2#qMV z`2AM4QO3Pk_*pG2PA6Wku*hOw&F6W5XfSB*4*8oT$>=0_EPBmemg_w*_vzgEPNTnP zaG;#+pBfptj|r?FoCLw~`9F`n_0Ie6AKchxdCN%ne(~G2jYV9auhz(dJ`2=u`Nmpd zW=`U|-mcUx{_DmKAdG9s2^WzQeuO2jmZ=;aj(Gi<+E=v`cpAbPZaS132a-`x&;h`Jx?>)@Ea}j|B}32!6`c>xU0+DEzEY_qv9g>mCc zm9umx#Rc~IwDC&RVi>%k_V;6hp6waDJ3L%{{17lmB=i|*S{H6VdqGCDo{gj@Z;^Dr zQ-m^Jo=mnE3jbs5d|;YB(>PuZt%x9P$*<*4TV_jvwe+8De``zW*3tyq21MeH)Dlh; zqC^Y76tl%Fxih*nI)i$n!g9yZQwh{?%b^&=1f1sX@bqwKWb?pO-Q0qunb6JA)2!y6 z=l#KQx#Xg+#`M=eN$v0RKF{-g-uHQ47p)1L+kWoK#6}q#rd5eVjx?CHDjS8pzOeCh zxl^M>myPYygc3SW55&MF9*@M~bJMt7ffjibyh>k-x%uk~akf$5gE?HKzAO)Dmt?|^ z!Qor5u^sq8qZ>PLQ|ohyS-!4K86YRjYWDw zp`OTHg!I}N&^q;LXzI(bGti)}Ru$E2g9+cG?$fwpWZ$~0=6 z;qI_gN%^Vl*UE<*?7Zq~Z<({Jn+92i3DYVLrV7YOk(3JV0Io%mE>9rTG`NULQJ}VX z+FYlG4?J32*gfMdu~KWT^KUlUNc{+I{51J}=xBX)>wRoHVl1R&E2a8PZW=VXobZS( zjxukMqXdFlG+pI-vkeCaz<9rzPOO@H|C(t``_TQ6+=&Ex+5WO94~8^NcJ zKMY(M8YFFW-0-CwGPBKA8R+UNs;xGgNyWCRSlzC!_O`T8{Jagu+V2t9Z=Gak<`x|9=L?iv?UsBNN8vB2IU`0)V5zj6sn0 zw3}Ai4S|3`)AarKziukC$Txw-%+=5|+%%Xz^6X3=1!}|IY2PfD^ZDrlkH=bPYy15t ztX3I6wg2axz}RF6ctYCHDBhh4h2}#eH^^g*_$E=Bus}|Z%#$%SH5Hl~i9{mv^Ut3@ zpPx-6Zj6mBMWYG0oS#oD1%sV1=RBT9XP1V7i%x?}O_|L_>Y8?y-()J?yH`}FbqDBf z#%1*D>TGi3Yvdn;!AeziahbE5*;edjd930{v)4&98Yl9|!%j}PZdwZWg7|L%i)V(? 
zsVU;+=p8w>8l4UzI6R+XF3Ij@zkI$w`Qx;&&S$b=mHIT+A>&@`lWdmoq|fA0>I?J1 z_>lY?pJye7=%mi{GFHWz6zS<13Y!T#AFR#6M4es_=6HoiT#V#WRGL&T*K?=F2c{?D(H<5OHO0o`n*a0)RJyoHh z36JJd@UkT!Y&fg3WU#QaGq}`+2d)_0@DrFYC-o2u3>I43b|8(w*3{I<*ucOZ z7z5V`Vl03PsJz~|X?R8fh}w;Apl*3o0t?s?MF z^%&cI_~f%6pIy9&+XG2gne$hlk)?Cs;Gnmvi##UX z)(aON+#c^A92_6NefwVzT3WpRg*$^C9fJ?IV->-bTdaWoT?e+gtJId(@(jV|Zib=# zCcCmR(=2TQ5{AEYYSCF}!G&Xp9(HOP*|e2PRg9Y!xoaVKMIeQ_>m(dXQJ^mq)yl92 zDGSR{rB$M;SMg|Zlr>c}HI?Z}wniTQucZz!TdW@)9$v}pfwdeRz56kU2xTtd$h7wM z#-dEOqZnix#~xgYCT3?7fJks+`_P?KYW5*ZP9!n{UUrt)xRDzP9J8~_gu!E=YG8~t zd2l(46ynDsfMFsLizQ--1X2ip;JxJ2Gx5d6#V2tf6W)o}GYI5TXD|>51{)9l{PzQ2 ze|s<;CWc0-@wj*Icz;K$ORT7O8Hc5eq;2Ob&IGUv(0F>EbYx@t8KCJ*^MM4o@dmI^wP(J09&tv9>HSp_bK z%RzX=R+?D~Jb@hI$125-#xQM%a4FmrhIk%g ztKRVPVAs6DVTRUoIh0B!l9bc5UJ5+Y9Lj37V>SW|oCSDPmGU5}V3(IgB7oCq&~zCf z;-PSRUA8eDQy1ji4K75Y;pJ$5MMuCa_+WH&_?PkW<-M0L55rS1cI_G+#mnFBeEPJn zk7?cmW+@IF^f@)Tc0&-&)C>YL8=G0NV>2_v@Ak~&^YG6+K7o)D=4Mt59lm#F`K1|z zG8)^rxOg@mTMB9#gG{h57C(FBNIb?gGmK-KlXWPz&#ut28^hh)bI&ZX^bEG!!*hQbtTU)F0>XsXGOeY^b>VM&D z0U~SAQ=hqd0zCX2Y2Xe5#>(a9fSrLEl10@PtED_pn=)xtrl-7qW35!YsVK9$Si}cy zT3Xz$Zjd+|19hea;H*Enu#m)%DVaRJd`K@zDyCAY(~(He>C_$YVGBb;LwFfE9S`!Q zQmLL^_KbgK%@9UVJs3VsF2`D|+{6?wo?$C1w)Dj!y-`M~Em@r-m5U_J&F7k(`Kwp+ zN{c9k)l7RDDJe9d0&0+Bm{C#u8%$Vg-?x)dcQ515Ka z5I-^^k@(_A-#m%KOEENRaX@Twe>@uf4_ntC(?ptvZ#Ao{obkfqKbA#`6{w1~ zZo%$hYd~nwrr>2YxpO%!a&=O|9vU)rk206ITZk;(5Xy*#U@lz5AFP*XPP-Xy<%sB_ zH_IAww5d4I#4XihyQCraydTv++KI}ivf6L2qI z1`CeV2Eu{b(CDY-G<$F$CIfLo%<1DepO1@1&?{Mp1fxw&QS?M|c^W(-9}G5O#o0PV zka<~FlzOGUzFtuTm&eoK65!X=)EtRLuqR$t4kYC0Y&(DXJ*0Q%(Q93AYzH*oTl)d< zn44=fn$-<7O2Yxirl)}7lIByUM&uTKym zs_Dh$r=6YjxD`a-i_WK&>XP{&HMM$~roy~4E!nbb=?-nz=ky>$ms*>wUsax$3@R+5 z8&>f$GEY0H{PlAzzMz?GD^=His5V0!u1s68VvW)G>%|+3i#NQreo->r1=AOm4j^eW zrDhiv7r$w=w>0lfQYCIMQt`S6U$6%he_}=uq)Qi!_<+TfZZO`}!M4$9yREwJ{5ED;vZ#e$XBk83uo%z1p5tP<^qzeK>x9<7J*Z zD|cv%>Si1@IXN-nc89~~ zfket$9z7x!69omNp10LjU8P98ix(89!O3x)6G4@*UFS?USQ7IKE-D0dL1_vGW3foY z&%1CWk(VSP5D)}x35a>HIT=rl9`qC4k_17Pc@F_l4$qmuw)OBrI2;JKwhFw5c|OAy zQ$^bA^WnSZtpy%;Ey>Jm$!z(O;c@qxb?dU#RHAA|txd}?q;K1+(X#TW+LA>{dsbJN zsc=@W_o3QaQNgNZ%uo*e_ZPQsgUWhYJpuskoK+1_C zVR=>o7j+MI(+5o<@FyqxZXO5o*_)@Qeqc&$y7^Ge|UN> zq)km9M!Erm_=5G_=^GjXZm>gUH*a3N_^he!_cZVx57>rASDs}E^ze!s_7}H8H~=82 z88#~cLqJ#vEKUS_dRFEEk;v(~y1EF%l!$%F01N|?%xFuNfG0AdI6z(G1-3?>gSrqm zevSbpm=ciu>_YIkB9T}m27dIrL=hnw*o}}8;u=I5OEFl4*b|*(O|=&!TvBKikZY%H z{(tui*UnqDaLId_R0^eKEqp>4%?h5o=B2b&(@#Z*dc8-rnaAJD)%A z>guAeP_Wt?NK#%rpP9LR4eHR_rRi_6WhE~*y=mT9ShTO$Y}RFG=N8yX)a*X8Rjbn( zbtY5JMoVc?X^Q2YoE)8Lr*(#k6n=iK3&%AkznxUB=eSjz-kHB`+tNLo3wNYAoeqxE z=arwzOV>NNAWv6mkkeG45O_&v%Dl*M5+^a*6^!@C zjbW-meC0L}@;&0KR<>`_1yPtJ(wX)nRwH0LqJEKW3YYx}5}>6ZHNuVh={muGb~xFJ zphRGe`5b7|ziQ=5Rqt=!^g14wy}JwY5tuAmyJQ(4iB(S@cFxSuOHSS~N`sks;a~+^ z0^Mz@(gK%#zsKu+G3Sn1)ewkQ_M#N*M@@+_Jv}4WmXNP6J;`zT88#z*wXCr0L z1N&2{E|$9glBTMrw*JKOLnfULz(4FI>m9QmHf1bWl$Pkj5b(h$mzSqkaiygoM7Tc3 z0sPEpG)OYL*lc5C<188ACW1{|5|$O^bX`E8X60CnUGWh{7jmE;fC^E* z3baC*jm+f-lidmq%#O>BO|1x{wA4M>1}uZckd4A>LLrL5Ol;9240;jGyg=vpJ$4Y3 z1cIX_MU|3=)^VIX`1+S6w`&Y-e9k<4)(%kK9Uhf$4)35$L2G5ICU1;s-csw@KiKN-O z&wA@I-I>2oY;giICkx9u;NA!4Ssa|+U~rs`KvhJdj=ZhwEQuZ z3NRvqd*p`)55Qt=?l86$*uaO{fCJiNMHL=aTS5kh+H39EKrp;=v7_T7Dn zAOGifl~!%>Kv~%pA~A7%{2%{4bLOijU;CnjDGy^Yhu*O5Ae}tGgwtCF28vTSfLRpB zgIu1$kdXC`9uOlIOK-{8lL(B(@FUTxr^jcKFp?kwD5FwNWm%VFAfW`;@u4mER}0o9 zT7YkaEl0}Im0L_X1&CX$)sdc%(X!iSOV@KnKK-sQ?KvBgmRmTq0=%3haY05x?#f@* zvtPLI$R9rV^tT^UHL-8t-GB1^_g{ZY>Q?CZ_)1w>Wo6lw|FLz2F==I4xV9bIX>F<% z#cGQRbS&b-TBsIV=@Zx>vM@gPHKf**xM?zM!?Y_z(%GGfd61N_e|E#Bsbqg(*Jifq 
zB=dtGW6~L8f_Z_!ABJp@4TgDa_LN27Iywt{TUQ2NMc{G5FxYD$G3K z&ELYzgQ*4+ad-dT7x$)p==wNrqdF&)651`oQ+OPELPct*K#qiyc!4XuU0ehPMTT|( zh!cVnE6G@5W4^|D(279;dlM3v*nF@eAdu$-ReT9k-10n!_*>vMx3`yyu~@9;Rrq4D zxU{sriLijyl|*85dub`Zxj|fArIJjdD$s>~k;e(M|9hM?j|gFU1;h<}DXHZ+Ug#Jx zLbzBc8I43D8x@cdHJjw8bm9nM#>Fr(dDiJH7FJd&fu-6S*<1fcy71#)Z@FB;BE|K> z&Q4*k3}tU`Z!=kejw;DWChv4Ij++NY4bVt2*CtK>dc}iXa+uDx+gXK_0zmSB3y*wC ze`3_ZFt0X1q}xaB=F~)wnnq0Rd#MJMS*2ZipJ1L!sG z|9SohjPb_ev{Lzf?@5o%hIj<}QG#z6*)10;kpkeH|8EfhH&4d$K!m~KNaR(*IZj48 z$m4`B@hCq!;5EQ(1c(KLd`(#hol1}^l@=ElLF8&?sf1%kR3o1!?1|^!6FSU%Ps%$n}7cJHTlgCoQRM$`Q>_= z$hdwu@S+k(I-RWjvc)iPvu4F6b>K%OO=^Q5tQa+=v1k>PQPl33joZ9l#xXi(QGq*B zc;lWp?HDwIFdUvSc@b0BL^8YG-rRCTdc$w?h}>bzlqC$1QxE8rrvAY`i)9?5fZ;Z& zqs}2|9LF5n;qZ*%BHc>sOjDM&mbNjCrR5`ut>dGyu*D*?%VeV$J59;~wOg;zxO;j; z|1X~T%{}qzuOIi?Z1spo!L*=xKvM$%0yl_XF?^#K%_C9}tr&d)*oYAe)@&G0{?|AX zPyH;DK<>b7t&u1%NL0T?)j|YWgQtLP@MaIeRs!luh`?ZI+V>A6TW{VHJihg9`Fzt` z&%b;6^zCn-050d|Ue7%&@dQ%~JWsNEM+4+87b?P%s%St!LM4D_xWV{kh+U2f%TJ*m z0apnWNI^LgNCs94XSeZ88hfmD~t{7<@uu zX%}h&PnfKAwS&N)uYjsF^aV@@kzPsGR>hJv+laU{E|dALqg39gCwV|I6DG-s zSyx}nj7eHx7EzK9^ymlqA3HIn)Su>*t=|J4>v|E7arC-qmjHm^eL+BxOmKMpEr@Xr z`m^Y?Ub0)wEN;4bnNF8o&8M&7 zdZH$EzpkraqoFAIk+u$rQ6KKJQS|wyMv>9jr`M>}hAxx(Jf4iJP>8fWYLnPtkU|X< zaIz*{t8M_7&@_pFUY@6E8|7%n#R~XKnDl&qmt1-l&$e%9z?DRe_5G(B8-|+Aj0E@X z^ml1{dK#sa0xa?Iwzh6tGg%b{F=)x|dt3=(Q&;O?{gTU*bcZoM&REN2~zm)QsFQi*0**VU_6 zt=3pR8hDY6-Wwk42L8^n+mU24kj$H{F0)y6>GtOjGt;@{ynVbG59M$b3x#x{kayww zohw%^d2s*8I*jcl1O~(8(Uhh0o{6h=g~G#@m)CKBlhbwOHp8&YeiAXXZ$4*F#Q+)k zD0CFvjxj*J%!<@D9+nizTdj6y%%Na$$KLvK9sqT9Xh_B2GLJ%`%z8d)Yiu8deo@Rj z&1SR9WyW(}0Apn~#xjiAW6k5SFK|~L{19ePMQUB2OyJ()%eTw9<@6qFJ=Zjf2i_Vf zTG!ih#t3+9(p!vnau_L1NmMGA8Gn~;?^o(HGhw|7RH?dUM(NR0l)~H$KDVLa_!(_W z?-8xG33DDcHZ^Irr|M+5BMh@{<9=7lCY9noMnEJaiufAV<|Jq9(A9J6$oPw}7wDun2mqERIYO>S63*in#%#$N-i+1vwKQ6@6 zv{*hqVSxr-L3Z45k6EsaE8-;x|Eu0q>Riv4Q*s5?f=Fw_Gj3oW{wl-=AW~AJtocIE z#pyZU-2Dd{sBCJK4$6I7bG|pvrYuHEu2mbpxc}z)7NFChcaJ!{ObP%Q&9kbZ>k|_b z-DE{iCVxBZR(H-iCKahA7#PxNU`Bc{pgRvh`VYkWdup&X7*Di2I>$u({{t?-!b z*IoGxo|fdXcgW=?Iot=PF?C%Z6~Xj0jiY9~w&OosU0rXYS{JTuwaTw4Kku8BL=?NJ%Gq=4UIT<>22 zde+*y_p>;%tGO0H?zn5pbNm0erR`h0k3zKyFyR-E$E!~ZXl2^j&~OT6eFICPDoOs^ z%}=4Z9NcHIM*C0u$EQ7V6*^TYQrnYB%PvY)zN9!hp@zp~%}}ul_Bv2`K?v%A$7fzp zi)nJJ!aH7AEK->e;IdkoXi>Qn;J%|8L9;n(5#^e$let`q$F&dunoK2Kz`(1;c^&YW zoAW8>wNs>JuGmuPnyp`-)bwumZJT5HsP9=U@_ZlwHA4AvnfOEh@y%gh96kJBcelTQ zAe)21LRdrWyMBCpjaJ(T414|a-(NqzuhU#eh9tN@etu8$N4CSQU&%A$$@lL85UGnA zRIOpc)qj6||NHN0U&}Rki;aIw(%-bFjT33%BM%Adb+CTjBih_`NsbpLFx~n&krgGG z?KHd+)<4oX8mu%;tdt}u*jGH-RUam;MIc<4~qOw!KfY zxV^Gy6MwYS2>ShX|8?yzkms5TV-dc;_d~ zImz(X|J$#{rRRy(n1N4K4N|>lW}1K)%%W~9$Ij>TqjC~FWva8* zRtM_w0FG*nBvC3u@pJ=5RS4$DOtel1im1|@O;g3r`8-@s>zJRHkO?6G3GL=h6SvY* z`FtKaqw6Oua4TDS)A#&C{}IUX^!>+i24dh3cOP$VK3rUEx7)BCTl|91&FMIJFUY)$x0T zyz9aue9fi%6AziWwXqZgV->9qoy~{wbVKJ*qwXr*-_2E~0gzj!>smE!hd%|Mw=2gX z3&0m)h?G+S<;Vn+Ks~OOB&}H9OgmF#E{R3E%?7@-xf!}~Tr`>^Q4RLttI!~nooSj$ z(!Helnr$zOam={G&Gb%j@Up?glZi{}(rd70%}Pa@cltAJx!rz#A9YjdB~8IN9Td9HwjpxfRimtTzrF0t zSg>gLt~ZEcAs7n__2ioIP^rXoVGzECDpV>}BtHyagT*3hruKLum0X6&c;IB*j*Fj{ z5KD|&$!0p7I?f0HDVm!bnLWVhwA=E~9k%0hqk+bq4(-^Z9`MQ{bCq+R0gpwAsPTn# zdl7X;!bA1@GB6bI6hn5?!!heQvZb6A3iGQpdqbVC3YnDp28*LRAB=i{M^!Br%V%&D zibSTCQ^3rbt3tYSIs_Qsqg3YkN zCYs~e&Fmlgk2k3x>BYr)#>(8?j_=a{uOX^Y`u&b~iM82`^vVq#>kZU+u!zW@8fH!B z#Hd)fq^+|9viucd4!h;(VdZT)*Ue8$y_Q9u71U|#aKf#OC8kQAXWgdfIb=0fk(V1! 
z@8sd8it=J}MbT#6`Y`q^o|CMiSC^sTj0d)kq~@r}cHD9F(Cg`)q1^E#qaqp2hsY2` zU0<364H4&mUN-PaH!E>#b(fsLW?R(I#K9{{)If0V@|=%$jA=m zd?wrvnmRd@vyuIYZa*BgojpAjgETS5r*fMul{v6{-DEqRQ`0=kY?|2^AvSKIUVx^C-v=p{CFDzaN$uwtl(jae3mtwo|q`%T6ZW7}@&*=j7iM|DUkZ?-VR zm_;+K3(oaRs+=#esFdf5S+>NS4m&KH&D$80P#=44&SoylLbQ~&Yw{l(4g4b1Z2 z-M+l7H>-@mPtJB;3-A7!F?BNOI*D2H&N+fgoCGN4!COh?zceM;C8i{yu}MSkqHBvq ztKW$MxKg=HHg5o#NIKk8vUrq1CEB!*nU$iIHywWb;*JjwoM?E|;jA>zK>X$zh9|Aq zFJGo?#v8|Sy_O0wvhK9lvPV>!z%3_^bfdX#HJ1^PAAzYN$CqX=Qy8OF@Aq%-0bP4c zraC7OwZyvwoW=D#nAaR<*@VamJPkVHan!^5HR|=YVl~+IaF?ir3xC_|Vdbvh#bm0H z(eIKu0Vb5yCGJev-2a)ow&u2RD@;-pX~_~KO0WV;mIR9Hi(;j&yqVb~ti#o6XOeg) z@lIa+&`zH_oy~Om*qQ#q{;HnufEUQuO=7p9WSJm=b3X?M5&=Mh&yGL5|227eG#Tu_ z+driN-=$ja4nCe<{_yVX{Lja;=ibX7pUuwBoAu|lC(lO9ML(#&KL2?9%k;E&QVaYE zg`dvWVRQ8I=;d;}9gUK8^YF!skLSO=`qPW|hqa$5V*R{+bNuwDfBa?f+xPEIkahJ3 zum1SrgS(pysLNj`gVQ&Ed-GlP*VX0aUK0KK@t4WZM~CnK^Wnw*`SaznODw$|xsG%C z_H;04wAp?#lABDBr`pInP;9%!~g4OHAYjV3M6w~07vzhzq zjFQLRpiiD~es;bfkDviHMa%ZM)_=Wty8MyGJ6OjPGbI;;1`S=t{aUZvJs1oY{imBY z!K(lOAOJ~3K~y!$FA3J5dvLJd8g?7&(PVdT2WyD7!*Lt&@oW%{+LUE(KgHyiTKD+) z@Rs!-x3|M>ZyvfXjc2eVTPJGxNyls}5k-;Hn7Y&E*4Cic!|W}@=ji!v(lOf&_lTTk zz0+x6h0MlqR-cJ{db)GcYYc}d2WC^YZgaXDO+qa1xaLIp`BsxaH}Z|#g7LVM_>Bn` zwcr`?qe<7N@WbnQnf1`fFPy`nS*CvN^cO+2H$B)t8w_~G8!@X7hmFC>Nv+oDL`|`F znBW=V-K$Sr%Aq0gz&Y5bEAHWbF9JI|6!n^Kw}X>m4-fhFqh2GJUApa)VK6Z(>C6sy zNC1;|P|WALbd%1OGh92oBfAr9PbPchv5vz#Lg3zv6@Q_EZ)myKhI6WF^%+_m?M|}V> zLdU&9Gg%ByhEBflVJGTsCzy@jT|@A_0rapYFT*?x;)_7liT@srqI;mWpku2``j0vS zPsgL41H6uFjEX8wx1wlQxXw*;75F*vFfgcrdz?8P8*3|txsQ{4in$8@H`Y9pNaQdq2o*Oek*j`hqu{m zPxkJ%$2JbEbv)S(f_8rFwy8Low2Os8JFQND_*fjY0oX%T`eO%1S7vw zmdQP$?O?YA6i~4O8{9@maa^Ut-3!{2V00_qvf{mXO}T9UK=OEO{tLVhC6CAEKZ=s1 zb8)9V@^C+j+80qfniNMcK@khwntD}pe-Su+Pj)EV8vW!L- zQ3VJ^t8^P3W%(){BV^G?`EJHrR=gLlDVOaZOdgNSe}VUbfF9dJZdzU?WI21n_DMBwl!Z60>Nun&rcu>u& z?xTi)ABq?#i|bd*A%l*q`&0w4%KL&5tD^i+bbKz}Z-tI8#rv($aUb50_f@{l9_fC| z*e-T7RQN28IE+1zRPLhWVGAXX+i+9NQv|_W)!c{`ejlfbsrC3T)r&>tYo0~}a{I5{Y0gd{LC zVMCc8@&$V@eP$3M@Y5a7oO+BxvKqiS&=~;;{>>GH$9SU&PI=4<+H)@12&!@ z0c31sK;{cg6r#Ho=Av%l@)#qbHZ8Yup^}7-JUxetRq*8Yipp zq|G3LoG!*i+LhT&LoaDIjTJ?BksP?fYjP^rX`z-{5&%kVg>+MZ05|~D+9jOYI2H#Q zmAfJvPYP`dqDawOxp-+8YZ|~J=POo2rZiik=dlpd(4tpWbW1vs)LNLUbgJCgWZE2u zl+l}qNa|_NC>{<(R8u{v+{}$-0gFE&sf_sGaBGaw^m(C|#+AN|FxAGGGh@jLj~5Lg zEimogrb!|!!4!%%#JiOC2$Jll6UHKJ}={=5D-jSjkqQFjbjb^B@v|Kn6DVG zU)kv&g@V+kKV`KQk`*Z{v?yBUPDnkT(CS?akj{

s3q}AK!2vzGR6_mO`r(D-bwn$|Ji+`+K$?5>8lvJXM-Yk@+tJ3_S9ENC8=H|tV5UE@V zDvRfmL$5IXo615(^}W&{$xO*?q$ucTWeBKMKo>&BN{Kvc@pzj@T^N$-!bq^LAsgtk z+!*=4Fz)K5d3qsCO2M6*tRjkLPsokmfKjaC+(b=mB1v+RCpJ4M`g5xQ;kuSxNVYDs zC{uN;=9WG=E3ovbVqqMqu%?*9i{2Q>ppD*G_er{TV4_B<;s*3XbOPBNY>UK^_d~xk zJ_9Sqjom6NQ8-FdWzb6Qh`A2BQ=zo!gDSBZvSZ^?Fw5eaqEkd_W)60! zwa&9umr_LJ zOjaUKr#w?u9iy;LGvn8k9u&Q;G11tqa=R4ojN>ijibNKXLr7iNvUUsqRE3X_Swk2? z<_08qcoJLr6^T(=3OWD<{NUkekMhqw!6e%z)dytr9(hd&_jG?7SnKOd}JF~&2 zBV$8@^NyVL1NbrKLL{; zcf&9}R=W_%6zUwF3%<9woMIC^`X7MAaMD~q8EjHMm(PR)q{Ivd+$B4_5E zSjdkL`L5)0&JTkatZM%^9-c~B#U;#r%L`%+p(O(OE63;gZfYn)IVeMT%ImkuckM_xhYc@pw904tkhQ|C8>p@!Hs$svmooSs zxyRezt$w_p>jy z?)D5QLvyRRH+-$Ij8jA~)fQ z$Ef3Cz5=m!V7wqbWaGf$%*Y^PrOqJ*>b%yl^(mx~C!>m$tU{{o;mP*4UFQqcMQ}?{ zb)%b_N^tlJvTky?Bx+TPd2Y(OZKNZ{)aTtv79Im07N0?ev&>2&;6N^KV@MKuaNs7d}85w6kWz5wgiR$5!dXbFdTDZlYia#QXvjEEn z`UF=KQv-dDh1Djfw5*Uleym~*3y%=3Uj-7~%;cHU?@lY4et<%h4&`@Ng(r4X2^}k5&V53&nZ3wcmL%xXH~ z<5EP&%Ng>lsGbaTNM?|xp8aa6w8xq>$m-tKJ0Sz^_ww((?RGAi1$?l&m+_t@l;yz= zUAeF5%UDoH`*kbp`^ye}S6)`8x~+)v>r!?*)9Ny-%@F;4<**Eg4#noTr9i?BJwFyx zc^NA@FKqAS2A7K=*U%=Mmez@=p{KKGTiqVc`+wjOb~M>i4~V#&=L_x}5rZh4z-GOw zd#_U-&~VZSWlp5(M9bQ@vxzsfG9Dc-U3Yux+qavo_h*jiYIpjD`W_sGEo7Z`nJNRIR-hJ61`>!?6g=8N8 zp6?|hkOEJ{inMA3wrg6bfP_fju$2V33!r7e;)dyUe1w@cmf@w;%^tx`i1+iEX{9JR zrt2-lc)4AE)6XETvZ71zz|jBcY;Gm!#}I2Jzoh6uL`&S9k)3Z>1qg)p6XP6!S&nHU zg}pI^+40a{pNf1ubXay6`2>3j?Ko-aesL50 z&2qdLrrTPCfw(H{L;86vIztmm*uUtvQFmv@!y`NSy9bzLJpD1tIN|>dW6<`4| z(JF#MHP7>6sZ%1+`|Y;(X@~{kxSHnmtPp^ifCCTvWgpFhBoxau&sXi}P=MEdtLNr* zeuz@v-}N)}+6_E(cic1xV>VyLn^S3s5Ky>bl1u`Q(4&YvO%^lRYZ}Z_8w-YM*Z_%d z(Q=X;M~OqVvca_SYMUnvb-7FC)}zpmxl0aKhO&S+quGtkFM=nBlE`3rHdpaBaW3lcQ&S|yr3%b8lGVO~xNE|C}} z2|}a@gm|TrZr$S#K>B&w=d(_u7E0+fHh;8_j_2^CS)$&V2sJchN4prRakU~rGhuk5 z%ww#K`5lw7Ff5Nva}^;EF9KvTO|*Utu-MWW3r#{~RJ}DiOEs(_wWS^@rDT^qXcZ;+ z!8GtAJVJokpvcX+)&|@NEEwN`|or>usv4xnv-jG_s9a_TxA-LywFr znxgNUz8RhYvL9i69H08Hs%3#M0t|%raq967z>&ZcotNu%aSM)g;Q5+9-1pdmWPsoE zb%Ea%M>VLY3Da7ujKHHG?;}v8RfZUBW%(cA?-}!=2t%ls`vx}d$^DHsJ^gHeF}*V( z=?4WsCM?jrP|#N>*0p+Id*pT0MGgoUV6|GR*Rvkp(>3$B@ku zXAA|J;hadXXCzcu#;ZV0j}0ix0`9>}M}87#X9$2jEcUN_Jrt1$P~$ z0TnxpzsuJ-_SZ3ZNzj9Oq5Y4ttJif^*}>8@(@{~-9BJrIyUHyJgp`?TIyxi@=s*Hd zMIa%ir%J~I!x62scjvYbm9u&rY#=jtUo&X4T|_3)NPLR=UlxWCs zEj5Ulfe#KkuFAD!Gw*a+>eWDALVePu4}nPrZWU5KN#9>qne^n|ATNpfiM+_OJTxuo zB|gWRB>hy6X~rZ>+_7kh>0E}i2+oo+Rl;LzA9Rzd$`Ag{2i+WOB~p5onEbAnZ}p&L zC>Ko2#y;Bv-weU)B=p9B;9d0`IQXFZzwMF6TD0h#GhUexG_vzVl2LL1R$Rb_z)s|x zB}l3z~-3Dk3n}P zYvw(Yab|*N0>F~N^$L-Zp3qt;3q#S0wz;3Y)g7m zl12r?mR$Eu9)?5M>qLptp~3x}+sQ)q6C0VagpV>(=~!36BIZn2|4R}h46XwGce;cg zI3F{-p5&~TZ}lKK$#cZ%!yY(db1uf^1Y+YdKrK0EQQ3z*d{KX-J-6KNw~-f<5#ifo z+pc&bJ0q1hd$%_UB?rwh=`>rP;Q} zRR$4AgIBD9P)wAHHM93P)1@|V4yJ*S>p`nex?J-ETkcVDeTR0ur!@_s+>ph4S(a15 z)@VNDQQ%(ttnX+u%=dYCT4%FCv}|(`ao%jx9-3?BsQM(K1M^B30$FRowtCTGx+l7Y z{<)VVY4I^1diholp)$A3k9kQW=_0+_18-Jhy5YHe!aFdIFr7^UndL+xqV1vylst0DvFJ9;J;9!3XU-dQ7 zm&Wl7Hk4ooJYwKlyTYdP4WQV~A6uAj?p>&%F3@> zVVHD#+{SV0vM~NuvG&$8U2@;ZeR^Qy!6~CY=+cJmZYIsHbtY5F0GoJT?@bu!!jh7o z1{g{bjgvh5wa{7fE01{g*X!)=$NNWz6nRIEB6X%|r?=b!rKbD>rb{sWNtfDHELtax zt}|Vo)J>tyl{#2F<9pJ}*Lr}GO@8yY+l?C&pY~u#@XWNLaG-OzAZ+f+NY&^>_pAO4 zauRaD+RI!_#I=3y@^QVJ3h+4GRmK+47V1f0GA5 zXX>B%HD%rAxnJ3PSq?fGw@tWT84^S?6Fs`4oBBeRyO_{TUdv&X54w@#0=LWp&@

[binary PNG data omitted]
literal 0
HcmV?d00001

diff --git a/media/42_2.png b/media/42_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..34051e0959e4741a144167ceab36938d3483fff7
GIT binary patch
literal 13839
[binary PNG data omitted]
z`kZs;zNlI5fDTJ4Hu{*$(B8yOJooJBahhB5XvATB(P{$aP}AG+wpQ; zJ3=p*VDG~)4a1$fSrEx~FzC|MOfrO3tGXCt{lV!-ZGwr{?tA?iQ`1@7CDzt6&hWY? zaVA)(ODwQHY_NGcHf`CKLQ4p@xz%6s!QcJz_cVAqa23|N$y ztEpbNu0<6^mtVOT443ZCbzI??@RkGuZnw*A9oQ2WJZQ8I%wMr{x->B4`;5K;n}!Xf z15KzbH*}HQFqWSWskg3Ak98HHbY>m)wdWfjnRr{bdo8>r!Un;)r*CHpDd56$H`j>1 zvX;`nD#HL~64I5v)wj6Ji%|PCgkj5ieAOFO>mGceu3xq4m5`0&EJ@1vA5xAL~c$$i34Z>C@mUz8xejzi( zbt3pX%-4E8$9cUmv{o6&vd-&(u%to91IQ|RaCO3^(s(X&CwvZ94+t}UFp9JMIr>~tG+|G_~7 z4y%XL?kZ~lH-X*5g2`?Otm)y7NCL*^m}X5^%l3xM#4m`AT(M9;3>85Tr7+}}f5OR! z?Vh0)&NuVr`!P%&Dd9Aj9$VpwcISpfStX>KQpfRQo!ca88>Z!M`j^0#Aw2E1+Uk*z z$mU9jyQ+eB$2t;D21DEV7$^>)aHJUE&Ed&+3;(=g<)zT-MNb(b&0fkS#?ekC#enE+ z`}MhQVeDNqh8)93n2+-KXxBM^QB9}kfE5WrgS#5*HCOHlqasAtbA+x)Jvcs4;G|J5 zm1~bdxRE%V0ivej`5k*i(hI>%4o)!!H7g$Ea;ODq-H33s=|mk?PvB!ta8;t0N>5H( zY9M8Hk5DH=dnNptlU6MnG-+BJyW#n%1?~lPDZz*%P@Z&j9qXU6p||JLsX31Zb+%C# z7Y2=CC|}<7b&+Mm2~uee#T?xj?qH%XAXi#ZJykQomsgDy{#YFvQ(Pv8AyI-QQ5njC zKk!MF5htl`U?MqbN3e9Hms)bJ-}G5~fgPn6c)v`GglwkA8e)&>7Zoz|S2@}{QRgJ) zpj723Gxq2=9o)YnBm=F5(EhQI5R%prv43*?uDQ!-I$?YjGT9l`RN)lPv9<-1ilw)a zq39-&UcXu;u?sHG*tNzEC&!$6XDugZEF2Ux3($S1Ka+qkqI^io5V;tMAqQcY%0)Ul zC|i%T_>93xfmFA(gU^|@Y4-d=s^De&@fgIsO5fB_;XAmbKkn4alAYCptYw-9VybrZ zehS(d8e`H(*@!vMCJ}m-zMV7u#xbh~0t?5(CXkotRT?5S^G^x+k->LpeMZP~h(k2g z!}VHZh`klmD)Ek4w*edv3q5hsVMn+|8}RmR?nBaUC)4x0HU&$t1ZH*;aVSFx=|XlY zZEUlt-Y^v>kUTvBacojcr>-a?Nw%gOwFx{Y=jehheZvYfjeeiPH$v~pGYaUIb6PY~ ztfd`;t4PcRshTQE(oDuo9+K$t-{W7a8u{0;=BG0#gUns{#IoXok6naZpmr{45C>*{ z`WNym2*!a$MBaqfafWw=*x&?KB|5O_(E`dDCHf3bniZCr2ULe75Z%Bu$Mx`9qbF`_eByiSVy6uX@FCm}bQ)9V)a8F!5dr9fpc;cpe3jjy58P zz?P42tQ^G;cvtdME4)p5vIlk|Tz*u0ome`fKFQu0!^GNWRHwkS)cdrZu-%VyiS&HK z7F^NHSBlFXv<^*GF+5aa97BJS-11Go`!D*1kghasTSAIrIGngNW&0=(nddzUxj@@~ zj#`MCG9^0m>Cq2?-IGl^2JiH_-_)Z`I*B!TBx|h*;%7OZ0hvwD*Fc?$B$q<|&dbv~ z=w~hv?YD;q^Hi}b+c}fH^|~&)sGuDGTpbRMEhxYdpymy4nO7f2YdK^9sch1M|<=;I|ffz}I{@?w{|99*Ehdunh4*x5A)XaW( z37KfA=%D`E&)ToAs=(U=O6XZNtZA~b*X<7+c90Nx%lG`d8<%zXG1L4fdw5@mzkK}z XWO1*#po>I?00000NkvXXu0mjfHo1d? literal 0 HcmV?d00001 diff --git a/media/42_2.png b/media/42_2.png new file mode 100644 index 0000000000000000000000000000000000000000..34051e0959e4741a144167ceab36938d3483fff7 GIT binary patch literal 13839 zcmeIZWmub06DEwiLn)pLTCBz0wODYs;=zi$I}~qAfdUB{+_kv7lpw|0;)LP^mreWb zUi)6V`~Um%orL6h&Y77rbKf&_`0*qTsIEweM}vogfemd&nDMPk94RZC6`QA4_)|6lY7%_fD>! 
zP7d#$dE2;qIJmkz^5P?VJ2q6OaG;n}9czJo}=jYkk*^&O_<>hg6bN~GL z)7;$L(a{kfAHTf3{Li01U0q!&Dk={T4*&o_Qd08v_Lh^Alb)Xb{{Ehci76x`1YinyMDJfoFUe?yuSy@?ndU^!~ z1+A^E3JMC8l$8Db{d|0UL_|awuBxh< zoSX~{3~Xv@`tjq3i;D{z8{5pxjGv#MprBx8W~QpDYI=IQfPg@3Z0zCT;q2_}=g*(# z=H|k}!bV3&qobqg=;)@Wr`Ollb8>PpFfi)t>jwu1OG-+Lii&zmn?{jXhk0tLYop*; zOwCPpk3aj(cRDSF8j8M2w(>FZJB5kc*jK+77_?Xh`?E5{OL!xBt^Kvfg!V2WXy-~S z7oMeNR`+5vR{ zD;m`ggbx{LzmuKU$Giw^y;}y6%d-9IdTW!?Su7Uh(gDxiV}yktqb7>6n(I$j~; zjAd-TG0x>FkV-a72s?ds5UVd$&7|fW#(Q>U6@>x%+-M82wcVyJ zChp#XX=@2$@k>>&5H=-IyAc$nSjkUv!IHB^tt*m7qS}@?+_C6&=rxLZcMCw1pRhTy zg0?D>!`>0)q{|Ai#A}~&a7|MupOw6g?)#(TnxV~6k2-u-)D9_2K>Xl2fF_}QBtm~* zV%guHr2s_ZQ^m3+g`scVDuwPz@wh>3lW)=6f%NY7;S4}E1#PVJjrQ01>vXKo8OE(z z$p|Vz%ug_;F*S~_@1HTgu^bCj9^e#5-b{(KHsUaf5X+j*0XyeH3lzyARW$8EVt18rxhw8h9RZLE{&8qWtFAhXLA^+frHgU?{Fx&`PP00t&XZp~scM3t!Eu zf10(Ldk=hawri!3AYP1(P;ItR64wQK@X5+$tdzq}%kC4N+KxUXm#dt9SX=s5f^)W5>fWSz zN3}vYqHKtPdsxbp7u({K>4lgWCZtHF1n=tw0vzqe-<`i;qyCb(ud?k4_9-J%00c@l zALefs!PP?Ni~$ese1?vAXp?*8MoNFK3e*xLK;(kw6TYqvlW|z3IO<7lD8%i_5y4VX z-fDU`y4w&18LF{!C5r7_QH5pE%|j0JCZcYDZ4f+bnk@mjVd2mu`zHFs4;@2{;7Dga z2)^knU{~ZlJb@;FS&B?api1vutCw3O)R?f0n(ldinTEnjp*p)wz9t4|)_|IKlzb@W z9iDi=r|hB$L#SfMFk^XfkG?>PmE<6f^hB_o)6Q8+SQfj2fE2@*Uh(NS(H)WMTxL(< z@vA7701E8yxxcMP0j@m-r6fXg{=lh|E19S)DME818?Xe0?<8e~bX=!ZW?GviRM@1yGd{oB1`c;@#nZX) zYC-T3=(XK4u099t&}t5MpYYekKAL* zAZ%J3Qu`T?H5ZeSR-xe5tr%WJJfLe^wu^=~0m}|+10J6F zDrdoQtC)wkS5(DGp5X(|g2l&+o||FA6?yohMP`17$2S7)mNG@^mi^y9YW)Tk>w&!m ziT39)(h~P|)Qcs`n0vNlg7U+|?1k|8M_?Cnlc!#D+D~gT52&5p0x4uCCU{>ralxoH zCdf59o?c&kD4{%N+;%%BgK`dQH;)o&5_q!tBwkG!U_)hMeaP8nxR*b+at>)1_}LMM z(F_JTKt)Ml3-WZ(}CU5kx{Yz%*eyx0p_8%3XXTJTgCspTG1c`qC z&GGygRsbv*cvo%p1Rk`7-4NuTF(|~Rv8V5&0AsbL>gXQQ%1&17ZbD_bvkcT&^;g2n$ zvF0$a11ccv`HQcufSWOloB~}zfIfI$7f}5Jjm^I9LeN?KFShPiYssJci{o9n`*W1wHj53N_%blyi7#;+}-389?w=s1Z;>#pTRoz^q zJ6DGnUwB>3@G%dPV0^FR5YV@_}Ol1o$5Syxy9 z8^IJ84=R6^C!;10DF$k5LQA2nu&E}OwNBMW!_`h@$Xa|`&?Wy8ZZ^!($5k8JoX@3Q zoEHusqkwbb@*jkk9EjubN8Qw>RhwO(Xn0I^<==Il>f-UwHAT@#-|gi6QodiN0F`kQ z!2tJLOZxz^mWw>Ws`iA*BQf7cuIL-lkhb&e+wTa`jNtPVfcc|nR2E27I%oqMV45zO z8vI!KO>pfooAJ@;W~Uqnp6$B!Dfq;6@8;Wg?w_3@`v=uXhnrGMI&a{Wj*=OLml7$3 zA*-pIwBG?$FQAflt8?Y9E9q5_dN)7BmHqeWk$!pC0n$7VMroiSvEYZH?9qdLfQtEJ z^r_u9Y15rWX`=W9ZzvmViWzp9df#%H%lK$~!`=jF?tF~%JnM5NK#WqOd3m{0yr6*F zk%QRkf$cJs@ki~53GA+=L7*&ZnE#s8!B9OG!uz0OiomPkPuHKEF^!ia^h3fG-i0P< ztf~3B`I{}Rl^<0;Zjv#Tn7EIyg-8*lrj4!Xjc#T1^KoTo09J!7?k*NBBAgleP^`MC zuxz^*U$>7RXYntJ#aL2OeHkh)Yg=~Vy%3|Az~j{=b8f!XH|_EcxjU;a>-;F$aYBp5 zA8{iheZOHI{E!ZaQ~7weHAqMva7zW7BSuVhnrolURvQ%B0?l7S9}$;%ArB*Y)Gz|L zBMRc^@wgfAaFM_b0tMesX-HTJBP|)pfdsC5!M4EqQ%gv0a;uJ`aaP-FsScV3X-P)H z^HA5%N2|Tb%gL5~_9wfiD6%r@R39k!tHv~TckA;%;I!fpAE5ToV|s}PjSywIqMk0* zsW)&Kh+S|{rCD>)JG?KD<_%qnLwi1BvGd`B`>j=#Y+`0swn3aDDyDY|NCA+@?nhkvm+WD2u^;ADQVpTX{kRmp^w>~$x zq6fH^8l(Z2x5ItgWYATGginDe-a|l~8^Ek9YURW%F8n;!CiIWD(2zqB8Z5^VE}i;g zm2d#tev|1~JqQ=JH16VB|1z`-g{O?}f|nNPyl*j(daOEJ!zK1PC%}oE^4EOTFNtt+ z)JBgw>}YEz>PD+&mEbqfa&q{wI4+zTUjyvoDMkXh7&?g#z)D3|(ui%Ee7Ei9L4W0j zmNkfOt53dd=}LcBwj6b5<@AXq`BoO3BzWCI?xgY(u`5w|7;2QRu}X``RAQVAkcy@$ z0l0+r44hctTVBvzQvf{{7e8@Ci4^Bijb-aLgK2%O)_lBa^}K3$F*L{GaT=$-WPBZ4 z60P|(O`yHp-4TgN(6}4+T=~~fNXFeepuRBdb%zqmigAn=ht~SAK_1fGUR~^=kl*!71|CsW2C7rdJ8E| zT>{X>oa2Ah#-FY{|6^c}Ce5WqngqP-V;B(BbN#6Y- z#_5a-G!uaa`{xe}%xCKy9n?w?Mx#f}3RAQe-58w=`x49t8aOX7>j<01P+gNwplIkk zF!-#aL7XAb9_xY4Pm~BAEbY@Q~x1-`LXmJ;$Vpm8}!@H-L@IPd8| zNGt2s{-*iM;u^1fK_72o_Y2=zx82PXMq3wL&aV`13UJ{0&sN%C=Vx5gt? 
zbQPg{=tAF1;gYoml}Y72MZAj3(B)8iOqd!oDBuS-mY1#!Vfq_=^#ysChk6k_{z7>u zy$^2C7-Mp<@P6G=Qtg zY6}+K&2gmfW#tuXQIbQt2k42$fC*uutBOe)gAODBoO#U3UydG3wy&fJjmGX9Xc(|` zQkjnBorH4;8qJjACVL!+4wUEz2vMDv6M%iMN_JU!pw;`ED*NZ6nVFgy?~dfJCgvYZ zANtIW@OF72K6MeZ&H~q`wbF!7k9vR3u3cOT+v%TScdn6)a(cB#m;%;4m9Y?t*!=7S zAY^;YlAie6(EfF`zaJqVfeX)FB=NS2N~ViENdle9$T zPCNY6rl;;QqDVYAr6-zg*p)Pn5KqC(G(c*&GOb^&Thh^+3Blx?>;pDxIAK`ctP)T$ zL-~gE@>R+EPHK;%Yeq{b%(`P%vwKD`i3G38z^#m4C<8K2DBD6` zSWH9g+@t+{$)9y;4A2U%w{0K7Pf`ofl`%b;tT_#lgA=*7gr9#b2Cyh7qrWZ6UhP2H zkj8|GEcJYZs0;T2(}JbE6Y1!rBeVkU4ANN%K3U6>!9@IiXtUf`qwK4psK1n6?Qr7x zntA?CyYouept7)0dF}*7U5#dysBULUIpJ=Cuq3IS z3_Jb9BHizFp?lUSqYz#;>B^H>`T_c_N7Bw|*~;{%Q4lhiTxVIN|C5bCmGCxYlQu{0 zaKU=E+EV+K=3Y&xrKAD=h?}nrWvkg<7&IN3!CW6C{&4H1 zc`J5VKg)3DYQ8CCsX(x{#~G%849KG20?Dt-$l%th!}5&KxMvg#JhmsyUsJPgmTVT{ zfn7@j>H#F9>mApT7TUbfA3}(16`Q@%%UzI(@%?}o)psjkNDNT_V=l8`^Tmq|B=BC1$+^HIpAc)HIVx0sue6Rz*ONw*!@5*Ut3sNGfuMvd!F`5i zf=32wO(4_MWug?s*f?vVu=N!FEFoEs1$K(mzx!4FXaNjSY~Jd!dD|oPG552S^C%^x zc;CfR$2uE_!L0|}6P zZ|@kqR-wXAL`Gk7vKh=0ZDhjMKvGH=zJUXeL7_`5+EY|WOY^=Oh9~!%g|}4^8~st< z^%&lZAst^+n`6Z^Dc`XC8uQIT$YlePtpJj(n`^2|3n=t^ki0k)FNU8bZCpjfcnFz@ zMWHjeyvX1UdxMT=286Kwa;73SBjLochHtVC{jSW_8@o_x{sb|6JeFxBqg~ymsh{xl zNOablsC$^HY941={_Zrfl;xuQZNU1z&r9t>v>AY7uxvGOc6;-X(%tjLTs?PZAbscZ zF83@jZ^5k1NdlIw$F2bXB1OOC@Bgsd-JSVpCVI1O941cHb=8q1;;Chiff(3edAJ@o zfc2z`G<-U5U5LSY;2}e>3^nd^6B7`a(!bgAJ&W0qttNhynsxaa^ltaZ@$bavFz)tZ zha*!VPp#R9`=o2&?fpYpz9~2hTZLL0fRa)&3j7 zct~Yob?)rLB0y~EymFSsrJYXhewH^6R8mD;5PZ1VSgrxuF~f&Q+!jid<7!Aob77VM zA|yj@<9sHfmso|d;fvChL02pXQ4q+is~I)j)#^|WVc}0vN{K-}U!BriBvO(v%Px=Y z&D$k=7;?8RI=a9&o(+4cl9B-z2KKj)XOawezXS)<)*yCV_JWfp?E=uBtZ=BL_;tZ$ z5@^W$-s1S>LGK8uV7x7DsnyNzglJT55WLI-K8`hse+S^0$!yz>Era17&1Ug))ZPbWm zsB}^8{t6nJ703lE^>NFgfq^?&pl5<}3;H_|G_Y`B#sMqqq9zsyh%b}#E)bm`8UgCy$N_U+mO)}a05&60_k_B}5Fw3XBmW=c zyGP-NT9GvB=7k|?$_E;US0NcTe2FBQ6CYtxZc{Y}stu=sy#ww&f$ZA+2meo?|d%9 zvSyfeK3@vyWgrc1%jMK4mPX(fj9K~OeR}1BbWSa9$tI9C&c8##J0RUfKWA(c%5rlM z;hztzUNq6J_ZzVTe-_n>FnR@V>6n-}TR$Az!-fu21+(%8O?^r82%{aK!$fXI*4VtV zr(sWL>OPq|v!#y451oix%BNJNb}R6e6c0pg)PWt2*Uf>k8d+gc63L_4s3722n`XPY z&t4eP_IO9KzZe5B!iWTZI9L&Mdyz?wpf>}+v@}b;_}-v`c(~xMhuBEaAudPNAXiu9 zB_>3j5DNzW+)$`*P;#1%%zSAmub9NvIBMhdnnWuT$nlq0htWTi-`|$I8~qd5;Z_gm z*2npaskmCHE^obltvVq{7+P7pHjM$|d4TLAJ>-Bn{D6cSp@1wg!wM8ke~PeEAqz?v z$vfb`9z zoqrK{YeX%jBRxh!ynIEeq?oCtjV{}0R}HsEf~*$TRx#;P&7N>KpHi-)7yXbwkTcJ4 zHrFae2zS#IUFMh<*r9^`_^CdT`4!@t@BiuVpay%T%yBzfeRm@70wmf z5k_R746AQ8&4frpkxOMpPgknWkp`S!!J0}3I6rY9Q;`h*`47#UFaQR&MWFw0;zU3& zHxFb68A1gqZs#s+{{-fyd3R-;x)x%`ITOhbVu-s>_x-g$FJy()S7}eJIVSvj%SQT?aCiu zV1d1JE4dGFlllzkI3EjAO#(JIdf?u=QuSu8m2NrkP-Yt`^c+q(M@>L%F2!koQ#)|= zx3Njn-#rti&6488_KFIqk|??0#{Ap7a$U>|r=U53r$PLMm-UxvNH%PMsm|AoJH`a#}V?LSDA&N5FFiadqdR0hA7AxF|#^;TWI>g~gy zcy456{Ox){$(jqQTAAMeKs1~fc-2Mwmm-mYo?0tpyAx3~)I}a(oVWx&yy`ddnHV5%&U_ zYE23f8ps~4BHi&?xE1vLZ@jLEh}8e8(*JoGR8GaTqazza29>72b$x7tCZ>6m-R&14 zx59uaOgx=yT0x?wBIT^J179udg_cfYf~cYF&fWt`gdok?Qz|GP!}-+my^{{IqC^a=X!+y7eme_p0SkJ7i1C5D6y z*~82V{G`;XXT*GHy$VJ2o2Ir7Fpy;e7AW&jzW2`sP|c!VmtI&)@mI5op0* z>L8P^I4ugX8lihbf`ROY$kPppfmhX9mCfI(8B8-nCU~<1QxP=`y!{)$L*Xp+-)4lM zEbRYPMgGXLD_*ciR(aNcd4xPk_xO=Q=QI<@#`vEYI*_=|I`R?!X9ZmBhpfDROaIjW ztw^h#A&<^~0IF3ZtL&e;f8syGLFI50%(=ayzv-mqRkfO}`yAO2Im^Tp3}dq9caYhE z`GhBaO23Sq97#l+zq5>tZtqi-cfG4>`K8<%XMwE?-0?0qC-UT5FH|Z_er~$vZMW)r zRb{(S?D1E*ZZ`u;C!aL4iL)e|ZQPt@v5aP`E*7DnkhcAO7NCOv60z`IEG;{RL~Z zk=AGDT_|6v+ZIYB6CKCSiSzT%nwqZ8Su`19c2QO<`+I5WCGGRN&+Ucz1#fa&qn5^? 
zNA~>+cJU!$4jxF=s18b^dh=cOVy^R|Q|46XodSgGw#qEzX;tLA9t#{=i}feXt?3el z;gROrsEjLB^plA~Rl25JAMUD>la_aX&v}Uukyejz#>9#QPc0YO!00ER{rDQtO*TF@ ze8Zh+H=7`o;~i5%@cVdGLMd?CIB3p1d?~EjCz#?zaL3L1!;JzD2GIqcv^_Se(LI}a z?Ng&}3B7xl&YX9Iu=hLIDM*h%35& zwpjP!n^!_Hxc57omZ(7K(iP%p`0#cDnl~SMZOCuIa(u^HBrAZm@Hu>1v2Wx^lPfq3 zE2}wzN%fWx*o1mK3vTdut%<2GGcWIMTXQhD6h@xctWYp7 zrv5O^G?OQL>bxEDN7+<5#?f5a>Ue5$@}0G7AQY{<-T9IEO)N}uiRDYM5MPQWKGnhf zn_q;@kR@qHG|9d<<#?C*SNbvK9 z#vmC&@1@m+zu05QQ_R^HH2q|pH+_@Gz_@0FzCQ&Pqk(*?4R%PkkgYdzBh9F9AxkUw zzI|m6_ED{*(Jt4O>SHkB+kbeSriz+e%T(d3x4m22TbU2hA#0+;Jf&{n9a~$z)nIz-i|OJRdR( zPES~|bl5-gJKgTQ*7di8rqC_1po}djjt<3|ffPDY<{8to_?*lpCS0C;33;?3rPjna zTmJLMb$BMObVBX7od-EWuJi+;bG5QYZDr3d6!jZ=UqVKq{4bTyXJ>G=2`gmhl?kx| zz2CX=;SnUBmkzm(bysjj%@KJgF)lq%f3+*&W#aSxJ^2u|JY!LvZ72eOwi6aMnHp%v zPe%P{=EPSp2_z8_slJyxf66xyYWxG+r0L_S7y8TiMd7r~oF5}Q6SXM(bb1~f;R$A* z;i6uLtL?nlflHSE1kl7?e_jAzzWdNWehbwOx#nUBiP`^B44a-*q{ifP#&u6(49dZ? z^W@Lf1-P_#>V>%jU%mbDM`)7cu*2Czd-(0vvcT zHW_U>x@k&*z}fOWX{VUH8`B++&pDhg*8gz zeRX=8!wF7ovYt2j83#30Q02C`z(T24znrtsiwWoa^*!rbo=>%8`~lFo#5Ax)K-!)0 zoK(m4+RRKox_xbm`a|ntajC4fv)bMMB4*4 z2%h?A8hv=R+iU4;sMu2DIGUOMOJMS}c=dO;%SiYoZshzRJ|Db6Ms1>H=DCB*28Zjm zyZZuYq^^JtZS5nen)zF7Rdz+Qrrw*&(k*psEA;K5)L&ss@$$beo)|}wi?rLyZb~2o z@7%lgf?Rd3p!N|nJ%U@+9eY7M$w6-r*f|~Fjy#?$;%+v>_BY;iri=?t|M=!L`l^P! zW?~94;d2~i-M@bpzSULQ*}jE861%X2upH>BvX0d`L@ssK-XRECR|$kT_wc7Y@(+Wb znk#M7W?A@gr3XC2gd_52(}=$C(mEV!@jlAA1;5sBneobj;7OG*CH}N|5=fnSWMEGH z(RsBm`uO$@?xFA4Yl1%Hn~!3RL_VRrgMyZqlkH4p#^1C}?0pfSgvmxuQLTrJXN>9J zW#g7?@zPd9;fJdgTNPMDHqEGmGIg|NFpcr4ud_2DOWtf6PyR+8$BM*i-^dP9Zdd1f zv`EjXFelqfW{J}-q2-?cOt;gI+c(I64OCYdjZMV;`X&*nxE`^F5%$+Wy=7cpPR{R5 z4N8t;Xa%7FwAUckE_rN=N9&1tR7In?3=~(Pk zrWQ_JWN)22DY8(W7hrxM5!hv$)3MdH0*$P03bd>GN$q9(Id~q5*>oRR^yP89yL@!a z8vgs5oeFV<74^MxZ@D_}@@=o?NrU+uTw>3$)n0{uT<3wOfJLoW#(AUg$VnN`Jno5Y zkbm2}KqIa~knY1}P?5z9gIz|HlV1t1`dW#)ZtBNAnln~E{gP}hrRtd*eWm)H^M1As zr@cURZ@aPly~l%`Yi%P#%Aqb|hGeGXpeH6uBq>GqS`ga$HM?=OMrIoBzj%iIl~!-?u5A}Xg#s@c-_9k$e!s#QGD4LvaDOBGdB zfj~RFmDZfDzVFHXG(^MSq}F0Y%gWk3Yz=iULwHjl8;~@w7^lf*9sbYJJc&%)GN&wf49@vi}AeV5N;Y?IuM4kATo6(Jg7kl3lYWAXVnIt&=9-j zZ>{}WPIz7nxhbcG9ZOMXCo@N*PtU@wXIqSpd4e*Gw(?Be!nql16?+@d3+KW}i|+Afi-wX`fL{vE#= zOKJj7-nX|9_*G1>58{FeOBC+_Ix)A^i@=^svQuOzLkx({&83Z!M)r*$1_V$`T7Uc} z*NIq&nu=m^AszH>$`_2u!g$CBS8^VpEm|@mb?)8AgJ4{TUVe-7HkLr{yLC3on|d$k z=zdQa@4$g13Or*fGZ(u8DM!=~bEi)mJUO_!8$C(nnv7nY$ zF!M5;kgLJFA|W(*HW8xl_I`pfH*(7c`}AJQ&{5HGh*WN4(BKtb`7DJM_JFd4CE@3C zP+ygkMEemq_6LQ~4rRw3u*JU2^Y;iA^OE&ztDstP6`x;Fp)0?bckF!kMv5zKR&A9| zUv8TWxl4EJZ2dk-?N+ODpl)3fRJj#2z0iM2cSUp&h3GFwi^)nh=3$a)Hlk@w7@?Qd*5ff{xBx)s&Z!T+INL9^;0OO!a2Tcf)j- zStf`l5yt4uJ|!;C8Q(IWG=DQ6y2)kff-^6+CJ*h~Gm2?@fw_9l zz7Xz$kGp%zu<`0^ruvZ!X+5^DXbJr~TV(+6w8;8A@g|`q_Vtnx?BKao+Mk{g?LXt) zMW;TgzPiRUaDsf>gbtB)_`EPIP>juTjo{W4pF>;xqbvca$>`_ncoylb;PGCm-sEkP za}f^hgK*(3p<&QN-_iPSzK>cR`$kLVR&JYVwL`Q=XeqR~2#FsA^-zuel|CicX!lp(YPJH-fDEcNTaeZ(h1v?x2KxKi$MMjcXs1P+}j6iN&*F5XD9L}XQ7pur?gwGX>N z5WW7$6}QLTvsE|qVC|T!LFGba%6=LoWZ9X)PR~z;&C=cL>Mf*8D0dF**P4s>h90=> zx2M3lcafKmVBjpVi9OIKVeuKTJk9Qr2Gxz`F(1i6S@2Kc&jw**hMPmW?lXwx>$izd1h(<^kiy$AggnWC@0Rl~v zneo+Qg##CI?0|iK)!m}&Pr*qKD^qY$uVd@UE3RHVWX48 z=n%6PQ7_oY)geS&gIf0JASl literal 0 HcmV?d00001 diff --git a/media/42_3.png b/media/42_3.png new file mode 100644 index 0000000000000000000000000000000000000000..e594bcb1e0cec45d8b126f46a9f9fa8ae0b84bc5 GIT binary patch literal 14054 zcmeIZWmFtN*Di{?6Wl|v;O-8=VQ`0_fx$hvLy({Wg3q9X6C8phkf6aQ1O^E%g9LXu zlf37w?Rkrk$v=36p-pCX!j5uabINaMt=uyA7>ADMgbng4;Q18udk;BFYo_5fXBo84R2gA6c?cc+f&ul2MGz6 zFk9#|9_AFgA6=9Juw8dJcSc*fw%+(1?T4GI5{~Hc#4aQ 
zd3kw%|Nd=lZH*C@fD=Yi(@W8;pP*YRW*Vm_^q4D_m$j!~YzrX+d`SbAba26Jp z>+9>An;R}Jt{*>sh>3|IBO}|`*zoc3Nl8hqtgIv@CDGHFJS@l1@%eHa9n?rKQ>1 z+Y=KL=jG+)LSZHc$ z1_lNi8yl08llS-c>+0%uc6MfEWo>P3?dA>*dU|$ws>Z~`?CtFh z4-ZdFOoW7lR8&;N#l>-OaFmvoPESvZii+an+A0B?(g2cD=8@%92|^{jI6G%?ij`GKyV%D ztEa7xGZl;`F_se_lSTZCeMkF;Qd4spGgIse>ngyz% zzT{syACnsRdu{kq*evLV28+ZZP-_!9=hKjw(D1ae(kR*d`@f@oYP@Dj$nq8Ksnc_7 zwuHUvahR>(wXA|G8dmXI9_s&8T~rU<1)3Y0wd&k;81|iuYs*g4tG96_%|UuC&>HxZCj zv)Y;?WgJ+%DSK6^X`Nn0477BqA+Vw#$Q`Xk`dY)5%`6qS{(O1*tnfszE}I)q>f9+tu?k=J0I$+ZfT)jNMls~1Ym++_8@Hy~+7|Gss-zpX;s-Vz@~HVjY>GVQ#7-keKQt&$3fA|BkneHY7#jUo7S# z4C3zWNwXE0{NU8GAU66p`Rbur$w3`Ubb7nUdXZ*t-ehH+>@}Zp8K6RMkvbfA`2+?D zG-smcz6r;`yk94veZ+o2L!rGGD)2Sv9+h~l1$A@o^;;s@r6>Y%QRbnrJIeYG@kSHM z75rXpSOx3EV!~8KADN9U_zUAqHcv?j)_%afVEmBWQ5#JIKH6t!ry2LGQ}d90 zucK}vRe}nhUY8(Y1Z0lK88*~s$l*(i^$LY8gE^^14rK*m0qr+E2{ndc2~v_eS&KHr zqey)gM?v}xpV5>GehIPa_364ypsAvaeE+U9#&#Onw)Q4Itb3S2}?mbfK#WTRrGl?-7@$*96J&Boj^yd;2u{2mv zff|owQR4NNp~nIydSMYk%quB4VvB_4@}W#J7>}*d7vSVKA7|iktUO4npPA-y%;gTX zdLl7xXVKtHYN6!7W)ouZAav27x@Xf0V4h;5PliZzL$Aw81K7$K@m3W{Tq9sbUpA)~ z3uV0N+)C5hkrYCTMf`~&BaYv>Y#WK;v$+Wsw?x1n-`10^Gw`>!*CxgFk)&N;0RsMd+%QCRwLn(U3LsYMMY=r_>Z;$B`H$(yE z+7jOf>PwZq*WjCKCWEr+DZgE{xK^SDE_y|Bp!&V3R{s4>Hl33s(P=FgR^(l3 z@~&>}ml-eCNn{Wy4T#=q2;&vX<(NR+mH_Ha>Wn&y@Io?E^zD0q@#)V*xS=EdWIscL zViF%@d|cnOS?{;gQB`D;lfvq$(7S6q?z~Sa4yZ38H`jsB0gL(@!M2NR1!1G|HROvy`qqlz*Qkv zCzD?_t+6R^LFs6A(3lY!L@ek>7nFJv*z_E~aC*`1+U~yK4nk2E=aTOSNKIeE@e zrej`JJF?&OZIj*mvZ}{DEo=e8Ru*|9BUm7XMUFHD{EdJx;lT( zQBwp)R}&z*V2{yNeqC=)UqiX0eq$Jjsvva{${naar?7!M6(DoaIvccy|PLhi4HpZ0(ZNr&j5Y5>^0iR+ZcPS zgL8ktA@(5~025=c4g|9UV3IvQ**|v&9I`;jwl-K=>6?fsl74r|E4S*C1)4(4)sb5| zmCxV5Ap5DEb?pGYv=ZK%VF|$z{^=ecB=an%3KcnmCBYtt9cmEFr0>CjBTN>X2Y2v$ z`l?l}aIq>A8^i|x+7qy=#SZwh6f6f`N$66`UBUP%$^n%m0S4i=1z)j4pTT#450`b} z54Ho3=6jP3p_vL=tuEkK@51kW9?Kv75UZ_3-nNH#o*fMpFW;Q^m@}+DvK5D(e~iBk z4)Bl)$>_ctd0!lIG8FLi3S0rx0?6Jj6d%=w=Z2il4yDT6O6A@LpUQeLL$7{^7u-Kw zWd_J|{eD=bkIe&81i}|Re)pZdw?92g%^b0Q5bu0szlmSOy6Affyt@o0%nS(6 zx!)SFKaKCJ0O%jG-lc1{_)SP(-vI}ZM!$N=FG@zdL!#kih zaR@_8(BT&Y-vI4P9&y?2R?<`tq|FRG_;%#2wV&p)yPpnH_XxE>Nu~nb5&+yw8A&D? z!QnbDq=RffMl>6WIrF-!>ufF`RD|r}%`nHJ`l5O>U4ygH)0B5K<`b&O8*Jz)kJhqE z=m*oqw1X!Z6%p6-fk(JbbRMen+VO_R>;>v;fT$oeRA#H}GF2bKOrRDCbBoX;F5Qj$ zL*an<-^-z0oMF8wfoIeDTa~XvOrxMUx2BW#pHr?*&O3RAbvpl3WU-*A=X|HxVYrl* zD>73JJ^tMt(d9|zM#+o;ep`Z6XLeCS|*`uq|0DsMzo{kx51bYElg zcR>|B4rFuR%!NDJd6Tv$$m z@LC0Q9}VQ{;Zj?rcmO|jCQa%?cNdj`LNrC6`QR--m??GLMPIc^nCI-oEQ zhF>PZv(%GZ9)}UcO<);7R&-3*FD>MRp;SI%++7L~;_wYsxqw4D2qy`RLd;Ln%gFB? 
z$(oJSW{mGM)v)+OTGbi~BF;;a)BZ}1$PWyKlwxB0Yy`xFWuGOhkGq8=z*l`_kcT_U zbIlK~eP+}4a9o5=BS}E5+`}tAH^=iT5b}M;y;F0)U26r;=d9Har_8vQ@m~(VCSm~1 zMK5~j5&Up~P!h(Qc*Ab3`YtAGa67DD}wBQ=rpJfOQ?ft&59)wD@E z2dsAQ(81{%4;AzZdc=Ty-8C_p9Z-||^(Wq-uf|Ci+=gQ+E+)oaLgoPKU;}XmkO7Z< z@*n3H zY0~$OO=bn_Mt2c|jue)>Y2g|=Om4(}Ax1wVW?E^@!B>9EbZd1X zQ#eFG^nn6VzPd`-w5^^b#mOTj`a$dXyO5uuimpmT>UH;SW2wQj^Xs#(B5vFJt_yK4 z8u}i)^1ohjfFA?sYQ$P><@~|D=R#Xz$ZzY7g+Vyb*9BeJKqlOi4WpnDJ9_+?8p=oc z_o*BBs=UDb)-cBFL`L7A?bq@U8fN{clYB`oeNh}JX)H}yWprYh^>jovAP09-dH=;u zUaY?i)v>uDBw+FNaUGuUd7zpkhryhZ;N`oe(SCsGs@i6a5=DRfw{L4LF7!c}v$OiI zua0f$-!Xm=nkT5F1NmPJ2#Er}_gusx%_(N2`zWjaPAK8K$pt??7+XJVdpy|#L{XqK z&GYy(vvuh>P$krAg@?kV?zeNyZWum?Iy_oOwKx|=j2D#8AR9hq_rupMIi6vZxLRe; zY#`5%UCZxW!!!1=c>9ORuA4onkUoDN5ZN{myZ~b^6@q;Ia2kE1f}1gdzJ|{%LZT|FTVP0-)jPlA89|xFZ1O}z{h?wN|$d9i8naO*^oqsCR!I^yMWrJkSlQf z{J98)w=lo?4-e7{RiCrHU$94*NTUa8gNzjsOk1PR|3US{K!?wEy8G}sWqnq6;Q8v# z*0beH!|UbjO@5m8hK_%-e`geWu&%N36Po(D{B~yc!#m7GQyk&x2pJPk5-(QYUZc^} zm7{SNfcc-rXApd=Ku|}?1Kk=bXA7QM6Q4n3-mq~i;EsP{00$}(h+$JOYJM{=fGf z3*61xN{uSBsZl4<3^Z~kh7E}r8tA@{$G!=H& zvl+WFoX8~vr(>7aetLTAGI{7%=_Ol`HaXe(7R5hiWb$A=JfP!~EL16|O9m)x z+VrMWX=tRF_uK}um8HO$3*&l=0Xii5;?v%C&(5wlc>v#Q6K7n1X!w;4TH9uU@4W0blBz4^V_B6D2@=sC2 zeMb-ILrTRQX*v6Y&hGf5k?wfu!Txwr>hN+8W46m_dpN%;7<;G&B;2?dvz@%9J{ zoG)_frWVcdgh>ero!*nN4DIU_5{C9YscC)og`cGF^b>mEfx|AewC;|VdQ0;zi?8Ig zN@Vphvx|xdrFkE$DG2U)8yk18D>3`$QB%_5FtOMkgB@SuX`#eNwbu0Gnc0PTw z#g2lvG;D7-cc-tiFhnrs7TR}F{p!xJ@t#tgm`gU!`t8~YA!4Ouv*5cvSPe7tkhQF& z)cL+n6s~HY592-M>}sj{MYpiPeH+1D53`CjbPz!(i?pEBz54ESMSS@EQWYwA>q+)$ z?O3Hx$SmltNV?Ke;`A}s=0QPM^&9Wawk*nog^aN<#+#O}KN-h6#irj4TY1 z#|HwU^OV9(2+kzmNyN+3<%pjMRhNo9oMQuq>{1G^c@jj}QX=!-;X$w=@>+6QSP*Pv zVI~mkzv)a~?IXWr=m|1zy5{UKrXzEyWa&<*?9+qN{MEn{^i5%{H zMpsakzz+Rf+nPD`aJ_A~_BP$5}5g#DZ%s zsVXc}xfS0UA)yH;paUlh`b#hXq`F%wb1g^kFaW7My9+pDS=z}V_pXHZ$k>8<9{8T;-F=jzfiP`P>&976YW+fFbslr2D4M{#TO}<-H5FBi^9U$>AW}AGwXp9a8_4YvYG3Kr&)sZjk zQPKSHbZh2)W##bUZ{?TM=|<<*Kocr;_4pu+Rw~^$af1G0PLmNn?`fbQ5oX}d4xYei z9RO&DFxE{0D$;{OMR-vwRgR(GAV~}5f_U7xM@dWo%%kcXl3)jKfB>DZf~{I_Z;;_t zaX}5^O}`wQ=Lbu|O!@#nj1dc9>r%uc7JvvY!?n;4N9M0AIZcyMH@n+mEsKSs~ zu$Ae~=k=85kPTZ=d1-#A@o!x;1Q#G9^s6O(xqn=okx0|Ka6;s{vHZNF_3%d!dKsZS z8wf+mqq;(!tyX6I0YemUjtjN6`O*p40<=XC7YfRTJ^40jij2B;T1sEBd%?w1XQoSUUO5pEb1~r%npEJR~9(41R zCkoQ!D?YhT=vE6^K2>~^43&TRF9ND7`S@*mDtQGgC*-7q+kmY(L$jU$1Qb@?)CAqI z`HTKT=T5?Ivix3k*--mK=gueerlp!6t zIVMFSDEsHken~kd5V1jY__O+v{~yCwq2%QUh+!|Fj)4lK5c$w%h`E{DRl^F-U*{aM zFLifLAp-$P@UTXZ+Do`O;}k8#1@jPS%>v*C>=eYOxm(q4A>&BeY?if^&umVVQDxHsIbkN-BU4ZfL zafD0riTx&UjNNW$Tt-vN3113-97VX*QAv7Z<^Ddnxt@wZqTN!w`tO2Nx*n9_h@@UT zy3p5;hJAqKj9Z@~iCW8X*XUR_4Q*_h27$=^RlI)Xv?H)0O4rhwmYEd>5TF6q_jqX| zPYU^-#qiHp+@t&6o!vYL2Jqk{KdJs^5ab>rWQ8!32@=ilqVgW8l@>bIBY2V5RZsLc z;=8yV;;nRzryr17YqPDW{)R(r&|*QZUukjyD#H811W9?>sQ(hO7C35%|IZ75vd`)0 z=>IvhJ~>MAimwnC+SJ9s<%H784kg3J#wNr5ReX&C1o%{72t(MY{8pkzKOqVtc}&ns zt$20J(chfc_AxX07jjrb&o>MT93Q5^*`3DP|C$TC%MIIS`9t_$`*4 zcTTh>Xdw)xglZisbE~Ocd_YvAXjqNG;5OG0d(_{;uz9wL81M{QLqhVhtr>*aN9tnO zz{fNk*C|6J5ek52K>lZTLkA*(u_5pGEUGuw{>BRm5aG+Xte44o;s^pFyQDajy(%zT zA*ZEvviv7tKRitIRwU@4CqTNON}jv4oPvq~kq0^x?kgzJrI7!+mQzC!SLXj**8kJ= z_~njR^uG!JUH1Q*^dhDWkq-#x{%?%^Usm`pWxdm$Qw#pTUb7*nLTcrw^@ZWJhWxGZ zNEcgcAoZVrSw)7tUjqfr+)*O1n?f{}JOA2JeQzSxm;BcuAd$G}Idd21y-2fH8oY>G zivS-pGeqW(k5l~rLH55g@?UuWccuA$DYi#OHIjd;a|G;TX-7!ash|omqV&;3CsUgn z%J-2XYOXBMZnNNlr@WQ`;ma5BlFiM;&3r9e?}JRIL@n(tH|-<$OBLg=rH~TWhNUC7 zJbTd_zrh!SwBK8AV{LI&BHh&I$A@XZ*OI%0@&E1JPM2z?i?^AU&2K-upT=g2AyQbE z%^K9OsLW(0d)ga)mr3^t-O7T0W%p@8@>hLJ1{WR$t(?;P=j4Ss!AM9X>3^REz`>!7 
ze(p`274eM*i#_p#grwX};w`qs+a%j(nNG>GH}kU0vVI*bf@cmDvk!9;=tn!Kt7apg z;Ri6o%4+|g^>qvk>=rUZtKG+~FvEwXp2$QAT<5@xvya#>sYfh#>^MNLkIQ#l;hM66 z}bzQ~%@fYCUvV;j-rnW~HJ|$U~z& z`#WQ}fr8#3bc0XL@=<2k}5lG-hyOmcS2i^FCjVJ zV)z?mNq;_yo0aM}o_Vo7uz*EcP4D(ghlfW+Q&$F)A&SOr;qDI&n~TUD5ZvEt9jwJ< zQzaOjy>n|BH?Xt)7)lK7w;?|g>X$*XlPmV=iFEDcmLVu+!%0c_L;c^sqXPuU_hCAN zw*0libThjbHaImLdjfWU(xOf@ZY9n`!*{LFyBGGf)zFXpr5;K9W zL|q!A*-ilYI5SzmtSIawtEWMJdWJ5A~4y1OT~8o?RYpr0^O%pC77sXLs{kx|ycoE=|FJ9|2lCw7ozgP)cX+ejff zq$3ZuGM-3Lvmuv5m}!K!3E0w2pJzONeIeKH>zq$)tE~OAPpyChR5~TcW(XlL%uBfC z^zH366UbGs+09s?z?r&EO4Mh$7}y!!O)oIfK3r$MJ2Givyf90;tEBe~M<>|K;rITu z)!oyqk8vgaht(mFq!SQYRnIsq8NPAPB4zd^yI3`s;6&w-{=7utmo0uR+$r$6tZ3p- zO~g}1Mj3MQS*m1XOlDJF+;ldmuW7QWSKnA04$bZfHC5J^pku06goj{BsSpoW{f73+ z$SbR~kXrHjjx<#gT<^u#ycX69yApp}Z17w{hF&XzEzxwaNdUn2GDV-kAij#>1!R}> zI-@*cb|WD9y^V?*sx+pH;IW>}`6V@is`el?Z_Nu?uj61-kB)7@^y0eb*^8 zC|VJlqJnmQ%6;N$YLq&L&ws${V!7Z~%DZEyftisw-0otSzGWpHMm;y=nu|&clMX|0;f$HSvD!fW)A;?XSbx`~p+8rQ zIR;WcIo5kHO^3bah&0uQ{+I|hmvxdARMuv<*THd_!CJT!$KlSnN2YOw+HsS zZ;3Y=8d9V!i`f9FHOlF3GgkKcsPnom%ZzOn$*&`ZjSAeq^8~Z>HjYJQtP&FZ)*OCIOPoKsOJE=x){I5I z?2uRZ)i;)I}3ZXajiAU_Dzv>o{gh#tjaMP z5QhjdZTKi@(jL?;1^`z)I79 z$+bmsE2pSLb{BmBCwkJxKKjHu=xO7SN`&u{1_;#X)6d5kYHinhxTSMVoGF*yZhz;Q zWR@6PgJu1RE`TsUf1dJyv$e}x{CyX)nKe)11oQlamAlEOfle!Zr7Y65AUwkSy5}L^ zySR8Y%1MGhw6u?~;dS4F8N@^*XcFVLf_D$TPF`@O=B%v~(*%711_i8FxVgR*w1n>9 zb)h%@T9oKFKDeM1b`@ehFc{ry{dA~|T;$T4a!hu7zle2xuCn2I0Z1O&Fwt_^PaelHb+I%q4q$@KW~wT?tkNC;lm510y%bprq4c6x9&W;r`ecv+=Hpk}XF%%WMn z!7@XAY~#mir-RIle&bp%(l9~00X}|8)$%Wudw0h)R%S1Wt-Vr6xsw=lzy1C(FQ#eu z71$*UpQdUE`{b?_^u4NN8Fh`*hyZ93pGeFoX5S4Hsbu9cR*))WOF#aR*sB#5!Cg^V zBo$otHRo{`^|J(8k|TvFrclN5A<>X6M!uWcHL2Lv)n*9T^HeM}Z&~T0mf=2I;^pnU zn8>xj%Ta=oGzo)Op)Uz>E$dQ)m!|as!_HR-GjT%G0>_FdwyAgEHO$X1Q{dVS6d_5P z%xeNJ^AyLUsTN^(7j}ha-7W=~vGsF>A+y#Y53-cAg>}wf8)ocE&uv;F=E zsoAHlJf6Wca%1%Ft`R#$8n+zefsAj;`CA3>B_>Kir!-_P5z&$TL=_m^VQfsY>(`Vg zJ_VumkiA}p6tFl#(8D0vQ#{Y_7_M@kWA|1zx_bu7>`;}rf zbE%xRwdu(uCV!9jdBOogUyS<|JBkqt@j*dJ^(_gO7xDR|(Z=#DN=0Hi`_ap7M*cq? 
zUHq|SX?SgZOw(WWW*IxV`K}u5Xox9o-}q^6HQQLQqP~1#E_8a`G>=79kl+}LQ1EtA za9~!gi5vVu-$#|r>}N}+8+Pn!y0(Gii4HMrT8AASF~3-kt%jSz-dusIi;h;P$KF^$ z?QI!p`I-=zU1}!5ozm^S=PcuN?Xhag0;4EC$vAE~DN7z6!e-P^Ws%>C{}ZdNa~}Q_ zkF)}R<#w7jbYg83Kk9n+FVy|1#@dws3Dv_AnDeIy(fMoY=VbgJ#Rp+b`y`2faetyN z?f#YFhpmjjCv?_Z%&)jZYnlVVKrEE{>i72w*l=5n9zr2pTCC&w7zxf-=0XTt`g}VZ z33e0Dv=Ajc4Z*-aBG)}uUyu@w$Hi&N3DLY1K%RE3&BXq%bAO1>Gx6V5Fp&wTxHrT!?gfE|iRtsFZo4UPoXVMMzJCCjXxg*84LHAjd>w zZi!AN-iIgmhk2R9JZz`JWUA8Ez6;){3Hyg)6@u{l$vIhpWIqe)h1+l|jCz;5KMW(_ z(Kk6K4LOZm9#j>H&mVqu%4G7Ba6z( zBfpvgB)+okIKi&zNND)hZv5eA&B~_A)4^d;Jb$|D`O(~pcYB{9!Bpt&lmJ7}M6RF& z14p$KQSGW9?@A}l=zdY`G)-{U1U;hS7tOMef^l zx#hBf)Wa{%&>b&E-HRL6M(X3eiPzyH%EflV85{YH-W8vSI$C&P?@M$%cZjdBo3$mx zcE=1^;PkAQn1-5HbCpy#V3#utcs9t|^hM1TFB!h~NV|4hG;;U2{|t+2 zQ|s}CINtc>E9#C6W@;L~J}$*Wj!%}~&-)a$jJCIt1DgP|jK30ETGiKK^RajrK0jyj zB?(6XIq$!qr%Leio1DN_R}jxrkFBW)#Rz&oyWaeg!iYupnbnG@Nm zXw@Xh)_7>=2Q_r@6#ZF6H?6HPJ9PM4=M_a5H%dD(TWE2~!`KkaB2ERu68f3PpP8uw z^F^Y3zg>IT6WQ(@$#;r15ddLOnrM(l)*^u75HOHRo#K$08b6nA@$yrte9sjMQ)wMB z^V*Pjg0ts=XL?;I3v*{3So_}W3eGmgUJbbF9Of^j)M9K_#q?mKron-xQ!gSMV&r5o zsdD-@m}u*YM7<;Z>?cf)n=NtpTD7FZw?-whYn$8=qebZxiF7p(FtoF!F|j>861U|)_&^QQU$ftixjF$aeD~O2t6X;9&Ha%VsriJ zGJY>qT!7zPzp9*9`EhPs!*QIhv{(fGE{hD;%MJa8c$_}fa{G7Z21g>&M5u?pABitEGn{N@8lkc3-vP9=FQr%ZEt)-%Ac#)=`WfpK2##_B`-f1y1Dw z#4UA<#geT zrrXCud)&`X-S4q=?^aMr_iE8fL)EwqP*bGYHihMw;8c$1-_R1;D;8Y(lfc`Z={%$+dg3=4u8lTI&S^o$py~El2~I3 z_J@Y*k!H@KLwIr6Cbqn#+hy?#nE;NeW-ZradRulyw{Y?CLwc(IZ((&LhoSsLbyXY8 z6jMDz22)wv16tn+Q)LPbIl2XqpA+KIV#C~mk3NM$=?I2$vubUX@LfS$6G< z{gSAgx(bCXNj7vW;T@$B(I?kDAG}-4+A&^{zmq2$ECwusP~qgAim`b@mfwXTmBXSN z$sZ4{^b{=_(h8rorvW>ih#QYzMN6oBG&CyhGL!lsSjNK>92u}1IA#=oms zl%^EVn^R!EV0J8;;cYv3^QI1~{CMJ=9W0{fC>anI$KLO}r+&%4wJQl2TUv?C`vxVD zJjeF+fy&cBayCKPO|G+`Uf6Do8F8g&24(*3T5a}6vkB*Pnfr8YOtGQi}~B|Fq&BjATckvgUBKz)FqmQGlzFFr=z@mlD6!X{`|X z)MS8g@^zpJZ0q|6s`pt>nx4x)buBdiye0ZcKm0t6j0e2eaKK`aLcikBDBYD{Hp Date: Wed, 24 Oct 2018 19:02:31 +0900 Subject: [PATCH 20/49] =?UTF-8?q?#42=20:=20=EC=A0=84=EC=B2=B4=20=ED=95=AD?= =?UTF-8?q?=EB=AA=A9=20=EB=B0=8F=20=EC=9D=B4=EB=AF=B8=EC=A7=80,=20code=20?= =?UTF-8?q?=EC=99=84=EB=A3=8C.=20=EB=B3=B8=EB=AC=B8=20=EB=82=B4=EC=9A=A9?= =?UTF-8?q?=20=EB=B2=88=EC=97=AD=20=EC=9D=B4=EC=96=B4=EC=84=9C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...h_as_your_first_deep_learning_framework.md | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 42_keras_or_pytorch_as_your_first_deep_learning_framework.md diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md new file mode 100644 index 0000000..fddadbb --- /dev/null +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -0,0 +1,85 @@ +## Keras vs PyTorch 어떤 플랫폼을 선택해야 할까?(keras or pytorch as your first deep learning framework) +[원문](https://deepsense.ai/keras-or-pytorch/) +> 문서 간략 소개 + +* Keras +* PyTorch +* framework + +### 소개 +![Keras_vs_PyTorch](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_0.png) + +### 좋아, 근데 다른 프레임워크는 어때? 
+ +### Keras vs PyTorch : 쉬운 사용법과 유연성 + +#### Keras + +```python +model = Sequential() +model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))) +model.add(MaxPool2D()) +model.add(Conv2D(16, (3, 3), activation='relu')) +model.add(MaxPool2D()) +model.add(Flatten()) +model.add(Dense(10, activation='softmax')) +``` + +#### PyTorch + +```python +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + + self.conv1 = nn.Conv2d(3, 32, 3) + self.conv2 = nn.Conv2d(32, 16, 3) + self.fc1 = nn.Linear(16 * 6 * 6, 10) + self.pool = nn.MaxPool2d(2, 2) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 6 * 6) + x = F.log_softmax(self.fc1(x), dim=-1) + + return x + +model = Net() +``` +#### 요약 + +### Keras vs PyTorch : 대중성과 학습자료 접근성 + + +![Percentof ML papers that mention...](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_1.png) + + +#### 요약 + +### Keras vs PyTorch : 디버깅과 introspection + +#### 요약 + +### Keras vs PyTorch : 모델을 추출하고 다른 플랫폼과의 호환성 + +#### 요약 + +### Keras vs PyTorch : 성능 + +![Tesla p100](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_0.png) + +![Tesla K80](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_0.png) + +#### 요약 + +### Keras vs PyTorch : 결론 + +### 참고문서 +* [참고 사이트 1]() +* [참고 사이트 2]() + + +> 이 글은 2018 컨트리뷰톤에서 [`Contribute to Keras`](https://github.com/KerasKorea/KEKOxTutorial) 프로젝트로 진행했습니다. +> Translator: [mike2ox](https://github.com/mike2ox)(Moonhyeok Song) +> Translator email : From e2e8d70879f0456916edf6f7c88d39c9a0351132 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Wed, 24 Oct 2018 20:06:41 +0900 Subject: [PATCH 21/49] =?UTF-8?q?#42=20:=20=EB=B2=88=EC=97=AD=20=EC=9E=91?= =?UTF-8?q?=EC=97=85=EC=A4=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...h_as_your_first_deep_learning_framework.md | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index fddadbb..f05a3b4 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -1,13 +1,24 @@ ## Keras vs PyTorch 어떤 플랫폼을 선택해야 할까?(keras or pytorch as your first deep learning framework) [원문](https://deepsense.ai/keras-or-pytorch/) -> 문서 간략 소개 +> 본 글은 딥러닝을 배우는, 가르치는 입장에서 어떤 프레임워크가 좋은지를 Keras와 PyTorch를 비교하며 독자가 선택을 할 수 있게 내용을 전개하고 있다. 원 작성자인 Piotr Migdal과 Rafal Jakubanis은 자신들의 경험을 바탕으로 글을 설명하고 있으므로 더 정확한 선택이 있으리라 생각한다. * Keras * PyTorch * framework +![Keras_vs_PyTorch](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_0.png) + +> 본 글을 읽고 있는 그대, 딥러닝을 배우고 싶나요? 그대가 당신 비즈니스에 적용할 지, 다음 프로젝트에 적용할 지, 아니면 그저 시장성 있는 기술을 갖고 싶은 것인지가 중요하다. 배우기 위해 적절한 프레임워크를 선택하는건 그대 목표에 도달하기 위해 중요한 첫 단계이다. + +우리는 강력하게 당신이 Keras나 PyTorch를 선택하길 추천합니다. 그것들은 배우기도, 실험하기도 재밌는 강력한 도구들입니다. 우리는 교사나 학생의 입장에서 둘 다 알고 있습니다. Piotr는 두 프레임워크로 워크숍을 진행했고, Rafal은 현재 배우고 있는 중입니다. + +([Hacker News](https://news.ycombinator.com/item?id=17415321)와 [Reddit](https://www.reddit.com/r/MachineLearning/comments/8uhqol/d_keras_vs_pytorch_in_depth_comparison_of/)에서 논의한 것을 참조하세요.) + ### 소개 -![Keras_vs_PyTorch](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_0.png) +Keras와 PyTorch는 데이터 과학자들 사이에서 인기를 얻고있는 딥러닝용 오픈 소스 프레임워크입니다. 
+ +- [Keras](https://keras.io/)는 Tensorflow, CNTK, Theano, MXNet(혹은 Tensorflow안의 tf.contrib)의 상단에서 작동할 수 있는 고급 API입니다. 2015년 3월에 첫 배포를 한 이래로, 쉬운 사용법과 간단한 문법, 빠른 설계 덕분에 인기를 끌고 있습니다. 구글에서 지원하고 있습니다. +- [PyTorch](https://pytorch.org/) ### 좋아, 근데 다른 프레임워크는 어때? @@ -67,9 +78,9 @@ model = Net() ### Keras vs PyTorch : 성능 -![Tesla p100](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_0.png) +![Tesla p100](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_2.png) -![Tesla K80](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_0.png) +![Tesla K80](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_3.png) #### 요약 From 545dfa561546cb17eec83ac3b053a013b1ea3b49 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Wed, 24 Oct 2018 21:59:08 +0900 Subject: [PATCH 22/49] =?UTF-8?q?#42=20:=20=EC=9D=B8=EA=B8=B0=EC=99=80=20?= =?UTF-8?q?=ED=95=99=EC=8A=B5=EC=9E=90=EB=A3=8C=20=EC=A0=91=EA=B7=BC?= =?UTF-8?q?=EC=84=B1=20=EB=B2=88=EC=97=AD=EC=A4=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...h_as_your_first_deep_learning_framework.md | 44 +++++++++++++++++-- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index f05a3b4..213a647 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -17,12 +17,33 @@ ### 소개 Keras와 PyTorch는 데이터 과학자들 사이에서 인기를 얻고있는 딥러닝용 오픈 소스 프레임워크입니다. -- [Keras](https://keras.io/)는 Tensorflow, CNTK, Theano, MXNet(혹은 Tensorflow안의 tf.contrib)의 상단에서 작동할 수 있는 고급 API입니다. 2015년 3월에 첫 배포를 한 이래로, 쉬운 사용법과 간단한 문법, 빠른 설계 덕분에 인기를 끌고 있습니다. 구글에서 지원하고 있습니다. -- [PyTorch](https://pytorch.org/) +- [Keras](https://keras.io/)는 Tensorflow, CNTK, Theano, MXNet(혹은 Tensorflow안의 tf.contrib)의 상단에서 작동할 수 있는 고수준 API입니다. 2015년 3월에 첫 배포를 한 이래로, 쉬운 사용법과 간단한 문법, 빠른 설계 덕분에 인기를 끌고 있습니다. 이 도구는 구글에서 지원받고 있습니다. +- [PyTorch](https://pytorch.org/)는 2016년 10월에 배포된, 배열 표현식으로 직접 작업 저수준 API입니다. 작년에 큰 관심을 끌었고, 학술 연구에서 선호되는 솔루션, 맞춤 표현식으로 최적화하는 딥러닝 어플리케이션이 되어가고 있습니다. 이 도구는 페이스북에서 지원받고 있습니다. + +우리가 두 프레임워크([참조](https://www.reddit.com/r/MachineLearning/comments/6bicfo/d_keras_vs_PyTorch/))의 핵심 상세 내용을 논의하기 전에 당신을 실망시키고자 합니다. - '어떤 툴이 더 좋은가?'에 대한 정답은 없습니다. 선택은 절대적으로 당신의 기술적 지식, 필요성 그리고 기대에 달렸습니다. 본 글은 당신이 처음으로 두 프레임워크 중 한가지를 선택할 때 도움이 될 아이디어를 제공해주는데 목적을 두고 있습니다. + +#### 요약하자면 +Keras는 플러그 & 플레이 정신에 맞게, 표준 레이어로 실험하고 입문하기 쉬울 겁니다. + +PyTorch는 수학적으로 연관된 더 많은 사용자들을 위해 더 유연하고 저수준의 접근성을 제공합니다. + ### 좋아, 근데 다른 프레임워크는 어때? +Tensorflow는 대중적인 딥러닝 프레임워크입니다. 그러나, 원시 Tensorflowsms 계산 그래프 구축을 장황하고 모호하게 추상화하고 있습니다. 일단 딥러닝의 기초 지식을 알고 있다면 문제가 되지 않습니다. 하지만, 새로 입문하는 사람에겐 공식적으로 지원되는 인터페이스로써 Keras를 사용하는게 더 쉽고 생산적일 겁니다. + +[수정 : 최근, Tensorflow에서 [Eager Execution](https://www.tensorflow.org/versions/r1.9/programmers_guide/keras)를 소개했는데 이는 모든 python 코드를 실행하고 초보자에게 보다 직관적으로 모델을 학습시킬 수 있게 해줍니다.(특히 tf.keras API를 사용할 때!)] + +그대가 어떤 Theano 튜토리얼을 찾았지만, 이는 더 이상 활발한 개발이 이뤄지지지 않습니다. Caffe는 유연성이 부족하지만, Torch는 Lua를 사용합니다. MXNet, Chainer 그리고 CNTK는 현재 대중적이지 않습니다. + ### Keras vs PyTorch : 쉬운 사용법과 유연성 +Keras와 PyTorch는 작동에 대한 추상화 단게에서 다릅니다. + +Keras는 딥러닝에 사용되는 레이어와 연산자들을 neat(레코 크기의 블럭)로 감싸고, 데이터 과학자의 입장에서 딥러닝 복잡성을 추상화하는 고수준 API입니다. + +PyTorch는 유저들에게 맞춤 레이어를 작성하고 수학적 최적화 작업을 볼 수 있게 자율성을 주도록 해주는 저수준 환경을 제공합니다. 더 복잡한 구조 개발은 python의 모든 기능을 사용하고 모든 기능의 내부에 접근하는 것보다 간단합니다. + +어떻게 Keras와 PyTorch로 간단한 컨볼루션 신경망을 정의할 지를 head-to-head로 비교해보자. 
#### Keras @@ -58,12 +79,27 @@ class Net(nn.Module): model = Net() ``` + +위 코드 블럭은 두 프레임워크의 차이를 약간 맛보게 해줍니다. 모델을 학습하기 위해, PyTorch는 20줄의 코드가 필요한 반면, Keras는 단일 코드만 필요했습니다. GPU 가속화 사용은 Keras에선 암묵적으로 처리되지만, PyTorch는 CPU와 GPU간 데이터 전송할 때 요구합니다. + +만약 초보자라면, Keras는 명확한 이점을 보일 것입니다. Keras는 실제로 읽기 쉽고 간결해 구현 단계에서의 세부 사항을 건너뛰는 동시에 그대의 첫번째 end-to-end 딥러닝 모델을 빠르게 설계하도록 해줄겁니다. 그러나, 이런 세부 사항을 뛰어넘는 건 당신의 딥러닝 작업에서 계산이 필요한 블럭의 내부 작업 탐색에 제한이 됩니다. PyTorch를 사용하는 건 당신에게 역전파처럼 핵심 딥러닝 개념과 학습 단계의 나머지 부분에 대해 생각할 것들을 제공합니다. + +PyTorch보다 간단한 Keras는 더이상 장난감을 의미하진 않는다. 이는 초심자들이 사용하는 중요한 딥러닝 도구이다. 능숙한 데이터 과학자들에게도 마찬가지다. 예를 들면, Kaggle에서 열린 `the Dstl Satellite Imagery Feature Detection`에서 상위 3팀이 그들의 솔루션에 Keras를 사용하였다. 반면, 4등인 [우리](https://blog.deepsense.ai/deep-learning-for-satellite-imagery-via-image-segmentation/#_ga=2.53479528.114026073.1540369751-2000517400.1540369751)는 PyTorch와 Keras를 혼합해서 사용하였다. + +당신의 딥러닝 어플리케이션이 Keras가 제공하는 것 이상의 유연성을 필요하는 지 파악하는 건 가치가 있다. 그대의 필요에 따라, Keras는 [가장 적은 힘의 규칙](https://en.wikipedia.org/wiki/Rule_of_least_power)에 입각하는 좋은 방법이 될 수 있다. + #### 요약 +- Keras : 좀 더 간결한 API +- PyTorch : 더 유연하고, 딥러닝 개념을 깁게 이해하는데 도움을 줌 + +### Keras vs PyTorch : 인기와 학습자료 접근성 +프레임워크의 인기는 단지 유용성의 대리만은 아니다. 작업 코드가 있는 튜토리얼, 리포지토리 그리고 단체 토론 등 커뮤니티 지원도 중요합니다. 2018년 6월 현재, Keras와 PyTorch는 GitHub과 arXiv 논문에서 인기를 누리고 있습니다.(Keras를 언급한 대부분의 논문들은 Tensorflow 백엔드 또한 언급하고 있습니다.) KDnugget에 따르면, Keras와 PyTorch는 가장 빠르게 성장하는 [데이터 과학 도구들](https://www.kdnuggets.com/2018/05/poll-tools-analytics-data-science-machine-learning-results.html)입니다. -### Keras vs PyTorch : 대중성과 학습자료 접근성 +![Percentof ML papers that mention...](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_1.png) +> 지난 6년간 43k개의 ML논문을 기반으로, arxiv 논문들에서 딥러닝 프레임워크에 대한 언급에 대한 자료입니다. Tensorflow는 전체 논문의 14.3%, PyTorch는 4.7%, Keras 4.0%, Caffe 3.8%, Theano 2.3%, Torch 1.5. MXNet/chainer/cntk는 1% 이하로 언급되었습니다. [참조](https://t.co/YOYAvc33iN) - Andrej Karpathy (@karpathy) -![Percentof ML papers that mention...](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_1.png) +두 프레임워크는 만족스러운 참고문서를 갖고 있지만, PyTorch는 강력한 커뮤니티 지원을 제공합니다. #### 요약 From a7d4106ac21789d5bd058de29e264004b516d147 Mon Sep 17 00:00:00 2001 From: Mijeong Date: Wed, 24 Oct 2018 22:26:34 +0900 Subject: [PATCH 23/49] Update 32_building_a_simple_keras_deep_learning_rest_api.md Co-Authored-By: mike2ox --- 32_building_a_simple_keras_deep_learning_rest_api.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/32_building_a_simple_keras_deep_learning_rest_api.md b/32_building_a_simple_keras_deep_learning_rest_api.md index 29c6a87..89b84f5 100644 --- a/32_building_a_simple_keras_deep_learning_rest_api.md +++ b/32_building_a_simple_keras_deep_learning_rest_api.md @@ -7,7 +7,7 @@ * flask ### 개요 -이번 튜토리얼에서, 우리는 케라스 모델을 가지고 REST API로 베포하는 간단한 방법을 설명합니다. +이번 튜토리얼에서, 우리는 케라스 모델을 REST API로 배포하는 간단한 방법을 설명합니다. 이 글에 있는 예시들은 자체 딥러닝 API를 구축하는 템플릿/스타트 포인트 역할을 합니다. 코드를 확장하고 API 엔드포인트의 확장성과 견고성이 얼마나 필요한지에 따라 코드를 맞춤화할 수 있습니다. @@ -368,4 +368,4 @@ $ python simple_request.py > 이 글은 2018 컨트리뷰톤에서 [`Contributue to Keras`](https://github.com/KerasKorea/KEKOxTutorial) 프로젝트로 진행했습니다. 
> Translator : [mike2ox](https://github.com/mike2ox) (Moonhyeok Song) -> Translator Email : \ No newline at end of file +> Translator Email : From a106624186d5c673b9f1362ac5b868a9ebf5e4bb Mon Sep 17 00:00:00 2001 From: Mijeong Date: Wed, 24 Oct 2018 22:27:05 +0900 Subject: [PATCH 24/49] Update 32_building_a_simple_keras_deep_learning_rest_api.md Co-Authored-By: mike2ox --- 32_building_a_simple_keras_deep_learning_rest_api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/32_building_a_simple_keras_deep_learning_rest_api.md b/32_building_a_simple_keras_deep_learning_rest_api.md index 89b84f5..973fe3f 100644 --- a/32_building_a_simple_keras_deep_learning_rest_api.md +++ b/32_building_a_simple_keras_deep_learning_rest_api.md @@ -12,7 +12,7 @@ 이 글에 있는 예시들은 자체 딥러닝 API를 구축하는 템플릿/스타트 포인트 역할을 합니다. 코드를 확장하고 API 엔드포인트의 확장성과 견고성이 얼마나 필요한지에 따라 코드를 맞춤화할 수 있습니다. 특히, 아래 항목들을 배울 수 있습니다 : -- 인퍼런스(inference)에 효율적으로 사용되도록 Keras 모델을 메모리에 불러오는(혹은 불러오지 않는) 방법 +- 효율적인 인퍼런스(inference) 사용을 위해 Keras 모델을 메모리에 불러오는(혹은 불러오지 않는) 방법 - Flask 웹 프레임워크를 사용하여 API 엔드포인트를 만드는 방법 - JSON-ify 모델을 사용하여 예측하고 클라이언트에게 결과를 반환하는 방법 - cURL과 python을 사용하여 Keras REST API를 호출하는 방법 From 7789034a8bf7467a6eb342e92c420776a93eaef2 Mon Sep 17 00:00:00 2001 From: Mijeong Date: Wed, 24 Oct 2018 22:27:23 +0900 Subject: [PATCH 25/49] Update 32_building_a_simple_keras_deep_learning_rest_api.md Co-Authored-By: mike2ox --- 32_building_a_simple_keras_deep_learning_rest_api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/32_building_a_simple_keras_deep_learning_rest_api.md b/32_building_a_simple_keras_deep_learning_rest_api.md index 973fe3f..ecc49a9 100644 --- a/32_building_a_simple_keras_deep_learning_rest_api.md +++ b/32_building_a_simple_keras_deep_learning_rest_api.md @@ -25,7 +25,7 @@ --- #### 개발 환경 구축 -우선 Keras가 컴퓨터에 이미 구성 / 설치되어 있다고 가정하려 합니다. 만약 아닐 경우, [공식 설치 지침](https://keras.io/#installation)에 따라 Keras를 설치하세요. +우선 Keras가 컴퓨터에 이미 구성/설치되어 있다고 가정합니다. 만약 아닐 경우, [공식 설치 지침](https://keras.io/#installation)에 따라 Keras를 설치하세요. 여기서부터, python 웹 프레임워크인 [Flask](http://flask.pocoo.org/)를 설치해야 API 엔드포인트를 구축할 수 있습니다. 또한, API도 사용할 수 있도록 [요청](http://docs.python-requests.org/en/master/)이 필요합니다. From 1f86369e9754fb9dc0c6f3113dcdf825fd2c4d76 Mon Sep 17 00:00:00 2001 From: Mijeong Date: Wed, 24 Oct 2018 22:30:27 +0900 Subject: [PATCH 26/49] Update 32_building_a_simple_keras_deep_learning_rest_api.md Co-Authored-By: mike2ox --- 32_building_a_simple_keras_deep_learning_rest_api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/32_building_a_simple_keras_deep_learning_rest_api.md b/32_building_a_simple_keras_deep_learning_rest_api.md index ecc49a9..4ab1d7e 100644 --- a/32_building_a_simple_keras_deep_learning_rest_api.md +++ b/32_building_a_simple_keras_deep_learning_rest_api.md @@ -36,7 +36,7 @@ ``` #### Keras REST API 설계 -우리의 Keras REST API는 `run_keras_server.py`라는 단일 파일에 자체적으로 포함되어 있습니다. 단순화를 위해 단일 파일안에 설치하도록 했습니다. 구현도 쉽게 모듈화 할 수 있습니다. +Keras REST API는 `run_keras_server.py`라는 단일 파일에 자체적으로 포함되어 있습니다. 단순화를 위해 단일 파일안에 설치하도록 했습니다. 구현도 쉽게 모듈화 할 수 있습니다. `run_keras_server.py`에서 3가지 함수를 발견하실 수 있습니다 : - `load_model` : 학습된 Keras 모델을 불러오고 인퍼런스를 위해 준비하는데 사용합니다. 
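(역자 참고) 위 튜토리얼 목차에 있는 "cURL과 python을 사용하여 Keras REST API를 호출하는 방법"이 어떤 흐름인지 감을 잡을 수 있도록 최소한의 파이썬 호출 예시를 덧붙입니다. 아래 코드는 튜토리얼 본문의 `simple_request.py`와 세부 구현이 다를 수 있는 가정된 스케치이며, 엔드포인트 주소(`http://localhost:5000/predict`)와 이미지 파일 이름(`dog.jpg`)은 설명을 위해 가정한 값입니다.

```python
import requests

# 가정 : Keras REST API 서버가 로컬의 /predict 엔드포인트로 떠 있다고 합시다.
KERAS_REST_API_URL = "http://localhost:5000/predict"
IMAGE_PATH = "dog.jpg"  # 설명용으로 가정한 입력 이미지

# 이미지를 읽어 multipart/form-data 형식의 POST 요청으로 보냅니다.
with open(IMAGE_PATH, "rb") as f:
    payload = {"image": f}
    r = requests.post(KERAS_REST_API_URL, files=payload).json()

# 서버가 돌려준 JSON 응답(성공 여부와 예측 목록)을 출력합니다.
if r.get("success"):
    for (i, result) in enumerate(r["predictions"]):
        print("{}. {}: {:.4f}".format(i + 1, result["label"], result["probability"]))
else:
    print("Request failed")
```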
From 4e9221b5f96cffaee5c21016a9015f5349f107d9 Mon Sep 17 00:00:00 2001 From: Mijeong Date: Wed, 24 Oct 2018 22:30:43 +0900 Subject: [PATCH 27/49] Update 32_building_a_simple_keras_deep_learning_rest_api.md Co-Authored-By: mike2ox --- 32_building_a_simple_keras_deep_learning_rest_api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/32_building_a_simple_keras_deep_learning_rest_api.md b/32_building_a_simple_keras_deep_learning_rest_api.md index 4ab1d7e..5402c81 100644 --- a/32_building_a_simple_keras_deep_learning_rest_api.md +++ b/32_building_a_simple_keras_deep_learning_rest_api.md @@ -116,7 +116,7 @@ def predict(): image = flask.request.files["image"].read() image = Image.open(io.BytesIO(image)) - # 분류를 위해 이미지를 사전 처리합니다. + # 분류를 위해 이미지를 전처리합니다. image = prepare_image(image, target=(224, 224)) # 입력 이미지를 분류하고 클라이언트로부터 반환되는 예측치들의 리스트를 초기화 합니다. From fd360c36d824f2b0e4a82e951913e20b5a5ebc84 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Wed, 24 Oct 2018 22:53:25 +0900 Subject: [PATCH 28/49] =?UTF-8?q?#25=20:=20=EC=98=A4=ED=83=80=20=EB=B0=8F?= =?UTF-8?q?=20=EC=88=98=EC=A0=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../25_keras_examples_3.md | 6 +++--- .../conv_filter_visualization.py | 21 ++++++++----------- .../deep_dream.py | 2 +- .../lstm_text_generation.py | 3 +-- .../neural_style_transfer.py | 11 ++-------- .../variational_autoencoder_deconv.py | 2 +- 6 files changed, 17 insertions(+), 28 deletions(-) diff --git a/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md b/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md index 097914b..f72073c 100644 --- a/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md +++ b/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md @@ -20,16 +20,16 @@ 케라스로 Deep Dream. [neural_doodle.py](neural_doodle.py) -신경망의 낙서. +신경망으로 낙서하기. [neural_style_transfer.py](neural_style_transfer.py) Neural style transfer. [variational_autoencoder.py](variational_autoencoder.py) -변종 Autoencoder를 만드는 방법을 보여줍니다. +variational autoencoder를 만드는 방법을 보여줍니다. [variational_autoencoder_deconv.py](variational_autoencoder_deconv.py) -Deconvolution 레이어와 케라스를 사용해 변종 Autoencoder를 만드는 방법을 보여줍니다. +Deconvolution 레이어와 케라스를 사용해 variational autoencoder를 만드는 방법을 보여줍니다. > 이 글은 2018 컨트리뷰톤에서 [`Contributue to Keras`](https://github.com/KerasKorea/KEKOxTutorial) 프로젝트로 진행했습니다. diff --git a/25_Keras_examples_3_Generative_models_examples/conv_filter_visualization.py b/25_Keras_examples_3_Generative_models_examples/conv_filter_visualization.py index c84c048..0ca2c66 100644 --- a/25_Keras_examples_3_Generative_models_examples/conv_filter_visualization.py +++ b/25_Keras_examples_3_Generative_models_examples/conv_filter_visualization.py @@ -12,19 +12,18 @@ from keras.applications import vgg16 from keras import backend as K -# 각 필터들을 위한 생성 이미지의 차원 설정 +# 각 필터들을 위한 생성 이미지의 차원 설정합니다 img_width = 128 img_height = 128 -# 시각화하고 싶은 레이어의 이름 설정 +# 시각화하고 싶은 레이어의 이름을 설정합니다 # (모델에 대한 정의는 keras/applications/vgg16.py에서 볼 수 있습니다.) layer_name = 'block5_conv1' -# 텐서(tensor)를 확인된 이미지로 변환해주는 함수 - +# 텐서(tensor)를 확인된 이미지로 변환해주는 함수입니다. 
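# (참고) 아래 함수는 값을 평균 0, 표준편차 0.1로 정규화하고 0.5를 더해 [0, 1] 범위로 자른 뒤, 255를 곱해 uint8 RGB 배열로 되돌립니다.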
def deprocess_image(x): - # 텐서를 정규화한다 : 중심은 0, 편차는 0.1 + # 텐서를 정규화합니다 : 중심은 0, 편차는 0.1 x -= x.mean() x /= (x.std() + K.epsilon()) x *= 0.1 @@ -33,7 +32,7 @@ def deprocess_image(x): x += 0.5 x = np.clip(x, 0, 1) - # RGB 배열로 변환 + # RGB 배열로 변환합니다 x *= 255 if K.image_data_format() == 'channels_first': x = x.transpose((1, 2, 0)) @@ -41,16 +40,16 @@ def deprocess_image(x): return x -# ImageNet의 가중치를 VGG16에 적용, 설계한다. +# ImageNet의 가중치를 VGG16에 적용, 설계합니다 model = vgg16.VGG16(weights='imagenet', include_top=False) print('Model loaded.') model.summary() -# 이미지를 입력받기 위한 placeholder 설정 +# 이미지를 입력받기 위한 placeholder를 설정합니다 input_img = model.input -# (앞서 이름을 지정한)각 핵심 레이어의 출력들을 가져옴. +# (앞서 이름을 지정한)각 핵심 레이어의 출력들을 가져옵니다. layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]]) @@ -82,7 +81,7 @@ def normalize(x): # 입력 이미지의 손실과 기울기를 반환합니다. iterate = K.function([input_img], [loss, grads]) - # 기울기 상승을 위해 스탭 크기 지정. + # 기울기 상승을 위해 스탭 크기 지정합니다. step = 1. # 몇 개의 임의의 노이즈와 같이 회색 이미지부터 시작합니다. @@ -117,8 +116,6 @@ def normalize(x): kept_filters.sort(key=lambda x: x[1], reverse=True) kept_filters = kept_filters[:n * n] -# build a black picture with enough space for -# our 8 x 8 filters of size 128 x 128, with a 5px margin in between # 128 x 128 크기의 8 x 8 필터를 저장할 수 있는 충분한 공간이 있는 검정 이미지를 만듭니다. # 5px의 여유공간도 둬야합니다. margin = 5 diff --git a/25_Keras_examples_3_Generative_models_examples/deep_dream.py b/25_Keras_examples_3_Generative_models_examples/deep_dream.py index 54d9087..bd3e455 100644 --- a/25_Keras_examples_3_Generative_models_examples/deep_dream.py +++ b/25_Keras_examples_3_Generative_models_examples/deep_dream.py @@ -26,7 +26,7 @@ from keras.applications import inception_v3 from keras import backend as K -# 입출력 이미지 경로 설정 +# 입출력 이미지 경로 설정합니다. parser = argparse.ArgumentParser(description='Deep Dreams with Keras.') parser.add_argument('base_image_path', metavar='base', type=str, help='Path to the image to transform.') diff --git a/25_Keras_examples_3_Generative_models_examples/lstm_text_generation.py b/25_Keras_examples_3_Generative_models_examples/lstm_text_generation.py index 372e6bd..d631137 100644 --- a/25_Keras_examples_3_Generative_models_examples/lstm_text_generation.py +++ b/25_Keras_examples_3_Generative_models_examples/lstm_text_generation.py @@ -33,8 +33,7 @@ char_indices = dict((c, i) for i, c in enumerate(chars)) indices_char = dict((i, c) for i, c in enumerate(chars)) -# cut the text in semi-redundant sequences of maxlen characters -# 텍스트를 maxlen만큼의 문자들로 잘라냅니다.(?) +# 텍스트를 maxlen만큼의 문자들로 자릅니다. maxlen = 40 step = 3 sentences = [] diff --git a/25_Keras_examples_3_Generative_models_examples/neural_style_transfer.py b/25_Keras_examples_3_Generative_models_examples/neural_style_transfer.py index 836c82e..486af50 100644 --- a/25_Keras_examples_3_Generative_models_examples/neural_style_transfer.py +++ b/25_Keras_examples_3_Generative_models_examples/neural_style_transfer.py @@ -105,7 +105,7 @@ def deprocess_image(x): x = x.transpose((1, 2, 0)) else: x = x.reshape((img_nrows, img_ncols, 3)) - # 평균 픽셀을 기준으로 zero중심 제거 + # 평균 픽셀을 기준으로 zero중심 제거합니다. x[:, :, 0] += 103.939 x[:, :, 1] += 116.779 x[:, :, 2] += 123.68 @@ -115,7 +115,7 @@ def deprocess_image(x): return x -# 주어진 이미지를 tensor형식으로 갖는다. +# 주어진 이미지를 tensor형식으로 갖습니다. base_image = K.variable(preprocess_image(base_image_path)) style_reference_image = K.variable(preprocess_image(style_reference_image_path)) @@ -143,7 +143,6 @@ def deprocess_image(x): # 먼저 4가지 함수들을 정의할 필요가 있습니다. 
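    # (참고) 아래의 103.939, 116.779, 123.68 은 ImageNet 데이터셋의 BGR 채널별 평균 픽셀값으로, 전처리 단계에서 뺐던 값을 다시 더해 주는 것입니다.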
# 이미지 tensor의 gram matrix(feature-wise 외적연산) - def gram_matrix(x): assert K.ndim(x) == 3 if K.image_data_format() == 'channels_first': @@ -155,8 +154,6 @@ def gram_matrix(x): # 스타일 손실은 생성된 이미지에서 기존 이미지의 스타일을 유지하도록 설계됩니다. # 스타일 기준 이미지와 생성된 이미지에서 특징에 대한 gram matrix(스타일 수집)를 기반으로 합니다. - - def style_loss(style, combination): assert K.ndim(style) == 3 assert K.ndim(combination) == 3 @@ -168,14 +165,11 @@ def style_loss(style, combination): # 보조 손실 함수는 생성된 이미지에서 # 기존 이미지의 '콘텐츠'를 유지하도록 설계되었습니다. - - def content_loss(base, combination): return K.sum(K.square(combination - base)) # 3번째 손실함수인 전체 변화 손실(total variation loss)은 # 생성된 이미지를 지역적으로 일관성있게 유지되도록 설계되었습니다. - def total_variation_loss(x): assert K.ndim(x) == 4 if K.image_data_format() == 'channels_first': @@ -238,7 +232,6 @@ def eval_loss_and_grads(x): # 한번으로 손실과 기울기를 검색하는 동시에 계산을 할 수 있도록 합니다. # 왜 그렇게 진행했냐면 scipy.optimize는 손실과 기울기에 대한 별도의 함수를 # 요구하지만 따로 계산할 경우 비효율적일수 있기 때문입니다. - class Evaluator(object): def __init__(self): diff --git a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py index 17b2e09..66822e4 100644 --- a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py +++ b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py @@ -82,7 +82,7 @@ def plot_results(models, plt.show() filename = os.path.join(model_name, "digits_over_latent.png") - # 30X30 2D형태의 숫자들을 표시. + # 30X30 2D형태의 숫자들을 표시합니다. n = 30 digit_size = 28 figure = np.zeros((digit_size * n, digit_size * n)) From ab3f4c9bdfdf2241496bfb5e309763379db60f70 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Wed, 24 Oct 2018 23:04:08 +0900 Subject: [PATCH 29/49] =?UTF-8?q?#25=20:=20=EB=B2=88=EC=97=AD=20=EB=82=B4?= =?UTF-8?q?=EC=9A=A9=20=EC=88=98=EC=A0=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../25_keras_examples_3.md | 2 +- .../neural_doodle.py | 20 +++++++++---------- .../variational_autoencoder.py | 16 +++++++-------- .../variational_autoencoder_deconv.py | 16 +++++++-------- 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md b/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md index f72073c..59f2a37 100644 --- a/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md +++ b/25_Keras_examples_3_Generative_models_examples/25_keras_examples_3.md @@ -20,7 +20,7 @@ 케라스로 Deep Dream. [neural_doodle.py](neural_doodle.py) -신경망으로 낙서하기. +Keras를 이용해 신경망으로 낙서하기 [neural_style_transfer.py](neural_style_transfer.py) Neural style transfer. diff --git a/25_Keras_examples_3_Generative_models_examples/neural_doodle.py b/25_Keras_examples_3_Generative_models_examples/neural_doodle.py index b9bba49..f04889b 100644 --- a/25_Keras_examples_3_Generative_models_examples/neural_doodle.py +++ b/25_Keras_examples_3_Generative_models_examples/neural_doodle.py @@ -93,7 +93,7 @@ content_weight = 0.1 if use_content_img else 0 content_feature_layers = ['block5_conv2'] -# 생성이 더 잘되도록, 스타일 피쳐(style feature)용 컨볼루션 레이어를 더 많이 사용 +# 생성이 더 잘되도록, 스타일 피쳐(style feature)용 컨볼루션 레이어를 더 많이 사용합니다. style_feature_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1'] @@ -169,7 +169,7 @@ def load_mask_labels(): np.expand_dims(target_mask, axis=0)) -# 이미지를 위해 텐서형 변수들을 생성 +# 이미지를 위해 텐서형 변수들을 생성합니다. 
if K.image_data_format() == 'channels_first': shape = (1, num_colors, img_nrows, img_ncols) else: @@ -184,7 +184,7 @@ def load_mask_labels(): images = K.concatenate([style_image, target_image, content_image], axis=0) -# 시맨틱 라벨을 위해 텐서형 변수들을 생성 +# 시맨틱 라벨을 위해 텐서형 변수들을 생성합니다. raw_style_mask, raw_target_mask = load_mask_labels() style_mask = K.variable(raw_style_mask.astype('float32')) target_mask = K.variable(raw_target_mask.astype('float32')) @@ -197,7 +197,7 @@ def load_mask_labels(): # 이미지 모델은 VGG19 image_model = vgg19.VGG19(include_top=False, input_tensor=images) -# 나열된 풀링 레이어으로 시맨틱 모델 표현 +# 나열된 풀링 레이어으로 시맨틱 모델 표현합니다. mask_input = Input(tensor=masks, shape=(None, None, None), name='mask_input') x = mask_input for layer in image_model.layers[1:]: @@ -209,7 +209,7 @@ def load_mask_labels(): x = AveragePooling2D((2, 2), name=name)(x) mask_model = Model(mask_input, x) -# 이미지 모델과 시맨틱 모델에서 피쳐들을 수집 +# 이미지 모델과 시맨틱 모델에서 피쳐들을 수집합니다. image_features = {} mask_features = {} for img_layer, mask_layer in zip(image_model.layers, mask_model.layers): @@ -221,7 +221,7 @@ def load_mask_labels(): mask_features[layer_name] = mask_feat -# 손실 함수를 정의 +# 손실 함수를 정의합니다. def gram_matrix(x): assert K.ndim(x) == 3 features = K.batch_flatten(x) @@ -232,7 +232,7 @@ def gram_matrix(x): def region_style_loss(style_image, target_image, style_mask, target_mask): ''' (boolean형) 마스크로 지정된 하나의 공통 영역에 대해 - 스타일 이미지와 목표 이미지 사이의 스타일 손실값을 계산 + 스타일 이미지와 목표 이미지 사이의 스타일 손실값을 계산합니다. ''' assert 3 == K.ndim(style_image) == K.ndim(target_image) assert 2 == K.ndim(style_mask) == K.ndim(target_mask) @@ -291,7 +291,7 @@ def total_variation_loss(x): # 전체 손실값은 컨텐츠, 스타일, 전체 변화 손실의 가중치를 계산한 합산. -# 각 개별 손실 함수는 이미지/시맨틱 모델로부터 추출한 피쳐들을 사용. +# 각 개별 손실 함수는 이미지/시맨틱 모델로부터 추출한 피쳐들을 사용합니다. loss = K.variable(0) for layer in content_feature_layers: content_feat = image_features[layer][CONTENT, :, :, :] @@ -355,7 +355,7 @@ def grads(self, x): evaluator = Evaluator() -# 반복 최적화로 이미지를 생성 +# 반복 최적화로 이미지를 생성합니다. if K.image_data_format() == 'channels_first': x = np.random.uniform(0, 255, (1, 3, img_nrows, img_ncols)) - 128. else: @@ -367,7 +367,7 @@ def grads(self, x): x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20) print('Current loss value:', min_val) - # 현재 생성된 이미지를 저장 + # 현재 생성된 이미지를 저장합니다. img = deprocess_image(x.copy()) fname = target_img_prefix + '_at_iteration_%d.png' % i save_img(fname, img) diff --git a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder.py b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder.py index a5b0853..c335cf9 100644 --- a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder.py +++ b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder.py @@ -45,7 +45,7 @@ def sampling(args): z_mean, z_log_var = args batch = K.shape(z_mean)[0] dim = K.int_shape(z_mean)[1] - # 기본설정으로, random_normal는 mean=0, std=1.0로 지정되있음. + # 기본설정으로, random_normal는 mean=0, std=1.0로 지정되있습니다. epsilon = K.random_normal(shape=(batch, dim)) return z_mean + K.exp(0.5 * z_log_var) * epsilon @@ -54,7 +54,7 @@ def plot_results(models, data, batch_size=128, model_name="vae_mnist"): - """2차원 은닉 벡터의 함수로서 라벨과 MNIST 숫자를 표시 + """2차원 은닉 벡터의 함수로서 라벨과 MNIST 숫자를 표시합니다 # Arguments: models (tuple): 인코더와 디코더 모델 @@ -80,7 +80,7 @@ def plot_results(models, plt.show() filename = os.path.join(model_name, "digits_over_latent.png") - # 30X30 2D형태의 숫자들을 표시. + # 30X30 2D형태의 숫자들을 표시합니다. 
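    # (참고) 잠재 공간을 n x n 격자로 샘플링하고 디코더로 복원한 숫자 이미지들을 한 장의 큰 그림으로 이어 붙입니다.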
n = 30 digit_size = 28 figure = np.zeros((digit_size * n, digit_size * n)) @@ -139,22 +139,22 @@ def plot_results(models, # Tensorflow 백엔드에서는 "output_shape"이 필요하지 않습니다. z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var]) -# 인코더 모델을 인스턴스화(instantiate) +# 인코더 모델을 인스턴스화(instantiate) 합니다. encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder') encoder.summary() plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True) -# 디코더 모델 설계 +# 디코더 모델을 설계합니다. latent_inputs = Input(shape=(latent_dim,), name='z_sampling') x = Dense(intermediate_dim, activation='relu')(latent_inputs) outputs = Dense(original_dim, activation='sigmoid')(x) -# 디코더 모델 인스턴스화 +# 디코더 모델을 인스턴스화 합니다. decoder = Model(latent_inputs, outputs, name='decoder') decoder.summary() plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True) -# VAE 모델 인스턴스화 +# VAE 모델을 인스턴스화 합니다. outputs = decoder(encoder(inputs)[2]) vae = Model(inputs, outputs, name='vae_mlp') @@ -192,7 +192,7 @@ def plot_results(models, if args.weights: vae.load_weights(args.weights) else: - # 오토인코더 학습 + # 오토인코더를 학습합니다. vae.fit(x_train, epochs=epochs, batch_size=batch_size, diff --git a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py index 6406876..b11161b 100644 --- a/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py +++ b/25_Keras_examples_3_Generative_models_examples/variational_autoencoder_deconv.py @@ -56,7 +56,7 @@ def plot_results(models, data, batch_size=128, model_name="vae_mnist"): - """2차원 은닉 벡터의 함수로서 라벨과 MNIST 숫자를 표시 + """2차원 은닉 벡터의 함수로서 라벨과 MNIST 숫자를 표시합니다. # Arguments: models (tuple): 인코더와 디코더 모델 @@ -131,7 +131,7 @@ def plot_results(models, epochs = 30 # VAE model = encoder + decoder -# 인코더 모델 설계 +# 인코더 모델을 설계합니다 inputs = Input(shape=input_shape, name='encoder_input') x = inputs for i in range(2): @@ -145,7 +145,7 @@ def plot_results(models, # 디코더 모델을 설계하기 위해 입력값의 형태를 가져오기. shape = K.int_shape(x) -# Q(z|X)에서 은닉 벡터 생성하기 +# Q(z|X)에서 은닉 벡터을 생성합니다. x = Flatten()(x) x = Dense(16, activation='relu')(x) z_mean = Dense(latent_dim, name='z_mean')(x) @@ -155,12 +155,12 @@ def plot_results(models, # Tensorflow 백엔드에서는 "output_shape"이 필요하지 않습니다. z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var]) -# 인코더 모델을 인스턴스화(instantiate) +# 인코더 모델을 인스턴스화(instantiate) 합니다. encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder') encoder.summary() plot_model(encoder, to_file='vae_cnn_encoder.png', show_shapes=True) -# 디코더 모델 설계 +# 디코더 모델를 설계합니다. latent_inputs = Input(shape=(latent_dim,), name='z_sampling') x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs) x = Reshape((shape[1], shape[2], shape[3]))(x) @@ -179,12 +179,12 @@ def plot_results(models, padding='same', name='decoder_output')(x) -# 디코더 모델 인스턴스화 +# 디코더 모델을 인스턴스화 합니다. decoder = Model(latent_inputs, outputs, name='decoder') decoder.summary() plot_model(decoder, to_file='vae_cnn_decoder.png', show_shapes=True) -# VAE 모델 인스턴스화 +# VAE 모델을 인스턴스화 합니다. outputs = decoder(encoder(inputs)[2]) vae = Model(inputs, outputs, name='vae') @@ -218,7 +218,7 @@ def plot_results(models, if args.weights: vae.load_weights(args.weights) else: - # 오토인코더 학습 + # 오토인코더을 학습합니다. 
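        # (참고) 재구성 손실과 KL 발산은 모델 정의 단계에서 add_loss()로 추가된 것으로 보이며, 그래서 fit()에는 별도의 타깃(y) 없이 입력 데이터만 전달합니다.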
vae.fit(x_train, epochs=epochs, batch_size=batch_size, From 01954ee0ce63d9fc3f64e184f897f99f0aced5c7 Mon Sep 17 00:00:00 2001 From: Karl Kim Date: Thu, 25 Oct 2018 15:22:00 +0900 Subject: [PATCH 30/49] =?UTF-8?q?#175=20[=EC=98=81=EA=B7=9C]=20=ED=94=84?= =?UTF-8?q?=EB=A1=9C=ED=95=84=20=EB=B0=8F=20=EC=82=AC=EC=A7=84=20=EC=B6=94?= =?UTF-8?q?=EA=B0=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 12 ++++++------ profile/contributor_YK.png | Bin 0 -> 24010 bytes 2 files changed, 6 insertions(+), 6 deletions(-) create mode 100644 profile/contributor_YK.png diff --git a/README.md b/README.md index 7eb6bc8..187017b 100644 --- a/README.md +++ b/README.md @@ -21,10 +21,10 @@ ## 멋쟁이 컨트리뷰터 🦄 -| 😁 | NAME | HOBBY | CONTRIBUTIONs | +| 😁 | NAME | HOBBY | CONTRIBUTIONs | |------|----------|----------|-----------| -| 😉 | [김수정](https://github.com/SooDevv) | 🕹 게임하기 | [Fasion-MNIST](https://keraskorea.github.io/posts/2018-09-28-딥러닝의%20Hello%20World,%20Fashion-MNIST/)
    [안드로이드에 MNIST 심기]() | -| | | +| 😉 | [김수정](https://github.com/SooDevv) | 🕹 게임하기 | [Fasion-MNIST](https://keraskorea.github.io/posts/2018-09-28-딥러닝의%20Hello%20World,%20Fashion-MNIST/)
    [안드로이드에 MNIST 심기]() | +| ![](./profile/contributor_YK.png) | [김영규](https://github.com/karl6885) |고양이, 춤, 유투브 보기|[파이썬과 케라스를 이용한 알파제로 만들기]()
    [Keras의 Autoencoder를 활용해 신용카드 이상 거래 탐지하기]()
    [Keras를 활용한 주식 가격 예측]()| > 오해하지 마세요, 외모 순서가 아니라 ㄱ-ㄴ-ㄷ순서입니다. @@ -32,9 +32,9 @@ ---- **METORED BY** -| 😁 | NAME | HOBBY | +| 😁 | NAME | HOBBY | |------|------|--------| -| ![태영](media/readme_tykim.png) | [김태영](https://github.com/tykimos) | 다른 사람 취미 같이하기 | -| ![미정](media/readme_mjjeon.png) | [전미정](https://github.com/mijeongjeon) | 🐱☕️🍷🥖🛫📸📝 | +| ![태영](media/readme_tykim.png) | [김태영](https://github.com/tykimos) | 다른 사람 취미 같이하기 | +| ![미정](media/readme_mjjeon.png) | [전미정](https://github.com/mijeongjeon) | 🐱☕️🍷🥖🛫📸📝 | ##### 컨트리뷰터가 되어 함께 작업하고 싶다면 언제든 연락주세요! [📮](mailto:ninevincentg@gmail.com) \ No newline at end of file diff --git a/profile/contributor_YK.png b/profile/contributor_YK.png new file mode 100644 index 0000000000000000000000000000000000000000..d465e10f72ef457f262a1b521a0e4c631872f3a2 GIT binary patch literal 24010 zcmafbWmFv7wk~eL-61#xXe78p<1PV$ySuvvr*VQi1h?R>-8h5&Tq;$*ZNVlsyj+WNg54>7zGLn3Qbl9sP^Y-_ScDo@aNNMD^&vu3R=@z zLPAAWLV{Ao#nHmr&KwGgA<@*xNKKZRVc6K%$Y^-t10#xyr&?rWteR2aa4-6oPE-?A zl&BFfd>k~iuqZ7m8!?_=2c#FN5O5qa&QHtLB->=rJ8$ysVm780n8h|*1XYtVvt>s4 zf*7fu{4QFKkdRP4$Zq#42?nzRb{L-#h96}(7uKXyVu3{Tga+veNn24SfiuA$c34wp zQU)%w?;dt|PF5USS=vP66du|kZG0;vdr&S6$`q!C7B;0=GbPAs|2br|aQt^HHoUYE zLkW}D9_{fV10fM%CMR1^KWX8kDL?b)Ne0GI`dWZAZ2@RE* zO$Y@Adtj}h?WV1$z;Ehk&uVPuXkyOlW$*OI8VX9#i~rBi-rUWY(#zh?!Ij@ji0U5} z{C~!Oi2+oU|1fd06{6BsRH2k`bTOyoV*SX6H)67%LarJg^GxlO}aHam+$$$9)n!B31SUb5{ zJ33JQ<=5E6(cMjmit4XG|9$+Or@5E)zalxf{$^Or|LhU>Zaxn<`TwEZtgDT8s1Lk!v6*QABulN{xxs- zm0hg=H2Lq|{z>wG$o|t_&D_<|&i${c@X5j2P54jy{sH+9@jp?2ci`XF#y_UP-_k!T zPZ&iI@ZYOg7{$`Pb{z^z6iOB-rr`zc-{l=^e(1j7xn}TW(Dljjm(axFB45#a=`AXq zV(^c8W*Ow-g|CT&kywn*L6lLjL+mhwY**57NKx1VF?QOjs4@6Ek=V4VTiDSlyjjDa zhZoy@+bw<0YmSy$T6)sf&sQo6US%C-EAQ62)?K=NGR6e&;-5u;|i63*(R+P=U9)LbTp z))RgcIoKz?E!QLr<>HsVHE&IdfeYw)S9?ryQ7%FVtk!X*l*0CyWbZlkOC?(Yx3l_0 zA|fb1qxHtreU0eX+h_#%WjCaGRsN_QB+YSv&U2a;LJ-=hn$v?rQeMdJ#{JaI#!U90 z2lE~(6E0tob@CYTqY-*(luilV!D0=nS_Z%dWCOFD43*sw^M&El18Pt}t%lvez1y(u z_-5e|=66aZcDhm*heO(sQB0vBGjvMOLadShkT=FZb%f3<}^=+m{F?~wT`^ten_IP{@iT5R2xjuP2toXlWU5#2%($x zC>KHnn^v0b^f_Iqv?FF$&2`ihN2qHgE*2lN7$3o=WJ!pm#^*{T{80FxP%~3{I1}y( ziP>ZOcGBuST2>k+VIesYr{1y~!t;7gvweJec|{K&B1S9wQxNM!b7Ajkw1XKE8SnVM z$q+nn6*DH%Z}>SSV)rB8r?_Ge&bJArP|>m>y9QZp9=rQA?nk_0kGd~Y(|*CInM(Dp z8P}dvwN!Fuz$WCNHSFh5m`T_nu4IUWy>eabJvT>4OsLV7RT>m9A!;6BOd+Hj@54ZF z>wC!!!i$M;p!~%h`*rXNBa|LHJ?J`a&yIC2k97Gw_Ok@X3oG_@`-iOr^$-1h-m~fR z*csmXBS8Nj0IVUFV@_=9WX&7IuBA_ltibV6VGhC5awSYRv7LAG= zXsu-$8s?Zk@S9<)gio=ni~+_Tr)_d`NJsB@MKzj(FOJkR3K3C%nwDjn)>UA~ajU-c zx!n#F(@cQR910o9pxnEi2g$r!OuRZwnBuJ$x6vzX7-Sx4&-+P95(%-K-3J~CbE-8&ISac z0=~GUNGeeuqhtf$P @m`-;3B51HF{6#bp87txtLogD{iLv@5pZd$>Eavyd`Qkqc z-X|G+ZU#V9!;q_7X{siQ%u@>BU0E7nXSW8hQqhZEX& zA}-;bWGldTlS&&C^rqmVHm_(%cZH6mM5`H#Vt4JTh2B!TVJguSt3Z3npzg%SutM#V zoCHRCA@9lG?=l54)de++`(Xg-nBB{AwR2pZK;S(?NNR83CPQWJ}}MdoN9e0&;A4y}Zl!>*yLF{``p+r2(rYIYBao)gcwH+A1<<1q;Sg zU5$kGjb6XvxeL4J7PAmyklzx{m#>*3RKD+7~}|2m(o1R1a&$W zY`Y~1){Z&z?v3NTqL!`s$RsJ z9S6IDkW2(LX}yXD{wl5`hNX)rK|cZ3K1z;?0YBVNPm&I8MzapMpS6DT7v{QJhsIe0 z-cUbm8OV3LYW^V0CRoJtTLq?HvY z9=UD`r?1B`ez^6_hxI~l3h`n@(uAw2Q2nab8&xyzO{9FYN3hIPvOz#+zKFQ)YByx^ zRRTnXDzU^ts`l**UmYA4uEhv^z0uUW+~la%lyI@W-CPBrt0J|d)3?&ZF9m7y6_k2p zI@zWtq*E;>3Z<@o1e&c;0XLawF%atkJE;!4$@PJ;hg?gdA}Ic4yy}NB;?W8cfkz@j z{pO1Wt-Zto+BhTy7?)R3D7XITX|k+qB7z^Kq18@k6(7S>ZbxwAv>+s;VvvcX4J0g!I(99v zJ|tm4ZSluweb4E;+!$s~ZrT$uO=yja zL}4`4su_e)6)O!x&(|4K;He*ugZ;}LR|)%|5hJ3m9fBI<+J!AlKREK^73yL$5!ZFI zQ?IOy1z!_l6ez37deyQUg8lbumL-ofgr9&|Eg$uMEXg*YJdr1g)Q2cQ^uobr*Qjmtr+(qO9}xVsz@*T{hPt$oH&O zxda}N#)wEQPNdJ0Asc9i1(}GIqOsqB$8Lp_q}%G-s$|vFNs?>TIuU${Y)^6>+-lQY 
zc~Kjf0VD}*sjSN0pG+bGyqj}8GCXc@%i2D$j}Sb%&txZ=JQ)ZAXB4mNP08w$A6)|P z3IP|AtV}Esje1iJFo9}I4tH^B;N&s@5dk7Q4IdK~5xTmW^NBd;R`eb7K$Xdnyc{Pv zopiNQNvoW%3CCDJOKll@Sls>0I2G#2Hh3`4*`?$0pZ@IA@#3W$@zs}akZj}M`;bkbtOaqRo&da0*1#?A zGsB(u+n>G|3p_!HLJY(YufPdhtcJ6}wxavJ0%!Gz|L98H>c4)glcM*bmC1Lwk_i~x zS*DaoQ=j3vSM~VLOW&msZi7-D3LIb_hD$~enWi+1PYuko5q+Hj7uQPi&}VZre}7!UnEr&{oNOuCgS!HwmZONrrxKwRn*_#?dMO5jKt63oT+HlT? z+YhN|#}$SH+S&>P6s!onfl55gHfEJdN88E$>h%Y3FiK~L}-9r zVNxAA$MF+$@h}fNnBc!OeDvb;8SMHbYg(nbwYHgs0}8Ofpy?`MPpjS$<+BT?@Jyzt zTN=fGEQ0{rV00|@VrvmD8R5J>fBthsfEhtoy9xFF`x~*bOjsn>yHMSstMMm=Vpdk z`{{i*^vTq-R(pXAiW8ropNc z4`4AqqaKkW03Hpo$7GupCu4WKr_TR?CNkU`aor>Oqwu6$;^UFV31;d%iD7+>eE=5a z(|n*KO`V*j1D_GHC)LwN+=+CM;R1k?4JFV45{pN4r!^_4Kwpt(VCrL{v>9@yDnc*E zXL!}H{&JpRNm{cDN}5l3>%-w=+!x;PhE-~|v|}k!zyN0bO?#EqF6^nOzEXVx%Pv)g z-Uq*qfp#a=wtX0J3>64Pvi6{AF`W$O3q)G@#%-R=PthMLZ;7IZOAojgp1Y=$m@VNagUq&@xEIA23@oIDTGzHO|!J<%VPgNs`Rh8Jr zJ0p1PWboK82J6H_-0QbEGnp=-lcnp+l_GwCW0_8`jjjbwAeza{5j+-Mz!Z59E!U~u zbWt--QPk|qIlVkN7m7ewYMOw@=kvnAvFKCdux$om#xtoB=;JLG$spJix>Lxfm!va+ zb76nRk#jAJRXvLq(;&zPg$s1Rt>54&a1|!$F+STbK*nw%ZtaMvkA&1UiA;J6Cm>qE zGr}{zu07!4pp=hdT2V==bGeL{xPU>t-_cqH<2r3njw3;q_|G#^OpIw0F^_pG-xT#@_jt5M9WLL(X&FP}CYfZ1BJ>9kNRQ0I2~~jO=T-ynnZjXQ z|3kwxk0kN9%TvQ!AiTljWL>v=DF7&PL_i@mK;)4b2>NrMBPA?^_ePbhh4!cj(f}}{ z&QW1dp`0icIcm||FaklA#z-tFWKKU-QmP@obDGgO2hWRC&O50U^D4jGCZ>`55OTFM zq&V~0Lt7$)D4lKrNF2SmEV2Mr$M#m}HJ&nAhl?jH#A8ko1`j0C^{;eNd?Fq?40Xr- zexz%3<&8ciP9`3ambwy4N1dC^ZZ*9t(;HTm97hU!NX%Ud(o^#3shty2nO7z*yi{qg z;YO`r~$>xEh}>oZuq) znv2v8EX15iVJYv(^}DjFM2Jh(jcJISG&_RqWbpzRIZ;(543H={YSiH{8L$?FcS1Q& zg6M+k0OUXMg-Hr~!8JLbqmM0dx6uD0on!?FBWqQW#8fKNjb|-m>_Gb5`ZXll*eGEY zDa^HxCpCl37>D^zkcYa(9uIkNZUg6`2;R~R_$hs^MGU$Q&S__T{CW*M_|9ax3%Ceu zA6&Y{CvHER~|}aK4_7KhEEPyB(wO7F~z%AuaS9a4*WX1?CJZ z<5}wCGgG#CiuN>5$2$gLbW*MyJXF(LtVh|AoI=uk#+TSLHd2l1k@^&*3OMwhd~;Ub zD>BJ5WA41a8aFRrk24eE7kSm7$Yj8fxJiP5acvB1)Xx3CQ_B6z)ga_j00000NkvXX Hu0mjfL4i9u literal 0 HcmV?d00001 From 11585f6884896f7191a067015af51ddd2634a1c4 Mon Sep 17 00:00:00 2001 From: mike2ox Date: Thu, 25 Oct 2018 21:17:05 +0900 Subject: [PATCH 31/49] =?UTF-8?q?#42=20:=201=EC=B0=A8=20=EB=B2=88=EC=97=AD?= =?UTF-8?q?=20=EC=99=84=EB=A3=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...h_as_your_first_deep_learning_framework.md | 49 +++++++++++++++++-- 1 file changed, 45 insertions(+), 4 deletions(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index 213a647..ebff174 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -99,32 +99,73 @@ PyTorch보다 간단한 Keras는 더이상 장난감을 의미하진 않는다. > 지난 6년간 43k개의 ML논문을 기반으로, arxiv 논문들에서 딥러닝 프레임워크에 대한 언급에 대한 자료입니다. Tensorflow는 전체 논문의 14.3%, PyTorch는 4.7%, Keras 4.0%, Caffe 3.8%, Theano 2.3%, Torch 1.5. MXNet/chainer/cntk는 1% 이하로 언급되었습니다. [참조](https://t.co/YOYAvc33iN) - Andrej Karpathy (@karpathy) -두 프레임워크는 만족스러운 참고문서를 갖고 있지만, PyTorch는 강력한 커뮤니티 지원을 제공합니다. +두 프레임워크는 만족스러운 참고 문서를 갖고 있지만, PyTorch는 강력한 커뮤니티 지원을 제공합니다. 해당 커뮤니티 게시판은 당신이 난관에 부딪쳤거나 참고 문서나 스택오버플로우는 당신이 필요로 하는 정답이 없다면 방문하기 좋은 곳이다. +anecdotally, 우리는 특정 신경망 구조에서 초심자 수준의 딥러닝 코스를 PyTorch보다 Keras로 더 쉽게 접근할 수 있다는 걸 발견했습니다. Keras에서 제공하는 코드의 가독성과 실험을 쉽게 해주는 장점으로 인해, Keras는 딥러닝 열광자, 튜터, 고수준의 kaggle 우승자들에 의해 많이 쓰이게 될 겁니다. 
+ +Keras 자료와 딥러닝 코스의 예시로, ["Starting deep learning hands-on: image classification on CIFAR-10"](https://blog.deepsense.ai/deep-learning-hands-on-image-classification/#_ga=2.52232937.114026073.1540369751-2000517400.1540369751)와 ["Deep Learning with Rython"](https://www.manning.com/books/deep-learning-with-python)를 참조하십시오. PyTorch 자료로는, 신경망의 내부 작업을 학습하는데 더 도던적이고 포괄적인 접근법을 제공하는 공식 튜토리얼을 추천합니다. PyTorch에 대한 전반적인 내용을 보려면, 이 [문서](http://www.goldsborough.me/ml/ai/python/2018/02/04/20-17-20-a_promenade_of_pytorch/)를 참조하세요. #### 요약 +- Keras : 튜토리얼이나 재사용 가능한 코드로의 접근성이 좋음 +- PyTorch : 뛰어난 커뮤니티와 활발한 개발 -### Keras vs PyTorch : 디버깅과 introspection +### Keras vs PyTorch : 디버깅과 코드 복기(introspection) +추상화에서 많은 계산 조각들을 묶어주는 Keras는 문제를 발생시키는 외부 코드 라인을 고정시키는 게 어렵습니다. 좀 더 장황하게 구성된 프레임워크인 PyTorch는 우리의 스크립트 실행을 따라갈 수 있게 해줍니다. 이건 Numpy를 디버깅하는 것과 유사합니다. 우리는 쉽게 코드안의 모든 객체들에 접근할 수 있고, 어디서 오류가 발생하는 지 알려 주는 상태(혹은 기본 python식 디버깅)를 출력할 수 있습니다. +Keras로 기본 신경망을 만든 사용자들은 PyTorch 사용자들보다 잘못된 방향으로 갈 가능성이 적습니다. 하지만 일단 잘못되기 시작하면, 많이 힘들고 종종 막힌 코드 라인을 찾기 힘듭니다. PyTorch는 모델의 목잡성과 관련없이 보다 직접적이고 컨볼루션이 아닌 디버깅 경험을 제공합니다. 또한, 의심스러운 경우 PyTorch 레포를 쉽게 조회해 코드를 읽어볼 수 있습니다. #### 요약 +- PyTorch : 더 좋은 디버깅 기능을 제공 +- Keras : (잠재적으로) 단순 신경망 디버깅 빈도수 감소 ### Keras vs PyTorch : 모델을 추출하고 다른 플랫폼과의 호환성 +생산에서 학습된 모델을 내보내고 배포하는 옵션은 무엇인가요? + +PyTorch는 python기반으로 휴대할 수 없는 pickle에 모델을 저장하지만, Keras는 JSON + H5 파일을 사용하는 안전한 접근 방식의 장점을 활용합니다.(일반적으로 Keras에 저장하는게 더 어렵습니다.) 또한 [R에도 Keras](https://keras.rstudio.com/)가 있습니다. 이 경우, R을 사용하여 데이터 분석팀과 협력해야 할 수도 있습니다. + +Tensorflow에서 실행되는 Keras는 [모바일용 Tensorflow](https://www.tensorflow.org/mobile/mobile_intro)(혹은 [Tensorflow Lite](https://www.tensorflow.org/mobile/tflite/index))를 통해 모바일 플랫폼에 구축할 수 있는 다양한 솔리드 옵션을 제공합니다. [Tensorflow.js](https://js.tensorflow.org/) 혹은 [Keras.js](https://github.com/transcranial/keras-js)를 사용하여 멋진 웹 애플리케이션을 배포할 수 있습니다. 예를 들어, Piotr와 그의 학생들이 만든, [시험 공포증 유발 요소를 탐지하는 딥러닝 브라우저 플러그인](https://github.com/cytadela8/trypophobia)를 보세요. + +PyTorch 모델을 추출하는 건 python 코드때문에 더 부담되기에, 현재 많이 추천하는 접근방식은 [ONNX](https://pytorch.org/docs/master/onnx.html)를 사용하여 PyTorch 모델을 Caffe2로 변환하는 것입니다. + #### 요약 +- Keras : (Tensorflow backend를 통해) 더 많은 개발 옵션을 제공하고, 모델을 쉽게 추출할 수 있음. ### Keras vs PyTorch : 성능 +> 미리 측정된 최적화는 프로그래밍에서 모든 악의 근원입니다. - Donald Knuth + +대부분의 인스턴스에서, 속도 측정에서의 차이는 프레임워크 선택을 위한 주요 요점은 아닙니다.(특히, 학습할 때) GPU 시간은 데이터 과학자의 시간보다 더 인색합니다. 게다가, 학습하는 동안 발생하는 성능의 병목현상은 실패한 실험이나, 최적화하지 않은 신경망이나 데이터 로딩(loading)이 원인일 수 있습니다. 완벽을 위해, 여전히 우리는 해당 주제를 다뤄야할 compel을 느낍니다. 우리는 두 가지 비교사항을 제안합니다. + +- [Tensorflow, Keras 그리고 PyTorch를 비교](https://wrosinski.github.io/deep-learning-frameworks/) by Wojtek Rosinski +- [딥러닝 프레임워크들에 대한 비교 : 로제타 스톤식 접근](https://github.com/ilkarman/DeepLearningFrameworks/) by Microsoft +> 더 상세한 multi-GPU 프레임워크 비교를 보려면, [이 글](https://medium.com/@iliakarmanov/multi-gpu-rosetta-stone-d4fa96162986)을 참조하세요 + +PyTorch는 Tensorflow만큼 빠르며, RNN에선 잠재적으로 더 빠릅니다. Keras는 지속적으로 더 느립니다. 위의 첫 번째 비교를 작성한 저자가 지적했듯이, 고성능 프레임워크의 연산 효율성 향상(대부분 PyTorc와 Tensorflow)은 빠른 개발 환경과 Keras가 제공하는 실험의 용이성보다 더 중요할 것입니다. ![Tesla p100](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_2.png) ![Tesla K80](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_3.png) #### 요약 +- 학습 속도에 대한 걱정과 달리, PyTorch가 Keras를 능가 ### Keras vs PyTorch : 결론 +Keras와 PyTorch는 배우기위한 첫번째 딥러닝 프레임워크로 좋은 선택입니다. +만약 당신이 수학자, 연구자, 혹은 당신의 모델이 실제로 어떻게 작동하는지 알고 싶다면, PyTorch를 선택하길 권장합니다. 고급 맞춤형 알고리즘(그리고 디버깅)이 필요한 경우(ex. 
[YOLOv3](https://blog.paperspace.com/how-to-implement-a-yolo-object-detector-in-pytorch/) 혹은 [LSTM](https://medium.com/huggingface/understanding-emotions-from-keras-to-pytorch-3ccb61d5a983)을 사용한 객체 인식) 또는 신경망 이외의 배열 식을 최적화해야 할 경우(ex. [행렬 분해](http://blog.ethanrosenthal.com/2017/06/20/matrix-factorization-in-pytorch/) 혹은 [word2vec](https://adoni.github.io/2017/11/08/word2vec-pytorch/) 알고리즘)에 빛을 발합니다. + + plug & play 프레임워크를 원한다면, Keras는 확실히 더 쉬울 겁니다. 즉, 수학적 구현의 세부 사항들에 많은 시간을 들이지 않고도 모델을 신속하게 제작, 학습 그리고 평가할 수 있습니다. + +수정 : 실제 사례에 대해 코드를 비교하려면, 이 [기사](https://deepsense.ai/keras-vs-pytorch-avp-transfer-learning)를 참조하세요 + +딥러닝의 핵심 개념에 대한 지식은 유동성이 있습니다. 어떤 환경에서 기본사항을 숙지하고나면, 다른 곳에 적용하고 새로운 딥러닝 라이브러리로 전환할 때 이를 시행할 수 있다는 점입니다. + +Keras와 PyTorch에서 간단한 딥러닝 방법을 사용해 보는 것을 권장합니다. 당신이 가장 좋아하고 가장 덜 좋아하는 요소는 무엇입니까? 어떤 프레임워크 경험이 더 마음에 드시나요? + +Keras, Tensorflow 그리고 PyTorch의 딥러닝에 대해 자세히 알고 싶은가요? [맞춤형 교육 서비스](https://deepsense.ai/tailored-team-training-tracks/)를 확인하세요. + ### 참고문서 -* [참고 사이트 1]() -* [참고 사이트 2]() +* [케라스 공식 홈페이지](https://keras.io/) +* [파이토치 공식 홈페이지](https://pytorch.org/) > 이 글은 2018 컨트리뷰톤에서 [`Contribute to Keras`](https://github.com/KerasKorea/KEKOxTutorial) 프로젝트로 진행했습니다. From 3e0c232ab57ebd9b8d353d39a374fe66f9584911 Mon Sep 17 00:00:00 2001 From: moonhyeok song Date: Thu, 25 Oct 2018 21:20:07 +0900 Subject: [PATCH 32/49] Update 32_building_a_simple_keras_deep_learning_rest_api.md --- 32_building_a_simple_keras_deep_learning_rest_api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/32_building_a_simple_keras_deep_learning_rest_api.md b/32_building_a_simple_keras_deep_learning_rest_api.md index 5402c81..7ecb6ca 100644 --- a/32_building_a_simple_keras_deep_learning_rest_api.md +++ b/32_building_a_simple_keras_deep_learning_rest_api.md @@ -27,7 +27,7 @@ #### 개발 환경 구축 우선 Keras가 컴퓨터에 이미 구성/설치되어 있다고 가정합니다. 만약 아닐 경우, [공식 설치 지침](https://keras.io/#installation)에 따라 Keras를 설치하세요. -여기서부터, python 웹 프레임워크인 [Flask](http://flask.pocoo.org/)를 설치해야 API 엔드포인트를 구축할 수 있습니다. 또한, API도 사용할 수 있도록 [요청](http://docs.python-requests.org/en/master/)이 필요합니다. +여기서부터, python 웹 프레임워크인 [Flask](http://flask.pocoo.org/)를 설치해야 API 엔드포인트를 구축할 수 있습니다. 또한, API도 사용할 수 있도록 [Request](http://docs.python-requests.org/en/master/)(요청)이 필요합니다. 관련 pip 설치 명령어는 다음과 같습니다. From edb67826881401ca8f8eb71629f22d94991cc104 Mon Sep 17 00:00:00 2001 From: moonhyeok song Date: Thu, 25 Oct 2018 21:22:28 +0900 Subject: [PATCH 33/49] Update 32_building_a_simple_keras_deep_learning_rest_api.md --- 32_building_a_simple_keras_deep_learning_rest_api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/32_building_a_simple_keras_deep_learning_rest_api.md b/32_building_a_simple_keras_deep_learning_rest_api.md index 7ecb6ca..f2d532c 100644 --- a/32_building_a_simple_keras_deep_learning_rest_api.md +++ b/32_building_a_simple_keras_deep_learning_rest_api.md @@ -221,7 +221,7 @@ Using TensorFlow backend. * Running on http://127.0.0.1:5000 ``` -결과물에서 볼 수 있듯이, 모델을 먼저 불러옵니다. 그런 수, Flask 서버를 시작할 수 있습니다. +결과물에서 볼 수 있듯이, 모델을 먼저 불러옵니다. 그 뒤, Flask 서버가 시작됩니다. 이제 http://127.0.0.1:5000을 통해 서버에 엑세스할 수 있습니다. 
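
참고로, 위 패치(32번, 33번 글)에서 설명하는 흐름, 즉 모델을 먼저 메모리에 불러온 뒤 Flask 서버를 시작하고 예측 엔드포인트에서 요청을 처리하는 구조를 보여 주는 최소한의 예시 스케치를 덧붙입니다. 아래 코드는 원문 튜토리얼의 실제 코드가 아니며, ResNet50 모델과 전처리 방식, `/predict`라는 엔드포인트 이름은 설명을 위해 가정한 것입니다.

```python
# 최소한의 Keras + Flask REST API 스케치입니다. (원문 코드가 아닌, 가정에 기반한 예시)
# 모델(ResNet50), 전처리, 엔드포인트 이름(/predict)은 설명을 위해 가정한 것입니다.
import io

import numpy as np
import flask
from PIL import Image
from keras.applications import ResNet50, imagenet_utils
from keras.preprocessing.image import img_to_array

app = flask.Flask(__name__)
model = None


def load_model():
    """요청을 받기 전에 모델을 한 번만 메모리에 올립니다."""
    global model
    model = ResNet50(weights="imagenet")


def prepare_image(image, target=(224, 224)):
    """PIL 이미지를 RGB로 변환하고 크기를 맞춘 뒤, 배치 차원을 추가해 전처리합니다."""
    if image.mode != "RGB":
        image = image.convert("RGB")
    image = image.resize(target)
    array = img_to_array(image)
    array = np.expand_dims(array, axis=0)
    return imagenet_utils.preprocess_input(array)


@app.route("/predict", methods=["POST"])
def predict():
    data = {"success": False}
    if flask.request.files.get("image"):
        # 업로드된 이미지를 읽어 전처리한 뒤 예측합니다.
        image = Image.open(io.BytesIO(flask.request.files["image"].read()))
        preds = model.predict(prepare_image(image))
        results = imagenet_utils.decode_predictions(preds)
        data["predictions"] = [
            {"label": label, "probability": float(prob)}
            for (_, label, prob) in results[0]
        ]
        data["success"] = True
    return flask.jsonify(data)


if __name__ == "__main__":
    load_model()                            # 모델을 먼저 불러온 뒤
    app.run(host="127.0.0.1", port=5000)    # Flask 서버를 시작합니다.
```

서버를 실행한 뒤에는 requests 라이브러리로 `requests.post("http://127.0.0.1:5000/predict", files={"image": open("dog.jpg", "rb")})`와 같이 호출해 응답 JSON을 확인해 볼 수 있습니다.
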
From d7ea179cc3b8e9f1a6b5abb02132073530129c0b Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:51:09 +0900 Subject: [PATCH 34/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index ebff174..c5be071 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -8,7 +8,7 @@ ![Keras_vs_PyTorch](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_0.png) -> 본 글을 읽고 있는 그대, 딥러닝을 배우고 싶나요? 그대가 당신 비즈니스에 적용할 지, 다음 프로젝트에 적용할 지, 아니면 그저 시장성 있는 기술을 갖고 싶은 것인지가 중요하다. 배우기 위해 적절한 프레임워크를 선택하는건 그대 목표에 도달하기 위해 중요한 첫 단계이다. +> 본 글을 읽고 있는 당신, 딥러닝을 배우고 싶나요? 딥러닝을 당신의 사업에 적용하고 싶든, 다음 프로젝트에 적용하고 싶든, 아니면 그저 시장성 있는 기술을 갖고 싶든, 배우기에 적절한 프레임워크를 선택하는 것이 당신의 목표에 도달하기 위해 중요한 첫 단계입니다. 우리는 강력하게 당신이 Keras나 PyTorch를 선택하길 추천합니다. 그것들은 배우기도, 실험하기도 재밌는 강력한 도구들입니다. 우리는 교사나 학생의 입장에서 둘 다 알고 있습니다. Piotr는 두 프레임워크로 워크숍을 진행했고, Rafal은 현재 배우고 있는 중입니다. From e7d228ecb45075745e1a36c761bc7b8034d00ea1 Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:51:30 +0900 Subject: [PATCH 35/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index c5be071..dc5475e 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -18,7 +18,7 @@ Keras와 PyTorch는 데이터 과학자들 사이에서 인기를 얻고있는 딥러닝용 오픈 소스 프레임워크입니다. - [Keras](https://keras.io/)는 Tensorflow, CNTK, Theano, MXNet(혹은 Tensorflow안의 tf.contrib)의 상단에서 작동할 수 있는 고수준 API입니다. 2015년 3월에 첫 배포를 한 이래로, 쉬운 사용법과 간단한 문법, 빠른 설계 덕분에 인기를 끌고 있습니다. 이 도구는 구글에서 지원받고 있습니다. -- [PyTorch](https://pytorch.org/)는 2016년 10월에 배포된, 배열 표현식으로 직접 작업 저수준 API입니다. 작년에 큰 관심을 끌었고, 학술 연구에서 선호되는 솔루션, 맞춤 표현식으로 최적화하는 딥러닝 어플리케이션이 되어가고 있습니다. 이 도구는 페이스북에서 지원받고 있습니다. +- [PyTorch](https://pytorch.org/)는 2016년 10월에 배포된, 배열 표현식으로 직접 작업하는 저수준 API입니다. 작년에 큰 관심을 끌었고, 학술 연구에서 선호되는 솔루션이자, 맞춤 표현식으로 최적화하는 딥러닝 어플리케이션이 되어가고 있습니다. 이 도구는 페이스북에서 지원받고 있습니다. 우리가 두 프레임워크([참조](https://www.reddit.com/r/MachineLearning/comments/6bicfo/d_keras_vs_PyTorch/))의 핵심 상세 내용을 논의하기 전에 당신을 실망시키고자 합니다. - '어떤 툴이 더 좋은가?'에 대한 정답은 없습니다. 선택은 절대적으로 당신의 기술적 지식, 필요성 그리고 기대에 달렸습니다. 본 글은 당신이 처음으로 두 프레임워크 중 한가지를 선택할 때 도움이 될 아이디어를 제공해주는데 목적을 두고 있습니다. From d0440d55402f2ecf083fd96ad90362d3cd277d44 Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:51:37 +0900 Subject: [PATCH 36/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index dc5475e..0606057 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -29,7 +29,7 @@ PyTorch는 수학적으로 연관된 더 많은 사용자들을 위해 더 유 ### 좋아, 근데 다른 프레임워크는 어때? 
-Tensorflow는 대중적인 딥러닝 프레임워크입니다. 그러나, 원시 Tensorflowsms 계산 그래프 구축을 장황하고 모호하게 추상화하고 있습니다. 일단 딥러닝의 기초 지식을 알고 있다면 문제가 되지 않습니다. 하지만, 새로 입문하는 사람에겐 공식적으로 지원되는 인터페이스로써 Keras를 사용하는게 더 쉽고 생산적일 겁니다. +Tensorflow는 대중적인 딥러닝 프레임워크입니다. 그러나, 원시 Tensorflow는 계산 그래프 구축을 장황하고 모호하게 추상화하고 있습니다. 일단 딥러닝의 기초 지식을 알고 있다면 문제가 되지 않습니다. 하지만, 새로 입문하는 사람에겐 공식적으로 지원되는 인터페이스로써 Keras를 사용하는게 더 쉽고 생산적일 겁니다. [수정 : 최근, Tensorflow에서 [Eager Execution](https://www.tensorflow.org/versions/r1.9/programmers_guide/keras)를 소개했는데 이는 모든 python 코드를 실행하고 초보자에게 보다 직관적으로 모델을 학습시킬 수 있게 해줍니다.(특히 tf.keras API를 사용할 때!)] From e6d8fb18b3e28563e15afe2d1168d69b0840b2d8 Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:51:41 +0900 Subject: [PATCH 37/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index 0606057..8db327d 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -37,7 +37,7 @@ Tensorflow는 대중적인 딥러닝 프레임워크입니다. 그러나, 원시 ### Keras vs PyTorch : 쉬운 사용법과 유연성 -Keras와 PyTorch는 작동에 대한 추상화 단게에서 다릅니다. +Keras와 PyTorch는 작동에 대한 추상화 단계에서 다릅니다. Keras는 딥러닝에 사용되는 레이어와 연산자들을 neat(레코 크기의 블럭)로 감싸고, 데이터 과학자의 입장에서 딥러닝 복잡성을 추상화하는 고수준 API입니다. From 7d9070f6f5f1f4e5cfdfae9ff54e94a69c061631 Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:51:46 +0900 Subject: [PATCH 38/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index 8db327d..abaf0b0 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -43,7 +43,7 @@ Keras는 딥러닝에 사용되는 레이어와 연산자들을 neat(레코 크 PyTorch는 유저들에게 맞춤 레이어를 작성하고 수학적 최적화 작업을 볼 수 있게 자율성을 주도록 해주는 저수준 환경을 제공합니다. 더 복잡한 구조 개발은 python의 모든 기능을 사용하고 모든 기능의 내부에 접근하는 것보다 간단합니다. -어떻게 Keras와 PyTorch로 간단한 컨볼루션 신경망을 정의할 지를 head-to-head로 비교해보자. +어떻게 Keras와 PyTorch로 간단한 컨볼루션 신경망을 정의할 지를 head-to-head로 비교해봅시다. #### Keras From 4d4a067c041b11503b2a5b21f017eb856f2756ba Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:51:59 +0900 Subject: [PATCH 39/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index abaf0b0..b4eb209 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -80,7 +80,7 @@ class Net(nn.Module): model = Net() ``` -위 코드 블럭은 두 프레임워크의 차이를 약간 맛보게 해줍니다. 모델을 학습하기 위해, PyTorch는 20줄의 코드가 필요한 반면, Keras는 단일 코드만 필요했습니다. GPU 가속화 사용은 Keras에선 암묵적으로 처리되지만, PyTorch는 CPU와 GPU간 데이터 전송할 때 요구합니다. +위 코드 블럭은 두 프레임워크의 차이를 약간 맛보게 해줍니다. 모델을 학습하기 위해, PyTorch는 20줄의 코드가 필요한 반면, Keras는 단일 코드만 필요했습니다. GPU 가속화 사용은 Keras에선 암묵적으로 처리되지만, PyTorch는 CPU와 GPU간 데이터를 전송할 때 요구합니다. 
만약 초보자라면, Keras는 명확한 이점을 보일 것입니다. Keras는 실제로 읽기 쉽고 간결해 구현 단계에서의 세부 사항을 건너뛰는 동시에 그대의 첫번째 end-to-end 딥러닝 모델을 빠르게 설계하도록 해줄겁니다. 그러나, 이런 세부 사항을 뛰어넘는 건 당신의 딥러닝 작업에서 계산이 필요한 블럭의 내부 작업 탐색에 제한이 됩니다. PyTorch를 사용하는 건 당신에게 역전파처럼 핵심 딥러닝 개념과 학습 단계의 나머지 부분에 대해 생각할 것들을 제공합니다. From e9c3d2b69bb31dedc3277369dd9d1ebf95447211 Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:52:15 +0900 Subject: [PATCH 40/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index b4eb209..4283c17 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -84,7 +84,7 @@ model = Net() 만약 초보자라면, Keras는 명확한 이점을 보일 것입니다. Keras는 실제로 읽기 쉽고 간결해 구현 단계에서의 세부 사항을 건너뛰는 동시에 그대의 첫번째 end-to-end 딥러닝 모델을 빠르게 설계하도록 해줄겁니다. 그러나, 이런 세부 사항을 뛰어넘는 건 당신의 딥러닝 작업에서 계산이 필요한 블럭의 내부 작업 탐색에 제한이 됩니다. PyTorch를 사용하는 건 당신에게 역전파처럼 핵심 딥러닝 개념과 학습 단계의 나머지 부분에 대해 생각할 것들을 제공합니다. -PyTorch보다 간단한 Keras는 더이상 장난감을 의미하진 않는다. 이는 초심자들이 사용하는 중요한 딥러닝 도구이다. 능숙한 데이터 과학자들에게도 마찬가지다. 예를 들면, Kaggle에서 열린 `the Dstl Satellite Imagery Feature Detection`에서 상위 3팀이 그들의 솔루션에 Keras를 사용하였다. 반면, 4등인 [우리](https://blog.deepsense.ai/deep-learning-for-satellite-imagery-via-image-segmentation/#_ga=2.53479528.114026073.1540369751-2000517400.1540369751)는 PyTorch와 Keras를 혼합해서 사용하였다. +PyTorch보다 간단한 Keras는 더이상 장난감을 의미하진 않습니다. 이는 초심자들이 사용하는 중요한 딥러닝 도구입니다. 능숙한 데이터 과학자들에게도 마찬가지입니다. 예를 들면, Kaggle에서 열린 `the Dstl Satellite Imagery Feature Detection`에서 상위 3팀이 그들의 솔루션에 Keras를 사용하였습니다. 반면, 4등인 [우리](https://blog.deepsense.ai/deep-learning-for-satellite-imagery-via-image-segmentation/#_ga=2.53479528.114026073.1540369751-2000517400.1540369751)는 PyTorch와 Keras를 혼합해서 사용하였습니다. 당신의 딥러닝 어플리케이션이 Keras가 제공하는 것 이상의 유연성을 필요하는 지 파악하는 건 가치가 있다. 그대의 필요에 따라, Keras는 [가장 적은 힘의 규칙](https://en.wikipedia.org/wiki/Rule_of_least_power)에 입각하는 좋은 방법이 될 수 있다. From a614b4d39dbc497e3f20cf4bb37e93a7eeac2207 Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:52:18 +0900 Subject: [PATCH 41/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index 4283c17..45b37e8 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -86,7 +86,7 @@ model = Net() PyTorch보다 간단한 Keras는 더이상 장난감을 의미하진 않습니다. 이는 초심자들이 사용하는 중요한 딥러닝 도구입니다. 능숙한 데이터 과학자들에게도 마찬가지입니다. 예를 들면, Kaggle에서 열린 `the Dstl Satellite Imagery Feature Detection`에서 상위 3팀이 그들의 솔루션에 Keras를 사용하였습니다. 반면, 4등인 [우리](https://blog.deepsense.ai/deep-learning-for-satellite-imagery-via-image-segmentation/#_ga=2.53479528.114026073.1540369751-2000517400.1540369751)는 PyTorch와 Keras를 혼합해서 사용하였습니다. -당신의 딥러닝 어플리케이션이 Keras가 제공하는 것 이상의 유연성을 필요하는 지 파악하는 건 가치가 있다. 그대의 필요에 따라, Keras는 [가장 적은 힘의 규칙](https://en.wikipedia.org/wiki/Rule_of_least_power)에 입각하는 좋은 방법이 될 수 있다. +당신의 딥러닝 어플리케이션이 Keras가 제공하는 것 이상의 유연성을 필요하는 지 파악하는 건 가치가 있습니다. 
그대의 필요에 따라, Keras는 [가장 적은 힘의 규칙](https://en.wikipedia.org/wiki/Rule_of_least_power)에 입각하는 좋은 방법이 될 수 있습니다. #### 요약 - Keras : 좀 더 간결한 API From f0e3a01de145f6c7d128506585c8c967651f6eb6 Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:52:22 +0900 Subject: [PATCH 42/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index 45b37e8..d5fa2ce 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -93,7 +93,7 @@ PyTorch보다 간단한 Keras는 더이상 장난감을 의미하진 않습니 - PyTorch : 더 유연하고, 딥러닝 개념을 깁게 이해하는데 도움을 줌 ### Keras vs PyTorch : 인기와 학습자료 접근성 -프레임워크의 인기는 단지 유용성의 대리만은 아니다. 작업 코드가 있는 튜토리얼, 리포지토리 그리고 단체 토론 등 커뮤니티 지원도 중요합니다. 2018년 6월 현재, Keras와 PyTorch는 GitHub과 arXiv 논문에서 인기를 누리고 있습니다.(Keras를 언급한 대부분의 논문들은 Tensorflow 백엔드 또한 언급하고 있습니다.) KDnugget에 따르면, Keras와 PyTorch는 가장 빠르게 성장하는 [데이터 과학 도구들](https://www.kdnuggets.com/2018/05/poll-tools-analytics-data-science-machine-learning-results.html)입니다. +프레임워크의 인기는 단지 유용성의 대리만은 아닙니다. 작업 코드가 있는 튜토리얼, 리포지토리 그리고 단체 토론 등 커뮤니티 지원도 중요합니다. 2018년 6월 현재, Keras와 PyTorch는 GitHub과 arXiv 논문에서 인기를 누리고 있습니다.(Keras를 언급한 대부분의 논문들은 Tensorflow 백엔드 또한 언급하고 있습니다.) KDnugget에 따르면, Keras와 PyTorch는 가장 빠르게 성장하는 [데이터 과학 도구들](https://www.kdnuggets.com/2018/05/poll-tools-analytics-data-science-machine-learning-results.html)입니다. ![Percentof ML papers that mention...](https://github.com/KerasKorea/KEKOxTutorial/blob/issue_42/media/42_1.png) From e82e930695c79adca159f82f9acc61da97dbc6dc Mon Sep 17 00:00:00 2001 From: Karl Kim Date: Thu, 25 Oct 2018 22:52:59 +0900 Subject: [PATCH 43/49] =?UTF-8?q?#175=20:=20=EB=A7=81=ED=81=AC=20=EC=B6=94?= =?UTF-8?q?=EA=B0=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 187017b..8797511 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ | 😁 | NAME | HOBBY | CONTRIBUTIONs | |------|----------|----------|-----------| | 😉 | [김수정](https://github.com/SooDevv) | 🕹 게임하기 | [Fasion-MNIST](https://keraskorea.github.io/posts/2018-09-28-딥러닝의%20Hello%20World,%20Fashion-MNIST/)
    [안드로이드에 MNIST 심기]() | -| ![](./profile/contributor_YK.png) | [김영규](https://github.com/karl6885) |고양이, 춤, 유투브 보기|[파이썬과 케라스를 이용한 알파제로 만들기]()
    [Keras의 Autoencoder를 활용해 신용카드 이상 거래 탐지하기]()
    [Keras를 활용한 주식 가격 예측]()| +| ![](./profile/contributor_YK.png) | [김영규](https://github.com/karl6885) |고양이, 춤, 유투브 보기|[파이썬과 케라스를 이용한 알파제로 만들기](https://keraskorea.github.io/posts/2018-10-23-파이썬과_케라스를_이용한_알파제로_만들기/)
    [Keras의 Autoencoder를 활용해 신용카드 이상 거래 탐지하기](https://keraskorea.github.io/posts/2018-10-23-Keras의%20Autoencoder를%20활용해%20신용카드%20이상%20거래%20탐지하기/)
    [Keras를 활용한 주식 가격 예측](https://github.com/KerasKorea/KEKOxTutorial/blob/master/22_Keras를%20활용한%20주식%20가격%20예측.md)| > 오해하지 마세요, 외모 순서가 아니라 ㄱ-ㄴ-ㄷ순서입니다. From 4ce1bcc4465c73d532efd81ce63f67ac675f8447 Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:54:24 +0900 Subject: [PATCH 44/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index d5fa2ce..a8ad415 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -101,7 +101,7 @@ PyTorch보다 간단한 Keras는 더이상 장난감을 의미하진 않습니 두 프레임워크는 만족스러운 참고 문서를 갖고 있지만, PyTorch는 강력한 커뮤니티 지원을 제공합니다. 해당 커뮤니티 게시판은 당신이 난관에 부딪쳤거나 참고 문서나 스택오버플로우는 당신이 필요로 하는 정답이 없다면 방문하기 좋은 곳이다. -anecdotally, 우리는 특정 신경망 구조에서 초심자 수준의 딥러닝 코스를 PyTorch보다 Keras로 더 쉽게 접근할 수 있다는 걸 발견했습니다. Keras에서 제공하는 코드의 가독성과 실험을 쉽게 해주는 장점으로 인해, Keras는 딥러닝 열광자, 튜터, 고수준의 kaggle 우승자들에 의해 많이 쓰이게 될 겁니다. +한 일화를 들자면, 우리는 특정 신경망 구조에서 초심자 수준의 딥러닝 코스를 PyTorch보다 Keras로 더 쉽게 접근할 수 있다는 걸 발견했습니다. Keras에서 제공하는 코드의 가독성과 실험을 쉽게 해주는 장점으로 인해, Keras는 딥러닝 열광자, 튜터, 고수준의 kaggle 우승자들에 의해 많이 쓰이게 될 겁니다. Keras 자료와 딥러닝 코스의 예시로, ["Starting deep learning hands-on: image classification on CIFAR-10"](https://blog.deepsense.ai/deep-learning-hands-on-image-classification/#_ga=2.52232937.114026073.1540369751-2000517400.1540369751)와 ["Deep Learning with Rython"](https://www.manning.com/books/deep-learning-with-python)를 참조하십시오. PyTorch 자료로는, 신경망의 내부 작업을 학습하는데 더 도던적이고 포괄적인 접근법을 제공하는 공식 튜토리얼을 추천합니다. PyTorch에 대한 전반적인 내용을 보려면, 이 [문서](http://www.goldsborough.me/ml/ai/python/2018/02/04/20-17-20-a_promenade_of_pytorch/)를 참조하세요. From 8fed211df731e4227f75f173a8d1f3bd40751832 Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:54:35 +0900 Subject: [PATCH 45/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index a8ad415..23397c9 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -123,7 +123,7 @@ Keras로 기본 신경망을 만든 사용자들은 PyTorch 사용자들보다 PyTorch는 python기반으로 휴대할 수 없는 pickle에 모델을 저장하지만, Keras는 JSON + H5 파일을 사용하는 안전한 접근 방식의 장점을 활용합니다.(일반적으로 Keras에 저장하는게 더 어렵습니다.) 또한 [R에도 Keras](https://keras.rstudio.com/)가 있습니다. 이 경우, R을 사용하여 데이터 분석팀과 협력해야 할 수도 있습니다. -Tensorflow에서 실행되는 Keras는 [모바일용 Tensorflow](https://www.tensorflow.org/mobile/mobile_intro)(혹은 [Tensorflow Lite](https://www.tensorflow.org/mobile/tflite/index))를 통해 모바일 플랫폼에 구축할 수 있는 다양한 솔리드 옵션을 제공합니다. [Tensorflow.js](https://js.tensorflow.org/) 혹은 [Keras.js](https://github.com/transcranial/keras-js)를 사용하여 멋진 웹 애플리케이션을 배포할 수 있습니다. 예를 들어, Piotr와 그의 학생들이 만든, [시험 공포증 유발 요소를 탐지하는 딥러닝 브라우저 플러그인](https://github.com/cytadela8/trypophobia)를 보세요. +Tensorflow에서 실행되는 Keras는 [모바일용 Tensorflow](https://www.tensorflow.org/mobile/mobile_intro)(혹은 [Tensorflow Lite](https://www.tensorflow.org/mobile/tflite/index))를 통해 모바일 플랫폼에 구축할 수 있는 다양한 솔리드 옵션을 제공합니다. 
[Tensorflow.js](https://js.tensorflow.org/) 혹은 [Keras.js](https://github.com/transcranial/keras-js)를 사용하여 멋진 웹 애플리케이션을 배포할 수 있습니다. 예를 들어, Piotr와 그의 학생들이 만든, [시험 공포증 유발 요소를 탐지하는 딥러닝 브라우저 플러그인](https://github.com/cytadela8/trypophobia)을 보세요. PyTorch 모델을 추출하는 건 python 코드때문에 더 부담되기에, 현재 많이 추천하는 접근방식은 [ONNX](https://pytorch.org/docs/master/onnx.html)를 사용하여 PyTorch 모델을 Caffe2로 변환하는 것입니다. From 759ba187c539d1f50536abeafcd5ec79501c1d10 Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:54:39 +0900 Subject: [PATCH 46/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index 23397c9..b0113cc 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -125,7 +125,7 @@ PyTorch는 python기반으로 휴대할 수 없는 pickle에 모델을 저장하 Tensorflow에서 실행되는 Keras는 [모바일용 Tensorflow](https://www.tensorflow.org/mobile/mobile_intro)(혹은 [Tensorflow Lite](https://www.tensorflow.org/mobile/tflite/index))를 통해 모바일 플랫폼에 구축할 수 있는 다양한 솔리드 옵션을 제공합니다. [Tensorflow.js](https://js.tensorflow.org/) 혹은 [Keras.js](https://github.com/transcranial/keras-js)를 사용하여 멋진 웹 애플리케이션을 배포할 수 있습니다. 예를 들어, Piotr와 그의 학생들이 만든, [시험 공포증 유발 요소를 탐지하는 딥러닝 브라우저 플러그인](https://github.com/cytadela8/trypophobia)을 보세요. -PyTorch 모델을 추출하는 건 python 코드때문에 더 부담되기에, 현재 많이 추천하는 접근방식은 [ONNX](https://pytorch.org/docs/master/onnx.html)를 사용하여 PyTorch 모델을 Caffe2로 변환하는 것입니다. +PyTorch 모델을 추출하는 건 python 코드 때문에 더 부담되기에, 현재 많이 추천하는 접근방식은 [ONNX](https://pytorch.org/docs/master/onnx.html)를 사용하여 PyTorch 모델을 Caffe2로 변환하는 것입니다. #### 요약 - Keras : (Tensorflow backend를 통해) 더 많은 개발 옵션을 제공하고, 모델을 쉽게 추출할 수 있음. From f49a4b67024a90d3fb9c87a9297292f075c9131d Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:55:19 +0900 Subject: [PATCH 47/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index b0113cc..d5c3660 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -133,7 +133,7 @@ PyTorch 모델을 추출하는 건 python 코드 때문에 더 부담되기에, ### Keras vs PyTorch : 성능 > 미리 측정된 최적화는 프로그래밍에서 모든 악의 근원입니다. - Donald Knuth -대부분의 인스턴스에서, 속도 측정에서의 차이는 프레임워크 선택을 위한 주요 요점은 아닙니다.(특히, 학습할 때) GPU 시간은 데이터 과학자의 시간보다 더 인색합니다. 게다가, 학습하는 동안 발생하는 성능의 병목현상은 실패한 실험이나, 최적화하지 않은 신경망이나 데이터 로딩(loading)이 원인일 수 있습니다. 완벽을 위해, 여전히 우리는 해당 주제를 다뤄야할 compel을 느낍니다. 우리는 두 가지 비교사항을 제안합니다. +대부분의 인스턴스에서, 속도 측정에서의 차이는 프레임워크 선택을 위한 주요 요점은 아닙니다.(특히, 학습할 때) 데이터 과학자의 시간이 GPU 시간보다는 더 비싸기 때문입니다. 게다가, 학습하는 동안 발생하는 성능의 병목현상은 실패한 실험이나, 최적화하지 않은 신경망이나, 데이터를 불러오는 과정(loading)이 원인일 수 있습니다. 그럼에도, 제대로 마무리를 하려면, 우리는 해당 주제를 다뤄야 할 필요성을 느낍니다. 우리는 두 가지 비교사항을 제안합니다. 
- [Tensorflow, Keras 그리고 PyTorch를 비교](https://wrosinski.github.io/deep-learning-frameworks/) by Wojtek Rosinski - [딥러닝 프레임워크들에 대한 비교 : 로제타 스톤식 접근](https://github.com/ilkarman/DeepLearningFrameworks/) by Microsoft From 267b025eb89af2f076b92d0a617af2d274ef89af Mon Sep 17 00:00:00 2001 From: Yeongkyu Kim Date: Thu, 25 Oct 2018 22:55:28 +0900 Subject: [PATCH 48/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md Co-Authored-By: mike2ox --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index d5c3660..a87796f 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -149,7 +149,7 @@ PyTorch는 Tensorflow만큼 빠르며, RNN에선 잠재적으로 더 빠릅니 - 학습 속도에 대한 걱정과 달리, PyTorch가 Keras를 능가 ### Keras vs PyTorch : 결론 -Keras와 PyTorch는 배우기위한 첫번째 딥러닝 프레임워크로 좋은 선택입니다. +Keras와 PyTorch는 배우기 위한 첫번째 딥러닝 프레임워크로 좋은 선택입니다. 만약 당신이 수학자, 연구자, 혹은 당신의 모델이 실제로 어떻게 작동하는지 알고 싶다면, PyTorch를 선택하길 권장합니다. 고급 맞춤형 알고리즘(그리고 디버깅)이 필요한 경우(ex. [YOLOv3](https://blog.paperspace.com/how-to-implement-a-yolo-object-detector-in-pytorch/) 혹은 [LSTM](https://medium.com/huggingface/understanding-emotions-from-keras-to-pytorch-3ccb61d5a983)을 사용한 객체 인식) 또는 신경망 이외의 배열 식을 최적화해야 할 경우(ex. [행렬 분해](http://blog.ethanrosenthal.com/2017/06/20/matrix-factorization-in-pytorch/) 혹은 [word2vec](https://adoni.github.io/2017/11/08/word2vec-pytorch/) 알고리즘)에 빛을 발합니다. plug & play 프레임워크를 원한다면, Keras는 확실히 더 쉬울 겁니다. 즉, 수학적 구현의 세부 사항들에 많은 시간을 들이지 않고도 모델을 신속하게 제작, 학습 그리고 평가할 수 있습니다. From 36f3d3d1e5ac633ab57557622b7de1d055469505 Mon Sep 17 00:00:00 2001 From: moonhyeok song Date: Thu, 25 Oct 2018 23:36:04 +0900 Subject: [PATCH 49/49] Update 42_keras_or_pytorch_as_your_first_deep_learning_framework.md --- 42_keras_or_pytorch_as_your_first_deep_learning_framework.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md index a87796f..01bcfb5 100644 --- a/42_keras_or_pytorch_as_your_first_deep_learning_framework.md +++ b/42_keras_or_pytorch_as_your_first_deep_learning_framework.md @@ -99,7 +99,7 @@ PyTorch보다 간단한 Keras는 더이상 장난감을 의미하진 않습니 > 지난 6년간 43k개의 ML논문을 기반으로, arxiv 논문들에서 딥러닝 프레임워크에 대한 언급에 대한 자료입니다. Tensorflow는 전체 논문의 14.3%, PyTorch는 4.7%, Keras 4.0%, Caffe 3.8%, Theano 2.3%, Torch 1.5. MXNet/chainer/cntk는 1% 이하로 언급되었습니다. [참조](https://t.co/YOYAvc33iN) - Andrej Karpathy (@karpathy) -두 프레임워크는 만족스러운 참고 문서를 갖고 있지만, PyTorch는 강력한 커뮤니티 지원을 제공합니다. 해당 커뮤니티 게시판은 당신이 난관에 부딪쳤거나 참고 문서나 스택오버플로우는 당신이 필요로 하는 정답이 없다면 방문하기 좋은 곳이다. +두 프레임워크는 만족스러운 참고 문서를 갖고 있지만, PyTorch는 강력한 커뮤니티 지원을 제공합니다. 해당 커뮤니티 게시판은 당신이 난관에 부딪쳤거나 참고 문서나 스택오버플로우에 당신이 필요로 하는 정답이 없다면 방문하기 좋은 곳이다. 한 일화를 들자면, 우리는 특정 신경망 구조에서 초심자 수준의 딥러닝 코스를 PyTorch보다 Keras로 더 쉽게 접근할 수 있다는 걸 발견했습니다. Keras에서 제공하는 코드의 가독성과 실험을 쉽게 해주는 장점으로 인해, Keras는 딥러닝 열광자, 튜터, 고수준의 kaggle 우승자들에 의해 많이 쓰이게 될 겁니다.