text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
# 7. Input and Output ## 7.1. Fancier Output Formatting ``` year = 2016 event = 'Referendum' f'Results of the {year} {event}' yes_votes = 42_572_654 no_votes = 43_132_495 percentage = yes_votes / (yes_votes+no_votes) '{:-9} YES votes {:2.2%}'.format(yes_votes, percentage) s = 'Hello, world.' str(s) repr(s) str(1/7) x = 10* 3.25 y = 200 * 200 s = 'The value of x is ' + repr(x) + ', and y is ' + repr(y) + '...' print(s) # The repr() of a string adds string quotes and backslashes: hello = 'hello, world\n' hellos = repr(hello) print(hellos) # The argument to repr() may be any Python object: repr((x, y, ('spam', 'eggs'))) ``` ### 7.1.1. Formatted String Literals ``` import math print(f'The value of pi is approximately {math.pi:.3f}.') table = {'Sjoerd': 4127, 'Jack': 4098, 'Dcab': 7678} for name, phone in table.items(): print(f'{name:10} ==> {phone:10d}') animals = 'eels' print(f'My hovercraft is full of {animals}.') print(f'My hovercraft is full of {animals!r}.') ``` ### 7.1.2. The String format() Method ``` print('We are the {} who say "{}!"'.format('knights', 'Ni')) print('{0} and {1}'.format('spam', 'eggs')) print('{1} and {0}'.format('spam', 'eggs')) print('This {food} is {adjective}.'.format( food= 'spam', adjective='absolutely horrible')) print('The story of {0}, {1}, and {other}.'.format('Bill', 'Manfred', other='Georg')) table = {'Sjoerd': 4127, 'Jack': 4098, 'Dcab': 867678} print('Jack: {Jack:d}; Sjoerd: {Sjoerd:d}; Dcab: {Dcab:d}'.format(**table)) for x in range(1, 11): print('{0:2d} {1:3d} {2:4d}'.format(x, x*x, x*x*x)) ``` ### 7.1.3. Manual String Formatting ``` for x in range(1, 11): print(repr(x).rjust(2), repr(x*x).rjust(3), end=' ') # Note use of 'end' on previous line print(repr(x*x*x).rjust(4)) '12'.zfill(5) '-3.14'.zfill(7) '3.14159265359'.zfill(5) ``` ### 7.1.4. Old string formatting ``` import math print('The value of pi is approximately %5.3f.' % math.pi) ``` ## 7.2. 
Reading and Writing Files ``` f = open('workfile', 'w') with open('workfile') as f: read_data = f.read() # We can check that the file has been automatically closed. f.closed f.close() f.read() ``` ### 7.2.1. Methods of File Objects ``` f.write('This is the entire file.\n') f = open('workfile', 'r') f.read() f.read() f = open('workfile', 'w') f.write('This is the first line of the file.\n') f.write('Second line of the file\n') f = open('workfile', 'r+') f.readline() f.readline() f.readline() f.write('This is a test\n') value = ('the answer', 42) s = str(value) f.write(s) f = open('workfile', 'rb+') f.write(b'0123456789abcdef') f.seek(5) # Go to the 6th byte in the file f.read(1) f.seek(-3, 2) # Go to the 3rd byte before the end f.read(1) ``` ### 7.2.2. Saving structured data with json ``` import json json.dumps([1, 'simple', 'list']) ``` json.dump(x, f) x = json.load(f)
github_jupyter
# Autoencoders [Source](https://twitter.com/rickwierenga/status/1216801014004797446) ``` %tensorflow_version 2.x %pylab inline import tensorflow as tf (x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data() x_train = x_train / 255 x_test = x_test / 255 ``` # Simple auto encoder ``` encoder = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=[28, 28]), tf.keras.layers.Dense(100, activation="relu"), tf.keras.layers.Dense(30, activation="relu"), ]) decoder = tf.keras.models.Sequential([ tf.keras.layers.Dense(100, activation="relu", input_shape=[30]), tf.keras.layers.Dense(28 * 28, activation="sigmoid"), tf.keras.layers.Reshape([28, 28]) ]) stacked_autoencoder = tf.keras.models.Sequential([encoder, decoder]) stacked_autoencoder.compile(loss="binary_crossentropy", optimizer='adam') history = stacked_autoencoder.fit(x_train, x_train, epochs=10, validation_data=[x_test, x_test]) figsize(20, 5) for i in range(8): subplot(2, 8, i+1) pred = stacked_autoencoder.predict(x_test[i].reshape((1, 28, 28))) imshow(x_test[i], cmap="binary") subplot(2, 8, i+8+1) imshow(pred.reshape((28, 28)), cmap="binary") ``` ## Convolutional auto encoder ``` encoder = tf.keras.models.Sequential([ tf.keras.layers.Reshape([28, 28, 1], input_shape=[28, 28]), tf.keras.layers.Conv2D(16, kernel_size=(3, 3), padding="same", activation="relu"), tf.keras.layers.MaxPool2D(pool_size=2), tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding="same", activation="relu"), tf.keras.layers.MaxPool2D(pool_size=2), tf.keras.layers.Conv2D(64, kernel_size=(3, 3), padding="same", activation="relu"), tf.keras.layers.MaxPool2D(pool_size=2) ]) decoder = tf.keras.models.Sequential([ tf.keras.layers.Conv2DTranspose(32, kernel_size=(3, 3), strides=2, padding="valid", activation="relu", input_shape=[3, 3, 64]), tf.keras.layers.Conv2DTranspose(16, kernel_size=(3, 3), strides=2, padding="same", activation="relu"), tf.keras.layers.Conv2DTranspose(1, kernel_size=(3, 3), strides=2, padding="same", 
activation="sigmoid"), tf.keras.layers.Reshape([28, 28]) ]) stacked_autoencoder = tf.keras.models.Sequential([encoder, decoder]) stacked_autoencoder.compile(loss="binary_crossentropy", optimizer='adam') history = stacked_autoencoder.fit(x_train, x_train, epochs=10, validation_data=[x_test, x_test]) figsize(20, 5) for i in range(8): subplot(2, 8, i+1) pred = stacked_autoencoder.predict(x_test[i].reshape((1, 28, 28))) imshow(x_test[i], cmap="binary") subplot(2, 8, i+8+1) imshow(pred.reshape((28, 28)), cmap="binary") ``` ## Denoising auto encoder ``` encoder = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=[28, 28]), tf.keras.layers.Dense(100, activation="relu"), tf.keras.layers.Dense(100, activation="relu"), tf.keras.layers.Dense(30, activation="relu") ]) decoder = tf.keras.models.Sequential([ tf.keras.layers.Dense(100, activation="relu", input_shape=[30]), tf.keras.layers.Dense(100, activation="relu"), tf.keras.layers.Dense(28 * 28, activation="sigmoid"), tf.keras.layers.Reshape([28, 28]) ]) stacked_autoencoder = tf.keras.models.Sequential([encoder, decoder]) stacked_autoencoder.compile(loss="binary_crossentropy", optimizer='adam') x_train_noise = x_train + ((np.random.random(x_train.shape)) / 4) x_test_noise = x_test + ((np.random.random(x_test.shape)) / 4) history = stacked_autoencoder.fit(x_train_noise, x_train, epochs=10, validation_data=[x_test_noise, x_test]) figsize(20, 5) for i in range(8): subplot(2, 8, i+1) imshow(x_test_noise[i], cmap="binary") subplot(2, 8, i+8+1) pred = stacked_autoencoder.predict(x_test_noise[i].reshape((1, 28, 28))) imshow(pred.reshape((28, 28)), cmap="binary") ```
github_jupyter
credit: https://github.com/airsplay/py-bottom-up-attention ``` %%capture !git clone https://github.com/airsplay/py-bottom-up-attention.git %cd py-bottom-up-attention # Install python libraries !pip install -r requirements.txt !pip install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' # Install detectron2 !python setup.py build develop # or if you are on macOS # MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py build develop # or, as an alternative to `setup.py`, do # pip install [--editable] . !pip install --upgrade --force-reinstall imagesize from glob import glob import os import io import json import detectron2 from tqdm.notebook import tqdm # import some common detectron2 utilities from detectron2.engine import DefaultPredictor from detectron2.config import get_cfg from detectron2.utils.visualizer import Visualizer from detectron2.data import MetadataCatalog # import some common libraries import numpy as np import cv2 import torch # Show the image in ipynb from IPython.display import clear_output, Image, display import PIL.Image def showarray(a, fmt='jpeg'): a = np.uint8(np.clip(a, 0, 255)) f = io.BytesIO() PIL.Image.fromarray(a).save(f, fmt) display(Image(data=f.getvalue())) %cd demo !ls # Load VG Classes data_path = 'data/genome/1600-400-20' vg_classes = [] with open(os.path.join(data_path, 'objects_vocab.txt')) as f: for object in f.readlines(): vg_classes.append(object.split(',')[0].lower().strip()) MetadataCatalog.get("vg").thing_classes = vg_classes cfg = get_cfg() cfg.merge_from_file("../configs/VG-Detection/faster_rcnn_R_101_C4_caffe.yaml") cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 300 cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.6 cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2 # VG Weight cfg.MODEL.WEIGHTS = "http://nlp.cs.unc.edu/models/faster_rcnn_from_caffe.pkl" predictor = DefaultPredictor(cfg) cfg = get_cfg() cfg.merge_from_file("../configs/VG-Detection/faster_rcnn_R_101_C4_caffe.yaml") cfg.MODEL.RPN.POST_NMS_TOPK_TEST 
= 300 cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.6 cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2 # VG Weight cfg.MODEL.WEIGHTS = "http://nlp.cs.unc.edu/models/faster_rcnn_from_caffe.pkl" predictor = DefaultPredictor(cfg) im_paths = glob('/kaggle/input/simmc-img/data/all_images/*.png') NUM_OBJECTS = 10000 from torch import nn from detectron2.modeling.postprocessing import detector_postprocess from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs, fast_rcnn_inference_single_image from detectron2.structures.boxes import Boxes from detectron2.structures.instances import Instances def doit(raw_image, raw_boxes): # Process Boxes raw_boxes = Boxes(torch.from_numpy(raw_boxes).cuda()) with torch.no_grad(): raw_height, raw_width = raw_image.shape[:2] # print("Original image size: ", (raw_height, raw_width)) # Preprocessing image = predictor.transform_gen.get_transform(raw_image).apply_image(raw_image) # print("Transformed image size: ", image.shape[:2]) # Scale the box new_height, new_width = image.shape[:2] scale_x = 1. * new_width / raw_width scale_y = 1. * new_height / raw_height #print(scale_x, scale_y) boxes = raw_boxes.clone() boxes.scale(scale_x=scale_x, scale_y=scale_y) # ---- image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) inputs = [{"image": image, "height": raw_height, "width": raw_width}] images = predictor.model.preprocess_image(inputs) # Run Backbone Res1-Res4 features = predictor.model.backbone(images.tensor) # Run RoI head for each proposal (RoI Pooling + Res5) proposal_boxes = [boxes] features = [features[f] for f in predictor.model.roi_heads.in_features] box_features = predictor.model.roi_heads._shared_roi_transform( features, proposal_boxes ) feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1 # print('Pooled features size:', feature_pooled.shape) # Predict classes and boxes for each proposal. 
pred_class_logits, pred_proposal_deltas = predictor.model.roi_heads.box_predictor(feature_pooled) # print(pred_class_logits.shape) pred_class_prob = nn.functional.softmax(pred_class_logits, -1) pred_scores, pred_classes = pred_class_prob[..., :-1].max(-1) # Detectron2 Formatting (for visualization only) roi_features = feature_pooled instances = Instances( image_size=(raw_height, raw_width), pred_boxes=raw_boxes, scores=pred_scores, pred_classes=pred_classes ) return instances, roi_features def read_data(im_path, m=False): try: im = cv2.imread(im_path) im_rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) except: return None h, w = im.shape[0], im.shape[1] name = im_path.split('/')[-1].split('.')[0] given_boxes = [] indices = [] try: if not m: with open(f'/kaggle/input/simmc-img/data/simmc2_scene_jsons_dstc10_public/public/{name}_scene.json') as f: data = json.load(f) else: with open(f'/kaggle/input/simmc-img/data/simmc2_scene_jsons_dstc10_public/public/m_{name}_scene.json') as f: data = json.load(f) except: return None for obj in data['scenes'][0]['objects']: x0 = obj['bbox'][0] y0 = obj['bbox'][1] x1 = x0 + obj['bbox'][3] y1 = y0 + obj['bbox'][2] given_boxes.append([x0, y0, x1, y1]) indices.append(obj['index']) given_boxes.append([0, 0, w, h]) indices.append('scene') return im, np.array(given_boxes), indices, name # instances, features = doit(im, given_boxes) without_m = {} with_m = {} for im_path in tqdm(im_paths): data = read_data(im_path) if data is not None: im, given_boxes, indices, name = data d = {} instances, features = doit(im, given_boxes) features = features.cpu().tolist() for i, idx in enumerate(indices): d[idx] = features[i] without_m[name] = d for im_path in tqdm(im_paths): data = read_data(im_path, m=True) if data is not None: im, given_boxes, indices, name = data d = {} instances, features = doit(im, given_boxes) features = features.cpu().tolist() for i, idx in enumerate(indices): d[idx] = features[i] with_m[name] = d len(without_m), len(with_m) %cd 
/kaggle/working !rm -r * !ls with open('without_m.json', 'w') as f: json.dump(without_m, f) with open('with_m.json', 'w') as f: json.dump(with_m, f) # Show the boxes, labels, and features pred = instances.to('cpu') v = Visualizer(im[:, :, :], MetadataCatalog.get("vg"), scale=1.2) v = v.draw_instance_predictions(pred) showarray(v.get_image()[:, :, ::-1]) print('instances:\n', instances) print() print('boxes:\n', instances.pred_boxes) print() # print('Shape of features:\n', features.shape) # # Verify the correspondence of RoI features # pred_class_logits, pred_proposal_deltas = predictor.model.roi_heads.box_predictor(features) # pred_class_probs = torch.nn.functional.softmax(pred_class_logits, -1)[:, :-1] # max_probs, max_classes = pred_class_probs.max(-1) # print("%d objects are different, it is because the classes-aware NMS process" % (NUM_OBJECTS - torch.eq(instances.pred_classes, max_classes).sum().item())) # print("The total difference of score is %0.4f" % (instances.scores - max_probs).abs().sum().item()) ```
github_jupyter
This is a tensorflow version of Joint Face Detection and Alignment using Multi-task Cascaded Convolutional Networks based on a mixed numpy/tensorflow version (The PNet, RNet, ONet weights are imported from this) the post processing has been put into the tf graph to allow it to be exported as a large pb graph which can then be imported into a c++ (or other) tf session. ``` import tensorflow as tf import numpy as np ``` Create the networks. These are fairly simple, the PNet convolutional network accepts any input size. RNet and ONet use crops identified by the PNet and resized to a fixed size. ``` faceSession = tf.Session() from simple_align import PNet from simple_align import RNet from simple_align import ONet with tf.variable_scope('pnet'): data = tf.placeholder(tf.float32, (None,None,None,3), 'input') pnet = PNet({'data':data}) with tf.variable_scope('rnet'): data = tf.placeholder(tf.float32, (None,24,24,3), 'input') rnet = RNet({'data':data}) with tf.variable_scope('onet'): data = tf.placeholder(tf.float32, (None,48,48,3), 'input') onet = ONet({'data':data}) ``` Import 2 helpers - one creates a 'heatmap' from the output of PNet, the heatmap just buckets the response into larger pixel. 
- nms is a Non Maximal Surpression on the result (there is a native tf implementation of this too) ``` from simple_heatmap import create_heatmap create_heatmap() from simple_heatmap import create_nms create_nms() ``` Pull in a local image There is some weirdness in the python image code, decode_image doesn't always work and resize_bilinear seems buggy too, so there are some weird divide and multiply that seem to make it happy - these aren't necessary in the c++ version ``` import os image_path = os.path.join('images/', 'canoe.jpg') with tf.variable_scope('image'): image_data = tf.gfile.FastGFile(image_path, 'rb').read() #we want to use decode_image here but it's buggy decoded = tf.image.decode_jpeg(image_data, channels=None) normed = tf.divide(tf.cast(decoded, tf.float32), 255.0) batched = tf.expand_dims(normed, 0) standard_size = batched graph_norm = standard_size * 255.0 raw_image, file_image, plot_image = faceSession.run((decoded, graph_norm, standard_size), feed_dict={}) #This is the normalization the network expects feed_image = (file_image - 128) / 128 image_height = feed_image.shape[1] image_width = feed_image.shape[2] ``` Show the image using matplot (reverse the h, w axis) ``` import matplotlib.pyplot as plt import matplotlib.patches as patches import matplotlib.image as mpimg fig, ax = plt.subplots(1) show_image = np.reshape(plot_image, (image_height,image_width,3)) ax.imshow(show_image) plt.show() ``` Run the PNet over a range of scales to identify different face sizes ``` minsize = 20 factor_count = 0 factor = .709 minl=np.amin([image_height, image_width]) m=12.0/minsize minl=minl*m scales=[] while minl>=12: scales += [m*np.power(factor, factor_count)] minl = minl*factor factor_count += 1 model_path = "data"; with tf.variable_scope('rescale'): width = tf.placeholder(tf.int32, name='width') height = tf.placeholder(tf.int32, name='height') image = tf.placeholder(tf.float32, [None, None, None, 3], name='image') scaled = image / 255.0 resized_image = 
tf.multiply(tf.image.resize_bilinear(scaled, [height, width]),255.0, name="resize") with tf.variable_scope('gather'): gather_indices = tf.placeholder(tf.int32, [None], name='indices') gather_values = tf.placeholder(tf.float32, [None, 9], name='values') gathered = tf.gather(gather_values, gather_indices, name='output') ``` Load the weights into the PNet - these are from a pretrained np model, hence the transpose At the end of each scale we create a heatmap and reduce the results using NMS ``` with tf.variable_scope('pnet'): pnet.load(os.path.join(model_path, 'det1.npy'), faceSession) p_total_boxes=np.empty((0,9)) for scale in scales: out_height = image_height * scale out_width = image_width * scale scale_feed = {'rescale/height:0':out_height, 'rescale/width:0':out_width, 'rescale/image:0':file_image} rescaled_image = faceSession.run(('rescale/resize:0'), feed_dict=scale_feed) graph_image = (rescaled_image-128)/128.0 graph_image = np.transpose(graph_image, (0,2,1,3)) regression_boxes, box_scores = faceSession.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), \ feed_dict={'pnet/input:0':graph_image}) heatmap_feed={'heatmap/probabilities:0': box_scores, 'heatmap/bounds:0': regression_boxes, \ 'heatmap/threshold:0': [.6], 'heatmap/scale:0': [scale]} stacked = faceSession.run(('heatmap/bounds_output:0'), feed_dict=heatmap_feed) nms_feed={'nms/bounds:0': stacked, 'nms/threshold:0': [.5]} pick = faceSession.run(('nms/output:0'), feed_dict=nms_feed) if stacked.size>0 and pick.size>0: gather_feed={'gather/indices:0': pick, 'gather/values:0': stacked} boxes = faceSession.run(('gather/output:0'), feed_dict=gather_feed) p_total_boxes = np.append(p_total_boxes, boxes, axis=0) ``` Tensorflow loves normalizing everything - the image_crop op requires the crop boxes fed to it to be normalized against the images width and height, a bit weird if you ask me. 
``` with tf.variable_scope('normalize'): normalize_bounds = tf.placeholder(tf.float32, [None, None], name='bounds') normalize_width = tf.placeholder(tf.float32, (None), name='width') normalize_height = tf.placeholder(tf.float32, (None), name='height') n1 = normalize_bounds[:,0] / normalize_height n2 = normalize_bounds[:,1] / normalize_width n3 = normalize_bounds[:,2] / normalize_height n4 = normalize_bounds[:,3] / normalize_width normalized_bounds = tf.transpose(tf.stack([n1, n2, n3, n4]), name='output') ``` The network learns adjustments to our fixed box sizes 'regression' applies them to the heatmaps output ``` with tf.variable_scope('regression'): regress_boxes = tf.placeholder(tf.float32, [None, 9], name='bounds') r_w = regress_boxes[:,2]-regress_boxes[:,0]+1. r_h = regress_boxes[:,3]-regress_boxes[:,1]+1. y1_adjust = regress_boxes[:,5]*r_w x1_adjust = regress_boxes[:,6]*r_h y2_adjust = regress_boxes[:,7]*r_w x2_adjust = regress_boxes[:,8]*r_h r_b1 = regress_boxes[:,0]+x1_adjust r_b2 = regress_boxes[:,1]+y1_adjust r_b3 = regress_boxes[:,2]+x2_adjust r_b4 = regress_boxes[:,3]+y2_adjust r_boundingbox = tf.transpose(tf.stack([r_b1, r_b2, r_b3, r_b4])) # convert to square s_h = r_boundingbox[:,3]-r_boundingbox[:,1] s_w = r_boundingbox[:,2]-r_boundingbox[:,0] length = tf.maximum(s_w, s_h) zero = r_boundingbox[:,0]+s_w*0.5-length*0.5 one = r_boundingbox[:,1]+s_h*0.5-length*0.5 zero_one = tf.transpose(tf.stack([zero, one])) two_three = zero_one[:,0:2] + tf.transpose([length, length]) regressed_bounds = tf.concat([zero_one, two_three], 1) expanded = tf.expand_dims(regress_boxes[:,4],1) existing = regress_boxes[:,5:9] regressed_rect = tf.concat([r_boundingbox, expanded, existing], 1, name='output_rect') regressed_stack = tf.concat([regressed_bounds, expanded, existing], 1, name='output_stack') ``` The cascading nature of this implementation allows us to stop at any point if we don't have bounding boxes. 
Otherwise we run a threshold over the probabilities attached to the output boxes and send any remaining boxes to the regression function. ``` numbox = p_total_boxes.shape[0] if numbox>0: nms_feed={'nms/bounds:0': p_total_boxes, 'nms/threshold:0': [.7]} p_pick = faceSession.run(('nms/output:0'), feed_dict=nms_feed) gather_feed={'gather/indices:0': p_pick, 'gather/values:0': p_total_boxes} p_boxes = faceSession.run(('gather/output:0'), feed_dict=gather_feed) out_boxes = faceSession.run(('regression/output_stack:0'), feed_dict={'regression/bounds:0': p_boxes}) num_crop = out_boxes.shape[0] ``` Ok lets draw the results. PNet tends to create a lot of boxes - it convolves over the entire image, it is the crops which interest us, and these are what we send forward to the next network. ``` fig, ax = plt.subplots(1) show_image = np.reshape(plot_image, (image_height,image_width,3)) ax.imshow(show_image) for box in out_boxes: # Create a Rectangle patch rect = patches.Rectangle((box[1],box[0]),box[3]-box[1],box[2]-box[0],linewidth=3,edgecolor='r',facecolor='none') # Add the patch to the Axes ax.add_patch(rect) plt.show() ``` Each of the boxes is cropped out and resized to the input size of RNet (24,24) ``` # norm_boxes = normalized_image(out_boxes[:,0:4], image_height, image_width) # normalize/normalize_width:0 norm_boxes = faceSession.run(('normalize/output:0'), feed_dict= \ {'normalize/bounds:0':out_boxes[:,0:4], \ 'normalize/width:0':image_width, 'normalize/height:0':image_height}) with tf.variable_scope('crop'): num = tf.placeholder(tf.int32, [1], name="num") image_to_crop = tf.placeholder(tf.float32, [None, None, None, 3], name="image") crop_size = tf.placeholder(tf.int32, [2], name="size") boxes = tf.placeholder(tf.float32, [None,4], name="boxes") box_floats = tf.cast(boxes, tf.float32) ind = tf.fill(num, 0, name="indices") resized_crop = tf.image.crop_and_resize(image_to_crop, box_floats, ind, crop_size, \ method="bilinear") subed = tf.transpose(resized_crop, [0,2,1,3]) 
o_rotated_images = tf.multiply(subed, 1.0, name="output") p_cropped_images = faceSession.run(('crop/output:0'), \ feed_dict={'crop/num:0':[num_crop], \ 'crop/size:0':[24, 24], \ 'crop/boxes:0':norm_boxes, 'crop/image:0':feed_image}) with tf.variable_scope('rnet'): rnet.load(os.path.join(model_path, 'det2.npy'), faceSession) r_regression_boxes, r_box_scores = faceSession.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), \ feed_dict={'rnet/input:0':p_cropped_images}) ``` RNet tends to significantly reduce the number of remaining boxes with a probability score greater than our threshold ``` with tf.variable_scope('rnet_post'): r_score_input = tf.placeholder(tf.float32, [None, 2], name='scores') r_regression_input = tf.placeholder(tf.float32, [None, 4], name='regression') r_full_input = tf.placeholder(tf.float32, [None, 9], name='full') r_conditions = tf.greater(r_score_input[:,1], .7) r_yx = tf.where(r_conditions) r_total_gather = tf.gather(r_full_input, r_yx) r_total_reshape = tf.reshape(tf.transpose(r_total_gather, [0,1,2]), [-1, 9]) r_total_remaining = tf.cast(r_total_reshape[:,0:4], tf.float32) r_regression_gather = tf.gather(r_regression_input, r_yx) r_regression_remaining = tf.reshape(r_regression_gather, [-1, 4]) r_scores = tf.expand_dims(tf.reshape(tf.gather(r_score_input, r_yx), [-1,2])[:,1], 1) r_hstack = tf.concat([r_total_remaining, r_scores, r_regression_remaining], 1, name="output") r_net_post_output = faceSession.run(('rnet_post/output:0'), \ feed_dict={'rnet_post/scores:0':r_box_scores, 'rnet_post/regression:0':r_regression_boxes, 'rnet_post/full:0':out_boxes}) rnet_bounding_boxes = faceSession.run(('regression/output_stack:0'), feed_dict={'regression/bounds:0': r_net_post_output}) nms_feed={'nms/bounds:0': rnet_bounding_boxes, 'nms/threshold:0': [.5]} r_pick = faceSession.run(('nms/output:0'), feed_dict=nms_feed) gather_feed={'gather/indices:0': r_pick, 'gather/values:0': rnet_bounding_boxes} rnet_gather = faceSession.run(('gather/output:0'), 
feed_dict=gather_feed) ``` Things are beginning to look better now with only a few crops to be sent forward to the next Network ``` fig, ax = plt.subplots(1) show_image = np.reshape(plot_image, (image_height,image_width,3)) ax.imshow(show_image) for box in rnet_gather: # Create a Rectangle patch rect = patches.Rectangle((box[1], box[0]),box[3]-box[1],box[2]-box[0],linewidth=3,edgecolor='r',facecolor='none') # Add the patch to the Axes ax.add_patch(rect) plt.show() # crops = normalized_image(np.fix(rnet_bounding_boxes), image_height, image_width) crops = faceSession.run(('normalize/output:0'), \ feed_dict= { 'normalize/bounds:0':rnet_gather, \ 'normalize/width:0':image_width, 'normalize/height:0':image_height}) numbox = crops.shape[0] r_cropped_images = faceSession.run(('crop/output:0'), \ feed_dict= { 'crop/num:0':[numbox], \ 'crop/size:0':[48, 48], \ 'crop/boxes:0':crops, 'crop/image:0':feed_image}) with tf.variable_scope('onet'): onet.load(os.path.join(model_path, 'det3.npy'), faceSession) onet_regression_boxes, onet_alignments, onet_scores = \ faceSession.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), \ feed_dict={'onet/input:0':r_cropped_images}) ``` Onet does some more work on identifing faces and provides a 5pt alignment of facial features ``` with tf.variable_scope('onet_post'): o_score_input = tf.placeholder(tf.float32, [None, 2], name='scores') o_alignment_input = tf.placeholder(tf.float32, [None, 10], name='alignments') o_regression_input = tf.placeholder(tf.float32, [None, 4], name='regression') o_full_input = tf.placeholder(tf.float32, [None, 9], name='gather') o_conditions = tf.greater(o_score_input[:,1], .6) o_yx = tf.where(o_conditions) o_total_gather = tf.gather(o_full_input, o_yx) o_total_reshape = tf.reshape(tf.transpose(o_total_gather, [1,0,2]), [-1, 9]) o_total_remaining = tf.cast(o_total_reshape[:,0:4], tf.float32) o_regression_gather = tf.gather(o_regression_input, o_yx) o_regression_remaining = 
tf.reshape(o_regression_gather, [-1, 4]) o_alignment_gather = tf.gather(o_alignment_input, o_yx) o_alignment_remaining = tf.reshape(o_alignment_gather, [-1, 10], name="alignment_output") o_scores = tf.expand_dims(tf.reshape(tf.gather(o_score_input, o_yx), [-1,2])[:,1], 1) o_hstack = tf.concat([o_total_remaining, o_scores, o_regression_remaining], 1, name="output") feed_dict={'onet_post/scores:0': onet_scores, \ 'onet_post/alignments:0':onet_alignments, \ 'onet_post/regression:0':onet_regression_boxes, \ 'onet_post/gather:0': rnet_gather} o_total_boxes, o_align = faceSession.run(('onet_post/output:0', 'onet_post/alignment_output:0'), feed_dict=feed_dict) onet_bounding_boxes = faceSession.run(('regression/output_stack:0'), feed_dict={'regression/bounds:0': o_total_boxes}) nms_feed={'nms/bounds:0': o_total_boxes, 'nms/threshold:0': [.7]} o_pick = faceSession.run(('nms/output:0'), feed_dict=nms_feed) with tf.variable_scope('final_gather'): f_pick_input = tf.placeholder(tf.int32, [None], name='pick') f_alignment_input = tf.placeholder(tf.float32, [None, 10], name='alignments') f_regression_input = tf.placeholder(tf.float32, [None, 9], name='boxes') f_total_input = tf.placeholder(tf.float32, [None, 9], name='total') f_total_gather = tf.gather(f_total_input, f_pick_input, name="total_output") f_regression_gather = tf.gather(f_regression_input, f_pick_input, name="box_output") f_alignment_gather = tf.gather(f_alignment_input, f_pick_input, name="alignment_output") final_feed={'final_gather/pick:0': o_pick, 'final_gather/alignments:0': o_align, \ 'final_gather/boxes:0': onet_bounding_boxes, 'final_gather/total:0': o_total_boxes} f_align, f_boxes, f_total = faceSession.run(('final_gather/alignment_output:0', \ 'final_gather/box_output:0', 'final_gather/total_output:0'), \ feed_dict=final_feed) ``` You can view the full graph we export as a summary ``` train_writer = tf.summary.FileWriter('summaries/' + 'graphs/face', faceSession.graph) ``` Finally we can display the bounding 
boxes and the point alignments of the face. Note the alignments should be transformed to the regressed bounding box for final positioning. ``` fig, ax = plt.subplots(1) show_image = np.reshape(plot_image, (image_height,image_width,3)) ax.imshow(show_image) for idx, box in enumerate(f_boxes): # Create a Rectangle patch width = box[3]-box[1] height = box[2]-box[0] re = f_total[idx] r_width = re[3]-re[1] r_height = re[2]-re[0] rect = patches.Rectangle((re[1], re[0]),r_width,r_height,linewidth=2,edgecolor='b',facecolor='none') sq = patches.Rectangle((box[1], box[0]),width,height,linewidth=3,edgecolor='r',facecolor='none') pts = f_align[idx] shape_pts = np.reshape(pts, (2, -1)) xs = shape_pts[0,:] * width + box[1] ys = shape_pts[1,:] * height + box[0] shape_pts = np.stack((ys, xs), 0) shape_pts = np.transpose(shape_pts) for circle in shape_pts: # Create a Rectangle patch cir = patches.Circle((circle[1], circle[0]), 2, linewidth=3, edgecolor='w', facecolor='none') # Add the patch to the Axes ax.add_patch(cir) # Add the patch to the Axes ax.add_patch(sq) ax.add_patch(rect) plt.show() ``` To import the pb graph into other sessions we save and freeze it ``` from tensorflow.python.framework import graph_util from tensorflow.python.training import saver as saver_lib from tensorflow.core.protobuf import saver_pb2 checkpoint_prefix = os.path.join("checkpoints", "saved_checkpoint") checkpoint_state_name = "checkpoint_state" input_graph_name = "input_graph.pb" output_graph_name = "face_align.pb" input_graph_path = os.path.join("checkpoints", input_graph_name) saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2) checkpoint_path = saver.save( faceSession, checkpoint_prefix, global_step=0, latest_filename=checkpoint_state_name) graph_def = faceSession.graph.as_graph_def() from tensorflow.python.lib.io import file_io file_io.atomic_write_string_to_file(input_graph_path, str(graph_def)) print("wroteIt") from tensorflow.python.tools import freeze_graph input_saver_def_path = "" 
input_binary = False output_node_names = "rescale/resize,"+\ "regression/output_rect,"+\ "final_gather/box_output,"+\ "final_gather/alignment_output,"+\ "regression/output_stack,"+\ "normalize/output,"+\ "pnet/conv4-2/BiasAdd,pnet/prob1,"+\ "rnet/conv5-2/conv5-2,rnet/prob1,"+\ "rnet_post/output,"+\ "onet/conv6-2/conv6-2,onet/conv6-3/conv6-3,onet/prob1,"+\ "onet_post/output,"+\ "onet_post/alignment_output,"+\ "heatmap/bounds_output,"+\ "gather/output,"+\ "crop/output,"+\ "nms/output" restore_op_name = "save/restore_all" filename_tensor_name = "save/Const:0" output_graph_path = os.path.join("data", output_graph_name) clear_devices = False freeze_graph.freeze_graph(input_graph_path, input_saver_def_path, input_binary, checkpoint_path, output_node_names, restore_op_name, filename_tensor_name, output_graph_path, clear_devices, "") ```
github_jupyter
# <u>Chapter 3</u>: Topic Classification Businesses deal with many other unstructured texts daily, like, news posts, support tickets, or customer reviews. Failing to glean this data efficiently can lead to missed opportunities or, even worse, angry customers. So again, an automated system that can process a vast amount of data is a more scalable solution than manual scanning. In this exercise, we focus on the problem of `topic classification`, with the aim to assign a topic to some piece of text. The list of topics is predefined, and in that sense, we are still in the realm of supervised learning. For this task, we use the [20 newsgroups dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_20newsgroups.html#sklearn.datasets.fetch_20newsgroups) available through _scikit-learn_ and that comprises around 18000 newsgroups posts on 20 topics. We implement a topic classifier from scratch and present various techniques related to dimensionality reduction and text mining: * **Exploratory data analysis** * Pie charts * N-gram frequencies * **Dimensionality reduction** * Principal Component Analysis * Linear Discriminant Analysis * Singular Value Decomposition * **Text representations** * Word Embedding * **Classification algorithms** * K-Nearest Neighbor * Random Forest * Decision Trees * **Tools** * fastText You need a Google account to download: [GoogleNews-vectors-negative300.bin.gz](https://code.google.com/archive/p/word2vec/) Then, place the file under: _./data_ ``` # Install the necessary modules. %pip install matplotlib %pip install sklearn %pip install pandas %pip install numpy %pip install seaborn %pip install gensim %pip install fasttext ``` ## Exploratory data analysis As in every machine learning problem, we start with an exploratory data analysis. Before we start an in-depth analysis, we extract some basic information from the corpus. 
After importing the dataset, we can print the number of samples in it, the different categories of news posts, and the contents of one sample. As we can see from the output, the posts deal with diverse subjects like technology, politics, and religion, where their content consists of metadata (e.g. _From_, _Subject_) and the main body of the message. ``` %matplotlib inline from sklearn.datasets import fetch_20newsgroups # Load the news data and print the names of the categories. news = fetch_20newsgroups(subset='all') # Print various information about the data. print("Number of articles: " + str(len(news.data))) print("Number of different categories: " + str(len(news.target_names))) print(news.target_names) print("\n".join(news.data[6].split("\n")[:])) ``` Using the code below, we create a pie chart, where each slice shows the percentage of each category in the dataset. We observe a balance in the number of samples per category looking at the figure. This is quite an important observation because we want to avoid categories that monopolize the data and might skew our analysis. If this were the case, we should simply use a subset for this category to balance it with others. ``` import matplotlib.pyplot as plt # Keep track of the number of samples per category. samples_per_category = {} # Iterate over all data. for i in range(len(news.data)): # Get the category for the specific sample. category = news.target_names[news.target[i]] # Increase the category index by one. if category in samples_per_category: samples_per_category[category] += 1 else: samples_per_category[category] = 1 # Create and show the distribution pie chart. slices = [] # Obtain the slices of the pie. 
for key in samples_per_category: slices.append(samples_per_category[key]) fig, ax = plt.subplots(figsize=(10, 10)) ax.pie(slices, labels=news.target_names, autopct='%1.1f%%', startangle=90) ``` Another helpful visualization is the frequency of bi-grams in each category, as it provides an immediate intuition whether or not they can be used for classifying the posts. For example, we calculate the thirty most frequent bi-grams for the _misc.forsale_ category. According to the histogram, few bi-grams seem relevant to this category, like _for sale_, _best offer_ and _to sale_. There are, of course, others that can equally appear to all categories. ``` import pandas as pd from sklearn.feature_extraction.text import CountVectorizer # Samples for the 'misc.forsale' category. news_misc_forsale = [] # Iterate over all data. for i in range(len(news.data)): # Get the samples. if news.target_names[news.target[i]] == 'misc.forsale': news_misc_forsale.append(news.data[i]) # Create the count vectorizer using bi-grams. vectorizer = CountVectorizer(ngram_range=(2, 2)) # Fit and transform x = vectorizer.fit_transform(news_misc_forsale) # Get the 30 most frequent bigrams. sum_words = x.sum(axis=0) words_freq = [(word, sum_words[0, idx]) for word, idx in vectorizer.vocabulary_.items()] words_freq = sorted(words_freq, key=lambda x:x[1], reverse=True) words = words_freq[:30] # Create and show the bar chart. df = pd.DataFrame(words, columns=['Bi-gram', 'Frequency']) df = df.set_index('Bi-gram') df.plot(figsize=(10, 5), kind='bar', title='Bi-gram frequency chart') ``` ## Dimensionality reduction As part of the `EDA`, it could be helpful to visualize high-dimensional spaces into something our limited human brain can comprehend. In this way, we can identify patterns in the data and possible directions for analysis. Furthermore, there are situations in machine learning problems where redundant or highly correlated features can be removed from the subsequent analysis. 
For example, in a task for classifying planets, radius (_r_) and circumference (_2πr_) can be two of the features. However, as they are highly correlated, there is no extra gain including both in the feature space. So, we can either keep only one feature or introduce a new one that is a linear combination of radius and circumference. This process is called `dimensionality reduction` and it proves to be very helpful for speeding up the training of machine learning algorithms, filtering noise in the data, performing feature extraction, and data visualization. In general, working with fewer dimensions often makes analysis algorithms more efficient and can help machine learning algorithms offer more accurate predictions. ### Principal Component Analysis One classic method of this kind is called `Principal Component Analysis` (PCA), which is an unsupervised learning method. This means that PCA tries to identify relations among the data samples without knowing the class each one belongs to. The method creates a new coordinate system with a new set of orthogonal axes (principal components); the first axis goes toward the highest variance in the data, while the second one goes to the second-highest variance. For multidimensional data, more principal components can be calculated. PCA aims at keeping the maximum amount of variation (information) about how the original data is distributed, so bear in mind that a certain level of information is lost during the process. After calculating the three components, the output is shown in the figure. It is evident that _PC1_ captures the most variation, which is reduced as we move down in order (_PC2_ and _PC3_). ``` import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA # Our random data points. 
points = np.array([[0.1,0.1,0.1], [0.2,0,0.2], [0.3,-0.1,0.3], [0.4,-0.1,0.4], [-0.1,0.1,-0.1], [-0.2,0,-0.2], [-0.3,0.1,-0.3], [-0.4,0,-0.4], [-0.5,0.1,-0.5], [0.2,0,0.1], [0.3,0.1,0.2], [0.4,-0.1,0.5], [0.5,0.1,0.4], [0.5,0,0.6], [0.3,-0.1,0.4], [-0.2,-0.1,-0.1], [-0.4,0.1,-0.3], [-0.2,0.1,-0.3], [-0.6,-0.1,-0.5], [-0.5,0,-0.4]]) # Standardize the points. spoints = StandardScaler().fit_transform(points) # Calculate 3 principal components. pca = PCA(n_components=3) pcaComponents = pca.fit_transform(spoints) # Generate the scatter plot. x1 = [1]*20 x2 = [2]*20 x3 = [3]*20 # Plot the figure. pcaFigure = plt.figure(figsize=(8, 8)) pcaAxes = pcaFigure.add_subplot(1, 1, 1) pcaAxes.scatter(x1, pcaComponents[:,0], s=50, c='r', marker="s", label='PC1') pcaAxes.scatter(x2, pcaComponents[:,1], s=50, c='g', marker="p", label='PC2') pcaAxes.scatter(x3, pcaComponents[:,2], s=50, c='b', marker="x", label='PC3') plt.legend(loc='upper right') ``` At the beginning of the section, we have mentioned that each principal component captures part of the variance in a certain direction. But how can we quantify this, and when can we be certain that enough variance has been considered? For example, the code below shows that the first component contains 69.9% of the variance, the second 29.6%, and the third 0.004%. A rule of thumb, in this case, is that the first and second principal components should capture at least 85% of the variance to expect some helpful insight into the data when examining the generated plot. So the percentage in our case is acceptable. ``` # Show the variance ratio per principal component. pca.explained_variance_ratio_ ``` We are now ready to draw the plot of our data points on the new coordinate system. The points now occupy a new position, with similar ones being closer. Look, for example, at the cluster with the samples 3, 4, 12, and 15. Another interesting observation is that there are three trend lines for the data points. 
The samples that belong to each line have the same value in their _y_ coordinate. E.g. the points 2, 6, 8, 10, 14, and 20. ``` # Create a data frame out of the principal components. pcaFrame = pd.DataFrame(data = pcaComponents, columns = ['principal component 1', 'principal component 2', 'principal component 3']) # Generate the scatter plot. pcaFigure = plt.figure(figsize=(13, 8)) pcaAxes = pcaFigure.add_subplot(1, 1, 1) pcaAxes.set_xlabel('First principal component', fontsize=15) pcaAxes.set_ylabel('Second principal component', fontsize=15) pcaAxes.scatter(pcaFrame.loc[:, 'principal component 1'], pcaFrame.loc[:, 'principal component 2'], c='black', s=50) # Add the index of each point to the plot. index = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] for i, txt in enumerate(index): pcaAxes.annotate(txt, (pcaFrame.loc[i, 'principal component 1'], pcaFrame.loc[i, 'principal component 2']), fontsize=15) pcaAxes.grid() pcaFigure.show() ``` Going a step further, it is logical to think that if a few sets of principal components can visualize the data, these can also be used as new features. The idea behind feature extraction under PCA is that the new feature values can express the data samples as a weighted sum of the principal components. In practice, PCA reduces the initial number of features in ML problems and can lead to simpler and more accurate classification models. ### Linear Discriminant Analysis Another well-known dimensionality reduction algorithm is the `Linear Discriminant Analysis` (LDA). While PCA is an unsupervised algorithm focusing on identifying the combination of principal components that maximize variance in a dataset, LDA is a supervised algorithm to maximize separability between different classes. ### PCA and LDA in action Let’s apply our knowledge about PCA and LDA in the _20 newsgroups_ dataset. Both techniques will be used to visualize the data.
From the output, we observe that data points of similar categories appear to cluster closer. This is quite amazing as only two principal components were used. ``` import matplotlib.pyplot as plt import numpy as np from sklearn import metrics from sklearn.decomposition import PCA from sklearn.feature_extraction.text import TfidfVectorizer # Select one of the following three categories. categories = ['comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'talk.politics.misc'] #categories = ['alt.atheism', 'comp.windows.x', 'talk.religion.misc'] #categories = ['rec.sport.baseball', 'rec.sport.hockey', 'sci.space'] #categories = ['rec.autos', 'rec.motorcycles', 'talk.politics.guns'] # Load the news data only for the specific categories. news = fetch_20newsgroups(categories=categories) # Keep a smaller portion of the data. data_samples = news.data[:2000] data_target = news.target[:2000] # Create the tf-idf vectorizer. vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=100, stop_words='english') # Generate the tf-idf matrix for the datamset. tfidf = vectorizer.fit_transform(data_samples) # Calculate 2 principal components. pca = PCA(n_components=2) pcaComponents = pca.fit_transform(tfidf.toarray()) # Create and show the plot. plt.figure(figsize=(10, 10)) scatter = plt.scatter(pcaComponents[:,0], pcaComponents[:,1], c=data_target) labels = np.unique(data_target) handles = [plt.Line2D([],[], marker="o", ls="", color=scatter.cmap(scatter.norm(i))) for i in labels] plt.legend(handles, categories) ``` Let’s try using LDA now for the same task. As before, two principal components are used, and the output is presented in the figure. We observe this time that the separation of classes is much better and with minimum overlap. ``` from sklearn.discriminant_analysis import LinearDiscriminantAnalysis # Calculate 2 principal components. 
lda = LinearDiscriminantAnalysis(n_components=2) ldaComponents = lda.fit(tfidf.toarray(), data_target) ldaComponents = lda.transform(tfidf.toarray()) # Create and show the plot. plt.figure(figsize=(10, 10)) scatter = plt.scatter(ldaComponents[:,0], ldaComponents[:,1], c=data_target) labels = np.unique(data_target) handles = [plt.Line2D([], [], marker="o", ls="", color=scatter.cmap(scatter.norm(i))) for i in labels] plt.legend(handles, categories) ``` We can also obtain the ten top words per news category. Interestingly, the output includes words like “_card_”, “_ide_”, “_bus_” for _comp.sys.ibm.pc.hardware_, “_apple_”, “_mac_” for _comp.sys.mac.hardware_ and “_government_”, “_clinton_" for _talk.politics.misc_. ``` # Print the 10 top words per news category. feature_names = np.asarray(vectorizer.get_feature_names()) for i, category in enumerate(categories): top = np.argsort(lda.coef_[i])[-10:] print("%s: %s" % (category, " ".join(feature_names[top]))) ``` ## K-Nearest Neighbor The `K-Nearest Neighbor` (KNN) algorithm is a nonparametric and lazy learning algorithm that stores the position of all data samples and classifies new cases based on some similarity measure. Lazy learning means that the algorithm takes almost zero time to learn, and in the case of KNN, the training samples are simply stored and used to classify new samples. Then, a majority vote of its neighbors classifies each sample. We start by creating the training and test sets. ``` # Select the following five categories. categories = ['alt.atheism', 'comp.graphics', 'misc.forsale', 'rec.autos', 'sci.crypt'] # Load data only for the specific categories. train_data = fetch_20newsgroups(subset='train', categories=categories, random_state=123) test_data = fetch_20newsgroups(subset='test', categories=categories, random_state=123) # Create the tf-idf vectorizer. vectorizer = TfidfVectorizer(stop_words='english') # Generate the tf-idf matrix for the two sets. 
tfidf_train = vectorizer.fit_transform(train_data.data) tfidf_test = vectorizer.transform(test_data.data) # Print the shape of the sets. print(tfidf_train.shape) print(tfidf_test.shape) ``` To use KNN, we need to choose a value for the hyperparameter _K_. The question now is what this value should be. Fortunately, there is a technique to help us in this situation that is called `Cross-Validation`. There are three basic steps: (1) initially we partition the data into several subsets (folds), (2) each time we hold out one of these subsets and train the model with the rest, and (3) we evaluate the model with the holdout test. There are different types of cross-validation, and `k-Fold` is one of those. The code below shows an example for the 10-fold cross-validation case. ``` import scipy.sparse as sp from sklearn.model_selection import cross_val_score from sklearn.neighbors import KNeighborsClassifier # List of possible number of neighbors. neighbors_values = list(range(1, 100)) # List of the mean scores. mean_scores = [] # Perform 10-fold cross-validation. for k in neighbors_values: # Create the classifier classifier = KNeighborsClassifier(n_neighbors=k) # Obtain the cross-validation scores. scores = cross_val_score(classifier, tfidf_train, train_data.target, cv=10, scoring='accuracy') # Store the mean value of the scores. mean_scores.append(scores.mean()) # Calculate the errors. errors = [1 - x for x in mean_scores] # Obtain the best value for the hyperparameter. best_value = neighbors_values[errors.index(min(errors))] # Print the best value for the hyperparameter. print(best_value) ``` Using this value for _K_=94 we can proceed with training the KNN model on the whole training set and evaluate it on the test one. According to the output, the accuracy is around 90.5%, which is quite a decent result. ``` import seaborn as sns from sklearn.metrics import confusion_matrix # Create the classifier.
knn_classifier = KNeighborsClassifier(n_neighbors=94) # Fit the classifier with the train data. knn_classifier.fit(tfidf_train, train_data.target) # Get the predicted classes. test_class_pred = knn_classifier.predict(tfidf_test) # Calculate the accuracy on the test set. metrics.accuracy_score(test_data.target, test_class_pred) ``` We also print the confusion matrix that can provide a better intuition on the strengths and weaknesses of the model. As we can observe, the highest numbers appear in the main diagonal, and ideally, we prefer everything else to be zero. For example, 24 of the _misc.forsale_ posts were mistakenly classified as _rec.autos_ ones. This is not very surprising, as one might expect that sale posts can also refer to automobiles. ``` # Create the confusion matrix. cm = confusion_matrix(test_data.target, test_class_pred) # Plot confusion_matrix. fig, ax = plt.subplots(figsize=(10, 5)) sns.heatmap(cm, annot=True, cmap="Set3", fmt="d", xticklabels=categories, yticklabels=categories) ax.set_yticklabels(categories, rotation=0) plt.ylabel('Actual') plt.xlabel('Predicted') ``` Another thing we can try is to remove the metadata from each sample and calculate the new model’s accuracy. It seems logical to think that headers like "_From_", _"Subject"_, etc. do not contribute much to the classification task. The accuracy drops significantly to 21%. In the previous run, the algorithm could simply learn from the metadata. Perhaps certain keywords in the subject, the presence or absence of certain headers, etc. proved to be an important source of knowledge that was lost in the second run. ``` # Load data only for the specific categories. train_data = fetch_20newsgroups(subset='train', categories=categories, random_state=123, remove=('headers', 'footers', 'quotes')) test_data = fetch_20newsgroups(subset='test', categories=categories, random_state=123, remove=('headers', 'footers', 'quotes')) # Create the tf-idf vectorizer. 
vectorizer = TfidfVectorizer(stop_words='english') # Generate the tf-idf matrix for the two sets. tfidf_train = vectorizer.fit_transform(train_data.data) tfidf_test = vectorizer.transform(test_data.data) # Fit the classifier with the train data. knn_classifier.fit(tfidf_train, train_data.target) # Get the predicted classes. test_class_pred = knn_classifier.predict(tfidf_test) # Calculate the accuracy on the test set. metrics.accuracy_score(test_data.target, test_class_pred) ``` Baseline model ``` samples_per_category len(fetch_20newsgroups(subset='train', categories=['sci.crypt']).data) len(fetch_20newsgroups(subset='test', categories=['sci.crypt']).data) ``` ## Random Forest The `Random forest` method exploits the benefits of ensemble learning by constructing a multitude of `decision trees` on randomly selected data samples. Each one of those produces its own prediction, and the method is responsible for choosing the best one by means of voting. For example, the code shown below uses the dataset after removing any header. In this case, accuracy reaches 80%, which is astonishingly better than the result using KNN. ``` from sklearn.ensemble import RandomForestClassifier # Create the classifier. rf_classifier = RandomForestClassifier(n_estimators=100, random_state=123) # Fit the classifier with the train data. rf_classifier.fit(tfidf_train, train_data.target) # Get the predicted classes. test_class_pred = rf_classifier.predict(tfidf_test) # Calculate the accuracy on the test set. metrics.accuracy_score(test_data.target, test_class_pred) ``` Coming back to the discussion about dimensionality reduction, it makes sense to apply it in our classification problem. Instead of using all available features, we can try to introduce a smaller set of features. One common technique for this task is the `Singular Value Decomposition` (SVD), which efficiently works with sparse matrices (frequently encountered in text classification). 
Two hundred components are chosen as output for the SVD that will be used as features by the random forest classifier. ``` from sklearn.decomposition import TruncatedSVD # Load data only for the specific categories. train_data = fetch_20newsgroups(subset='train', categories=categories, random_state=123) test_data = fetch_20newsgroups(subset='test', categories=categories, random_state=123) # Generate the tf-idf matrix for the two sets. tfidf_train = vectorizer.fit_transform(train_data.data) tfidf_test = vectorizer.transform(test_data.data) # Calculate 200 components for the train and test sets. svd = TruncatedSVD(n_components=200, algorithm='randomized', n_iter=5, random_state=123, tol=0.0) svdComponents_train = svd.fit_transform(tfidf_train.toarray()) svdComponents_test = svd.transform(tfidf_test.toarray()) print(svdComponents_train.shape) ``` We can also plot the percentage of the cumulative variance for the different number of components in SVD. For example, the 200 components capture around 30% of the variance in the dataset, as shown in the figure. ``` # Plot the cumulative variance percentage. explained = svd.explained_variance_ratio_.cumsum() plt.figure(figsize=(8, 8)) plt.plot(explained, '.-', ms=6, color='b') plt.xlabel('#Num of components', fontsize= 14) plt.ylabel('Cumulative variance percentage', fontsize=14) plt.xticks(fontsize=14) plt.yticks(fontsize=14) ``` In this case the accuracy of the classifier is around 90%. ``` # Fit the classifier with the train data. rf_classifier.fit(svdComponents_train, train_data.target) # Get the predicted classes. test_class_pred = rf_classifier.predict(svdComponents_test) # Calculate the accuracy on the test set. metrics.accuracy_score(test_data.target, test_class_pred) ``` Surprisingly, we obtain the same accuracy as the one for KNN when using _n_components=10_ in the same example. 
This suggests that the classification performance is the same with a much smaller set of features, and this model is preferable to the one for KNN. In general, there might be a number of possible and more complex alternatives for solving a particular problem. However, precedence should be given to simplicity; the simpler explanation of a problem should be preferred between two competing theories. This principle is called `Occam’s razor` and finds application in every field of life. ``` # Calculate 10 components for the train and test sets. svd = TruncatedSVD(n_components=10, algorithm='randomized', n_iter=5, random_state=123, tol=0.0) svdComponents_train = svd.fit_transform(tfidf_train.toarray()) svdComponents_test = svd.transform(tfidf_test.toarray()) # Fit the classifier with the train data. knn_classifier.fit(svdComponents_train, train_data.target) # Get the predicted classes. test_class_pred = knn_classifier.predict(svdComponents_test) # Calculate the accuracy on the test set. metrics.accuracy_score(test_data.target, test_class_pred) ``` ## Word embedding You are asked to create the matching algorithm for a new dating service. After registering to the system, your potential users will be asked a series of questions crafted to assess their five personality traits: extraversion, agreeableness, openness, conscientiousness, and neuroticism. A specific percentage will be assigned to each individual trait (black:0% and white:100%). <img src="https://github.com/PacktPublishing/Machine-Learning-Techniques-for-Text/blob/main/ch3/images/dating.jpg?raw=true" alt="Grayscale vectors of personality traits" style="width: 500px;"/> The code below translates grayscale percentages into real numerical values and calculates the cosine similarity between the vectors. The output of this process proves us correct. The fourth candidate has the highest cosine similarity with the user under study indeed. Therefore, our dating application can now suggest their profile to the user.
``` import numpy as np from sklearn.metrics.pairwise import cosine_similarity # Create the data for our user and the candidate profiles. user = np.array([[0.41, 0.22, 0.85, 0.08, 0.98]]) candidates = np.matrix([[0.2, 0.93, 0.83, 0.39, 0.19], [0.89, 0.87, 0.7, 0.18, 0.25], [0.72, 0.03, 0.05, 0.82, 0.06], [0.43, 0.78, 0.79, 0.02, 0.86], [0.02, 0.03, 0.71, 0.39, 0.42]]) # Calculate and print the cosine similarity. for candidate in candidates: print(cosine_similarity(user, candidate)) ``` If you have grasped the reasoning behind the previous example, it should be straightforward to understand the word embedding representation. Like the five traits represent each person as a unique point in a 5-dimensional space, `word embedding` represents words in a multidimensional space, typically on the order of hundreds. <img src="https://github.com/PacktPublishing/Machine-Learning-Techniques-for-Text/blob/main/ch3/images/word-emdedding.jpg?raw=true" alt="Embedding vector of various English words" style="width: 700px;"/> Following the same approach as before, we show the embedding vector of various English words in the figure below. Each vector consists of 20 grayscale values, artificially constructed to provide some intuition on the subject. Take a look, for example, at the words for countries. They differ in all dimensions except for the 13th one. In the same manner, capital names match on the 3rd dimension, humans on the 9th (the dimension of humanity), boys and girls on the 9th and 6th (presumably the dimension of youth), and king and queen on the 9th and 17th (perhaps the dimension of royalty). So, word embedding is not just a way to numerically represent textual data but also a way to identify relations between words. This representation can lead to more powerful machine learning models by encapsulating their linguistic meaning. This is the reason why word embedding representations have gained a predominant role during the last few years. 
The `gensim` module allows us to use word embedding. We will also need to download pre-trained word embedding and _GoogleNews-vectors-negative300.bin.gz_ is one of the many different options. Notice that according to your system configuration it might take several minutes to load and execute the package. In the example below, we perform the vector arithmetic presented earlier and obtain the expected results. ``` from gensim.models import KeyedVectors # Load the Word2Vec model. model = KeyedVectors.load_word2vec_format('./data/GoogleNews-vectors-negative300.bin.gz', binary=True) # Perform various word vector arithmetics. model.most_similar(positive=['woman', 'king'], negative=['man'], topn=1) model.most_similar(positive=['germany', 'paris'], negative=['france'], topn=1) model.most_similar(positive=['play', 'walked'], negative=['walk'], topn=1) model.most_similar(positive=['mother', 'doctor'], negative=['father'], topn=1) model.most_similar(positive=['she', 'architect'], negative=['he'], topn=1) model.most_similar(positive=['woman', 'professor'], negative=['man'], topn=1) model.most_similar(positive=['woman', 'schoolteacher'], negative=['man'], topn=1) model.most_similar(positive=['woman', 'author'], negative=['man'], topn=1) model.most_similar(positive=['man', 'psychologist'], negative=['woman'], topn=1) ``` ### Classification It’s about time to use this form of textual representation for classification. In the following example, we iterate over all training samples, calculating each word’s vector in the sample. The word embedding for each sample is an average value of each word’s embedding. The same process is repeated for the test set. ``` import re def get_word_vector(sample): wv = np.zeros(300) # Word vector. n = 0 # Number of words that have a word vector. # Iterate over all words in the sample. for word in re.sub('\\(|\\)|\n|\t| |,|\.|\?|/|=|\"', "", sample).split(" "): # The word might not be present in the model.
if word.lower() in model: wv = np.add(wv, model[word.lower()]) n += 1 if n == 0: # Use a dummy word. wv = np.add(wv, model["empty"]) else: # Get an average value by dividing with n. wv = np.divide(wv, n) return wv def get_word_vect_from_data(input): # Word vectors of the samples. wv_vect = [] # Iterate through the data. for sample in input: # Get the word vector. wv = get_word_vector(sample) # Store the result for the sample. wv_vect.append(wv) return wv_vect # Get the word vectors for the training and test data. wv_train = get_word_vect_from_data(train_data.data) wv_test = get_word_vect_from_data(test_data.data) ``` As before, we use the `Random Forest` as our classifier, which yields a performance of around 82%. ``` # Fit the classifier with the train data. rf_classifier.fit(wv_train, train_data.target) # Get the predicted classes. test_class_pred = rf_classifier.predict(wv_test) # Calculate the accuracy on the test set. metrics.accuracy_score(test_data.target, test_class_pred) ``` ## fastText Let’s now examine another tool that offers pre-trained word embedding models. [fastText](https://fasttext.cc/) is an open-source tool created by Facebook for text representation and classification. We utilize the same training and test set, which were restructured to the form expected by the tool. After engaging the fastText supervised classifier, a similar accuracy is obtained. It seems that we cannot surpass this threshold, but you are strongly urged to test different configurations of the algorithms presented. ``` import fasttext # Read and clean the data. fasttext_train_data = [re.sub('\\(|\\)|\n|\t| |,|\.|\?|/|=|\"', "", sample) for sample in train_data.data] fasttext_test_data = [re.sub('\\(|\\)|\n|\t| |,|\.|\?|/|=|\"', "", sample) for sample in test_data.data] # Read and change the class labels. 
fasttext_train_target = [("__label__" + str(sample) + " ") for sample in train_data.target] fasttext_test_target = [("__label__" + str(sample) + " ") for sample in test_data.target] # Element wise concatenation of the two lists. fasttext_train = [i + j for i, j in zip(fasttext_train_target, fasttext_train_data)] # Print a sample. fasttext_train[0] # Write the data into a file. with open('./data/fasttext.train', 'w') as f: for item in fasttext_train: f.write("%s\n" % item) # Train the model. fs_model = fasttext.train_supervised(input="./data/fasttext.train", lr=1.0, epoch=100) # Get the predictions using the test data. predictions = fs_model.predict(fasttext_test_data) # Assess the model. fasttext_test_target_pred = [(label[0] + " ") for label in predictions[:][0]] metrics.accuracy_score(fasttext_test_target, fasttext_test_target_pred) ``` ### Machine Learning Techniques for Text &copy;2021&ndash;2022, Nikos Tsourakis, <nikos@tsourakis.net>, Packt Publications. All Rights Reserved.
github_jupyter
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MisaOgura/flashtorch/blob/master/examples/activation_maximization_colab.ipynb) ## Activation maximization --- A quick demo of activation maximization with [FlashTorch 🔦](https://github.com/MisaOgura/flashtorch), using the pre-trained VGG16 model. ❗This notebook is for those who are using this notebook in **Google Colab**. If you aren't on Google Colab already, please head to the Colab version of this notebook **[here](https://colab.research.google.com/github/MisaOgura/flashtorch/blob/master/examples/activation_maximization_colab.ipynb)** to execute. --- [Activation maximization](https://pdfs.semanticscholar.org/65d9/94fb778a8d9e0f632659fb33a082949a50d3.pdf) is one form of feature visualization that allows us to visualize what CNN filters are "looking for", by applying each filter to an input image and updating the input image so as to maximize the activation of the filter of interest (i.e. treating it as a gradient ascent task with filter activation values as the loss). The optimization and visualization is available via `flashtorch.activmax.GradientAscent`. The implementation is inspired by [this demo](https://blog.keras.io/category/demo.html) by Francois Chollet. ### 0. Set up A GPU runtime is available on Colab for free, from the `Runtime` tab on the top menu bar. It is **highly recommended to use GPU** as a runtime for the enhanced speed of computation. ``` # Install flashtorch !pip install flashtorch %matplotlib inline %config InlineBackend.figure_format = 'retina' import torchvision.models as models from flashtorch.activmax import GradientAscent ``` ### 1. Load a pre-trained Model ``` model = models.vgg16(pretrained=True) # Print layers and corresponding indicies list(model.features.named_children()) ``` ### 2. 
Specify layers and filters ``` conv1_2 = model.features[2] conv1_2_filters = [17, 33, 34, 57] conv2_1 = model.features[5] conv2_1_filters = [27, 40, 68, 73] conv3_1 = model.features[10] conv3_1_filters = [31, 61, 147, 182] conv4_1 = model.features[17] conv4_1_filters = [238, 251, 338, 495] conv5_1 = model.features[24] conv5_1_filters = [45, 271, 363, 409] ``` ### 3. Optimize and visualize filters Creating an instance of `GradientAscent` class with the model _without fully-connected layers_ allows us to use flexible input image sizes. ``` g_ascent = GradientAscent(model.features) g_ascent.use_gpu = True ``` By calling the `visualize` method and passing in the layer and filter indices defined above, it performs optimization and visualization. This is perhaps the most common way to use the `GradientAscent` class, but there are other APIs available according to your use cases (see section 4). ``` g_ascent.visualize(conv1_2, conv1_2_filters, title='conv1_2'); g_ascent.visualize(conv2_1, conv2_1_filters, title='conv2_1'); g_ascent.visualize(conv3_1, conv3_1_filters, title='conv3_1'); g_ascent.visualize(conv4_1, conv4_1_filters, title='conv4_1'); g_ascent.visualize(conv5_1, conv5_1_filters, title='conv5_1'); ``` We can see that, in the earlier layers (conv1_2, conv2_1), filters get activated by colors and simple patterns such as vertical, horizontal and diagonal lines. In the intermediate layers (conv3_1, conv4_1), we start to see more complex patterns. Then concepts like 'eye' (filter 45) and 'entrance (?)' (filter 271) seem to appear in the last layer (conv5_1). ### 4. Other ways to use `GradientAscent` #### 4-1. `GradientAscent.visualize`: randomly select filters If you have a convolutional layer you want to visualize, but you don't know which filters to choose, you can just pass in the layer to `visualize` without `filter_idxs`. It will randomly choose filters. You can adjust the number of filters chosen by passing `num_subplots` (default=4).
``` g_ascent.visualize(conv5_1, title='Randomly selected filters from conv5_1'); ``` #### 4-2. `GradientAscent.visualize`: plot one filter If you just want to visualize one filter, you can do so by specifying the filter index as an integer, not a list. ``` g_ascent.visualize(conv5_1, 3, title='conv5_1 filter 3'); ``` #### 4-3. `GradientAscent.visualize`: return image tensor If you want to grab the optimized image data, set `return_output` to `True`. ``` output = g_ascent.visualize(conv5_1, 3, title='conv5_1 filter 3', return_output=True); print('num_iter:', len(output)) print('optimized image:', output[-1].shape) ``` #### 4-4. `GradientAscent.deepdream`: create DeepDream You can create a [DeepDream](https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html) by supplying a path to your own image. This will optimize the supplied image, instead of a random input noise, with regards to the filter specidied. ``` # Download the example image !mkdir -p images !wget https://github.com/MisaOgura/flashtorch/raw/master/examples/images/jay.jpg -P /content/images g_ascent.deepdream('/content/images/jay.jpg', conv5_1, 33) ``` #### 4-5. `GradientAscent.optimize`: perform optimization only (no visualization) If no visualization is needed, or if you want to futher customize visualization, you can call the `optimize` method directly. ``` output = g_ascent.optimize(conv5_1, 3) print('num_iter:', len(output)) print('optimized image:', output[-1].shape) ```
github_jupyter
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content-dl/blob/main/projects/ComputerVision/transfer_learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Transfer Learning **By Neuromatch Academy** __Content creators:__ [Jama Hussein Mohamud](https://engmubarak48.github.io/jmohamud/index.html) & [Alex Hernandez-Garcia](https://alexhernandezgarcia.github.io/) __Production editors:__ Saeed Salehi, Spiros Chavlis **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p> --- # Objective One desired capability for machines is the ability to transfer the knowledge (features) learned on one domain to another This can potentially save compute time, enable training when data is scarce, and even improve performance. Unfortunately, there is no single recipe for transfer learning and instead multiple options are possible and much remains to be well understood. In this project, you will explore how transfer learning works in different scenarios. --- # Setup ``` # imports import os import gc import csv import glob import torch import multiprocessing import numpy as np import pandas as pd import torch.nn as nn import matplotlib.pyplot as plt import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn from torch.autograd import Variable import torchvision import torchvision.transforms as transforms # @title Set random seed # @markdown Executing `set_seed(seed=seed)` you are setting the seed # for DL its critical to set the random seed so that students can have a # baseline to compare their results to expected results. # Read more here: https://pytorch.org/docs/stable/notes/randomness.html # Call `set_seed` function in the exercises to ensure reproducibility. 
import random import torch def set_seed(seed=None, seed_torch=True): if seed is None: seed = np.random.choice(2 ** 32) random.seed(seed) np.random.seed(seed) if seed_torch: torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True print(f'Random seed {seed} has been set.') # In case that `DataLoader` is used def seed_worker(worker_id): worker_seed = torch.initial_seed() % 2**32 np.random.seed(worker_seed) random.seed(worker_seed) # @title Set device (GPU or CPU) # inform the user if the notebook uses GPU or CPU. def set_device(): device = "cuda" if torch.cuda.is_available() else "cpu" if device != "cuda": print("WARNING: For this notebook to perform best, " "if possible, in the menu under `Runtime` -> " "`Change runtime type.` select `GPU` ") else: print("GPU is enabled in this notebook.") return device ``` ### Random seeds If you want to obtain reproducible results, it is a good practice to set seeds for the random number generators of the various libraries ``` set_seed(seed=2021) device = set_device() ``` ### Training hyperparameters Here we set some general training hyperparameters such as the learning rate, batch size, etc. as well as other training options such as including data augmentation (`torchvision_transforms`). ``` # hyper-parameters use_cuda = torch.cuda.is_available() best_acc = 0 # best test accuracy start_epoch = 0 # start from epoch 0 or last checkpoint epoch batch_size = 128 max_epochs = 15 # Please change this to 200 max_epochs_target = 10 base_learning_rate = 0.1 torchvision_transforms = True # True/False if you want use torchvision augmentations ``` --- # Data ## Source dataset We will train the source model using CIFAR-100 data set from PyTorch, but with small tweaks we can get any other data we are interested in. 
Note that the data set is normalized by subtracting the mean and dividing by the standard deviation (pre-computed) of the training set.
You can check some of its properties with the following code: ``` print(f"Object type: {type(trainset)}") print(f"Training data shape: {trainset.data.shape}") print(f"Test data shape: {testset.data.shape}") print(f"Number of classes: {np.unique(trainset.targets).shape[0]}") ``` ## Data loaders A dataloader is an optimized data iterator that provides functionality for efficient shuffling, transformation and batching of the data. ``` ##@title Dataloader num_workers = multiprocessing.cpu_count() print(f'----> number of workers: {num_workers}') trainloader = torch.utils.data.DataLoader( trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers) testloader = torch.utils.data.DataLoader( testset, batch_size=batch_size, shuffle=False, num_workers=num_workers) ``` ## Architecture: ResNet ResNet is a family of network architectures whose main property is that the network is organised as a stack of _residual blocks_. Residual blocks consist of a stack of layers whose output is added the input, making a _shortcut connection_. See the [original paper](https://arxiv.org/abs/1512.03385) for more details. ResNet is just a popular choice out of many others, but data augmentation works well in general. We just picked ResNet for illustration purposes. ``` # @title ResNet model in PyTorch class BasicBlock(nn.Module): """ResNet in PyTorch. Reference: [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Deep Residual Learning for Image Recognition. 
arXiv:1512.03385 """ expansion = 1 def __init__(self, in_planes, planes, stride=1): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion*planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion*planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) out += self.shortcut(x) out = F.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, in_planes, planes, stride=1): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(self.expansion*planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion*planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion*planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = F.relu(self.bn2(self.conv2(out))) out = self.bn3(self.conv3(out)) out += self.shortcut(x) out = F.relu(out) return out class ResNet(nn.Module): def __init__(self, block, num_blocks, num_classes=100): super(ResNet, self).__init__() self.in_planes = 64 self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) self.layer2 = 
self._make_layer(block, 128, num_blocks[1], stride=2) self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) self.linear = nn.Linear(512*block.expansion, num_classes) def _make_layer(self, block, planes, num_blocks, stride): strides = [stride] + [1]*(num_blocks-1) layers = [] for stride in strides: layers.append(block(self.in_planes, planes, stride)) self.in_planes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = self.layer4(out) out = F.avg_pool2d(out, 4) out = out.view(out.size(0), -1) out = self.linear(out) return out def ResNet18(): return ResNet(BasicBlock, [2, 2, 2, 2]) def ResNet34(): return ResNet(BasicBlock, [3, 4, 6, 3]) def ResNet50(): return ResNet(Bottleneck, [3, 4, 6, 3]) ``` #### Test on random data ``` # Load the Model net = ResNet18() print('-----> verify if model is run on random data') y = net(Variable(torch.randn(1,3,32,32))) print('model loaded') result_folder = './results/' if not os.path.exists(result_folder): os.makedirs(result_folder) logname = result_folder + net.__class__.__name__ + '_pretrain' + '.csv' if use_cuda: net.cuda() net = torch.nn.DataParallel(net) print('Using', torch.cuda.device_count(), 'GPUs.') cudnn.benchmark = True print('Using CUDA..') ``` ## Set up training ### Set loss function and optimizer We use the cross entropy loss, commonly used for classification, and stochastic gradient descent (SGD) as optimizer, with momentum and weight decay. 
``` # Optimizer and criterion criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=base_learning_rate, momentum=0.9, weight_decay=1e-4) ``` ### Train and test loops ``` # Training & Test functions def train(net, epoch, use_cuda=True): print('\nEpoch: %d' % epoch) net.train() train_loss = 0 correct = 0 total = 0 for batch_idx, (inputs, targets) in enumerate(trainloader): if use_cuda: inputs, targets = inputs.cuda(), targets.cuda() optimizer.zero_grad() inputs, targets = Variable(inputs), Variable(targets) outputs = net(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item() _, predicted = torch.max(outputs.data, 1) total += targets.size(0) correct += predicted.eq(targets.data).cpu().sum() if batch_idx % 500 == 0: print(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) return (train_loss/batch_idx, 100.*correct/total) def test(net, epoch, outModelName, use_cuda=True): global best_acc net.eval() test_loss, correct, total = 0, 0, 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): if use_cuda: inputs, targets = inputs.cuda(), targets.cuda() outputs = net(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted = torch.max(outputs.data, 1) total += targets.size(0) correct += predicted.eq(targets.data).cpu().sum() if batch_idx % 200 == 0: print(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Save checkpoint. acc = 100.*correct/total if acc > best_acc: best_acc = acc checkpoint(net, acc, epoch, outModelName) return (test_loss/batch_idx, 100.*correct/total) ``` ### Auxiliary functions * `checkpoint()`: Store checkpoints of the model * `adjust_learning_rate()`: Decreases the learning rate (learning rate decay) at certain epochs of training. 
``` # checkpoint & adjust_learning_rate def checkpoint(model, acc, epoch, outModelName): # Save checkpoint. print('Saving..') state = { 'state_dict': model.state_dict(), 'acc': acc, 'epoch': epoch, 'rng_state': torch.get_rng_state() } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') torch.save(state, f'./checkpoint/{outModelName}.t7') def adjust_learning_rate(optimizer, epoch): """decrease the learning rate at 100 and 150 epoch""" lr = base_learning_rate if epoch <= 9 and lr > 0.1: # warm-up training for large minibatch lr = 0.1 + (base_learning_rate - 0.1) * epoch / 10. if epoch >= 100: lr /= 10 if epoch >= 150: lr /= 10 for param_group in optimizer.param_groups: param_group['lr'] = lr ``` ### Train the model This is the loop where the model is trained for `max_epochs` epochs. ``` # Start training outModelName = 'pretrain' if not os.path.exists(logname): with open(logname, 'w') as logfile: logwriter = csv.writer(logfile, delimiter=',') logwriter.writerow(['epoch', 'train loss', 'train acc', 'test loss', 'test acc']) for epoch in range(start_epoch, max_epochs): adjust_learning_rate(optimizer, epoch) train_loss, train_acc = train(net, epoch, use_cuda=use_cuda) test_loss, test_acc = test(net, epoch, outModelName, use_cuda=use_cuda) with open(logname, 'a') as logfile: logwriter = csv.writer(logfile, delimiter=',') logwriter.writerow([epoch, train_loss, train_acc.item(), test_loss, test_acc.item()]) print(f'Epoch: {epoch} | train acc: {train_acc} | test acc: {test_acc}') ``` ## Transfer learning ### Re-use the trained model to improve training on a different data set ### Delete variables from the previous model ``` # delete the backbone network delete = True if delete: del net del trainset del testset del trainloader del testloader gc.collect() ``` #### Target dataset We will now use CIFAR-10 as _target_ data set. Again, with small tweaks we can get any other data we are interested in. 
CIFAR-10 is very similar to CIFAR-100, but it contains only 10 classes instead of 100. ``` # Target domain Data print('==> Preparing target domain data..') # CIFAR10 normalizing mean = (0.4914, 0.4822, 0.4465) std = (0.2023, 0.1994, 0.2010) num_classes = 10 lr = 0.0001 # torchvision transforms transform_train = transforms.Compose([]) if torchvision_transforms: transform_train.transforms.append(transforms.RandomCrop(32, padding=4)) transform_train.transforms.append(transforms.RandomHorizontalFlip()) transform_train.transforms.append(transforms.ToTensor()) transform_train.transforms.append(transforms.Normalize(mean, std)) transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean, std), ]) trainset = torchvision.datasets.CIFAR10( root='./CIFAR10', train=True, download=True, transform=transform_train) testset = torchvision.datasets.CIFAR10( root='./CIFAR10', train=False, download=True, transform=transform_test) ``` #### Select a subset of the data To simulate a lower data regime, where transfer learning can be useful. Choose percentage from the trainset. 
Set `percent = 1.0` to use the whole train data ``` percent = 0.6 trainset = percentageSplit(trainset, percent = percent) print('size of the new trainset: ', len(trainset)) ``` #### Dataloaders As before ``` # Dataloader num_workers = multiprocessing.cpu_count() print(f'----> number of workers: {num_workers}') trainloader = torch.utils.data.DataLoader( trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers) testloader = torch.utils.data.DataLoader( testset, batch_size=batch_size, shuffle=False, num_workers=num_workers) ``` ### Load pre-trained model Load the checkpoint of the model previously trained on CIFAR-100 ``` model = ResNet18() checkpointPath = '/content/checkpoint/pretrain.t7' print(' ===> loading pretrained model from: ', checkpointPath) if os.path.isfile(checkpointPath): state_dict = torch.load(checkpointPath) best_acc = state_dict['acc'] print('Best Accuracy:', best_acc) if "state_dict" in state_dict: state_dict = state_dict["state_dict"] # remove prefixe "module." state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()} for k, v in model.state_dict().items(): if k not in list(state_dict): print('key "{}" could not be found in provided state dict'.format(k)) elif state_dict[k].shape != v.shape: print('key "{}" is of different shape in model and provided state dict'.format(k)) state_dict[k] = v msg = model.load_state_dict(state_dict, strict=False) print("Load pretrained model with msg: {}".format(msg)) else: raise Exception('No pretrained weights found') ``` ### Freeze model parameters In transfer learning, we usually do not re-train all the weights of the model, but only a subset of them, for instance the last layer. Here we first _freeze_ all the parameters of the model, and we will _unfreeze_ one layer below. 
``` # Freeze the model parameters, you can also freeze some layers only for param in model.parameters(): param.requires_grad = False ``` ### Loss function, optimizer and _unfreeze_ last layer ``` num_ftrs = model.linear.in_features model.linear = nn.Linear(num_ftrs, num_classes) model.to(device) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD( model.linear.parameters(), lr=lr, momentum=0.9, weight_decay=1e-4, ) ``` #### Check number of parameters We can calculate the number of total parameters and the number of trainable parameters, that is those that will be updated during training. Since we have freezed most of the parameters, the number of training parameters should be much smaller. ``` total_params = sum(p.numel() for p in model.parameters()) trainable_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) print('Total Parameters:', total_params, 'Trainable parameters: ', trainable_total_params) ``` ### Train the target model ``` outModelName = 'finetuned' logname = result_folder + model.__class__.__name__ + f'_{outModelName}.csv' if not os.path.exists(logname): with open(logname, 'w') as logfile: logwriter = csv.writer(logfile, delimiter=',') logwriter.writerow(['epoch', 'train loss', 'train acc', 'test loss', 'test acc']) for epoch in range(start_epoch, max_epochs_target): adjust_learning_rate(optimizer, epoch) train_loss, train_acc = train(model, epoch, use_cuda=use_cuda) test_loss, test_acc = test(model, epoch, outModelName, use_cuda=use_cuda) with open(logname, 'a') as logfile: logwriter = csv.writer(logfile, delimiter=',') logwriter.writerow([epoch, train_loss, train_acc.item(), test_loss, test_acc.item()]) print(f'Epoch: {epoch} | train acc: {train_acc} | test acc: {test_acc}') ``` ## Plot results ``` # title plot results results = pd.read_csv(f'/content/results/ResNet_{outModelName}.csv', sep =',') results.head() train_accuracy = results['train acc'].values test_accuracy = results['test acc'].values print(f'Average 
Accuracy over {max_epochs_target} epochs:', sum(test_accuracy)//len(test_accuracy)) print(f'best accuraccy over {max_epochs_target} epochs:', max(test_accuracy)) figureName = 'figure' # change figure name plt.plot(results['epoch'].values, train_accuracy, label='train') plt.plot(results['epoch'].values, test_accuracy, label='test') plt.xlabel('Number of epochs') plt.ylabel('Accuracy') plt.title(f'Train/Test Accuracy curve for {max_epochs} epochs') plt.savefig(f'/content/results/{figureName}.png') plt.legend() plt.show() ```
github_jupyter
<!--BOOK_INFORMATION--> <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png"> *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).* *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* <!--NAVIGATION--> < [Operating on Data in Pandas](03.03-Operations-in-Pandas.ipynb) | [Contents](Index.ipynb) | [Hierarchical Indexing](03.05-Hierarchical-Indexing.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/03.04-Missing-Values.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a> # Handling Missing Data The difference between data found in many tutorials and data in the real world is that real-world data is rarely clean and homogeneous. In particular, many interesting datasets will have some amount of data missing. To make matters even more complicated, different data sources may indicate missing data in different ways. In this section, we will discuss some general considerations for missing data, discuss how Pandas chooses to represent it, and demonstrate some built-in Pandas tools for handling missing data in Python. Here and throughout the book, we'll refer to missing data in general as *null*, *NaN*, or *NA* values. ## Trade-Offs in Missing Data Conventions There are a number of schemes that have been developed to indicate the presence of missing data in a table or DataFrame. 
Generally, they revolve around one of two strategies: using a *mask* that globally indicates missing values, or choosing a *sentinel value* that indicates a missing entry. In the masking approach, the mask might be an entirely separate Boolean array, or it may involve appropriation of one bit in the data representation to locally indicate the null status of a value. In the sentinel approach, the sentinel value could be some data-specific convention, such as indicating a missing integer value with -9999 or some rare bit pattern, or it could be a more global convention, such as indicating a missing floating-point value with NaN (Not a Number), a special value which is part of the IEEE floating-point specification. None of these approaches is without trade-offs: use of a separate mask array requires allocation of an additional Boolean array, which adds overhead in both storage and computation. A sentinel value reduces the range of valid values that can be represented, and may require extra (often non-optimized) logic in CPU and GPU arithmetic. Common special values like NaN are not available for all data types. As in most cases where no universally optimal choice exists, different languages and systems use different conventions. For example, the R language uses reserved bit patterns within each data type as sentinel values indicating missing data, while the SciDB system uses an extra byte attached to every cell which indicates a NA state. ## Missing Data in Pandas The way in which Pandas handles missing values is constrained by its reliance on the NumPy package, which does not have a built-in notion of NA values for non-floating-point data types. Pandas could have followed R's lead in specifying bit patterns for each individual data type to indicate nullness, but this approach turns out to be rather unwieldy. 
While R contains four basic data types, NumPy supports *far* more than this: for example, while R has a single integer type, NumPy supports *fourteen* basic integer types once you account for available precisions, signedness, and endianness of the encoding. Reserving a specific bit pattern in all available NumPy types would lead to an unwieldy amount of overhead in special-casing various operations for various types, likely even requiring a new fork of the NumPy package. Further, for the smaller data types (such as 8-bit integers), sacrificing a bit to use as a mask will significantly reduce the range of values it can represent. NumPy does have support for masked arrays – that is, arrays that have a separate Boolean mask array attached for marking data as "good" or "bad." Pandas could have derived from this, but the overhead in both storage, computation, and code maintenance makes that an unattractive choice. With these constraints in mind, Pandas chose to use sentinels for missing data, and further chose to use two already-existing Python null values: the special floating-point ``NaN`` value, and the Python ``None`` object. This choice has some side effects, as we will see, but in practice ends up being a good compromise in most cases of interest. ### ``None``: Pythonic missing data The first sentinel value used by Pandas is ``None``, a Python singleton object that is often used for missing data in Python code. Because it is a Python object, ``None`` cannot be used in any arbitrary NumPy/Pandas array, but only in arrays with data type ``'object'`` (i.e., arrays of Python objects): ``` import numpy as np import pandas as pd vals1 = np.array([1, None, 3, 4]) vals1 ``` This ``dtype=object`` means that the best common type representation NumPy could infer for the contents of the array is that they are Python objects. 
While this kind of object array is useful for some purposes, any operations on the data will be done at the Python level, with much more overhead than the typically fast operations seen for arrays with native types: ``` for dtype in ['object', 'int']: print("dtype =", dtype) %timeit np.arange(1E6, dtype=dtype).sum() print() ``` The use of Python objects in an array also means that if you perform aggregations like ``sum()`` or ``min()`` across an array with a ``None`` value, you will generally get an error: ``` vals1.sum() ``` This reflects the fact that addition between an integer and ``None`` is undefined. ### ``NaN``: Missing numerical data The other missing data representation, ``NaN`` (acronym for *Not a Number*), is different; it is a special floating-point value recognized by all systems that use the standard IEEE floating-point representation: ``` vals2 = np.array([1, np.nan, 3, 4]) vals2.dtype ``` Notice that NumPy chose a native floating-point type for this array: this means that unlike the object array from before, this array supports fast operations pushed into compiled code. You should be aware that ``NaN`` is a bit like a data virus–it infects any other object it touches. Regardless of the operation, the result of arithmetic with ``NaN`` will be another ``NaN``: ``` 1 + np.nan 0 * np.nan ``` Note that this means that aggregates over the values are well defined (i.e., they don't result in an error) but not always useful: ``` vals2.sum(), vals2.min(), vals2.max() ``` NumPy does provide some special aggregations that will ignore these missing values: ``` np.nansum(vals2), np.nanmin(vals2), np.nanmax(vals2) ``` Keep in mind that ``NaN`` is specifically a floating-point value; there is no equivalent NaN value for integers, strings, or other types. 
### NaN and None in Pandas ``NaN`` and ``None`` both have their place, and Pandas is built to handle the two of them nearly interchangeably, converting between them where appropriate: ``` pd.Series([1, np.nan, 2, None]) ``` For types that don't have an available sentinel value, Pandas automatically type-casts when NA values are present. For example, if we set a value in an integer array to ``np.nan``, it will automatically be upcast to a floating-point type to accommodate the NA: ``` x = pd.Series(range(2), dtype=int) x x[0] = None x ``` Notice that in addition to casting the integer array to floating point, Pandas automatically converts the ``None`` to a ``NaN`` value. (Be aware that there is a proposal to add a native integer NA to Pandas in the future; as of this writing, it has not been included). While this type of magic may feel a bit hackish compared to the more unified approach to NA values in domain-specific languages like R, the Pandas sentinel/casting approach works quite well in practice and in my experience only rarely causes issues. The following table lists the upcasting conventions in Pandas when NA values are introduced: |Typeclass | Conversion When Storing NAs | NA Sentinel Value | |--------------|-----------------------------|------------------------| | ``floating`` | No change | ``np.nan`` | | ``object`` | No change | ``None`` or ``np.nan`` | | ``integer`` | Cast to ``float64`` | ``np.nan`` | | ``boolean`` | Cast to ``object`` | ``None`` or ``np.nan`` | Keep in mind that in Pandas, string data is always stored with an ``object`` dtype. ## Operating on Null Values As we have seen, Pandas treats ``None`` and ``NaN`` as essentially interchangeable for indicating missing or null values. To facilitate this convention, there are several useful methods for detecting, removing, and replacing null values in Pandas data structures. 
They are: - ``isnull()``: Generate a boolean mask indicating missing values - ``notnull()``: Opposite of ``isnull()`` - ``dropna()``: Return a filtered version of the data - ``fillna()``: Return a copy of the data with missing values filled or imputed We will conclude this section with a brief exploration and demonstration of these routines. ### Detecting null values Pandas data structures have two useful methods for detecting null data: ``isnull()`` and ``notnull()``. Either one will return a Boolean mask over the data. For example: ``` data = pd.Series([1, np.nan, 'hello', None]) data.isnull() ``` As mentioned in [Data Indexing and Selection](03.02-Data-Indexing-and-Selection.ipynb), Boolean masks can be used directly as a ``Series`` or ``DataFrame`` index: ``` data[data.notnull()] ``` The ``isnull()`` and ``notnull()`` methods produce similar Boolean results for ``DataFrame``s. ### Dropping null values In addition to the masking used before, there are the convenience methods, ``dropna()`` (which removes NA values) and ``fillna()`` (which fills in NA values). For a ``Series``, the result is straightforward: ``` data.dropna() ``` For a ``DataFrame``, there are more options. Consider the following ``DataFrame``: ``` df = pd.DataFrame([[1, np.nan, 2], [2, 3, 5], [np.nan, 4, 6]]) df ``` We cannot drop single values from a ``DataFrame``; we can only drop full rows or full columns. Depending on the application, you might want one or the other, so ``dropna()`` gives a number of options for a ``DataFrame``. By default, ``dropna()`` will drop all rows in which *any* null value is present: ``` df.dropna() ``` Alternatively, you can drop NA values along a different axis; ``axis=1`` drops all columns containing a null value: ``` df.dropna(axis='columns') ``` But this drops some good data as well; you might rather be interested in dropping rows or columns with *all* NA values, or a majority of NA values. 
This can be specified through the ``how`` or ``thresh`` parameters, which allow fine control of the number of nulls to allow through. The default is ``how='any'``, such that any row or column (depending on the ``axis`` keyword) containing a null value will be dropped. You can also specify ``how='all'``, which will only drop rows/columns that are *all* null values: ``` df[3] = np.nan df df.dropna(axis='columns', how='all') ``` For finer-grained control, the ``thresh`` parameter lets you specify a minimum number of non-null values for the row/column to be kept: ``` df.dropna(axis='rows', thresh=3) ``` Here the first and last row have been dropped, because they contain only two non-null values. ### Filling null values Sometimes rather than dropping NA values, you'd rather replace them with a valid value. This value might be a single number like zero, or it might be some sort of imputation or interpolation from the good values. You could do this in-place using the ``isnull()`` method as a mask, but because it is such a common operation Pandas provides the ``fillna()`` method, which returns a copy of the array with the null values replaced. Consider the following ``Series``: ``` data = pd.Series([1, np.nan, 2, None, 3], index=list('abcde')) data ``` We can fill NA entries with a single value, such as zero: ``` data.fillna(0) ``` We can specify a forward-fill to propagate the previous value forward: ``` # forward-fill data.fillna(method='ffill') ``` Or we can specify a back-fill to propagate the next values backward: ``` # back-fill data.fillna(method='bfill') ``` For ``DataFrame``s, the options are similar, but we can also specify an ``axis`` along which the fills take place: ``` df df.fillna(method='ffill', axis=1) ``` Notice that if a previous value is not available during a forward fill, the NA value remains. 
<!--NAVIGATION--> < [Operating on Data in Pandas](03.03-Operations-in-Pandas.ipynb) | [Contents](Index.ipynb) | [Hierarchical Indexing](03.05-Hierarchical-Indexing.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/03.04-Missing-Values.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
github_jupyter
# Gaussian processes A common applied statistics task involves building regression models to characterize non-linear relationships between variables. It is possible to fit such models by assuming a particular non-linear functional form, such as a sinusoidal, exponential, or polynomial function, to describe one variable's response to the variation in another. Unless this relationship is obvious from the outset, however, it involves possibly extensive model selection procedures to ensure the most appropriate model is retained. Alternatively, a **non-parametric** approach can be adopted by defining a set of knots across the variable space and using a spline or kernel regression to describe arbitrary non-linear relationships. However, knot layout procedures are somewhat ad hoc and can also involve variable selection. A third alternative is to adopt a **Bayesian** non-parametric strategy, and directly model the unknown underlying function. For this, we can employ **Gaussian process** models. ## Building models with Gaussians What if we chose to use Gaussian distributions to model our data? $$p(x \mid \mu, \Sigma) = (2\pi)^{-k/2}|\Sigma|^{-1/2} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}\Sigma^{-1}(x-\mu) \right\}$$ There would not seem to be an advantage to doing this, because normal distributions are not particularly flexible distributions in and of themselves. However, adopting a set of Gaussians (a multivariate normal vector) confers a number of advantages. 
### Marginalization First, the marginal distribution of any subset of elements from a multivariate normal distribution is also normal: $$p(x,y) = \mathcal{N}\left(\left[{ \begin{array}{c} {\mu_x} \\ {\mu_y} \\ \end{array} }\right], \left[{ \begin{array}{cc} {\Sigma_x} & {\Sigma_{xy}} \\ {\Sigma_{xy}^T} & {\Sigma_y} \\ \end{array} }\right]\right)$$ $$p(x) = \int p(x,y) dy = \mathcal{N}(\mu_x, \Sigma_x)$$ ### Conditioning Also, conditional distributions of a subset of a multivariate normal distribution (conditional on the remaining elements) are normal too: $$p(x|y) = \mathcal{N}(\mu_x + \Sigma_{xy}\Sigma_y^{-1}(y-\mu_y), \Sigma_x-\Sigma_{xy}\Sigma_y^{-1}\Sigma_{xy}^T)$$ A Gaussian process generalizes the multivariate normal to infinite dimension. It is defined as an infinite collection of random variables, any finite subset of which has a Gaussian distribution. Thus, the marginalization property is explicit in its definition. Another way of thinking about an infinite vector is as a *function*. When we write a function that takes continuous values as inputs, we are essentially specifying an infinite vector that only returns values (indexed by the inputs) when the function is called upon to do so. By the same token, this notion of an infinite-dimensional Gaussian as a function allows us to work with them computationally: we are never required to store all the elements of the Gaussian process, only to calculate them on demand. So, we can describe a Gaussian process as a ***distribution over functions***. Just as a multivariate normal distribution is completely specified by a mean vector and covariance matrix, a GP is fully specified by a mean *function* and a covariance *function*: $$p(x) \sim \mathcal{GP}(m(x), k(x,x^{\prime}))$$ It is the marginalization property that makes working with a Gaussian process feasible: we can marginalize over the infinitely-many variables that we are not interested in, or have not observed. 
For example, one specification of a GP might be as follows: $$\begin{aligned} m(x) &=0 \\ k(x,x^{\prime}) &= \theta_1\exp\left(-\frac{\theta_2}{2}(x-x^{\prime})^2\right) \end{aligned}$$ Here, the covariance function is a **squared exponential**, for which values of $x$ and $x^{\prime}$ that are close together result in values of $k$ closer to 1 and those that are far apart return values closer to zero. It may seem odd to simply adopt the zero function to represent the mean function of the Gaussian process -- surely we can do better than that! It turns out that most of the learning in the GP involves the covariance function and its parameters, so very little is gained in specifying a complicated mean function. For a finite number of points, the GP becomes a multivariate normal, with the mean and covariance as the mean function and covariance function evaluated at those points. ## Sampling from a Gaussian Process To make this notion of a "distribution over functions" more concrete, let's quickly demonstrate how we obtain realizations from a Gaussian process, which result in an evaluation of a function over a set of points. All we will do here is sample from the *prior* Gaussian process, so before any data have been introduced. What we need first is our covariance function, which will be the squared exponential, and a function to evaluate the covariance at given points (resulting in a covariance matrix). ``` %matplotlib inline import numpy as np import pandas as pd import seaborn as sns import matplotlib.pylab as plt np.random.seed(42) ``` We are going to generate realizations sequentially, point by point, using the lovely conditioning property of multivariate Gaussian distributions. 
Here is that conditional: $$p(x|y) = \mathcal{N}(\mu_x + \Sigma_{xy}\Sigma_y^{-1}(y-\mu_y), \Sigma_x-\Sigma_{xy}\Sigma_y^{-1}\Sigma_{xy}^T)$$ And this is the function that implements it: ``` def exponential_cov(x, y, params): return params[0] * np.exp( -0.5 * params[1] * np.subtract.outer(x, y)**2) def conditional(x_new, x, y, params): B = exponential_cov(x_new, x, params) C = exponential_cov(x, x, params) A = exponential_cov(x_new, x_new, params) mu = np.linalg.inv(C).dot(B.T).T.dot(y) sigma = A - B.dot(np.linalg.inv(C).dot(B.T)) return(mu.squeeze(), sigma.squeeze()) ``` We will start with a Gaussian process prior with hyperparameters $\theta_0=1, \theta_1=10$. We will also assume a zero function as the mean, so we can plot a band that represents one standard deviation from the mean. ``` θ = [1, 10] σ_0 = exponential_cov(0, 0, θ) xpts = np.arange(-3, 3, step=0.01) plt.errorbar(xpts, np.zeros(len(xpts)), yerr=σ_0, capsize=0) plt.ylim(-3, 3); ``` Let's select an arbitrary starting point to sample, say $x=1$. Since there are no previous points, we can sample from an unconditional Gaussian: ``` x = [1.] y = [np.random.normal(scale=σ_0)] y ``` We can now update our confidence band, given the point that we just sampled, using the covariance function to generate new point-wise intervals, conditional on the value $[x_0, y_0]$. 
``` σ_1 = exponential_cov(x, x, θ) def predict(x, data, kernel, params, sigma, t): k = [kernel(x, y, params) for y in data] Sinv = np.linalg.inv(sigma) y_pred = np.dot(k, Sinv).dot(t) sigma_new = kernel(x, x, params) - np.dot(k, Sinv).dot(k) return y_pred, sigma_new x_pred = np.linspace(-3, 3, 1000) predictions = [predict(i, x, exponential_cov, θ, σ_1, y) for i in x_pred] y_pred, sigmas = np.transpose(predictions) plt.errorbar(x_pred, y_pred, yerr=sigmas, capsize=0) plt.plot(x, y, "ro") plt.xlim(-3, 3); plt.ylim(-3, 3); ``` So conditional on this point, and the covariance structure we have specified, we have essentially constrained the probable location of additional points. Let's now sample another: ``` m, s = conditional([-0.7], x, y, θ) y2 = np.random.normal(m, s) y2 ``` This point is added to the realization, and can be used to further update the location of the next point. ``` x.append(-0.7) y.append(y2) σ_2 = exponential_cov(x, x, θ) predictions = [predict(i, x, exponential_cov, θ, σ_2, y) for i in x_pred] y_pred, sigmas = np.transpose(predictions) plt.errorbar(x_pred, y_pred, yerr=sigmas, capsize=0) plt.plot(x, y, "ro") plt.xlim(-3, 3); plt.ylim(-3, 3); ``` Of course, sampling sequentially is just a heuristic to demonstrate how the covariance structure works. We can just as easily sample several points at once: ``` x_more = [-2.1, -1.5, 0.3, 1.8, 2.5] mu, s = conditional(x_more, x, y, θ) y_more = np.random.multivariate_normal(mu, s) y_more x += x_more y += y_more.tolist() σ_new = exponential_cov(x, x, θ) predictions = [predict(i, x, exponential_cov, θ, σ_new, y) for i in x_pred] y_pred, sigmas = np.transpose(predictions) plt.errorbar(x_pred, y_pred, yerr=sigmas, capsize=0) plt.plot(x, y, "ro") plt.ylim(-3, 3); ``` So as the density of points becomes high, the result will be one realization (function) from the prior GP. 
# Fitting Gaussian Processes in Python Though it's entirely possible to extend the code above to introduce data and fit a Gaussian process by hand, there are a number of libraries available for specifying and fitting GP models in a more automated way. Some of these include: - [scikit-learn](http://scikit-learn.org/stable/modules/gaussian_process.html) - [GPflow](http://gpflow.readthedocs.io/en/latest/intro.html) - [PyMC3](https://pymc-devs.github.io/pymc3/) In particular, each of these packages includes covariance functions that can be flexibly combined to adequately describe the patterns of non-linearity in the data, along with methods for fitting the parameters of the GP. We will use some simulated data as a test case for comparing the performance of each package. I don't actually recall where I found this data, so I have no details regarding how it was generated. However, it clearly shows some type of non-linear process, corrupted by a certain amount of observation or measurement error, so it should be a reasonable task for a Gaussian process approach. ``` %run get_data.py sns.regplot(x, y, fit_reg=False) ``` ## Gaussian processes in `scikit-learn` `scikit-learn` is the premier machine learning package for Python. It provides a comprehensive set of supervised and unsupervised learning algorithms, implemented under a consistent API that makes your entire modeling pipeline (from data preparation through output summarization) as frictionless as possible. Included among its library of tools is a Gaussian process module, which recently underwent a complete revision (as of version 0.18). Consistent with the implementation of other machine learning methods in `scikit-learn`, the appropriate interface for using GPs depends on the type of task to which it is being applied. For regression tasks, where we are predicting a continuous response variable, a `GaussianProcessRegressor` is applied by specifying an appropriate covariance function, or **kernel**. 
Fitting proceeds by maximizing the log of the marginal likelihood, a convenient approach for Gaussian processes that avoids the computationally-intensive cross-validation strategy that is usually employed in choosing optimal hyperparameters for the model. The `GaussianProcessRegressor` does not allow for the specification of the mean function, always assuming it to be the zero function, highlighting the diminished role of the mean function in calculating the posterior. For classification tasks, where the output variable is categorical (or binary), the `GaussianProcessClassifier` is used. This may seem incongruous, using normal distributions to fit categorical data, but is accommodated by using a latent Gaussian response variable and then transforming it to the unit interval (or more generally for more than two outcome classes, a simplex). The result of this is a soft, probabilistic classification rather than a hard classification that is common in machine learning algorithms. Similar to the regression setting, the user chooses an appropriate kernel to describe the type of covariance expected in the dataset. Since the posterior of this GP is non-normal, a Laplace approximation is used to obtain a solution, rather than maximizing the marginal likelihood. ``` from sklearn import gaussian_process from sklearn.gaussian_process.kernels import Matern, WhiteKernel, ConstantKernel ``` `scikit-learn` offers a library of about a dozen covariance functions, which they call *kernels*, to choose from. A flexible choice to start with is the Mat&#232;rn covariance. $$k_{M}(x) = \frac{\sigma^2}{\Gamma(\nu)2^{\nu-1}} \left(\frac{\sqrt{2 \nu} x}{l}\right)^{\nu} K_{\nu}\left(\frac{\sqrt{2 \nu} x}{l}\right)$$ where $\Gamma$ is the gamma function and $K$ is a modified Bessel function. The form of covariance matrices sampled from this function is governed by three parameters, each of which controls a property of the covariance. 
* **amplitude** ($\sigma$) controls the scaling of the output along the y-axis. This parameter is just a scalar multiplier, and is therefore usually left out of implementations of the Mat&#232;rn function (*i.e.* set to one) * **lengthscale** ($l$) complements the amplitude by scaling realizations on the x-axis. Larger values make points appear closer together. * **roughness** ($\nu$) controls the sharpness of ridges in the covariance function, which ultimately affect the roughness (smoothness) of realizations. Though in general all the parameters are non-negative real-valued, when $\nu = p + 1/2$ for integer-valued $p$, the function can be expressed partly as a polynomial function of order $p$ and generates realizations that are $p$-times differentiable, so values $\nu \in \{3/2, 5/2\}$ are extremely common. A GP kernel can be specified as the sum of additive components in `scikit-learn` simply by using the sum operator, so we can include a Mat&#232;rn component (`Matern`), an amplitude factor (`ConstantKernel`), as well as an observation noise (`WhiteKernel`): ``` kernel = ConstantKernel() + Matern(length_scale=2, nu=3/2) + WhiteKernel(noise_level=1) ``` As mentioned, the `scikit-learn` API is very consistent across learning methods, and as a result, all functions expect a tabular set of input variables, either as a 2-dimensional NumPy `array` or a pandas `DataFrame`. Since we have only a single input variable here, we can add a second dimension using the `reshape` method: ``` X = x.reshape(-1, 1) X.shape ``` Finally, we instantiate a `GaussianProcessRegressor` object with our custom kernel, and call its `fit` method, passing the input (`X`) and output (`y`) arrays. ``` gp = gaussian_process.GaussianProcessRegressor(kernel=kernel) gp.fit(X, y) ``` Conveniently, `scikit-learn` displays the configuration that is used for the fitting algorithm each time one of its classes is instantiated. 
This is useful because it reveals hidden settings that are given default values if not specified by the user; these settings can often influence the resulting fit, so it's important that we understand what `fit` has assumed on our behalf. Here, for example, we see that the L-BFGS-B algorithm has been used to optimize the hyperparameters (`optimizer='fmin_l_bfgs_b'`) and that the output variable has not been normalized (`normalize_y=False`). When there is a fear of finding a local, rather than a global, maximum in the marginal likelihood, a non-zero value can be specified for `n_restarts_optimizer`, which will run the optimization algorithm as many times as specified, using randomly-chosen starting coordinates, in the hope that a globally-competitive value can be discovered. The `fit` method endows the returned model object with attributes associated with the fitting procedure; these attributes will all have an underscore (`_`) appended to their names. For example, the `kernel_` attribute will return the kernel used to parameterize the GP, along with the corresponding optimal hyperparameter values: ``` gp.kernel_ ``` Along with the `fit` method, each supervised learning class retains a `predict` method that generates predicted outcomes ($y^*$) given a new set of predictors ($X^*$) distinct from those used to fit the model. For a Gaussian process, this is fulfilled by the *posterior predictive distribution*, which is the Gaussian process with the mean and covariance functions updated to their posterior forms, after having been fit. 
$$p(y^*|y, x, x^*) = \mathcal{GP}(m^*(x^*), k^*(x^*))$$ where the posterior mean and covariance functions are calculated as: $$\begin{aligned} m^*(x^*) &= k(x^*,x)^T[k(x,x) + \sigma^2I]^{-1}y \\ k^*(x^*) &= k(x^*,x^*)+\sigma^2 - k(x^*,x)^T[k(x,x) + \sigma^2I]^{-1}k(x^*,x) \end{aligned}$$ ``` x_pred = np.linspace(-6, 6).reshape(-1,1) y_pred, sigma = gp.predict(x_pred, return_std=True) ``` Notice that we can calculate a prediction for arbitrary inputs $X^*$. To get a sense of the form of the posterior over a range of likely inputs, we can pass it a linear space as we have done above. `predict` optionally returns posterior standard deviations along with the expected value, so we can use this to plot a confidence region around the expected function: ``` plt.figure(figsize=(10,8)) sns.regplot(x, y, fit_reg=False, label='Data') plt.plot(x_pred, y_pred, color='grey', label='Prediction') plt.fill(np.concatenate([x_pred, x_pred[::-1]]), np.concatenate([y_pred - 2*sigma, (y_pred + 2*sigma)[::-1]]), alpha=.5, fc='grey', ec='None', label='95% CI') plt.xlabel('$x$') plt.ylabel('$f(x)$') plt.xlim(-6, 6) plt.ylim(-3, 3) plt.legend(loc='lower left'); ``` ## Example: Nashville daily temperatures The file `TNNASHVI.txt` in your data directory contains daily temperature readings for Nashville, courtesy of the [Average Daily Temperature Archive](http://academic.udayton.edu/kissock/http/Weather/). This data, as one would expect, oscillates annually. Use a Gaussian process to fit a regression model to this data. ``` daily_temps = pd.read_table("../data/TNNASHVI.txt", sep='\s+', names=['month','day','year','temp'], na_values=-99) temps_2010 = daily_temps.temp[daily_temps.year>2010] temps_2010.plot(style='b.', figsize=(10,6), grid=False) ``` For your reference, all of the covariance functions for GPs in `scikit-learn` [are available here](http://scikit-learn.org/stable/modules/gaussian_process.html#kernels-for-gaussian-processes). 
``` # Write your answer here ``` --- ## References [Rasmussen, C. E., & Williams, C. K. I. (2005). Gaussian Processes for Machine Learning (Adaptive Computation and Machine Learning series). The MIT Press.](http://www.amazon.com/books/dp/026218253X)
github_jupyter
### There are two types of supervised machine learning algorithms: Regression and classification. The former predicts continuous value outputs while the latter predicts discrete outputs. For instance, predicting the price of a house in dollars is a regression problem whereas predicting whether a tumor is malignant or benign is a classification problem. ### In this, we will briefly study what linear regression is and how it can be implemented for both two variables and multiple variables using Scikit-Learn, which is one of the most popular machine learning libraries for Python. # Linear Regression Theory ### The term “linearity” in algebra refers to a linear relationship between two or more variables. If we draw this relationship in a two-dimensional space (between two variables), we get a straight line. ### Linear regression performs the task to predict a dependent variable value (y) based on a given independent variable (x). So, this regression technique finds out a linear relationship between x (input) and y(output). Hence, the name is Linear Regression. If we plot the independent variable (x) on the x-axis and dependent variable (y) on the y-axis, linear regression gives us a straight line that best fits the data points, as shown in the figure below. ### We know that the equation of a straight line is basically: <img src="images/1.png" > ### The equation of the above line is : ## Y= mx + b ### Where b is the intercept and m is the slope of the line. So basically, the linear regression algorithm gives us the most optimal value for the intercept and the slope (in two dimensions). The y and x variables remain the same, since they are the data features and cannot be changed. The values that we can control are the intercept(b) and slope(m). There can be multiple straight lines depending upon the values of intercept and slope. 
Basically what the linear regression algorithm does is it fits multiple lines on the data points and returns the line that results in the least error. -------------------------- ### This same concept can be extended to cases where there are more than two variables. This is called multiple linear regression. For instance, consider a scenario where you have to predict the price of the house based upon its area, number of bedrooms, the average income of the people in the area, the age of the house, and so on. In this case, the dependent variable (target variable) is dependent upon several independent variables. A regression model involving multiple variables can be represented as: ## y = b0 + m1x1 + m2x2 + m3x3 + … + mnxn ### This is the equation of a hyperplane. Remember, a linear regression model in two dimensions is a straight line; in three dimensions it is a plane, and in more than three dimensions, a hyperplane. ### In this section, we will see how Python’s Scikit-Learn library for machine learning can be used to implement regression functions. We will start with simple linear regression involving two variables and then we will move towards linear regression involving multiple variables. --- # 1. Simple Linear Regression --- <img src="images/2.gif" > --- # 2. Multiple Linear Regression --- <img src="images/6.png"> ### We just performed linear regression in the above section involving two variables. Almost all the real-world problems that you are going to encounter will have more than two variables. Linear regression involving multiple variables is called “multiple linear regression” or multivariate linear regression. The steps to perform multiple linear regression are almost similar to that of simple linear regression. The difference lies in the evaluation. You can use it to find out which factor has the highest impact on the predicted output and how different variables relate to each other. 
--- # Final Step findout ERROR in Model(Algorithm) --- ### The final step is to evaluate the performance of the algorithm. This step is particularly important to compare how well different algorithms perform on a particular dataset. For regression algorithms, three evaluation metrics are commonly used: ## 1. Mean Absolute Error (MAE) : ### (MAE) is the mean of the absolute value of the errors. It is calculated as: <img src="images/3.png" > ## 2. Mean Squared Error (MSE) : ### (MSE) is the mean of the squared errors and is calculated as: <img src="images/4.png"> ## 3. Root Mean Squared Error (RMSE) : ### (RMSE) is the square root of the mean of the squared errors: <img src="images/5.gif">
github_jupyter
# Explicit 5D Benchmarks This file demonstrates how to generate, plot, and output data for 1d benchmarks Choose from: 1. Korns_01 1. Korns_02 1. Korns_03 1. Korns_04 1. Korns_05 1. Korns_06 1. Korns_07 1. Korns_08 1. Korns_09 1. Korns_10 1. Korns_11 1. Korns_12 1. Korns_13 1. Korns_14 1. Korns_15 ### Imports ``` from pypge.benchmarks import explicit import numpy as np # visualization libraries import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib import gridspec # plot the visuals in ipython %matplotlib inline ``` ### Generate the data with noise ``` # Set your output directories img_dir = "../img/benchmarks/explicit/" data_dir = "../data/benchmarks/explicit/" # used for plotting manual_scale = True ymin = -2000 ymax = 2000 do_enable = False xs_params = [ (-3.14,3.14), (-3.14,3.14), (0.001,1000), (-3.14,3.14), (-3.14,3.14) ] # choose your problem here prob = explicit.Korns_15(noise=1.0, npts=4000, xs_params=xs_params) # you can also specify the following params as keyword arguments # # params = { # 'name': "Koza_01", # 'xs_str': ["x"], # 'eqn_str': "x**4 + x**3 + x**2 + x", # 'xs_params': [ (-4.0,4.0) ], # 'npts': 200, # 'noise': 1.0 # } # or make your own with the following # # explicit.Explicit_1D(params): ``` ### Plot inline and save image ``` print prob['name'], prob['eqn'] print prob['xpts'].shape xs = prob['xpts'][0] ys = prob['xpts'][1] zs = prob['xpts'][2] vs = prob['xpts'][3] ws = prob['xpts'][4] Ys = prob['ypure'] fig = plt.figure() fig.set_size_inches(16, 20) gs = gridspec.GridSpec(5, 2) fig.suptitle(prob['name'] + " Clean", fontsize=36) ax0 = fig.add_subplot(gs[0,:]) ax0.scatter(xs, Ys, marker='.') ax0.set_xlabel('X') ax0.set_ylabel('OUT') if manual_scale: plt.autoscale(enable=do_enable) plt.ylim(ymin,ymax) ax1 = fig.add_subplot(gs[1,:]) ax1.scatter(ys, Ys, marker='.') ax1.set_xlabel('Y') ax1.set_ylabel('OUT') if manual_scale: plt.autoscale(enable=do_enable) plt.ylim(ymin,ymax) ax2 = fig.add_subplot(gs[2,:]) 
ax2.scatter(zs, Ys, marker='.') ax2.set_xlabel('Z') ax2.set_ylabel('OUT') if manual_scale: plt.autoscale(enable=do_enable) plt.ylim(ymin,ymax) ax3 = fig.add_subplot(gs[3,:]) ax3.scatter(vs, Ys, marker='.') ax3.set_xlabel('V') ax3.set_ylabel('OUT') if manual_scale: plt.autoscale(enable=do_enable) plt.ylim(ymin,ymax) ax4 = fig.add_subplot(gs[4,:]) ax4.scatter(ws, Ys, marker='.') ax4.set_xlabel('W') ax4.set_ylabel('OUT') if manual_scale: plt.autoscale(enable=do_enable) plt.ylim(ymin,ymax) plt.savefig(img_dir + prob['name'].lower() + "_clean.png", dpi=200) plt.show() Ys = prob['ypts'] fig = plt.figure() fig.set_size_inches(16, 20) gs = gridspec.GridSpec(5, 2) fig.suptitle(prob['name'] + " Noisy", fontsize=36) ax0 = fig.add_subplot(gs[0,:]) ax0.scatter(xs, Ys, marker='.') ax0.set_xlabel('X') ax0.set_ylabel('OUT') if manual_scale: plt.autoscale(enable=do_enable) plt.ylim(ymin,ymax) ax1 = fig.add_subplot(gs[1,:]) ax1.scatter(ys, Ys, marker='.') ax1.set_xlabel('Y') ax1.set_ylabel('OUT') if manual_scale: plt.autoscale(enable=do_enable) plt.ylim(ymin,ymax) ax2 = fig.add_subplot(gs[2,:]) ax2.scatter(zs, Ys, marker='.') ax2.set_xlabel('Z') ax2.set_ylabel('OUT') if manual_scale: plt.autoscale(enable=do_enable) plt.ylim(ymin,ymax) ax3 = fig.add_subplot(gs[3,:]) ax3.scatter(vs, Ys, marker='.') ax3.set_xlabel('V') ax3.set_ylabel('OUT') if manual_scale: plt.autoscale(enable=do_enable) plt.ylim(ymin,ymax) ax4 = fig.add_subplot(gs[4,:]) ax4.scatter(ws, Ys, marker='.') ax4.set_xlabel('W') ax4.set_ylabel('OUT') if manual_scale: plt.autoscale(enable=do_enable) plt.ylim(ymin,ymax) plt.savefig(img_dir + prob['name'].lower() + "_noisy.png", dpi=200) plt.show() ``` ### Output json and csv data ``` data = np.array([prob['xpts'][0],prob['xpts'][1],prob['xpts'][2],prob['xpts'][3],prob['xpts'][4], prob['ypts']]).T print data.shape cols = [['x', 'y', 'z', 'v', 'w', 'out']] out_data = cols + data.tolist() import json json_out = json.dumps( out_data, indent=4) # print json_out f_json = 
open(data_dir + prob['name'].lower() + ".json", 'w') f_json.write(json_out) f_json.close() f_csv = open(data_dir + prob['name'].lower() + ".csv", 'w') for row in out_data: line = ", ".join([str(col) for col in row]) + "\n" f_csv.write(line) f_csv.close() ``` ### Output *clean* json and csv data ``` data = np.array([prob['xpts'][0],prob['xpts'][1],prob['xpts'][2],prob['xpts'][3],prob['xpts'][4], prob['ypure']]).T print data.shape cols = [['x', 'y', 'z', 'v', 'w', 'out']] out_data = cols + data.tolist() import json json_out = json.dumps( out_data, indent=4) # print json_out f_json = open(data_dir + prob['name'].lower() + "_clean.json", 'w') f_json.write(json_out) f_json.close() f_csv = open(data_dir + prob['name'].lower() + "_clean.csv", 'w') for row in out_data: line = ", ".join([str(col) for col in row]) + "\n" f_csv.write(line) f_csv.close() ```
github_jupyter
## Rotate, zoom, transform, change contrast to get new data ``` import os import PIL import cv2 import pathlib import numpy as np import pandas as pd import seaborn as sn import tensorflow as tf from tensorflow import keras from matplotlib import pyplot as plt from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import classification_report from sklearn.model_selection import train_test_split %matplotlib inline # load data dir dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz" data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, cache_dir='.', untar=True) data_dir = pathlib.Path(data_dir) print(f"Numbers of images: {len(list(data_dir.glob('*/*.jpg')))}") roses = list(data_dir.glob("roses/*.jpg")) PIL.Image.open(str(roses[1])) flowers_images_dict = { 'roses': list(data_dir.glob('roses/*')), 'daisy': list(data_dir.glob('daisy/*')), 'dandelion': list(data_dir.glob('dandelion/*')), 'sunflowers': list(data_dir.glob('sunflowers/*')), 'tulips': list(data_dir.glob('tulips/*')), } flowers_labels_dict = { 'roses': 0, 'daisy': 1, 'dandelion': 2, 'sunflowers': 3, 'tulips': 4, } # get X, y data sets using a loop X, y = [], [] for name, imgs in flowers_images_dict.items(): for img in imgs: img = cv2.imread(str(img)) resized_img = cv2.resize(img, (180, 180)) X.append(resized_img) y.append(flowers_labels_dict[name]) X, y = np.array(X), np.array(y) X.shape, y.shape # split train test sets and scale them X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) X_train, X_test = X_train / 255, X_test / 255 # define the model model = keras.Sequential([ keras.layers.Conv2D(filters=16, kernel_size=(3, 3), padding="same", activation="relu"), keras.layers.MaxPooling2D(), keras.layers.Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation="relu"), keras.layers.MaxPooling2D(), keras.layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu"), 
keras.layers.MaxPooling2D(), keras.layers.Flatten(), keras.layers.Dense(128, activation="relu"), keras.layers.Dense(5, activation="linear"), ])

# Compile: the head above is linear (raw logits), so from_logits=True is
# required for a correct sparse cross-entropy.
model.compile(optimizer="adam",
              loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=["accuracy"])

# train model
model.fit(X_train, y_train, epochs=30)

# this model is overfitting
y_pred = model.predict(X_test)
y_pred_class = [np.argmax(x) for x in y_pred]  # logits -> class index per sample
print("Classification Report: \n", classification_report(y_test, y_pred_class))

# Augment Data
# Random flips/rotations/zoom/contrast applied on the fly during training,
# to combat the overfitting observed above.
data_augmentation = keras.Sequential([
    keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
    keras.layers.experimental.preprocessing.RandomRotation(0.2),
    keras.layers.experimental.preprocessing.RandomZoom(0.2),
    keras.layers.experimental.preprocessing.RandomContrast(0.3),
])

# Visual sanity check: original image vs. one augmented sample.
plt.axis("off")
plt.imshow(X[0])
plt.axis("off")
plt.imshow(data_augmentation(X)[0])

# Same CNN topology as before, with the augmentation pipeline as layer 0.
model = keras.Sequential([
    data_augmentation,
    keras.layers.Conv2D(filters=16, kernel_size=(3, 3), padding="same", activation="relu"),
    keras.layers.MaxPooling2D(),
    keras.layers.Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation="relu"),
    keras.layers.MaxPooling2D(),
    keras.layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu"),
    keras.layers.MaxPooling2D(),
    keras.layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same", activation="relu"),
    keras.layers.MaxPooling2D(),
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(5, activation="linear"),  # 5 classes, logits output
])

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

model.fit(X_train, y_train, epochs=50)

# Accuracy increased from 68 percent to 77 percent
y_pred = model.predict(X_test)
y_pred_class = [np.argmax(x) for x in y_pred]
print("Classification Report: \n", classification_report(y_test, y_pred_class))

from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import BatchNormalization


def better_model():
    """Build and compile a deeper VGG-style CNN (Conv-BN pairs with
    progressively larger Dropout) on top of the same augmentation pipeline.

    Returns a compiled Keras model with a 5-way softmax head
    (probabilities, not logits — hence the plain
    'sparse_categorical_crossentropy' loss).
    """
    model = Sequential([data_augmentation])
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(180, 180, 3)))
    model.add(BatchNormalization())
    model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.3))
    model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.4))  # dropout rate grows with depth
    model.add(Flatten())
    model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(5, activation='softmax'))
    # compile model
    model.compile(optimizer="SGD", loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model

# NOTE(review): rebinding the name shadows the factory function defined above.
better_model = better_model()
better_model.fit(X_train, y_train, epochs=30)

# actually not better
y_pred = better_model.predict(X_test)
y_pred_class = [np.argmax(x) for x in y_pred]
print("Classification Report: \n", classification_report(y_test, y_pred_class))
```
github_jupyter
```
import numpy as np
from tqdm import tqdm
import os
import h5py
import sklearn.metrics as metrics
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.nn.parallel
from torch.utils.data import Dataset, DataLoader
import torch.nn.init as init
import random


class TransformNet(nn.Module):
    """Spatial transformer: regresses a 3x3 alignment matrix from edge
    features, initialised to the identity transform."""

    def __init__(self):
        super(TransformNet, self).__init__()
        self.k = 3
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False), self.bn1, nn.LeakyReLU(negative_slope=0.2))
        self.conv2 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=1, bias=False), self.bn2, nn.LeakyReLU(negative_slope=0.2))
        self.conv3 = nn.Sequential(nn.Conv1d(128, 1024, kernel_size=1, bias=False), self.bn3, nn.LeakyReLU(negative_slope=0.2))
        self.linear1 = nn.Linear(1024, 512, bias=False)
        # NOTE(review): self.bn3 is re-bound here (1024 -> 512 features).
        # conv3 above already captured the BatchNorm1d(1024) object, so
        # behaviour is correct, but the reused name is confusing.
        self.bn3 = nn.BatchNorm1d(512)
        self.linear2 = nn.Linear(512, 256, bias=False)
        self.bn4 = nn.BatchNorm1d(256)
        self.transform = nn.Linear(256, 3 * 3)
        # Start as the identity transform: zero weights, identity bias.
        init.constant_(self.transform.weight, 0)
        init.eye_(self.transform.bias.view(3, 3))

    def forward(self, x):
        bs = x.size(0)
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.max(dim=-1, keepdim=False)[0]  # max over the k neighbours
        x = self.conv3(x)
        x = x.max(dim=-1, keepdim=False)[0]  # global max over points
        x = F.leaky_relu(self.bn3(self.linear1(x)), negative_slope=0.2)
        x = F.leaky_relu(self.bn4(self.linear2(x)), negative_slope=0.2)
        x = self.transform(x).view(bs, 3, 3)
        return x


def knn(x, k):
    """Indices of the k nearest neighbours of each point.

    Uses the |a-b|^2 = |a|^2 - 2a.b + |b|^2 expansion; topk of the
    negated squared distances gives the nearest neighbours.
    """
    inner = -2 * torch.matmul(x.transpose(2, 1), x)
    xx = torch.sum(x ** 2, dim=1, keepdim=True)
    pairwise_distance = -xx - inner - xx.transpose(2, 1)
    idx = pairwise_distance.topk(k=k, dim=-1)[1]  # (bs, n_points, k)
    return idx


def get_graph_feature(x, k=20):
    """Build DGCNN edge features: for each point concatenate
    (neighbour - point, point) over its k nearest neighbours."""
    batch_size = x.size(0)
    num_points = x.size(2)
    x = x.view(batch_size, -1, num_points)
    idx = knn(x, k=k)  # (bs, n_points, k)
    # Offset per-sample indices so they address the flattened batch.
    idx_base = torch.arange(0, batch_size, device=x.device).view(-1, 1, 1) * num_points
    idx = idx + idx_base
    idx = idx.view(-1)
    _, num_dims, _ = x.size()
    x = x.transpose(2, 1).contiguous()
    feature = x.view(batch_size * num_points, -1)[idx, :]
    feature = feature.view(batch_size, num_points, k, num_dims)
    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
    feature = torch.cat((feature - x, x), dim=3).permute(0, 3, 1, 2).contiguous()
    return feature  # (bs, 2*n_dims, n_points, k)


class DGCNN_partseg(nn.Module):
    """DGCNN for part segmentation with a learned 'pose selector' that
    softly picks among 24 pre-rotated copies of the input cloud."""

    def __init__(self, seg_num_all, k=20):
        super(DGCNN_partseg, self).__init__()
        self.seg_num_all = seg_num_all
        self.k = k
        self.transform_net = TransformNet()
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(64)
        self.bn4 = nn.BatchNorm2d(64)
        self.bn5 = nn.BatchNorm2d(64)
        self.bn6 = nn.BatchNorm1d(1024)
        self.bn7 = nn.BatchNorm1d(64)
        self.bn8 = nn.BatchNorm1d(256)
        self.bn9 = nn.BatchNorm1d(256)
        self.bn10 = nn.BatchNorm1d(128)
        self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False), self.bn1, nn.LeakyReLU(negative_slope=0.2))
        self.conv2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False), self.bn2, nn.LeakyReLU(negative_slope=0.2))
        self.conv3 = nn.Sequential(nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False), self.bn3, nn.LeakyReLU(negative_slope=0.2))
        self.conv4 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=False), self.bn4, nn.LeakyReLU(negative_slope=0.2))
        self.conv5 = nn.Sequential(nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False), self.bn5, nn.LeakyReLU(negative_slope=0.2))
        self.conv6 = nn.Sequential(nn.Conv1d(192, 1024, kernel_size=1, bias=False), self.bn6, nn.LeakyReLU(negative_slope=0.2))
        self.conv7 = nn.Sequential(nn.Conv1d(16, 64, kernel_size=1, bias=False), self.bn7, nn.LeakyReLU(negative_slope=0.2))
        self.conv8 = nn.Sequential(nn.Conv1d(1280, 256, kernel_size=1, bias=False), self.bn8, nn.LeakyReLU(negative_slope=0.2))
        self.dp1 = nn.Dropout(p=0.5)
        self.conv9 = nn.Sequential(nn.Conv1d(256, 256, kernel_size=1, bias=False), self.bn9, nn.LeakyReLU(negative_slope=0.2))
        self.dp2 = nn.Dropout(p=0.5)
        self.conv10 = nn.Sequential(nn.Conv1d(256, 128, kernel_size=1, bias=False), self.bn10, nn.LeakyReLU(negative_slope=0.2))
        self.conv11 = nn.Conv1d(128, self.seg_num_all, kernel_size=1, bias=False)
        # Pose-selector head: scores the 24 rotated copies of the cloud.
        self.linearfil1 = nn.Conv1d(24, 512, kernel_size=1)
        self.bnfil1 = nn.BatchNorm1d(512)
        self.linearfil2 = nn.Conv1d(512, 256, kernel_size=1)
        self.bnfil2 = nn.BatchNorm1d(256)
        self.linearfil3 = nn.Linear(256, 24)

    def forward(self, x, l):
        bs, n_points = x.size(0), x.size(2)
        # pose selector: softmax-weighted blend of the 24 rotated copies
        xf = x.permute(0, 2, 3, 1).view(bs, n_points*3, 24)
        c = F.leaky_relu(self.bnfil1(self.linearfil1(xf.transpose(2, 1))), negative_slope=0.2)
        c = F.leaky_relu(self.bnfil2(self.linearfil2(c)), negative_slope=0.2)
        c = F.adaptive_max_pool1d(c, 1).view(bs, -1)
        c = F.softmax(self.linearfil3(c), dim=1).unsqueeze(-1)
        x = torch.bmm(xf, c).view(bs, n_points, 3).permute(0, 2, 1).contiguous()
        x0 = get_graph_feature(x, k=self.k)  # (batch_size, 3, num_points) -> (batch_size, 3*2, num_points, k)
        t = self.transform_net(x0)  # (batch_size, 3, 3)
        x = x.transpose(2, 1)  # (batch_size, 3, num_points) -> (batch_size, num_points, 3)
        x = torch.bmm(x, t)  # (batch_size, num_points, 3) * (batch_size, 3, 3) -> (batch_size, num_points, 3)
        x = x.transpose(2, 1)  # (batch_size, num_points, 3) -> (batch_size, 3, num_points)
        x = get_graph_feature(x, k=self.k)  # (batch_size, 3, num_points) -> (batch_size, 3*2, num_points, k)
        x = self.conv1(x)  # (batch_size, 3*2, num_points, k) -> (batch_size, 64, num_points, k)
        x = self.conv2(x)  # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points, k)
        x1 = x.max(dim=-1, keepdim=False)[0]  # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points)
        x = get_graph_feature(x1, k=self.k)  # (batch_size, 64, num_points) -> (batch_size, 64*2, num_points, k)
        x = self.conv3(x)  # (batch_size, 64*2, num_points, k) -> (batch_size, 64, num_points, k)
        x = self.conv4(x)  # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points, k)
        x2 = x.max(dim=-1, keepdim=False)[0]  # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points)
        x = get_graph_feature(x2, k=self.k)  # (batch_size, 64, num_points) -> (batch_size, 64*2, num_points, k)
        x = self.conv5(x)  # (batch_size, 64*2, num_points, k) -> (batch_size, 64, num_points, k)
        x3 = x.max(dim=-1, keepdim=False)[0]  # (batch_size, 64, num_points, k) -> (batch_size, 64, num_points)
        x = torch.cat((x1, x2, x3), dim=1)  # (batch_size, 64*3, num_points)
        x = self.conv6(x)  # (batch_size, 64*3, num_points) -> (batch_size, emb_dims, num_points)
        x = x.max(dim=-1, keepdim=True)[0]  # (batch_size, emb_dims, num_points) -> (batch_size, emb_dims, 1)
        l = l.view(bs, -1, 1)  # (batch_size, num_categoties, 1)
        l = self.conv7(l)  # (batch_size, num_categoties, 1) -> (batch_size, 64, 1)
        x = torch.cat((x, l), dim=1)  # (batch_size, 1088, 1)
        x = x.repeat(1, 1, n_points)  # (batch_size, 1088, num_points)
        x = torch.cat((x, x1, x2, x3), dim=1)  # (batch_size, 1088+64*3, num_points)
        x = self.conv8(x)  # (batch_size, 1088+64*3, num_points) -> (batch_size, 256, num_points)
        x = self.dp1(x)
        x = self.conv9(x)  # (batch_size, 256, num_points) -> (batch_size, 256, num_points)
        x = self.dp2(x)
        x = self.conv10(x)  # (batch_size, 256, num_points) -> (batch_size, 128, num_points)
        x = self.conv11(x)  # (batch_size, 256, num_points) -> (batch_size, seg_num_all, num_points)
        return x


def load_data_partseg(data_dir, partition):
    """Load every HDF5 file listed in <partition>/<partition>_list.txt and
    return (data, label, seg) arrays stacked over files."""
    all_data, all_label, all_seg = [], [], []
    for file_name in open(os.path.join(data_dir, '{}/{}_list.txt'.format(partition, partition))):
        f = h5py.File(os.path.join(data_dir, '{}/{}'.format(partition, file_name)).rstrip(), 'r', swmr=True)
        data, label, seg = f['data'][:].astype('float32'), f['label'][:].astype('int64'), f['pid'][:].astype('int64')
        f.close()
        all_data.append(data)
        all_label.append(label)
        all_seg.append(seg)
    return np.asarray(all_data), np.asarray(all_label), np.asarray(all_seg)


def generate_24_rotations():
    """Return the 24 rotation matrices of the cube's rotation group, built
    from axis permutations with sign flips of matching parity."""
    res = []
    for id in [[0, 1, 2], [1, 2, 0], [2, 0, 1]]:  # even permutations
        R = np.identity(3)[:, id].astype(int)
        R1 = np.asarray([R[:, 0], R[:, 1], R[:, 2]]).T
        R2 = np.asarray([-R[:, 0], -R[:, 1], R[:, 2]]).T
        R3 = np.asarray([-R[:, 0], R[:, 1], -R[:, 2]]).T
        R4 = np.asarray([R[:, 0], -R[:, 1], -R[:, 2]]).T
        res += [R1, R2, R3, R4]
    for id in [[0, 2, 1], [1, 0, 2], [2, 1, 0]]:  # odd permutations
        R = np.identity(3)[:, id].astype(int)
        R1 = np.asarray([-R[:, 0], -R[:, 1], -R[:, 2]]).T
        R2 = np.asarray([-R[:, 0], R[:, 1], R[:, 2]]).T
        R3 = np.asarray([R[:, 0], -R[:, 1], R[:, 2]]).T
        R4 = np.asarray([R[:, 0], R[:, 1], -R[:, 2]]).T
        res += [R1, R2, R3, R4]
    return res


class ShapeNetPartSeg(Dataset):
    """ShapeNet part-segmentation dataset; each sample is returned as the
    24 rotated copies of its point cloud, concatenated along axis 0."""

    def __init__(self, data_dir, partition, class_choice=None):
        self.all_R = generate_24_rotations()
        self.data, self.label, self.seg = load_data_partseg(data_dir, partition)
        self.cat2id = {'airplane': 0, 'bag': 1, 'cap': 2, 'car': 3, 'chair': 4, 'earphone': 5, 'guitar': 6, 'knife': 7, 'lamp': 8, 'laptop': 9, 'motor': 10, 'mug': 11, 'pistol': 12, 'rocket': 13, 'skateboard': 14, 'table': 15}
        # Parts per category and each category's offset into the 50 part ids.
        self.seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
        self.index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
        self.partition = partition
        self.class_choice = class_choice
        if self.class_choice is not None:
            # Restrict to a single object category.
            id_choice = self.cat2id[self.class_choice]
            indices = (self.label == id_choice).squeeze()
            self.data, self.label, self.seg = self.data[indices], self.label[indices], self.seg[indices]
            self.seg_num_all, self.seg_start_index = self.seg_num[id_choice], self.index_start[id_choice]
        else:
            self.seg_num_all, self.seg_start_index = 50, 0

    def __getitem__(self, item):
        pt, label, seg = self.data[item].reshape(1, 2048, 3), self.label[item], self.seg[item]
        pclist = []
        nums = np.arange(24)
        # if training data, select pattern among 24 possible patterns
        if self.partition == 'train':
            random.shuffle(nums)
        for i in range(24):
            pointcloud = pt @ self.all_R[nums[i]]
            pclist.append(pointcloud)
        data = np.concatenate(pclist)
        return data.astype('float32'), label, seg

    def __len__(self):
        return self.data.shape[0]


def cal_loss(pred, label, smoothing_eps=0.2):
    """Cross-entropy with label smoothing over the part classes."""
    label = label.contiguous().view(-1)
    n_class = pred.size(1)
    one_hot = torch.zeros_like(pred).scatter(1, label.view(-1, 1), 1)
    one_hot = one_hot * (1 - smoothing_eps) + (1 - one_hot) * smoothing_eps / (n_class - 1)
    log_prb = F.log_softmax(pred, dim=1)
    loss = -(one_hot * log_prb).sum(dim=1).mean()
    return loss


def calculate_shape_IoU(pred_np, seg_np, label, class_choice):
    """Per-shape mean IoU over the parts belonging to each shape's category.
    A part absent from both prediction and ground truth (U == 0) counts as
    IoU 1."""
    seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
    index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
    label = label.squeeze()
    shape_ious = []
    for shape_idx in range(seg_np.shape[0]):  # 2874 shapes in total
        if class_choice is None or label[shape_idx] == class_choice:
            start_index = index_start[label[shape_idx]]
            num = seg_num[label[shape_idx]]
            parts = range(start_index, start_index + num)
            part_ious = []
            for part in parts:
                I = np.sum(np.logical_and(pred_np[shape_idx] == part, seg_np[shape_idx] == part))
                U = np.sum(np.logical_or(pred_np[shape_idx] == part, seg_np[shape_idx] == part))
                iou = 1 if U == 0 else I / float(U)
                part_ious.append(iou)
            shape_ious.append(np.mean(part_ious))
        else:
            continue
    return np.asarray(shape_ious)


def train(data_dir, log_dir, device, n_epoch=1000, lr=1e-3, bs=16):
    """Train DGCNN_partseg on ShapeNet; periodically evaluate with 24-way
    rotation voting and checkpoint the best test IoU."""
    # dataloader
    train_set = ShapeNetPartSeg(data_dir=data_dir, partition='train', class_choice=None)
    test_set = ShapeNetPartSeg(data_dir=data_dir, partition='test', class_choice=None)
    train_generator = DataLoader(train_set, batch_size=bs*2, shuffle=True, num_workers=2, drop_last=True)
    test_generator = DataLoader(test_set, batch_size=bs, shuffle=True, num_workers=2, drop_last=False)
    print('training data size: {}, test data size: {}'.format(train_set.__len__(), test_set.__len__()))
    # initialization
    model = DGCNN_partseg(seg_num_all=50).to(device)
    optimizer = optim.SGD(model.parameters(), lr=lr * 100, momentum=0.9, weight_decay=1e-4)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epoch, eta_min=1e-3)
    # Truncates the log file; note 'logger' is None afterwards (close() returns None).
    logger = open(log_dir, 'w').close()
    best_test_iou = 0
    # NOTE(review): np.int is deprecated (removed in NumPy 1.20+); plain int
    # or np.int64 would be needed on modern NumPy.
    vcand = np.loadtxt('./all_id.txt').astype(np.int)
    print('init done')
    # training and validation
    for epoch in range(n_epoch):
        # training phase
        loss_val = 0.0
        train_true, train_pred, train_label = [], [], []
        model.train()
        for data, label, seg in tqdm(train_generator):
            # One-hot encode the 16 object categories for the label branch.
            label_one_hot = np.zeros((label.shape[0], 16))
            label_one_hot[np.arange(label.shape[0]), label[np.arange(label.shape[0])].flatten()] = 1
            label_one_hot = torch.from_numpy(label_one_hot.astype(np.float32))
            data, label_one_hot, seg = data.to(device), label_one_hot.to(device), seg.to(device)
            optimizer.zero_grad()
            pred = model(data, label_one_hot).permute(0, 2, 1).contiguous()  # bs * n_points * n_class
            loss = cal_loss(pred.view(-1, 50), seg.view(-1, 1).squeeze())
            loss.backward()
            optimizer.step()
            pred = pred.max(dim=2)[1]
            loss_val += loss.item()
            train_true.append(seg.cpu().numpy())
            train_pred.append(pred.detach().cpu().numpy())
            train_label.append(label.reshape(-1))
        scheduler.step()
        train_true, train_pred, train_label = np.concatenate(train_true), np.concatenate(train_pred), np.concatenate(train_label)
        curr_train_iou = np.mean(calculate_shape_IoU(train_pred, train_true, train_label, class_choice=None))
        print('Epoch {}, loss {:.2f}, train_iou: {:.2f}'.format(epoch, loss_val, curr_train_iou))
        # testing phase
        if epoch >= 2000 or curr_train_iou >= 0.9 or epoch % 100 == 0:
            model.eval()
            ids = torch.from_numpy(vcand).to(device).long()
            with torch.no_grad():
                test_true, test_pred, test_label = [], [], []
                for data, label, seg in tqdm(test_generator):
                    label_one_hot = np.zeros((label.shape[0], 16))
                    label_one_hot[np.arange(label.shape[0]), label[np.arange(label.shape[0])].flatten()] = 1
                    label_one_hot = torch.from_numpy(label_one_hot.astype(np.float32))
                    data, label_one_hot, seg = data.to(device), label_one_hot.to(device), seg.to(device)
                    tmp_pred = []
                    # roll through all 24 combinations
                    for vw in range(24):
                        tmp_pred.append(model(data[:, ids[vw]], label_one_hot).permute(0, 2, 1).contiguous().unsqueeze(0).detach().cpu().numpy())
                    pred = np.sum(np.concatenate(tmp_pred), axis=0)  # sum-vote over rotations
                    test_true.append(seg.cpu().numpy())
                    test_pred.append(np.argmax(pred, axis=2))
                    test_label.append(label.reshape(-1))
                test_true, test_pred, test_label = np.concatenate(test_true), np.concatenate(test_pred), np.concatenate(test_label)
                curr_test_iou = np.mean(calculate_shape_IoU(test_pred, test_true, test_label, class_choice=None))
                if best_test_iou < curr_test_iou:
                    best_test_iou = curr_test_iou
                    torch.save(model.state_dict(), './partseg_checkpoint.t7')
                print('Epoch {}, curr_test_iou {:.2%}, best_test_iou {:.2%}'.format(epoch, curr_test_iou, best_test_iou))


if __name__ == '__main__':
    data_dir = os.path.join(os.getcwd(), './dataset/partseg/pca/')
    log_dir = 'partseg_log.txt'
    device = torch.device('cuda:4')
    torch.manual_seed(1)
    torch.cuda.manual_seed(1)
    train(data_dir, log_dir, device=device, n_epoch=3000, lr=1e-4, bs=16)
```
##
github_jupyter
1-1 인공지능과 가위바위보 하기
```
# Reuse the handwritten-digit classifier approach.
# Pipeline: prepare data -> design the network -> train -> test (evaluate).
```
1-2 데이터를 준비하자!
MNIST 숫자 손글씨 Dataset 불러들이기
```
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt

print(tf.__version__)  # print the TensorFlow version

mnist = keras.datasets.mnist  # Load MNIST; downloads automatically on first use.

(x_train, y_train), (x_test, y_test) = mnist.load_data()

print(len(x_train))  # print the size of the x_train array

plt.imshow(x_train[1], cmap=plt.cm.binary)
plt.show()
print()
print(y_train[1])

# Try setting index to any number between 0 and 59999.
index = 12344
plt.imshow(x_train[index], cmap=plt.cm.binary)
plt.show()
print((index+1), '번째 이미지의 숫자는 바로 ', y_train[index], '입니다.')

# number of training samples
print(x_train.shape)
# number of test samples
print(x_test.shape)
```
데이터 전처리 하기
```
print('최솟값: ', np.min(x_train), '최댓값: ', np.max(x_train))

# MNIST pixel values are in the 0~255 range; models train best with
# inputs in 0~1, so divide the data by 255.
x_train_norm, x_test_norm = x_train/255, x_test/255
# NOTE(review): the max below is taken from x_test_norm while the min is
# from x_train_norm — probably x_train_norm was intended for both.
print('최솟값:', np.min(x_train_norm), '최댓값:', np.max(x_test_norm))
```
1-3 딥러닝 네트워크 설계\
Sequenential Model을 사용해 보자
```
# Example of a LeNet-style deep network built with the tf.keras
# Sequential API.
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(28,28,1)))
model.add(keras.layers.MaxPool2D(2,2))
model.add(keras.layers.Conv2D(32, (3,3), activation='relu'))
model.add(keras.layers.MaxPooling2D((2,2)))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(32, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))

print('Model에 추가된 Layer 개수: ', len(model.layers))
print()
# Inspect the network architecture.
model.summary()
```
1-4 딥러닝 네트워크 학습시키기
```
# The network expects input shaped (n_samples, height, width, channels);
# the first layer was declared with input_shape=(28,28,1), but
# print(x_train.shape) gives (60000,28,28) — no channel axis — so reshape
# to (60000,28,28,1).  Channel count 1 means grayscale (RGB would be 3).
print("Before Reshape - x_train_norm shape: {}".format(x_train_norm.shape))
print("Before Reshape - x_test_norm shape: {}".format(x_test_norm.shape))

x_train_reshaped = x_train_norm.reshape( -1, 28, 28, 1)
x_test_reshaped = x_test_norm.reshape( -1, 28, 28, 1)  # -1 lets reshape infer the sample count.

print("After Reshape - x_train_reshaped shape: {}".format(x_train_reshaped.shape))
print("After Reshape - x_test_reshaped shape: {}".format(x_test_reshaped.shape))

# Train the network on the x_train data.  epochs=10 means the full 60,000
# samples are iterated 10 times; x_train_reshaped matches the model's
# declared input shape.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train_reshaped, y_train, epochs=10)
# Accuracy per epoch can be watched as training progresses; gains flatten
# out around epoch 9, so ~10 epochs is a reasonable choice.
```
1-5 얼마나 잘 만들었는지 확인하기
```
# Evaluate on the held-out test data.
test_loss, test_accuracy = model.evaluate(x_test_reshaped, y_test, verbose=2)
print("test_loss: {} ".format(test_loss))
print("test_accuracy: {}".format(test_accuracy))
# Score (98.45) came out lower than expected.

# Inspect which samples were misclassified.
predicted_result = model.predict(x_test_reshaped)  # per-class probabilities inferred by the model
predicted_labels = np.argmax(predicted_result, axis=1)

idx = 0  # look at the first x_test sample
print('model.predict() 결과 : ', predicted_result[idx])
print('model이 추론한 가장 가능성이 높은 결과 : ', predicted_labels[idx])
print('실제 데이터의 라벨 : ', y_test[idx])

plt.imshow(x_test[idx], cmap=plt.cm.binary)
plt.show()

# Collect the misclassified cases.
import random
wrong_predict_list = []
for i, _ in enumerate(predicted_labels):
    # keep only the indices where the prediction differs from y_test
    if predicted_labels[i] != y_test[i]:
        wrong_predict_list.append(i)

# Sample 5 random entries from wrong_predict_list.
samples = random.choices(population=wrong_predict_list, k=5)

for n in samples:
    print("예측확률분포: " + str(predicted_result[n]))
    print("라벨: " + str(y_test[n]) + ", 예측결과: " + str(predicted_labels[n]))
    plt.imshow(x_test[n], cmap=plt.cm.binary)
    plt.show()
```
1-6 더 좋은 네트워크 만들기
```
# Try changing the hyperparameters.
# Hyperparameters you can tweak:
n_channel_1 = 16
n_channel_2 = 32
n_dense = 32
n_train_epoch = 10

model = keras.models.Sequential()
model.add(keras.layers.Conv2D(n_channel_1, (3,3), activation='relu', input_shape= (28,28,1)))
model.add(keras.layers.MaxPool2D(2,2))
model.add(keras.layers.Conv2D(n_channel_2, (3,3), activation='relu'))
model.add(keras.layers.MaxPooling2D((2,2)))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(n_dense, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.summary()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# train the model
model.fit(x_train_reshaped, y_train, epochs=n_train_epoch)

# evaluate the model
test_loss, test_accuracy = model.evaluate(x_test_reshaped, y_test, verbose=2)
print("test_loss: {} ".format(test_loss))
print("test_accuracy: {}".format(test_accuracy))
```
1-7 프로젝트: 가위바위보 분류기 만들기
```
# Install Pillow if it is not installed yet.
#!pip install pillow

from PIL import Image
import os, glob
import pandas as pd
import numpy as np

print("PIL 라이브러리 import 완료")

# Load the scissor images.
image_dir_path = "./data/rock_scissor_paper/scissor"
print("이미지 디렉토리 경로: ", image_dir_path)

images = glob.glob(image_dir_path + "/*.jpg")

# Resize every file to 28x28 and save it in place.
# NOTE(review): Image.ANTIALIAS is deprecated (removed in Pillow 10);
# Image.LANCZOS is the modern equivalent.
target_size = (28,28)
for img in images:
    old_img = Image.open(img)
    new_img = old_img.resize(target_size, Image.ANTIALIAS)
    new_img.save(img, "JPEG")

print("가위 이미지 resize 완료")

# Load the rock images.
image_dir_path = "./data/rock_scissor_paper/rock"
print("이미지 디렉토리 경로: ", image_dir_path)

images = glob.glob(image_dir_path + "/*.jpg")

# Resize every file to 28x28 and save it in place.
target_size = (28,28)
for img in images:
    old_img = Image.open(img)
    new_img = old_img.resize(target_size, Image.ANTIALIAS)
    new_img.save(img, "JPEG")

print("바위 이미지 resize 완료")

# Load the paper images.
image_dir_path = "./data/rock_scissor_paper/paper"
print("이미지 디렉토리 경로: ", image_dir_path)

images = glob.glob(image_dir_path + "/*.jpg")

# Resize every file to 28x28 and save it in place.
target_size = (28,28)
for img in images:
    old_img = Image.open(img)
    new_img = old_img.resize(target_size, Image.ANTIALIAS)
    new_img.save(img, "JPEG")

print("보 이미지 resize 완료")

def load_data(img_path):
    # scissor : 0, rock : 1, paper : 2
    number_of_data = 300  # total image count across the three classes — keep in sync with the data!
    img_size = 28
    color = 3
    # Allocate the matrices that will hold the image data and the labels
    # (scissor : 0, rock : 1, paper : 2).
    imgs = np.zeros(number_of_data*img_size*img_size*color, dtype=np.int32).reshape(number_of_data, img_size, img_size, color)
    labels = np.zeros(number_of_data, dtype=np.int32)

    idx = 0
    for file in glob.iglob(img_path+'/scissor/*.jpg'):
        img = np.array(Image.open(file), dtype=np.int32)
        imgs[idx,:,:,:] = img  # copy the image matrix into the data area
        labels[idx] = 0  # scissor : 0
        idx = idx+1

    for file in glob.iglob(img_path+'/rock/*.jpg'):
        img = np.array(Image.open(file), dtype=np.int32)
        imgs[idx,:,:,:] = img  # copy the image matrix into the data area
        labels[idx] = 1  # rock : 1
        idx = idx+1

    for file in glob.iglob(img_path+'/paper/*.jpg'):
        img = np.array(Image.open(file), dtype=np.int32)
        imgs[idx,:,:,:] = img  # copy the image matrix into the data area
        labels[idx] = 2  # paper : 2
        idx = idx+1

    print("학습데이터(x_train)의 이미지 개수는", idx, "입니다.")
    return imgs, labels

image_dir_path = "./data/rock_scissor_paper"
(x_train, y_train) = load_data(image_dir_path)
x_train_norm = x_train/255.0  # normalise inputs to the 0~1 range

print("x_train shape: {}".format(x_train.shape))
print("y_train shape: {}".format(y_train.shape))

import matplotlib.pyplot as plt
plt.imshow(x_train[0])
print('라벨: ', y_train[0])
# NOTE(review): the next line looks like pasted cell output, not code.
라벨: 0
```
1-8 for 문 활용, 한 번에 resize 하는 방법
```
image_directory = "./data/rock_scissor_paper/paper"
path_pool = "/rock", "/scissor", "/paper"
target_size = (28, 28)

# One loop over all three class folders instead of three copies of the
# resize code above.
for path in path_pool:
    images = glob.glob(image_directory + path + "/*.jpg")
    for img in images:
        old_img = Image.open(img)
        new_img = old_img.resize(target_size, Image.ANTIALIAS)
        new_img.save(img, "JPEG")

print("리사이징 완료")
```
github_jupyter
**Getting Started** This tutorial relies on standard python packages as well as [SimPEG](http://simpeg.xyz). If you do not have SimPEG installed, you can uncomment the next line and install it from [pypi](https://pypi.python.org/pypi/SimPEG). ``` # !pip install SimPEG import numpy as np import scipy.sparse as sp from SimPEG import Mesh, Utils, Solver from scipy.constants import mu_0, epsilon_0 import matplotlib.pyplot as plt %matplotlib inline ``` # Numerical simulation of the 1D Magnetotelluric (MT) problem ## Purpose With [SimPEG's](http://simpeg.xyz) mesh class, we discretize Maxwell's equations for a 1D magnetotelluric problem. We then solve for both electric and magnetic fields, and evaluate data at a receiving location. There are some milestones to be accomplished: - Introduce differential operators and the terminology used in the SimPEG mesh class - Set up boundary conditions - Set up a linear system $\mathbf{A}\mathbf{u} = \mathbf{rhs}$, compute the fields, $\mathbf{u}$ - Evaluate the data at a receiver location: apparent resistivity and phase - Recognize extensibility of this example to higher dimensions: 2D and 3D ## Physics: Maxwell's equations The governing equations for electromagnetic problems are Maxwell's equations. Here, we show them in the frequency domain. For more background on Maxwell's equations, we recommend http://em.geosci.xyz and [Ward & Hohmann, 1988](http://library.seg.org/doi/abs/10.1190/1.9781560802631.ch4). $$\nabla \times \mathbf{E} + \imath\omega \mu \mathbf{H} = 0 $$ $$\nabla \times \mathbf{H} - (\sigma + \imath \omega \epsilon) \mathbf{E} = 0$$ where - $\mathbf{E}$ is the electric field (V/m) - $\mathbf{H}$ is the magnetic field (A/m) - $\omega = 2\pi f$ is the angular frequency - $\mu$ is the magnetic permeability, often taken to be that of free space ($\mu_0 = 4\pi\times 10^{-7}$ H/m) - $\sigma$ is the electrical conductivity (S/m).
- $\epsilon$ is the dielectric permittivity, often taken to be that of free space ($\epsilon = 8.85 \times 10^{-12}$ F/m) For convenience, we will make the substitution: $\hat{\sigma} = \sigma + \imath \omega \epsilon$ and write Maxwell's equations as $$\nabla \times \mathbf{E} + \imath\omega \mu \mathbf{H} = 0$$ $$\nabla \times \mathbf{H} - \hat{\sigma} \mathbf{E} = 0$$ The first equation is [Faraday's Law](http://em.geosci.xyz/content/maxwell1_fundamentals/formative_laws/faraday.html), and the second is [Ampere's Law](http://em.geosci.xyz/content/maxwell1_fundamentals/formative_laws/ampere_maxwell.html) For the Magnetotelluric problem, we are interested in examining Maxwell's equations for a plane wave source. We consider a vertically propagating plane wave. For a 1D earth model, the fields and fluxes are defined by horizontal, orthogonal electric and magnetic fields, so we take $$\mathbf{E} = E_x\mathbf{\hat{x}}$$ $$\mathbf{H} = H_y\mathbf{\hat{y}}$$ <img src="http://em.geosci.xyz/_images/planewavedown1.png" alt="plane_wave" href="http://em.geosci.xyz/content/maxwell1_fundamentals/plane_waves_in_homogeneous_media/index.html" width=300px> The coordinate system we use is right-handed, and $z$ is positive up. In this case, our governing equations simplify to scalar equations $$ \frac{\partial E_x}{\partial z} + \imath \omega \mu H_y = 0$$ $$-\frac{\partial H_y}{\partial z} + \hat{\sigma} E_x = 0$$ with the boundary conditions: $$E_x (z=0) = 1$$ $$E_x (z=-\infty) = 0$$ To solve the forward problem, the - **knowns** are: $\omega$, $\mu$, $\hat{\sigma}$, boundary conditions - **unknowns** are: $E_x$, $H_y$ ## Discretization, the Short version. **TL;DR.** Here is the answer.
If you want to see the full derivation, check out <a href="#Discretization,-the-Gory-Details.">The Gory Details</a> We define physical properties at cell centers, and stagger the electric and magnetic fields - $\sigma$, $\mu$, $\epsilon$ : cell centers - $E_x$: cell centers - $H_y$: faces <img src="https://github.com/simpeg/tle-magnetotelluric_inversion/raw/master/images/1DMT_discretize.png" width=200px> and use a finite difference approach to define the operators, this gives us the discrete system of equations $$ \underbrace{ \begin{bmatrix} \mathbf{Grad} & \imath \omega \mathbf{M}^{f}_{\mu} \\[0.3em] \mathbf{M}^{cc}_{\hat{\sigma}} & \mathbf{Div} \\[0.3em] \end{bmatrix} }_{\mathbf{A}} \underbrace{ \begin{bmatrix} \mathbf{e_x} \\[0.3em] \mathbf{h_y} \\[0.3em] \end{bmatrix} }_{\mathbf{u}} = \underbrace{ \begin{bmatrix} - \mathbf{B}\mathbf{e_x}^{BC} \\[0.3em] \boldsymbol{0} \\[0.3em] \end{bmatrix} }_{\mathbf{rhs}} $$ with - $\mathbf{e_x}$: Discrete $E_x$, on cell centers $[\text{nC} \times 1]$ - $\mathbf{h_y}$: Discrete $H_y$, on cell faces $[(\text{nC}+1) \times 1]$ - $ \mathbf{Grad}$: Discrete gradient operator $[\text{nC} \times (\text{nC}+1)]$ - $ \mathbf{Div}$: Discrete divergence operator $[(\text{nC}+1) \times \text{nC}]$ - $\mathbf{M}^{f}_{\boldsymbol{\mu}} = \mathbf{diag}(\mathbf{Av^{cc2f}} \boldsymbol{\mu})$ $[(\text{nC}+1) \times (\text{nC}+1)]$ - $\mathbf{M}^{cc}_{\boldsymbol{\hat{\sigma}}} = \mathbf{diag}(\boldsymbol{\hat{\sigma}})$ $[\text{nC} \times \text{nC}]$ - $\mathbf{B} \mathbf{e_x}^{BC}$ handles the boundary conditions ## Designing a mesh When designing a mesh, we need to ensure that we can capture the physics (what should the thickness of the finest cells be?) and make sure that the boundary is far enough away so that the fields have decayed (how far do we need to go to approximate $\infty$?).
To address these, we look at the [skin depth equation](http://em.geosci.xyz/content/maxwell1_fundamentals/plane_waves_in_homogeneous_media/frequency/analytic_solution.html#attenuation-and-skin-depth), which tells us over what distance we expect electromagnetic fields to have decayed by a factor of $1/e$ in a conductive medium: $$ \delta = \frac{500}{\sqrt{\sigma f}} $$ - The finest cells capture the behaviour of the highest frequency near the surface - The mesh needs to extend far enough so that the fields at the lowest frequency have sufficiently decayed Lets start by considering: - a half-space with conductivity $\sigma = 10^{-2}$ S/m - a maximum frequency of 1000 Hz - a minimum frequency of 0.01 Hz ``` sigma_halfspace = 1e-2 fmax, fmin = 1e3, 1e-2 def skin_depth(sigma, f): return 500./np.sqrt(sigma*f) skin_depth_min = skin_depth(sigma_halfspace, fmax) skin_depth_max = skin_depth(sigma_halfspace, fmin) print("The minimum skin depth is {:2.1f} m".format(skin_depth_min)) print("The maximum skin depth is {:2.1f} m".format(skin_depth_max)) ``` To ensure that we are capturing the physics and have a sufficiently far boundary, we will choose - a minimum cell size of $\delta_{\text{min}}/4$ - a padding distance that extends to $2 \delta_{\text{max}}$ ``` print( "The smallest cell size should be {:2.1f} m".format( skin_depth_min / 4. ) ) print( "The mesh should extend {:1.1e} m".format( skin_depth_max * 2. ) ) ``` ### Set up a mesh Here, we use the [SimPEG Mesh class](http://docs.simpeg.xyz) to set up the mesh, differential operators, and handy properties and methods that handle counting and plotting. ``` cs = 39. 
# core cell size npad = 25 # number of padding cells ncz = 100 # number of core cells # define a tensor mesh hz = [(cs, npad, -1.3), (cs, ncz)] mesh = Mesh.TensorMesh([hz], x0='N') # put the origin at the surface # plot the mesh fig, ax = plt.subplots(1,1, figsize=(8, 3)) mesh.plotGrid(centers=True, faces=True, ax=ax) ax.legend(["centers", "faces"]) ax.invert_xaxis() # so that the surface is on our left hand side ax.set_xlabel('z (m)') ax.grid(which="both", linewidth=0.5) print( "The mesh extends {:1.1e}m, is that far enough?".format( mesh.hx.sum() ) ) print(mesh) ``` ## Assemble the discrete system of equations #### Model parameters We start with a half space that has physical properties - $\sigma = 10^{-2}$ S/m - $\mu = \mu_0$ - $\epsilon = \epsilon_0$ and define these on the mesh ``` sigma = np.ones(mesh.nC)*sigma_halfspace # conductivity values for all cells mu = np.ones(mesh.nC)*mu_0 # magnetic permeability values for all cells epsilon = np.ones(mesh.nC)*epsilon_0 # dielectric constant values for all cells print( "There are {:1.0f} cell centers. \n" " sigma is {:1.0f} elements long, all cells have a value of {:1.2e} S/m \n" " mu is {:1.0f} elements long, all cells have a value of {:1.2e} H/m \n" " epsilon is {:1.0f} elements long, all cells have a value of {:1.2e} F/m \n".format( mesh.nC, len(sigma), sigma_halfspace, len(mu), mu_0, len(epsilon), epsilon_0 ) ) ``` We will pick a single frequency to work with for now - f = 1000 Hz ``` frequency = 1e3 # Frequency (Hz) omega = 2*np.pi*frequency # Angular frequency (rad/s) ``` Here, we will adopt the quasistatic assumption and ignore displacement current $(i \epsilon \omega)$. 
To explore the impacts of this assumption, uncomment the next second line ``` sigmahat = sigma # quasi-static assumption # sigmahat = sigma + 1j*epsilon*omega # includes displacement current ``` The system we want to solve is $$ \underbrace{ \begin{bmatrix} \mathbf{Grad} & \imath \omega \mathbf{M}^{f}_{\mu} \\[0.3em] \mathbf{M}^{cc}_{\hat{\sigma}} & \mathbf{Div} \\[0.3em] \end{bmatrix} }_{\mathbf{A}} \underbrace{ \begin{bmatrix} \mathbf{e_x} \\[0.3em] \mathbf{h_y} \\[0.3em] \end{bmatrix} }_{\mathbf{u}} = \underbrace{ \begin{bmatrix} - \mathbf{B}\mathbf{e_x}^{BC} \\[0.3em] \boldsymbol{0} \\[0.3em] \end{bmatrix} }_{\mathbf{rhs}} $$ so we need to construct each of the operators. For details, see: <a href="#Discretization,-the-Gory-Details.">The Gory Details</a> We start by laying our the piece and will then assemble the full matrix system. ``` # Grad mesh.setCellGradBC([['dirichlet', 'dirichlet']]) # Setup boundary conditions Grad = mesh.cellGrad # Gradient matrix # MfMu Mmu = Utils.sdiag(mesh.aveCC2F * mu) # Mccsigma Msighat = Utils.sdiag(sigmahat) # Div Div = mesh.faceDiv # Divergence matrix # Right Hand Side B = mesh.cellGradBC # a matrix for boundary conditions Exbc = np.r_[0., 1.] # boundary values for Ex # Assemble the matrix # A-matrix A = sp.vstack([ sp.hstack([Grad, 1j*omega*Mmu]), # Top row of A matrix sp.hstack((Msighat, Div)) # Bottom row of A matrix ]) # Right-hand side rhs = np.r_[ -B*Exbc, np.zeros(mesh.nC) ] ``` Now that we have all of the pieces, we can go ahead and solve the MT system ``` %%time Ainv = Solver(A) # Factorize A matrix sol = Ainv*rhs # Solve A^-1 rhs = sol Ex = sol[:mesh.nC] # Extract Ex from solution vector u Hy = sol[mesh.nC:mesh.nC+mesh.nN] # Extract Hy from solution vector u ``` ## Impedance, Apparent Resistivity, and Phase MT data are natural source data, meaning that the source is free! but we don't know its amplitude. 
To account for this, the data we examine are typically transfer functions that involve ratios of the electric and magnetic fields. For the 1D problem, the Impedance is simply given by $$ Z_{xy} = - \frac{E_x}{H_y} $$ (The negative is because we have defined a coordinate system such that z is positive up) $Z_{xy}$ is a complex number, so we can look at real and imaginary components or amplitude and phase. ``` Zxy = - 1./Hy[-1] # Impedance at the surface print("Impedance: {:1.1e} + {:1.1e}i".format(Zxy.real, Zxy.imag)) print("or in terms of Amplidude: {:1.1e} and phase: {:1.1f} degrees".format( np.absolute(Zxy), np.rad2deg(np.arctan(Zxy.imag / Zxy.real))) ) ``` Often is useful to translate the impedance to an apparent resistivity ($\rho_a$) and phase. $$ \rho_a = \frac{1}{\mu_0\omega} \big|Z_{xy}\big|^2 $$ $$ \phi = \tan^{-1}\left(\frac{\text{Im}(Z_{xy})}{\text{Re}(Z_{xy})}\right) $$ For a half-space, we expect the apparent resistivity to equal the true resistivity, and the phase to be $45^\circ$ ``` app_res = abs(Zxy)**2 / (mu_0*omega) app_phase = np.rad2deg(np.arctan(Zxy.imag / Zxy.real)) print( "Apparent Resistivity: {:1.1f}, Phase: {:1.1f}".format( app_res, app_phase ) ) print( "Note that the apparent resistivity, {:1.1f} " "is the same as the true half-space {:1.1f}".format( app_res, 1./sigma_halfspace ) ) ``` ## Put it all together Here, we define a function that performs an MT simulation so that we can readily compute the Magnetotelluric response at multiple frequencies and for a variety of models. We write this function to the file MTsimulation.py so that we can import it and use it in later notebooks. Uncomment the first three lines to write out the file again. ``` # %%writefile MTforward.py # import numpy as np # import scipy.sparse as sp # from scipy.constants import mu_0 # from SimPEG import Utils, Solver def simulateMT(mesh, sigma, frequency, rtype="app_res"): """ Compute apparent resistivity and phase at each frequency. 
Return apparent resistivity and phase for rtype="app_res", or impedance for rtype="impedance" """ # Angular frequency (rad/s) def omega(freq): return 2*np.pi*freq # make sure we are working with numpy arrays if type(frequency) is float: frequency = np.r_[frequency] # make it a list to loop over later if it is just a scalar elif type(frequency) is list: frequency = np.array(frequency) # Frequency independent pieces of the A matrix # Grad mesh.setCellGradBC([['dirichlet', 'dirichlet']]) # Setup boundary conditions Grad = mesh.cellGrad # Gradient matrix # MfMu mu = np.ones(mesh.nC)*mu_0 # magnetic permeability values for all cells Mmu = Utils.sdiag(mesh.aveCC2F * mu) # Mccsigma sigmahat = sigma # quasi-static assumption Msighat = Utils.sdiag(sigmahat) # Div Div = mesh.faceDiv # Divergence matrix # Right Hand Side B = mesh.cellGradBC # a matrix for boundary conditions Exbc = np.r_[0., 1.] # boundary values for Ex # Right-hand side rhs = np.r_[ -B*Exbc, np.zeros(mesh.nC) ] # loop over frequencies Zxy = [] for freq in frequency: # A-matrix A = sp.vstack([ sp.hstack([Grad, 1j*omega(freq)*Mmu]), # Top row of A matrix sp.hstack((Msighat, Div)) # Bottom row of A matrix ]) Ainv = Solver(A) # Factorize A matrix sol = Ainv*rhs # Solve A^-1 rhs = sol Ex = sol[:mesh.nC] # Extract Ex from solution vector u Hy = sol[mesh.nC:mesh.nC+mesh.nN] # Extract Hy from solution vector u Zxy.append(- 1./Hy[-1]) # Impedance at the surface # turn it into an array Zxy = np.array(Zxy) # return impedance or apparent resistivity and phase if rtype.lower() == "impedance": return Zxy elif rtype.lower() == "app_res": app_res = abs(Zxy)**2 / (mu_0*omega(frequency)) app_phase = np.rad2deg(np.arctan(Zxy.imag / Zxy.real)) return app_res, app_phase else: raise Exception, "rtype must be 'impedance' or 'app_res', not {}".format(rtype.lower()) %%time # Run the simulation over 25 frequencies from 1e-3 Hz to 100 Hz frequencies = np.logspace(-2, 3, 25) # for freq in frequencies: app_res, phase = simulateMT(mesh, 
sigma, frequencies) ``` For a half-space, the apparent resistivity should equal the true resistivity and the phase should be $45^\circ$. How did we do?? ``` fig, ax = plt.subplots(2, 1, figsize=(8, 3*2)) # plot apparent resistivity ax[0].loglog(frequencies, app_res, lw=2) ax[0].set_ylim(1./sigma_halfspace*np.r_[0.1, 10]) ax[0].set_ylabel("$ \\rho_a (\Omega$-m)", fontsize=14) # plot phase ax[1].semilogx(frequencies, phase, lw=2) ax[1].set_ylim(np.r_[0., 90.]) ax[1].grid(which="both", linewidth=0.4) ax[1].set_xlabel("frequency (Hz)", fontsize=14) ax[1].set_ylabel("$\phi (^\circ)$", fontsize=14) for a in ax: a.invert_xaxis() # highest frequencies see the near surface, lower frequencies see deeper a.set_xlabel("frequency (Hz)", fontsize=14) a.grid(which="both", linewidth=0.4) plt.tight_layout() ``` # Discretization, the Gory Details. If you want to skip this section, we won't judge! To numerically solve Maxwell's equations, we need to first discretize them so they are represented on a mesh. We will take a finite difference approach for this example. Since we are solving for a 1D model, we will use a 1D mesh and leverage the Mesh class in SimPEG to build the operators (see http://docs.simpeg.xyz for docs). We show a very small mesh in the derivation so that it is meaningful to print out the matrices. When we go to solve, we will use a larger mesh. ``` cell_size = 1. # width of the cell in meters ncells = 4 # number of cells that make up our domain # define a Tensor Mesh dz = [(cell_size, ncells)] mesh = Mesh.TensorMesh([dz], x0='N') print( "This mesh has {nC} cells and {nF} faces. " "Each cell is {h}m wide".format( nC=mesh.nC, nF=mesh.nF, h=mesh.hx.min() # it is hx because SimPEG treats dimensions in the order (x, y, z), so if the mesh is 1D, we work with the first component ) ) ``` There are two places where we can discretize variables on a 1D mesh for the electromagnetic problem: cell centers and cell faces. 
``` fig, ax = plt.subplots(1,1, figsize=(8,3)) mesh.plotGrid(centers=True, faces=True, ax=ax) ax.invert_xaxis() # put the surface of the earth on the left. ax.set_xlabel('z (m)') ax.grid(which="both", linewidth=0.4) plt.legend(("centers", "faces")) ``` To count, we will use $$ i = 0, 1, 2, ..., N $$ to denote cell centers, so faces are at $i \pm 1/2$. To discretize our system of equations, we put the physical properties, $\sigma$, $\mu$, $\epsilon$ at cell centers and stagger the electric and magnetic fields so that $E_x$ is on cell centers and $H_y$ is at cell faces. Our physical properties are described by the discrete vectors $$ \boldsymbol{\sigma} = [\sigma_0, \sigma_1, \sigma_2, ..., \sigma_N]^\top $$ $$ \boldsymbol{\mu} = [\mu_0, \mu_1, \mu_2, ..., \mu_N]^\top $$ $$ \boldsymbol{\epsilon} = [\epsilon_0, \epsilon_1, \epsilon_2, ..., \epsilon_N]^\top $$ and $$ \boldsymbol{\hat{\sigma}} = \boldsymbol{\sigma} + \imath \omega \boldsymbol{\epsilon} $$ is also defined at cell centers. Our fields are described by the discrete vectors $$ \mathbf{e_x} = [e_0, e_1, e_2, ..., e_N]^\top $$ $$ \mathbf{h_y} = [h_{-1/2}, h_{1/2}, h_{1+1/2}, h_{2+1/2}, ..., h_{N+1/2}]^\top $$ ### Discretizing Ampere's law Lets start by exmining Ampere's law (the second equation), $$-\frac{\partial H_y}{\partial z} + \hat{\sigma} E_x = 0$$ To approximate the derivative of $H_y$ (which is defined on faces) with respect to $z$, we use centered differences, so $$ \frac{\partial H_y}{\partial z} \bigg\rvert_i \simeq \frac{h_{i+1/2} - h_{i-1/2}}{\Delta z_i} $$ where $\Delta z_i$ is the width of the cell, and the approximation of the derivative lands on the cell center. We repeat this operation for each cell in our mesh. You could do this in a for loop, but it is often benificial to work with the matrix form, so we will do that here. The differential operator matrix that takes the derivative of a variable defined on faces is the face divergence operator. 
$$ \frac{\partial H_y}{\partial z} \simeq \mathbf{Div} ~\mathbf{h_y} $$ ``` Div = mesh.faceDiv print(Div.todense()) # operators are stored as sparse matrices in SimPEG ``` Since the physical properties $\boldsymbol{\hat{\sigma}}$ is defined at cell centers which is in the same location as $E_x$, we can simply multiply them. In matrix form, we use a diagonal matrix, $$\mathbf{M^{cc}_{\boldsymbol{\hat{\sigma}}}} = \mathbf{diag}(\boldsymbol{\hat{\sigma}})$$ so the product is given by $$ \hat{\sigma} E_x \simeq \mathbf{M^{cc}_{\boldsymbol{\hat{\sigma}}}} ~\mathbf{e_x} $$ in the example that follows, we will assume that $\sigma \gg \imath \omega \epsilon$, so $\hat{\sigma} \simeq \sigma$. In the more general implementation later on in the tutorial, we will use the full definition of $\hat{\sigma}$ ``` sigma = 1e-1 * np.ones(mesh.nC) Mcc_sigma = Utils.sdiag(sigma) print(Mcc_sigma.todense()) ``` So we have taken $$-\frac{\partial H_y}{\partial z} + \hat{\sigma} E_x = 0$$ and discretized to $$ - \mathbf{Div} ~ \mathbf{h_y} + \mathbf{M^{cc}_{\boldsymbol{\hat{\sigma}}}} ~ \mathbf{e_x} = \mathbf{0} $$ ### Discretizing Faraday's Law Next, we examine Faraday's law: $$ \frac{\partial E_x}{\partial z} + \imath \omega \mu H_y = 0 $$ Over one cell, the discrete approximation of the derivative of $E_x$ with respect to $z$ is $$ \frac{\partial E_x}{\partial z}\bigg\rvert_{i+1/2} = \frac{e_{i+1} - e_{i}}{\Delta z_{i+1/2}} $$ where $\Delta z_{1+1/2}$ is the distance (m) from the cell center $z_{i}$ to $z_{i+1}$. Notice here that we are going from cell centers to cell faces. 
So in this case we need to handle the boundary conditions, what do we do at the top and the bottom: $$ \frac{\partial E_x}{\partial z}\bigg\rvert_{-1/2}, \quad \frac{\partial E_x}{\partial z}\bigg\rvert_{nC+1/2} $$ we somehow need to define "ghost points" $e_{-1}$ and $e_{N+1}$ so that we can solve $$ \frac{\partial E_x}{\partial z}\bigg\rvert_{-1/2} = \frac{e_{0} - e_{-1}}{\Delta z_{-1/2}} $$ and $$ \frac{\partial E_x}{\partial z}\bigg\rvert_{N+1/2} = \frac{e_{N+1} - e_{N}}{\Delta z_{N+1/2}} $$ #### Boundary Conditions Lets start with the bottom boundary - we know that MT fields and fluxes are diffusive and decay as they travel through conductive media, so if our boundary is sufficiently far away $$E_x (z=-\infty) = 0$$ Clearly we can't discretize to infinity... but we know approximately how quickly the fields decay, this is captured by the skin depth $$ \delta \simeq \frac{500}{\sqrt{\sigma f}} $$ So as long as we define our mesh such that we are a few skin depths from the surface, then we can safely assume that the fields will have decayed to zero (dirichlet boundary condition). 
In our discrete world, this means that we want to enforce $$ E_x \big|_{nC+{1/2}} = 0 $$ The elements of $e$ are defined on cell centers and our boundary is a face, so we choose our ghost point $e_N$ such that when we average across the boundary, the average is 0, eg $$ \frac{1}{2} (e_{N-1} + e_{N}) = 0 $$ which means $$ e_N = - e_{N-1} $$ and our discrete approximation of the derivative at this boundary is $$ \frac{\partial E_x}{\partial z}\bigg\rvert_{N+1/2} = \frac{e_{N+1} - e_{N}}{\Delta z_{N+1/2}} = \frac{-2 e_{N}}{\Delta z_{N+1/2}} $$ At the top boundary is where our incoming plane wave is, so we specify an electric field at the surface of $$E_x (z=0) = 1$$ So this means we want to define our ghost point $e_{-1}$ such that $$ \frac{1}{2}(e_{-1} + e_0) = 1 $$ or $$ (e_{-1}) = 2 - e_0 $$ and the derivative is $$ \frac{\partial E_x}{\partial z}\bigg\rvert_{-1/2} = \frac{e_{0} - e_{-1}}{\Delta z_{-1/2}} = \underbrace{\frac{2 e_{0}}{\Delta z_{-1/2}}}_{\text{due to dirichlet BC}} - \underbrace{\frac{2}{\Delta z_{-1/2}}}_{\text{due to non-homogeneous BC}} $$ For conveienence, when we discretize, we first employ dirichlet boundary conditions on each boundary, and add the second term, due to a non-homogeneous boundary condition. The differential operator matrix that takes the derivative of a variable defined on faces is the cell gradient operator, so the discrete derivative of $E_x$ is given by $$ \frac{\partial E_x}{\partial z} \simeq \mathbf{Grad} ~ \mathbf{e_x} + \mathbf{B} ~ \mathbf{e_x}^{BC} $$ where $\mathbf{Grad}$ includes dirichlet boundary conditions, and $\mathbf{B}$ is a $\text{nC}\times2$ matrix that accounts for the non-homogeneous boundary conditions ``` # Grad matrix with dirichlet boundary conditions mesh.setCellGradBC([['dirichlet', 'dirichlet']]) # set the boundary conditions Grad = mesh.cellGrad print(Grad.todense()) # deal with the boundary conditions ex_bc = np.r_[0., 1.] 
# bottom boundary, fields decay to zero, top is source B = mesh.cellGradBC # a matrix for boundary conditions print(B.todense()) # B * e_BC describes what we need to add to Grad e in order to addount for # the boundary conditions print(B*ex_bc) ``` The last piece we need to define is how to take the product $\imath \omega \mu H_y$. $\imath$ and $\omega$ are scalars, so they are easy. The tricky part is $\mu H_y$ since $\mathbf{\mu}$ is defined at cell centers (there are $\text{nC}$ of them) and $\mathbf{h}$ is at faces (there are $\text{nC+1}$ of them). So to take this product, we will average the magnetic permeability to faces, and again stick it in a diagonal matrix $$\mathbf{M^{f}_{\mu}} = \mathbf{diag}(\mathbf{Av^{cc2f} \mathbf{\mu}})$$ so the product is then $$ \imath\omega\mu H_y \simeq \imath\omega\mathbf{M^{f}_{\mu}} ~\mathbf{h_y} $$ ``` # Averaging matrix AvCC2F = mesh.aveCC2F print(AvCC2F.todense()) mu = mu_0*np.ones(mesh.nC) Mfmu = Utils.sdiag(AvCC2F * mu) print(Mfmu.todense()) ``` So we have taken Faraday's law $$ \frac{\partial E_x}{\partial z} + \imath \omega \mu H_y = 0 $$ and arrived at the discrete system $$ \mathbf{Grad} ~ \mathbf{e_x} + \mathbf{B} ~ \mathbf{e_x}^{BC} + \imath\omega \mathbf{M^f_\mu} \mathbf{h_y} = 0 $$ since the boundary conditions are known, we can move them to the right hand side $$ \mathbf{Grad} ~ \mathbf{e_x} + \imath\omega \mathbf{M^f_\mu} \mathbf{h_y} = - \mathbf{B} ~ \mathbf{e_x}^{BC} $$ ### Two equations, Two unknowns Our discrete Maxwell system is $$ \mathbf{Grad} ~ \mathbf{e_x} + \imath\omega \mathbf{M^f_\mu} \mathbf{h_y} = - \mathbf{B} ~ \mathbf{e_x}^{BC} $$ $$ - \mathbf{Div} ~ \mathbf{h_y} + \mathbf{M^{cc}_{\boldsymbol{\hat{\sigma}}}} ~ \mathbf{e_x} = \mathbf{0} $$ For convienence, lets re-arrage... 
$$ \mathbf{Grad} ~ \mathbf{e_x} + \imath\omega \mathbf{M^f_\mu} \mathbf{h_y} = - \mathbf{B} ~ \mathbf{e_x}^{BC} $$ $$ \mathbf{M^{cc}_{\boldsymbol{\hat{\sigma}}}} ~ \mathbf{e_x} - \mathbf{Div} ~ \mathbf{h_y} = \mathbf{0} $$ and assemble into a single matrix system $$ \underbrace{ \begin{bmatrix} \mathbf{Grad} & \imath \omega \mathbf{M}^{f2cc}_{\mu} \\[0.3em] \mathbf{M}^{cc}_{\hat{\sigma}} & \mathbf{Div} \\[0.3em] \end{bmatrix} }_{\mathbf{A}} \underbrace{ \begin{bmatrix} \mathbf{e}_x \\[0.3em] \mathbf{h}_y \\[0.3em] \end{bmatrix} }_{\mathbf{u}} = \underbrace{ \begin{bmatrix} - \mathbf{B}\mathbf{E}_x^{BC} \\[0.3em] \boldsymbol{0} \\[0.3em] \end{bmatrix} }_{\mathbf{rhs}} $$ with - $\mathbf{e}_x$: Discrete $E_x$ $[\text{nC} \times 1]$ - $\mathbf{e}_y$: Dicrete $H_x$ $[(\text{nC}+1) \times 1]$ - $ \mathbf{Grad}$: Discrete gradient operator with dirichlet boundary conditions $[\text{nC} \times (\text{nC}+1)]$ - $ \mathbf{Div}$: Discrete divergence operator $[(\text{nC}+1) \times \text{nC}]$ - $\mathbf{M}^{f}_{\boldsymbol{\mu}} = \mathbf{diag}(\mathbf{Av^{cc2f}} \boldsymbol{\mu})$ $[(\text{nC}+1) \times (\text{nC}+1)]$ - $\mathbf{M}^{cc}_{\boldsymbol{\hat{\sigma}}} = \mathbf{diag}(\boldsymbol{\hat{\sigma}})$ $[\text{nC} \times \text{nC}]$ - $\mathbf{B} \mathbf{e_x}^{BC}$ handles the boundary conditions ## Now we have all of the pieces Here, lets create a larger mesh, and we can go ahead and asseble the system of equations to solve ``` sigma_halfspace = 1e-2 freq = 1 cs = 39. # core cell size npad = 25 # number of padding cells ncz = 100 # number of core cells # define a tensor mesh hz = [(cs, npad, -1.3), (cs, ncz)] mesh = Mesh.TensorMesh([hz], x0='N') # put the origin at the surface print( "The mesh extends {:1.1e}m, is that far enough? 
(should be at least {:1.1e}m away)".format( mesh.hx.sum(), skin_depth(sigma_halfspace, freq) ) ) # physical properties sigma = np.ones(mesh.nC)*sigma_halfspace # conductivity values for all cells mu = np.ones(mesh.nC)*mu_0 # magnetic permeability values for all cells epsilon = np.ones(mesh.nC)*epsilon_0 # dielectric constant values for all cells # Grad mesh.setCellGradBC([['dirichlet', 'dirichlet']]) # Setup boundary conditions Grad = mesh.cellGrad # Gradient matrix # MfMu Mmu = Utils.sdiag(mesh.aveCC2F * mu) # Mccsigma Msighat = Utils.sdiag(sigmahat) # Div Div = mesh.faceDiv # Divergence matrix # Right Hand Side B = mesh.cellGradBC # a matrix for boundary conditions Exbc = np.r_[0., 1.] # boundary values for Ex # Assemble the matrix # A-matrix A = sp.vstack([ sp.hstack([Grad, 1j*omega*Mmu]), # Top row of A matrix sp.hstack((Msighat, Div)) # Bottom row of A matrix ]) # Right-hand side rhs = np.r_[ -B*Exbc, np.zeros(mesh.nC) ] %%time Ainv = Solver(A) # Factorize A matrix sol = Ainv*rhs # Solve A^-1 rhs = sol Ex = sol[:mesh.nC] # Extract Ex from solution vector u Hy = sol[mesh.nC:mesh.nC+mesh.nN] # Extract Hy from solution vector u Zxy = - 1./Hy[-1] # Impedance at the surface print("Impedance: {:1.1e} + {:1.1e}i".format(Zxy.real, Zxy.imag)) print( "or in terms of Amplidude: {:1.1e} and phase: {:1.1f} degrees".format( np.absolute(Zxy), np.rad2deg(np.arctan(Zxy.imag / Zxy.real)) ) ) ```
github_jupyter
``` #IMPORT SEMUA LIBARARY #IMPORT LIBRARY PANDAS import pandas as pd #IMPORT LIBRARY UNTUK POSTGRE from sqlalchemy import create_engine import psycopg2 #IMPORT LIBRARY CHART from matplotlib import pyplot as plt from matplotlib import style #IMPORT LIBRARY BASE PATH import os import io #IMPORT LIBARARY PDF from fpdf import FPDF #IMPORT LIBARARY CHART KE BASE64 import base64 #IMPORT LIBARARY EXCEL import xlsxwriter #FUNGSI UNTUK MENGUPLOAD DATA DARI CSV KE POSTGRESQL def uploadToPSQL(columns, table, filePath, engine): #FUNGSI UNTUK MEMBACA CSV df = pd.read_csv( os.path.abspath(filePath), names=columns, keep_default_na=False ) #APABILA ADA FIELD KOSONG DISINI DIFILTER df.fillna('') #MENGHAPUS COLUMN YANG TIDAK DIGUNAKAN del df['kategori'] del df['jenis'] del df['pengiriman'] del df['satuan'] #MEMINDAHKAN DATA DARI CSV KE POSTGRESQL df.to_sql( table, engine, if_exists='replace' ) #DIHITUNG APABILA DATA YANG DIUPLOAD BERHASIL, MAKA AKAN MENGEMBALIKAN KELUARAN TRUE(BENAR) DAN SEBALIKNYA if len(df) == 0: return False else: return True #FUNGSI UNTUK MEMBUAT CHART, DATA YANG DIAMBIL DARI DATABASE DENGAN MENGGUNAKAN ORDER DARI TANGGAL DAN JUGA LIMIT #DISINI JUGA MEMANGGIL FUNGSI MAKEEXCEL DAN MAKEPDF def makeChart(host, username, password, db, port, table, judul, columns, filePath, name, subjudul, limit, negara, basePath): #TEST KONEKSI DATABASE try: #KONEKSI KE DATABASE connection = psycopg2.connect(user=username,password=password,host=host,port=port,database=db) cursor = connection.cursor() #MENGAMBL DATA DARI TABLE YANG DIDEFINISIKAN DIBAWAH, DAN DIORDER DARI TANGGAL TERAKHIR #BISA DITAMBAHKAN LIMIT SUPAYA DATA YANG DIAMBIL TIDAK TERLALU BANYAK DAN BERAT postgreSQL_select_Query = "SELECT * FROM "+table+" ORDER BY tanggal ASC LIMIT " + str(limit) cursor.execute(postgreSQL_select_Query) mobile_records = cursor.fetchall() uid = [] lengthx = [] lengthy = [] #MELAKUKAN LOOPING ATAU PERULANGAN DARI DATA YANG SUDAH DIAMBIL #KEMUDIAN DATA TERSEBUT DITEMPELKAN KE VARIABLE DIATAS 
INI for row in mobile_records: uid.append(row[0]) lengthx.append(row[1]) if row[2] == "": lengthy.append(float(0)) else: lengthy.append(float(row[2])) #FUNGSI UNTUK MEMBUAT CHART #bar style.use('ggplot') fig, ax = plt.subplots() #MASUKAN DATA ID DARI DATABASE, DAN JUGA DATA TANGGAL ax.bar(uid, lengthy, align='center') #UNTUK JUDUL CHARTNYA ax.set_title(judul) ax.set_ylabel('Total') ax.set_xlabel('Tanggal') ax.set_xticks(uid) #TOTAL DATA YANG DIAMBIL DARI DATABASE, DIMASUKAN DISINI ax.set_xticklabels((lengthx)) b = io.BytesIO() #CHART DISIMPAN KE FORMAT PNG plt.savefig(b, format='png', bbox_inches="tight") #CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64 barChart = base64.b64encode(b.getvalue()).decode("utf-8").replace("\n", "") #CHART DITAMPILKAN plt.show() #line #MASUKAN DATA DARI DATABASE plt.plot(lengthx, lengthy) plt.xlabel('Tanggal') plt.ylabel('Total') #UNTUK JUDUL CHARTNYA plt.title(judul) plt.grid(True) l = io.BytesIO() #CHART DISIMPAN KE FORMAT PNG plt.savefig(l, format='png', bbox_inches="tight") #CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64 lineChart = base64.b64encode(l.getvalue()).decode("utf-8").replace("\n", "") #CHART DITAMPILKAN plt.show() #pie #UNTUK JUDUL CHARTNYA plt.title(judul) #MASUKAN DATA DARI DATABASE plt.pie(lengthy, labels=lengthx, autopct='%1.1f%%', shadow=True, startangle=180) plt.axis('equal') p = io.BytesIO() #CHART DISIMPAN KE FORMAT PNG plt.savefig(p, format='png', bbox_inches="tight") #CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64 pieChart = base64.b64encode(p.getvalue()).decode("utf-8").replace("\n", "") #CHART DITAMPILKAN plt.show() #MENGAMBIL DATA DARI CSV YANG DIGUNAKAN SEBAGAI HEADER DARI TABLE UNTUK EXCEL DAN JUGA PDF header = pd.read_csv( os.path.abspath(filePath), names=columns, keep_default_na=False ) #MENGHAPUS COLUMN YANG TIDAK DIGUNAKAN header.fillna('') del header['tanggal'] del header['total'] #MEMANGGIL FUNGSI EXCEL makeExcel(mobile_records, header, name, limit, basePath) 
#MEMANGGIL FUNGSI PDF makePDF(mobile_records, header, judul, barChart, lineChart, pieChart, name, subjudul, limit, basePath) #JIKA GAGAL KONEKSI KE DATABASE, MASUK KESINI UNTUK MENAMPILKAN ERRORNYA except (Exception, psycopg2.Error) as error : print (error) #KONEKSI DITUTUP finally: if(connection): cursor.close() connection.close() #FUNGSI MAKEEXCEL GUNANYA UNTUK MEMBUAT DATA YANG BERASAL DARI DATABASE DIJADIKAN FORMAT EXCEL TABLE F2 #PLUGIN YANG DIGUNAKAN ADALAH XLSXWRITER def makeExcel(datarow, dataheader, name, limit, basePath): #MEMBUAT FILE EXCEL workbook = xlsxwriter.Workbook(basePath+'jupyter/BLOOMBERG/WorldData/excel/'+name+'.xlsx') #MENAMBAHKAN WORKSHEET PADA FILE EXCEL TERSEBUT worksheet = workbook.add_worksheet('sheet1') #SETINGAN AGAR DIBERIKAN BORDER DAN FONT MENJADI BOLD row1 = workbook.add_format({'border': 2, 'bold': 1}) row2 = workbook.add_format({'border': 2}) #MENJADIKAN DATA MENJADI ARRAY data=list(datarow) isihead=list(dataheader.values) header = [] body = [] #LOOPING ATAU PERULANGAN, KEMUDIAN DATA DITAMPUNG PADA VARIABLE DIATAS for rowhead in dataheader: header.append(str(rowhead)) for rowhead2 in datarow: header.append(str(rowhead2[1])) for rowbody in isihead[1]: body.append(str(rowbody)) for rowbody2 in data: body.append(str(rowbody2[2])) #MEMASUKAN DATA DARI VARIABLE DIATAS KE DALAM COLUMN DAN ROW EXCEL for col_num, data in enumerate(header): worksheet.write(0, col_num, data, row1) for col_num, data in enumerate(body): worksheet.write(1, col_num, data, row2) #FILE EXCEL DITUTUP workbook.close() #FUNGSI UNTUK MEMBUAT PDF YANG DATANYA BERASAL DARI DATABASE DIJADIKAN FORMAT EXCEL TABLE F2 #PLUGIN YANG DIGUNAKAN ADALAH FPDF def makePDF(datarow, dataheader, judul, bar, line, pie, name, subjudul, lengthPDF, basePath): #FUNGSI UNTUK MENGATUR UKURAN KERTAS, DISINI MENGGUNAKAN UKURAN A4 DENGAN POSISI LANDSCAPE pdf = FPDF('L', 'mm', [210,297]) #MENAMBAHKAN HALAMAN PADA PDF pdf.add_page() #PENGATURAN UNTUK JARAK PADDING DAN JUGA UKURAN FONT 
pdf.set_font('helvetica', 'B', 20.0) pdf.set_xy(145.0, 15.0) #MEMASUKAN JUDUL KE DALAM PDF pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=judul, border=0) #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING pdf.set_font('arial', '', 14.0) pdf.set_xy(145.0, 25.0) #MEMASUKAN SUB JUDUL KE PDF pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=subjudul, border=0) #MEMBUAT GARIS DI BAWAH SUB JUDUL pdf.line(10.0, 30.0, 287.0, 30.0) pdf.set_font('times', '', 10.0) pdf.set_xy(17.0, 37.0) #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING pdf.set_font('Times','',10.0) #MENGAMBIL DATA HEADER PDF YANG SEBELUMNYA SUDAH DIDEFINISIKAN DIATAS datahead=list(dataheader.values) pdf.set_font('Times','B',12.0) pdf.ln(0.5) th1 = pdf.font_size #MEMBUAT TABLE PADA PDF, DAN MENAMPILKAN DATA DARI VARIABLE YANG SUDAH DIKIRIM pdf.cell(100, 2*th1, "Kategori", border=1, align='C') pdf.cell(177, 2*th1, datahead[0][0], border=1, align='C') pdf.ln(2*th1) pdf.cell(100, 2*th1, "Jenis", border=1, align='C') pdf.cell(177, 2*th1, datahead[0][1], border=1, align='C') pdf.ln(2*th1) pdf.cell(100, 2*th1, "Pengiriman", border=1, align='C') pdf.cell(177, 2*th1, datahead[0][2], border=1, align='C') pdf.ln(2*th1) pdf.cell(100, 2*th1, "Satuan", border=1, align='C') pdf.cell(177, 2*th1, datahead[0][3], border=1, align='C') pdf.ln(2*th1) #PENGATURAN PADDING pdf.set_xy(17.0, 75.0) #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING pdf.set_font('Times','B',11.0) data=list(datarow) epw = pdf.w - 2*pdf.l_margin col_width = epw/(lengthPDF+1) #PENGATURAN UNTUK JARAK PADDING pdf.ln(0.5) th = pdf.font_size #MEMASUKAN DATA HEADER YANG DIKIRIM DARI VARIABLE DIATAS KE DALAM PDF pdf.cell(50, 2*th, str("Negara"), border=1, align='C') for row in data: pdf.cell(40, 2*th, str(row[1]), border=1, align='C') pdf.ln(2*th) #MEMASUKAN DATA ISI YANG DIKIRIM DARI VARIABLE DIATAS KE DALAM PDF pdf.set_font('Times','B',10.0) pdf.set_font('Arial','',9) pdf.cell(50, 2*th, negara, border=1, align='C') for row in data: pdf.cell(40, 2*th, 
str(row[2]), border=1, align='C') pdf.ln(2*th) #MENGAMBIL DATA CHART, KEMUDIAN CHART TERSEBUT DIJADIKAN PNG DAN DISIMPAN PADA DIRECTORY DIBAWAH INI #BAR CHART bardata = base64.b64decode(bar) barname = basePath+'jupyter/BLOOMBERG/WorldData/img/'+name+'-bar.png' with open(barname, 'wb') as f: f.write(bardata) #LINE CHART linedata = base64.b64decode(line) linename = basePath+'jupyter/BLOOMBERG/WorldData/img/'+name+'-line.png' with open(linename, 'wb') as f: f.write(linedata) #PIE CHART piedata = base64.b64decode(pie) piename = basePath+'jupyter/BLOOMBERG/WorldData/img/'+name+'-pie.png' with open(piename, 'wb') as f: f.write(piedata) #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING pdf.set_xy(17.0, 75.0) col = pdf.w - 2*pdf.l_margin widthcol = col/3 #MEMANGGIL DATA GAMBAR DARI DIREKTORY DIATAS pdf.image(barname, link='', type='',x=8, y=100, w=widthcol) pdf.set_xy(17.0, 75.0) col = pdf.w - 2*pdf.l_margin pdf.image(linename, link='', type='',x=103, y=100, w=widthcol) pdf.set_xy(17.0, 75.0) col = pdf.w - 2*pdf.l_margin pdf.image(piename, link='', type='',x=195, y=100, w=widthcol) pdf.ln(2*th) #MEMBUAT FILE PDF pdf.output(basePath+'jupyter/BLOOMBERG/WorldData/pdf/'+name+'.pdf', 'F') #DISINI TEMPAT AWAL UNTUK MENDEFINISIKAN VARIABEL VARIABEL SEBELUM NANTINYA DIKIRIM KE FUNGSI #PERTAMA MANGGIL FUNGSI UPLOADTOPSQL DULU, KALAU SUKSES BARU MANGGIL FUNGSI MAKECHART #DAN DI MAKECHART MANGGIL FUNGSI MAKEEXCEL DAN MAKEPDF #DEFINISIKAN COLUMN BERDASARKAN FIELD CSV columns = [ "kategori", "jenis", "tanggal", "total", "pengiriman", "satuan", ] #UNTUK NAMA FILE name = "WorldData1_4" #VARIABLE UNTUK KONEKSI KE DATABASE host = "localhost" username = "postgres" password = "1234567890" port = "5432" database = "bloomberg_WorldData" table = name.lower() #JUDUL PADA PDF DAN EXCEL judul = "World Data" subjudul = "Badan Perencanaan Pembangunan Nasional" #LIMIT DATA UNTUK SELECT DI DATABASE limitdata = int(8) #NAMA NEGARA UNTUK DITAMPILKAN DI EXCEL DAN PDF negara = "Indonesia" #BASE PATH 
DIRECTORY basePath = 'C:/Users/ASUS/Documents/bappenas/' #FILE CSV filePath = basePath+ 'data mentah/BLOOMBERG/WorldData/' +name+'.csv'; #KONEKSI KE DATABASE engine = create_engine('postgresql://'+username+':'+password+'@'+host+':'+port+'/'+database) #MEMANGGIL FUNGSI UPLOAD TO PSQL checkUpload = uploadToPSQL(columns, table, filePath, engine) #MENGECEK FUNGSI DARI UPLOAD PSQL, JIKA BERHASIL LANJUT MEMBUAT FUNGSI CHART, JIKA GAGAL AKAN MENAMPILKAN PESAN ERROR if checkUpload == True: makeChart(host, username, password, database, port, table, judul, columns, filePath, name, subjudul, limitdata, negara, basePath) else: print("Error When Upload CSV") ```
github_jupyter
``` from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) import pandas df = pandas.read_excel("resources/Goddess/goddess of everything else essays.xlsx") def get_end_row_num(): for row_num in range(df.shape[0]): if df.iloc[row_num,0] == 'END': end_row_num = row_num print(end_row_num) break return end_row_num def normalize_name(url_or_html_name): if url_or_html_name.endswith('html'): return url_or_html_name[:-5] elif url_or_html_name.startswith('https://slate'): return url_or_html_name[38:-1] else: return url_or_html_name.lower().replace(' ','-') def url_to_path(ssc_url): return 'output/ssc_fiction_html.processed_no_typos/' + ssc_url[37:-1]+'.html' def md_to_path(ssc_md): return 'non-ssc_htmls/' + ssc_md[:-2] + 'html' def get_files(): files = [] for row_num in range(end_row_num): row = df.iloc[row_num,:] if isinstance(row[0],str) and row[0].startswith('Section: '): files.append(row[0][9:]) continue if not isinstance(row[3],str): continue if row[3].startswith('https://slate'): files.append(url_to_path(row[3])) #path elif row[3].endswith('.md'): files.append(md_to_path(row[3])) #path return files # def form_sec_html(sec_name, sec_num): # def int_to_Roman(num): # val = [ # 1000, 900, 500, 400, # 100, 90, 50, 40, # 10, 9, 5, 4, # 1 # ] # syb = [ # "M", "CM", "D", "CD", # "C", "XC", "L", "XL", # "X", "IX", "V", "IV", # "I" # ] # roman_num = '' # i = 0 # while num > 0: # for _ in range(num // val[i]): # roman_num += syb[i] # num -= val[i] # i += 1 # return roman_num # sec_num_and_name = f'{int_to_Roman(sec_num)}. 
{sec_name}' # return '<style> h1 {text-align: center;} p {text-align: center;} div {text-align: center;}</style><h1>' + sec_num_and_name + '</h1>' def yield_htmls(files): for p in files: yield p if p.endswith('.html') else 'resources/Goddess/non-ssc_htmls/'+normalize_name(p) + '.html' # def remove_hyperlinks_from_htmls(): # hyperlink_str = '<link href="../styles/stylesheet.css" rel="stylesheet" type="text/css"/>' # for p in yield_htmls(files): # with open(p,'r') as r: # text = r.read() # if text.endswith(hyperlink_str): # continue # with open(p,'a') as a: # a.write('\n'+hyperlink_str) end_row_num = get_end_row_num() files = get_files() # remove_hyperlinks_from_htmls() !ls output/html.processed files # crawled_html_paths = ['output/html.processed/'+basename for basename in os.listdir('output/html.processed')] def create_epub_from_html_cmd(files): html_paths = ' '.join([p if p.endswith('.html') else 'resources/Goddess/non-ssc_htmls/'+normalize_name(p) + '.html' for p in files]) command = f"pandoc --from html -o ssc_secs_fic.epub --epub-metadata output/meta/slatestarcodex.fiction.xml {html_paths}" return command print(create_epub_from_html_cmd(files)) import ebooklib from ebooklib import epub ssc_book = epub.read_epub('ssc_secs_fic.epub') ssc_book.spine = ['nav'] + ssc_book.spine epub.write_epub('goddess.epub', ssc_book, {}) import re def fix_nav_xhtml(nav_xhtml,files): nav_xtml = nav_xhtml.replace('SSC Fiction','Table of Contents') #could use beautifulsoup, but don't really feel like it lis = re.findall('<li.*?</li>',nav_xhtml,re.DOTALL) cur_section = None file_num = -2 for li,file in zip(lis[:-1],files): file_num += 1 if not file.endswith('.html'): # print(file) nav_xhtml = nav_xhtml.replace(li,li[:-5]+'<ol>') if cur_section is not None: prev_li = lis[file_num] nav_xhtml = nav_xhtml.replace(prev_li,prev_li + '</ol></li>') cur_section = file nav_xhtml = nav_xhtml.replace(lis[0],'') nav_xhtml = re.sub('<nav epub:type="landmarks">.*</nav>','',nav_xhtml,flags=re.DOTALL) 
return nav_xhtml def fix_toc(book,files): for x in book.get_items(): if x.get_name() == 'nav.xhtml': fixed_nav = fix_nav_xhtml(x.content.decode('utf-8'),files) return fixed_nav # nav_xhtml = x.content.decode('utf-8') # x.set_content(fix_nav_xhtml(x.content.decode('utf-8'),files).encode('utf-8')) #Unfortunately, you need to open the epub in calibre and edit it #Go to the nav.xhtml and paste this improved toc print(fix_toc(ssc_book,files)) ok! after editing the toc in calibre, there are a couple more things you'll want to use calibre for 1. you can change the cover image by right clicking on it and going to 'edit metadata' 2. Most importantly, you'll want to convert to a pdf. Right click and go to 'convert book'. 2a. select pdf in the upper right 2b. under pdf output add page numbers, change the font to cambria (it's important to use a serif font), and set the custom size to 6 x 9 inches (for a small paperback) 2c. under page setup select default output profile. Also make the margins all 36-72 pt (make sure the sizing is consistent with the pdf you used to get page numbers!) Now that we have page numbers (ugh), we can add them to the toc, and then repeat the whole epub-to-pdf conversion margins - for ssc abridged, 36 on top and bottom and 54 on sides... files hyphenate_title('''G.K. Chesterton On AI Risk [An SSC reader working at an Oxford library stumbled across a previously Empyrean itself. Ezekiel saw wheels of spinning flame and reported them quite soberly; our modern wr''') def hyphenate_title(title): if title.startswith('The Goddess of Everything Else'): title = 'the-goddess-of-everything-else-2' elif title.startswith('[REPOST]'): title = 'repost-the-demiurges-older-brother' elif title.startswith('G.K. 
Chesterton'): title = 'g-k-chesterton-on-ai-risk' to_null = ['“','”',',','…',"'",'’',':','(',')','!'] for c in to_null: title = title.replace(c,'') to_hyphen = ['/',' ','\n'] for c in to_hyphen: title = title.replace(c,'-') return title.lower() import os import pdftotext def is_file_page(pdf_page,file): if file.endswith('.html'): return hyphenate_title(pdf_page).startswith(os.path.basename(file).replace('.html','')) else: return ' '.join(pdf_page.split(' ')[1:]).startswith(file) def get_page_nums_from_pdf(files,pdf_path='/Users/dawndrain/Downloads/SSC Fiction - Scott Alexander.pdf'): with open(pdf_path,'rb') as f: pdf = pdftotext.PDF(f) print(len(pdf)) page_num = 0 file_to_page_num = {} try: for file in files: # print(file) while not is_file_page(pdf[page_num],file): if file.find('Logician and the') > -1: print(pdf[page_num][:100]) page_num += 1 file_to_page_num[file] = page_num + 2 except: return file_to_page_num return file_to_page_num # file_to_page_num = get_page_nums_from_pdf(files[:-2] + [files[-1]]) file_to_page_num = get_page_nums_from_pdf(files=files,pdf_path='Slate Star Codex Abridged - no typos yes page numbers.pdf') def add_page_numbers_to_toc(book,files,file_to_page_num): for x in book.get_items(): if x.get_name() == 'nav.xhtml': nav_xhtml = x.content.decode('utf-8') better_nav = fix_nav_xhtml(nav_xhtml,file_to_page_num.keys()) hrefs = re.findall('<a href=.*?</a>',better_nav,re.DOTALL) for href,file in zip(hrefs,files): if file.endswith('html'): page_num = file_to_page_num[file] better_nav = better_nav.replace(href,href[:-4] + f'...{page_num-1}</a>') return better_nav # print(add_page_numbers_to_toc(ssc_book,files[:-2] + [files[-1]],file_to_page_num)) # with open('Slate Star Codex Abridged - no typos yes page numbers.pdf','rb') as f: # pdf = pdf = pdftotext.PDF(f) with open('Library of Scott Alexandria, pagenums no toc.pdf','rb') as f: pdf = pdf = pdftotext.PDF(f) pdf[10] from pandas import read_excel # df = read_excel("SSC abridged essay titles.xlsx") 
df = read_excel("Library of Scott Alexandria essay titles.xlsx") df df['Title'][0] df['Title'][5:20] from collections import defaultdict title_to_pagenums = defaultdict(list) import time start = time.time() for i,page in enumerate(pdf): if i < 7: continue page = ' '.join([line.strip() for line in page.strip().splitlines()]) for title in df['Title']: if not isinstance(title,str): continue if title.startswith('○ \xa0 '): title = title[len('○ \xa0 '):] if page.startswith(title): title_to_pagenums[title].append(i+1) print(time.time()-start) title_to_pagenums['The Meditation on Superweapons'] = [577] #don't double count for The Meditation on Superweapons and Bingo lol #the bottleneck is iterating over the pages of the pdf, having a quadratic for loop for each file is fine for i in range(20): print(pdf[i]) len("""Feminists think that women should be free from fear of rape, and that, if raped, no one should be able to excuse themselves with “well, she was asking for it”. But this is the same anti-violence principle as saying that the IRA shouldn’t throw nail-bombs through people’s windows or that, nail bombs having been thrown, the IRA can’t use as an excuse “Yeah, well, they were complicit with the evil British occupation, they deserved it.” Again, I feel like I’m defending this principle a whole lot more strongly and consistently than Andrew is. Feminists are, shall we say, divided about transgender people, but let’s allow that the correct solution is to respect their rights. When I was young and stupid, I used to believe that transgender was really, really dumb. That they were looking for attention or making it up or something along those lines. Luckily, since I was a classical liberal, my reaction to this mistake was – to not bother them, and to get very very angry at people who did bother them. I got upset with people trying to fire Phil Robertson for being homophobic even though homophobia is stupid. 
You better bet I also got upset with people trying to fire transgender people back when I thought transgender was stupid. And then I grew older and wiser and learned – hey, transgender isn’t stupid at all, they have very important reasons for what they do and go through and I was atrociously wrong. And I said a mea culpa. But it could have been worse. I didn’t like transgender people, and so I left them alone while still standing up for their rights. My epistemic structure failed gracefully. For anyone who’s not overconfident, and so who expects massive epistemic failure on a variety of important issues all the time, graceful failure modes are a really important feature for an epistemic structure to have. God only knows what Andrew would have done, if through bad luck he had accidentally gotten it into his head that transgender people are bad. From his own words, we know he wouldn’t be “pussyfooting around with debate-team nonsense”. I admit there are many feminist principles that cannot be derived from, or are even opposed to my own liberal principles. For example, some feminists have suggested that pornography be banned because it increases the likelihood of violence against women. Others suggest that research into gender differences should be banned, or at least we should stigmatize and harass the researchers, because any discoveries made might lend aid and comfort to sexists. To the first, I would point out that there is now strong evidence that pornography, especially violent objectifying pornography, very significantly decreases violence against women. I would ask them whether they’re happy that we did the nice liberal thing and waited until all the evidence came in so we could discuss it rationally, rather than immediately moving to harass and silence anyone taking the pro-pornography side. And to the second, well, we have a genuine disagreement. 
But I wonder whether they would prefer to discuss that disagreement reasonably, or whether we should both try to harass and destroy the other until one or both of us are too damaged to continue the struggle. And if feminists agree to have that reasonable discussion, but lose, I would tell them that they get a consolation prize. Having joined liberal society, they can be sure that no matter what those researchers find, I and all of their new liberal- society buddies will fight tooth and nail against anyone who uses any tiny differences those researchers find to challenge the central liberal belief that everyone of every gender has basic human dignity. Any victory for me is going""") 3697/582 3697 * 800 sum([len(page) for page in pdf]) sum([page.count(' ') + page.count('\n') for page in pdf]) # 2882879 / 5 # 576575 for Library of Scott Alexandria # 2372837 / 5 # 474567 for SSC Abridged def normalize_title(title): if not isinstance(title,str): return '' if title.startswith('○ \xa0 '): title = title[len('○ \xa0 '):] return title for title in df['Title']: if normalize_title(title) not in title_to_pagenums: print(title) pdf[736] title_to_pagenums for title,pagenums in title_to_pagenums.items(): if len(pagenums) != 1: print(title,pagenums) how to add to toc?? e.g. we want for the epub file index_split_001.html to take: <a class="pcalibre1 selflink pcalibre2" href="Archipelago-And-Atomic-Communitarianism.html">Archipelago and Atomic Communitarianism</a> to: <a class="pcalibre1 selflink pcalibre2" href="Archipelago-And-Atomic-Communitarianism.html">Archipelago and Atomic Communitarianism...56</a> could just do this eight times in calibre for the eight sections, ok def add_pagenums_to_toc_page(toc_page,title_to_pagenums): section_titles = ['Liberalism And its Enemies','Moloch','Culture War','Science and Pseudoscience','Psychiatry, Psychology, & Neurology','Medicine', 'Sociology and Economics','“Powers, complete mental revision, ultraintelligence, posthumanity…”', 'II. 
Probabilism', 'III. Science and Doubt','IV. Medicine, Therapy, and Human Enhancement','V. Introduction to Game Theory', 'VI. Promises and Principles','VII. Cognition and Association', 'VIII. Doing Good','IX. Liberty', 'X. Progress', 'XI. Social Justice', 'XII. Politicization', 'XIII. Competition and Cooperation', ] for title,pagenum in title_to_pagenums.items(): if title in section_titles: continue if isinstance(pagenum,list): assert len(pagenum) == 1 pagenum = pagenum[0] toc_page = toc_page.replace(f'{title}</a>',f'{title}...{pagenum}</a>') return toc_page toc_page = """<?xml version='1.0' encoding='utf-8'?> <html xmlns="http://www.w3.org/1999/xhtml" lang="en"> <head> <title>Table of Contents</title> <style type="text/css"> li { list-style-type: none; padding-left: 2em; margin-left: 0; } a { text-decoration: none; } a:hover { color: red; } </style> </head> <body id="calibre_generated_inline_toc"> <h1>Table of Contents</h1> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_001.html">I. 
Rationality and Rationalization</a></h2> <li><a href="scott-alexandria_split_002.html">Blue- and Yellow-Tinted Choices</a></li> <li><a href="scott-alexandria_split_004.html">The Apologist and the Revolutionary</a></li> <li><a href="scott-alexandria_split_005.html">Historical realism</a></li> <li><a href="scott-alexandria_split_006.html">Simultaneously Right and Wrong</a></li> <li><a href="scott-alexandria_split_007.html">You May Already Be A Sinner</a></li> <li><a href="scott-alexandria_split_008.html">Beware the Man of One Study</a></li> <li><a href="scott-alexandria_split_009.html">Debunked and Well-Refuted</a></li> <li><a href="scott-alexandria_split_010.html">How to Not Lose an Argument</a></li> <li><a href="scott-alexandria_split_011.html">The Least Convenient Possible World</a></li> <li><a href="scott-alexandria_split_012.html">Bayes for Schizophrenics: Reasoning in Delusional Disorders</a></li> <li><a href="scott-alexandria_split_013.html">Generalizing from One Example</a></li> <li><a href="scott-alexandria_split_014.html">Typical Mind and Politics</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_015.html">II. 
Probabilism</a></h2> <li><a href="scott-alexandria_split_016.html">Confidence Levels Inside and Outside an Argument</a></li> <li><a href="scott-alexandria_split_017.html">Schizophrenia and Geomagnetic Storms</a></li> <li><a href="scott-alexandria_split_018.html">Talking Snakes: A Cautionary Tale</a></li> <li><a href="scott-alexandria_split_019.html">Arguments from My Opponent Believes Something</a></li> <li><a href="scott-alexandria_split_020.html">Statistical Literacy Among Doctors Now Lower Than Chance</a></li> <li><a href="scott-alexandria_split_021.html">Techniques for Probability Estimates</a></li> <li><a href="scott-alexandria_split_022.html">On First Looking into Chapman’s “Pop Bayesianism”</a></li> <li><a href="scott-alexandria_split_023.html">Utilitarianism for Engineers</a></li> <li><a href="scott-alexandria_split_024.html">If It’s Worth Doing, It’s Worth Doing with Made-Up Statistics</a></li> <li><a href="scott-alexandria_split_025.html">Marijuana: Much More Than You Wanted to Know</a></li> <li><a href="scott-alexandria_split_026.html">Are You a Solar Deity?</a></li> <li><a href="scott-alexandria_split_027.html">The “Spot the Fakes” Test</a></li> <li><a href="scott-alexandria_split_028.html">Epistemic Learned Helplessness</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_029.html">III. Science and Doubt</a></h2> <li><a href="scott-alexandria_split_030.html">Google Correlate Does Not Imply Google Causation</a></li> <li><a href="scott-alexandria_split_031.html">Stop Confounding Yourself! 
Stop Confounding Yourself!</a></li> <li><a href="scott-alexandria_split_032.html">Effects of Vertical Acceleration on Wrongness</a></li> <li><a href="scott-alexandria_split_033.html">90% Of All Claims About The Problems With Medical Studies Are Wrong</a></li> <li><a href="scott-alexandria_split_034.html">Prisons are Built with Bricks of Law and Brothels with Bricks of Religion, But That Doesn’t Prove a Causal Relationship</a></li> <li><a href="scott-alexandria_split_035.html">Noisy Poll Results and the Reptilian Muslim Climatologists from Mars</a></li> <li><a href="scott-alexandria_split_036.html">Two Dark Side Statistics Papers</a></li> <li><a href="scott-alexandria_split_037.html">Alcoholics Anonymous: Much More Than You Wanted to Know</a></li> <li><a href="scott-alexandria_split_038.html">The Control Group Is Out Of Control</a></li> <li><a href="scott-alexandria_split_039.html">The Cowpox of Doubt</a></li> <li><a href="scott-alexandria_split_040.html">The Skeptic’s Trilemma</a></li> <li><a href="scott-alexandria_split_041.html">If You Can’t Make Predictions, You’re Still in a Crisis</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_042.html">IV. 
Medicine, Therapy, and Human Enhancement</a></h2> <li><a href="scott-alexandria_split_043.html">Scientific Freud</a></li> <li><a href="scott-alexandria_split_044.html">Sleep – Now by Prescription</a></li> <li><a href="scott-alexandria_split_045.html">In Defense of Psych Treatment for Attempted Suicide</a></li> <li><a href="scott-alexandria_split_046.html">Who By Very Slow Decay</a></li> <li><a href="scott-alexandria_split_047.html">Medicine, As Not Seen on TV</a></li> <li><a href="scott-alexandria_split_048.html">Searching for One-Sided Tradeoffs</a></li> <li><a href="scott-alexandria_split_049.html">Do Life Hacks Ever Reach Fixation?</a></li> <li><a href="scott-alexandria_split_050.html">Polyamory is Boring</a></li> <li><a href="scott-alexandria_split_051.html">Can You Condition Yourself?</a></li> <li><a href="scott-alexandria_split_052.html">Wirehead Gods on Lotus Thrones</a></li> <li><a href="scott-alexandria_split_053.html">Don’t Fear the Filter</a></li> <li><a href="scott-alexandria_split_054.html">Transhumanist Fables</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_055.html">V. 
Introduction to Game Theory</a></h2> <li><a href="scott-alexandria_split_056.html">Backward Reasoning Over Decision Trees</a></li> <li><a href="scott-alexandria_split_057.html">Nash Equilibria and Schelling Points</a></li> <li><a href="scott-alexandria_split_058.html">Introduction to Prisoners’ Dilemma</a></li> <li><a href="scott-alexandria_split_059.html">Real-World Solutions to Prisoners’ Dilemmas</a></li> <li><a href="scott-alexandria_split_060.html">Interlude for Behavioral Economics</a></li> <li><a href="scott-alexandria_split_061.html">What is Signaling, Really?</a></li> <li><a href="scott-alexandria_split_062.html">Bargaining and Auctions</a></li> <li><a href="scott-alexandria_split_063.html">Imperfect Voting Systems</a></li> <li><a href="scott-alexandria_split_064.html">Game Theory as a Dark Art</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_065.html">VI. Promises and Principles</a></h2> <li><a href="scott-alexandria_split_066.html">Beware Trivial Inconveniences</a></li> <li><a href="scott-alexandria_split_067.html">Time and Effort Discounting</a></li> <li><a href="scott-alexandria_split_068.html">Applied Picoeconomics</a></li> <li><a href="scott-alexandria_split_069.html">Schelling Fences on Slippery Slopes</a></li> <li><a href="scott-alexandria_split_070.html">Democracy is the Worst Form of Government Except for All the Others Except Possibly Futarchy</a></li> <li><a href="scott-alexandria_split_071.html">Eight Short Studies on Excuses</a></li> <li><a href="scott-alexandria_split_072.html">Revenge as Charitable Act</a></li> <li><a href="scott-alexandria_split_073.html">Would Your Real Preferences Please Stand Up?</a></li> <li><a href="scott-alexandria_split_074.html">Are Wireheads Happy?</a></li> <li><a href="scott-alexandria_split_075.html">Guilt: Another Gift Nobody Wants</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 
selflink pcalibre2" href="scott-alexandria-split_076.html">VII. Cognition and Association</a></h2> <li><a href="scott-alexandria_split_077.html">Diseased Thinking: Dissolving Questions about Disease</a></li> <li><a href="scott-alexandria_split_078.html">The Noncentral Fallacy — The Worst Argument in the World?</a></li> <li><a href="scott-alexandria_split_079.html">The Power of Positivist Thinking</a></li> <li><a href="scott-alexandria_split_080.html">When Truth Isn’t Enough</a></li> <li><a href="scott-alexandria_split_081.html">Ambijectivity</a></li> <li><a href="scott-alexandria_split_082.html">The Blue-Minimizing Robot</a></li> <li><a href="scott-alexandria_split_083.html">Basics of Animal Reinforcement</a></li> <li><a href="scott-alexandria_split_084.html">Wanting vs. Liking Revisited</a></li> <li><a href="scott-alexandria_split_085.html">Physical and Mental Behavior</a></li> <li><a href="scott-alexandria_split_086.html">Trivers on Self-Deception</a></li> <li><a href="scott-alexandria_split_087.html">Ego-Syntonic Thoughts and Values</a></li> <li><a href="scott-alexandria_split_088.html">Approving Reinforces Low-Effort Behaviors</a></li> <li><a href="scott-alexandria_split_089.html">To What Degree Do We Have Goals?</a></li> <li><a href="scott-alexandria_split_090.html">The Limits of Introspection</a></li> <li><a href="scott-alexandria_split_091.html">Secrets of the Eliminati</a></li> <li><a href="scott-alexandria_split_092.html">Tendencies in Reflective Equilibrium</a></li> <li><a href="scott-alexandria_split_093.html">Hansonian Optimism</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_094.html">VIII. 
Doing Good</a></h2> <li><a href="scott-alexandria_split_095.html">Newtonian Ethics</a></li> <li><a href="scott-alexandria_split_096.html">Efficient Charity: Do Unto Others…</a></li> <li><a href="scott-alexandria_split_097.html">The Economics of Art and the Art of Economics</a></li> <li><a href="scott-alexandria_split_098.html">A Modest Proposal</a></li> <li><a href="scott-alexandria_split_099.html">The Life Issue</a></li> <li><a href="scott-alexandria_split_100.html">What if Drone Warfare Had Come First?</a></li> <li><a href="scott-alexandria_split_101.html">Nefarious Nefazodone and Flashy Rare Side-Effects</a></li> <li><a href="scott-alexandria_split_102.html">The Consequentialism FAQ</a></li> <li><a href="scott-alexandria_split_103.html">Doing Your Good Deed for the Day</a></li> <li><a href="scott-alexandria_split_104.html">I Myself Am A Scientismist</a></li> <li><a href="scott-alexandria_split_105.html">Whose Utilitarianism?</a></li> <li><a href="scott-alexandria_split_106.html">Book Review: After Virtue</a></li> <li><a href="scott-alexandria_split_107.html">Read History of Philosophy Backwards</a></li> <li><a href="scott-alexandria_split_108.html">Virtue Ethics: Not Practically Useful Either</a></li> <li><a href="scott-alexandria_split_109.html">Last Thoughts on Virtue Ethics</a></li> <li><a href="scott-alexandria_split_110.html">Proving Too Much</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_111.html">IX. 
Liberty</a></h2> <li><a href="scott-alexandria_split_112.html">The Non-Libertarian FAQ (aka Why I Hate Your Freedom)</a></li> <li><a href="scott-alexandria_split_113.html">A Blessing in Disguise, Albeit a Very Good Disguise</a></li> <li><a href="scott-alexandria_split_114.html">Basic Income Guarantees</a></li> <li><a href="scott-alexandria_split_115.html">Book Review: The Nurture Assumption</a></li> <li><a href="scott-alexandria_split_116.html">The Death of Wages is Sin</a></li> <li><a href="scott-alexandria_split_117.html">Thank You For Doing Something Ambiguously Between Smoking And Not Smoking</a></li> <li><a href="scott-alexandria_split_118.html">Lies, Damned Lies, and Facebook (Part 1 of ∞)</a></li> <li><a href="scott-alexandria_split_119.html">The Life Cycle of Medical Ideas</a></li> <li><a href="scott-alexandria_split_120.html">Vote on Values, Outsource Beliefs</a></li> <li><a href="scott-alexandria_split_121.html">A Something Sort of Like Left-Libertarian-ist Manifesto</a></li> <li><a href="scott-alexandria_split_122.html">Plutocracy Isn’t About Money</a></li> <li><a href="scott-alexandria_split_123.html">Against Tulip Subsidies</a></li> <li><a href="scott-alexandria_split_124.html">SlateStarCodex Gives a Graduation Speech</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_125.html">X. 
Progress</a></h2> <li><a href="scott-alexandria_split_126.html">Intellectual Hipsters and Meta-Contrarianism</a></li> <li><a href="scott-alexandria_split_127.html">A Signaling Theory of Class x Politics Interaction</a></li> <li><a href="scott-alexandria_split_128.html">Reactionary Philosophy in an Enormous, Planet-Sized Nutshell</a></li> <li><a href="scott-alexandria_split_129.html">A Thrive/Survive Theory of the Political Spectrum</a></li> <li><a href="scott-alexandria_split_130.html">We Wrestle Not With Flesh And Blood, But Against Powers And Principalities</a></li> <li><a href="scott-alexandria_split_131.html">Poor Folks Do Smile… For Now</a></li> <li><a href="scott-alexandria_split_132.html">Apart from Better Sanitation and Medicine and Education and Irrigation and Public Health and Roads and Public Order, What Has Modernity Done for Us?</a></li> <li><a href="scott-alexandria_split_133.html">The Wisdom of the Ancients</a></li> <li><a href="scott-alexandria_split_134.html">Can Atheists Appreciate Chesterton?</a></li> <li><a href="scott-alexandria_split_135.html">Holocaust Good for You, Research Finds, But Frequent Taunting Causes Cancer in Rats</a></li> <li><a href="scott-alexandria_split_136.html">Public Awareness Campaigns</a></li> <li><a href="scott-alexandria_split_137.html">Social Psychology is a Flamethrower</a></li> <li><a href="scott-alexandria_split_138.html">Nature is Not a Slate. It’s a Series of Levers.</a></li> <li><a href="scott-alexandria_split_139.html">The Anti-Reactionary FAQ</a></li> <li><a href="scott-alexandria_split_140.html">The Poor You Will Always Have With You</a></li> <li><a href="scott-alexandria_split_141.html">Proposed Biological Explanations for Historical Trends in Crime</a></li> <li><a href="scott-alexandria_split_142.html">Society is Fixed, Biology is Mutable</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_143.html">XI. 
Social Justice</a></h2> <li><a href="scott-alexandria_split_144.html">Practically-a-Book Review: Dying to be Free</a></li> <li><a href="scott-alexandria_split_145.html">Drug Testing Welfare Users is a Sham, But Not for the Reasons You Think</a></li> <li><a href="scott-alexandria_split_146.html">The Meditation on Creepiness</a></li> <li><a href="scott-alexandria_split_147.html">The Meditation on Superweapons</a></li> <li><a href="scott-alexandria_split_148.html">The Meditation on the War on Applause Lights</a></li> <li><a href="scott-alexandria_split_149.html">The Meditation on Superweapons and Bingo</a></li> <li><a href="scott-alexandria_split_150.html">An Analysis of the Formalist Account of Power Relations in Democratic Societies</a></li> <li><a href="scott-alexandria_split_151.html">Arguments About Male Violence Prove Too Much</a></li> <li><a href="scott-alexandria_split_152.html">Social Justice for the Highly-Demanding-of-Rigor</a></li> <li><a href="scott-alexandria_split_153.html">Against Bravery Debates</a></li> <li><a href="scott-alexandria_split_154.html">All Debates Are Bravery Debates</a></li> <li><a href="scott-alexandria_split_155.html">A Comment I Posted on “What Would JT Do?”</a></li> <li><a href="scott-alexandria_split_156.html">We Are All MsScribe</a></li> <li><a href="scott-alexandria_split_157.html">The Spirit of the First Amendment</a></li> <li><a href="scott-alexandria_split_158.html">A Response to Apophemi on Triggers</a></li> <li><a href="scott-alexandria_split_159.html">Lies, Damned Lies, and Social Media: False Rape Accusations</a></li> <li><a href="scott-alexandria_split_160.html">In Favor of Niceness, Community, and Civilization</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_161.html">XII. 
Politicization</a></h2> <li><a href="scott-alexandria_split_162.html">Right is the New Left</a></li> <li><a href="scott-alexandria_split_163.html">Weak Men are Superweapons</a></li> <li><a href="scott-alexandria_split_164.html">You Kant Dismiss Universalizability</a></li> <li><a href="scott-alexandria_split_165.html">I Can Tolerate Anything Except the Outgroup</a></li> <li><a href="scott-alexandria_split_166.html">Five Case Studies on Politicization</a></li> <li><a href="scott-alexandria_split_167.html">Black People Less Likely</a></li> <li><a href="scott-alexandria_split_168.html">Nydwracu’s Fnords</a></li> <li><a href="scott-alexandria_split_169.html">All in All, Another Brick in the Motte</a></li> <li><a href="scott-alexandria_split_170.html">Ethnic Tension and Meaningless Arguments</a></li> <li><a href="scott-alexandria_split_171.html">Race and Justice: Much More Than You Wanted to Know</a></li> <li><a href="scott-alexandria_split_172.html">Framing for Light Instead of Heat</a></li> <li><a href="scott-alexandria_split_173.html">The Wonderful Thing About Triggers</a></li> <li><a href="scott-alexandria_split_174.html">Fearful Symmetry</a></li> <li><a href="scott-alexandria_split_175.html">Archipelago and Atomic Communitarianism</a></li> <h2 class="pcalibre1 pcalibre9 pcalibre2" id="calibre_pb_1"><a class="pcalibre1 selflink pcalibre2" href="scott-alexandria-split_176.html">XIII. 
Competition and Cooperation</a></h2> <li><a href="scott-alexandria_split_177.html">Galactic Core</a></li> <li><a href="scott-alexandria_split_178.html">Book Review: The Two-Income Trap</a></li> <li><a href="scott-alexandria_split_179.html">Just for Stealing a Mouthful of Bread</a></li> <li><a href="scott-alexandria_split_180.html">Meditations on Moloch</a></li> <li><a href="scott-alexandria_split_181.html">Misperceptions on Moloch</a></li> <li><a href="scott-alexandria_split_182.html">The Invisible Nation — Reconciling Utilitarianism and Contractualism</a></li> <li><a href="scott-alexandria_split_183.html">Freedom on the Centralized Web</a></li> <li><a href="scott-alexandria_split_184.html">Book Review: Singer on Marx</a></li> <li><a href="scott-alexandria_split_185.html">Does Class Warfare Have a Free Rider Problem?</a></li> <li><a href="scott-alexandria_split_186.html">Book Review: Red Plenty</a></li> </body> </html> """ print(add_pagenums_to_toc_page(toc_page,title_to_pagenums)) !ls ~/Documents | grep SSC import json print(json.dumps(file_to_page_num,indent=2)) for x in ssc_book.get_items(): print(x) ```
github_jupyter
# Solutions for chapter 10 exercises ## Setup ``` # Common libraries import pandas as pd import numpy as np import statsmodels.formula.api as smf from statsmodels.formula.api import ols import seaborn as sns # To rescale numeric variables from sklearn.preprocessing import MinMaxScaler # To one-hot encode cat. variables from sklearn.preprocessing import OneHotEncoder ``` ## Exercise 2 – Hierarchical regression, guided. ### 1) Traditional regression. a) Load the data in chap4-hotel_booking_case_study.csv and create a copy of it where all countries representing less than 1% of the data are lumped under “Other”. ``` #Loading the data dat_df = pd.read_csv("chap4-hotel_booking_case_study.csv") #Removing NA's for the relevant variables dat_df = dat_df.dropna(subset = ['Country', 'ADR', 'MarketSegment']) dat_df.head(5) #Reducing the number of values for Country of origin by keeping most frequent countries only #and aggregating the remaining ones under "Other" countries_df = dat_df.groupby('Country').agg(pct = ('NRDeposit', lambda x: len(x)/len(dat_df))).\ sort_values(by=['pct'], ascending = False) top_countries_df = countries_df.loc[countries_df.pct >= 0.01].reset_index() top_countries_lst = top_countries_df['Country'].tolist() print(top_countries_lst) dat_df_agg = dat_df.copy() dat_df_agg['Country'] = np.where(dat_df_agg['Country'].isin(top_countries_lst), dat_df_agg['Country'], 'Other') ``` b) Run a linear regression of ADR on Country and MarketSegment. Save the predicted values for all the rows in the data and calculate the mean absolute difference (MAD1) between the predicted values and the true values. ``` model = ols("ADR~Country+MarketSegment", data=dat_df_agg) res = model.fit(disp=0) res.summary() #Calculating the predicted values from the model predicted_values1 = res.predict(dat_df_agg) predicted_values1.head(5) #Calculating the mean absolute difference MAD1 = np.mean(abs(predicted_values1 - dat_df['ADR'])) MAD1 ``` ### 2) Hierarchical regression. 
Run a hierarchical linear regression of ADR on Country and MarketSegment, with Country as the clustering variable. Save the predicted values for all the rows in the data and calculate the mean absolute difference (MAD2) between the predicted values and the true values. ```
# Fit the mixed-effects (hierarchical) model, clustering on Country
mixed = smf.mixedlm("ADR~Country+MarketSegment", data = dat_df, groups = dat_df["Country"])
# Fit once and reuse the results object: refitting a MixedLM is expensive
mixed_res = mixed.fit()
print(mixed_res.summary())
#Calculating the predicted values from the model
predicted_values2 = mixed_res.predict(dat_df)
predicted_values2.head(5)
#Calculating the mean absolute difference
MAD2 = np.mean(abs(predicted_values2 - dat_df['ADR']))
MAD2
``` ### 3) Comparison ``` diff = MAD1 - MAD2 diff ``` This number means that the hierarchical regression is more accurate by $0.09 on average.
github_jupyter
# Pytsal Anomaly Detection Tutorial **Created using: Pytsal 1.1.0** **Date Updated: May 08, 2021** **Tutorial Author: Krishnan S G** ## 1.0 Tutorial Objective Welcome to Anomaly detection Tutorial. This tutorial assumes that you are new to Pytsal and looking to get started with Anomaly detection using the `pytsal.anomaly` Module. In this tutorial we will learn: * **Getting Data**: How to import data from Pytsal repository * **Setting up Environment**: How to setup an experiment in Pytsal and get started with building forecasting models * **Perform anomaly detection**: Create anomaly detection model and generate insights and plots Read Time : Approx. 15 Minutes ## 1.1 Installing Pytsal The first step to get started with Pytsal is to install Pytsal. Installation is easy and will only take a few minutes. Follow the instructions below: #### Installing Pytsal in Local Jupyter Notebook ```pip install pytsal``` #### Installing Pytsal on Google Colab or Azure Notebooks ```!pip install pytsal``` ## 1.2 Pre-Requisites * Python 3.6 or greater * Pytsal 1.0 or greater * Basic Knowledge of time series analysis ## Anomaly Detection using Brutlag algorithm Anomaly detection problem for time series is usually formulated as finding outlier data points relative to some standard. ### What is Brutlag algorithm? The algorithm is an extension to Holt-Winter model. The data points within the confidence bands are considered to be normal and data points outside the bands are considered to be anomalies. ### Formula ![](https://latex.codecogs.com/svg.latex?d_t%20%3D%20%5Cgamma%20%5Cleft%20%7C%20D_t-P_t%20%5Cright%20%7C&plus;%281-%5Cgamma%20%29d_%7Bt-p%7D%5C%5C%20UB%20%3D%20a_%7Bt-1%7D&plus;b_%7Bt-1%7D&plus;c_%7Bt-p%7D%20&plus;%20m*d_%7Bt-p%7D%20%5C%5C%20LB%20%3D%20a_%7Bt-1%7D&plus;b_%7Bt-1%7D&plus;c_%7Bt-p%7D%20-%20m*d_%7Bt-p%7D) - **UB** - upper confidence band. - **LB** - lower confidence band. - **a** - level at time **t**. - **b** - trend/slope component at time **t**. 
- **c** - seasonal component at time **t**. - **γ** - smoothing parameter for seasonality. - **p** - period for the time series. - **m** - brutlag scaling factor. The following graphs illustrates how Holt-Winter's model and brutlag algorithm are used to identify anomalies present in the time series. ![](https://raw.githubusercontent.com/KrishnanSG/holt-winters/70f540a98b67da645d170e3706ea0cc427a83fae/plots/anomaly_2016-17.png) ## 3. Getting the data Pytsal has a set of datasets available in-built with the package for easy prototyping and test. In future further more dataset would be added under [datasets folder](https://github.com/KrishnanSG/pytsal/tree/master/pytsal/internal/datasets) ### Loading Airline Anomaly dataset ``` from pytsal.dataset import load_airline_with_anomaly ts_with_anomaly = load_airline_with_anomaly() print(ts_with_anomaly.summary()) print('\n### Data ###\n') print(ts) ``` ## 4. Anomaly detection The `setup()` function initializes the environment in pytsal and creates the transformation pipeline to prepare the data for modeling and deployment. setup() must be called before executing any other function in pytsal. It takes three mandatory parameters: a pytsal timeseries object, pretrained model on true datapoints and algorithm name. All other parameters are optional and are used to customize the detection algorithm. ``` import pytsal.forecasting as f import pytsal.anomaly as ad # 2.a Load model if exists model = f.load_model() # 2.b Create new model if model is None: ts = load_airline() model = f.setup(ts, 'holtwinter', eda=False, validation=False, find_best_model=True, plot_model_comparison=False) trained_model = f.finalize(ts, model) f.save_model(trained_model) model = f.load_model() # 3. brutlag algorithm finds and returns the anomaly points anomaly_points = ad.setup(ts_with_anomaly, model, 'brutlag') ``` ## 5. Anomaly Inference The output of the algorithm is a set of anomaly points detected by the algorithm ``` anomaly_points ``` ## 6. 
Hurray! This tutorial has covered how to perform basic anomaly detection using brutlag algorithm and derive insights and visualizations. We have completed all of these steps in **less than 5 commands!**
github_jupyter
# Latent Dirichlet Allocation

- Paper: Latent Dirichlet Allocation
- Author: David M. Blei, Andrew Y. Ng, Michael I. Jordan
- Teammates: Yizi Lin, Siqi Fu
- Github: https://github.com/lyz1206/lda.git

### Part 1 Abstract

*Latent Dirichlet Allocation* (LDA) is a generative probabilistic model dealing with collections of data such as a corpus. Based on the assumptions of bag of words and exchangeability, each document in the corpus is modeled as a random mixture over latent topics and each topic is modeled by a distribution over words. A document is represented in the form of topic probabilities. In this project, we focus on text data, and in this case, each document is represented as a topic probability. We implement *variational inference* and the *EM algorithm* to estimate parameters and apply optimization methods to make our algorithm more efficient. We compare LDA with other *topic models* like LSI and HDP.

*Key words*: Topic Model, Latent Dirichlet Allocation, Variational Inference, EM algorithm

### Part 2 Background

In our project, we use the paper "Latent Dirichlet Allocation" by David M. Blei, Andrew Y. Ng and Michael I. Jordan. Latent Dirichlet allocation (LDA) is a generative probabilistic model of a corpus; it uses a three-level hierarchical Bayesian model to describe the word generative process. Its basic idea is that documents are represented as random mixtures over latent topics, and each topic is characterized by a distribution over words. In general, LDA assumes the following generative process for each document w in a corpus D:

1. Choose $N \sim Poisson(\xi)$, which represents the document length.
2. Choose $\theta \sim Dir(\alpha)$, where $\theta$ is a column vector representing the topic probabilities.
3. For each of the N words:
    - Choose $z_n\sim Multinomial(\theta)$, which represents the current topic.
- Choose $w_n$ based on $p( w_n \mid z_n; \beta)$ There are three critical assumption for this model: - the dimensionality k of the Dirichlet distribution is assumed known and fixed. - $\beta$ is a V $\times$ k matrix, where $\beta_{ij} = P( w^j = 1 \mid z^i = 1)$, which means $\beta$ represents the probability of generating one particular word given the particular topic. $\beta$ is also assumed to be known and fix. - words are generated by topics and those topics are infinitely exchangeable within a document. The generating process is represented as a probabilistic graphical model below: ![image](Figures/figure1.png) Based on the model described above, the joint distribution of a topic mixture $\theta$, a set of N topics z, and a set of N words w is given by: $$ p(\theta,z,w|\alpha, \beta)=p(\theta|\alpha)\prod_{n=1}^{N}p(z_n|\theta)p(w_n|z_n, \beta) $$ Integrating over $\theta$ and summing over z, we obtain the marginal distribution of a document: $$ p(w|\alpha, \beta) = \int p(\theta|\alpha)(\prod_{n=1}^{N}\sum_{z_n}p(z_n|\theta)p(w_n|z_n,\beta))d\theta $$ Finally, taking the product of the marginal probabilities of single documents, we obtain the probability of a corpus: $$ P(D \mid \alpha, \beta) = \prod_{d = 1}^{M} \int p(\theta_d \mid \alpha)(\prod_{n = 1}^{N_d}\sum_{z_{dn}}p(z_{dn}\mid \theta_d) p(w_{dn} \mid z_{dn},\beta)) d \theta_d $$ Using Bayesian rule, we can get the formula of the posterior distribution of the hidden variables given a document: $$ p(\theta,z|w, \alpha, \beta) = \frac{p(\theta,z,w|\alpha,\beta)}{p(w|\alpha,\beta)} $$ However, this distribution is intractable to capture in general. In this paper, the author use variational EM algorithm to approximate the distribution. We will discuss it in Part 3. 
Generally speaking, the main goal of LDA is to find short descriptions of the members of a collection that enable efficient processing of large collections while preserving the essential statistical relationships that are useful for basic tasks. Common applications involve:

- document modeling
- text classification
- collaborative filtering

As a three-level hierarchical Bayesian model, LDA is more elaborate than some other latent models such as the Mixture of unigrams and pLSA.

- In the Mixture of unigrams, the word distributions can be viewed as representations of topics under the assumption that each document exhibits only one topic. However, LDA allows documents to exhibit multiple topics with different probabilities.
- The pLSA model does solve the problem of the Mixture of unigrams, but it has the further problem that it is not a well-defined generative model of documents, which means that it cannot be used to assign probability to a previously unseen document. Also, because of the linear growth in the parameters of the pLSA model, it can cause overfitting. However, LDA suffers from neither of those problems.

From Mixture of unigrams to pLSA to LDA, text modeling is improved step by step. LDA introduces the Dirichlet distribution in the document-to-topic layer, which is better than pLSA, so that the number of model parameters does not grow with the size of the corpus.

### Part 3 Description of algorithm

In part 2, we have mentioned that the posterior distribution of the hidden variables is intractable to compute in general, so the authors in this paper use the variational EM algorithm to approximate it. Generally, this algorithm follows this iteration:

1. (E-step) For each document, find the optimizing values of the variational parameters $\{\gamma_d^\ast, \Phi_d^\ast : d \in D\}$.
2. (M-step) Maximize the resulting lower bound on the log likelihood with respect to the model parameters $\alpha$ and $\beta$.
This correspondings to finding maximum likelihood estimates with expected sufficient statistics for each document under the approximate posterior which is computed in the E-step. * E-step The main idea in this step is to find the tightest possible lower bound of the log likelihood and choose variational parameters. Firstly, we show the procedure of finding the tightest lower bound of the log likelihood. We begin by applying `Jensen's inequality` to bound the log likehood of document: $$ \begin{split} log \space p \space (w \mid \alpha, \beta) &= log \int \sum_z p(\theta,z,w \mid \alpha, \beta ) d \theta\\ &=log \int \sum_z \frac{p(\theta,z,w \mid \alpha, \beta)\space q(\theta,z)}{q(\theta,z)} d\theta\\ &\ge \int \sum_z q(\theta,z) \space log \space p(\theta,z,w \mid \alpha,\beta) d\theta - \int \sum_z q(\theta,z) \space log \space q(\theta,z) d\theta\\ &= E_q[log \space p(\theta, z,w \mid \alpha, \beta)] -E_q[log\space q(\theta, z)] \end{split} $$ Form the above equation, we get a lower bound of the likelihood for variational distribution $q(\theta,z \mid \gamma,\phi)$. The difference between the left side and right side of the above qeuation represents the `KL` divergence between variational posterior probability and the true posterior probability. Let $L(\gamma,\phi: \alpha, \beta)$ denote the right-hand side, and we can get: $$log p(w \mid \alpha, \beta) = L (\gamma, \phi :\alpha,\beta) + D(q(\theta, z \mid \gamma, \phi) \mid \mid p(\theta,z \mid w, \alpha,\beta))$$ This shows the maximize lower bound $L(\gamma, \phi :\alpha,\beta)$ with respect to $\gamma$ and $\phi$ is equivalent to minimizing the KL divergence. So we successfully translate the into the optimization problem as below: $$(\gamma^*,\phi^*) = argmin_{\gamma,\phi} D(q(\theta,z \mid \gamma, \phi) \mid \mid p (\theta,z \mid w,\alpha,\beta))$$ Secondly, we obtain a tractable family of the $q(\theta,z)$. In the paper, the authors drop the edges between $\theta$, z and w, as well as the w nodes. 
This procedure is shown below. ![image](Figures/figure2.png) So $q(\theta,z)$ is characterized by the following variational distribution: $$ q(\theta,z|\gamma, \Phi) = q(\theta|\gamma)\prod_{n=1}^{N}q(z_n|\Phi_n) $$ Thirdly, we expand the lower bound using the factorizations of p and q: $$ L(\gamma,\phi: \alpha,\beta) = E_q[log p(\theta \alpha)] +E_q [log p(z \mid \theta )] +E_q [log p(w \mid z,\beta)] -E_q[log q(\theta)] -E_q[log q(z)] $$ Finally, we use Lagrange method to maximize the lower bound with respect to the variational parameters $\Phi$ and $\gamma$. The updated equations are: $$\phi_{ni} \propto \beta_{iw_n} exp[E_q (log(\theta_i) \mid \gamma)]$$ $$\gamma_i = \alpha_i +\sum_{n = 1}^N \phi_{ni}$$ The pseudocode of E-step is as follow: ![image](Figures/figure3.png) - M-step The main in M-step is to maximize the resulting lower bound on the log likelohood with respect to the model parameters $\alpha$ and $\beta$. We update $\beta$ through Lagrange method. $$ L_{[\beta]} = \sum_{d=1}^{M}\sum_{n=1}^{N_d}\sum_{i=1}^{k}\sum_{j=1}^{V}\Phi_{dni}w_{dn}^jlog\beta_{ij}+\sum_{i=1}^{k}\lambda_i(\sum_{j=1}^{V}\beta_{ij}-1) $$ So the update equation is: $$ \beta_{ij} \propto \sum_{d=1}^{M}\sum_{n=1}^{N_d}\Phi_{dn_i}w^{j}_{dn} $$ $\alpha$ is updated by Newton-Raphson Method: $$\alpha_{new} = \alpha_{old} -H (\alpha_{old})^{-1} g(\alpha_{old})$$ where $H(\alpha)$ is the Hessian matrix and $g(\alpha)$ is the gradient at point $\alpha$. This algorithm scales as $O(N^3)$ due to the matrix inversion. Instead, if the Hessian matrix has a special struture $H = diag(h) + \textbf{1} z \textbf{1}^T$, we are able to yields a Newton-Raphson algorithm that has linear complexity. 
This precesure is shown below: $$ H^{-1} = diag(h)^{-1} - \frac{diag(h)^{-1} \textbf{1} \textbf{1}^T diag(h)^{-1}}{z^{-1} + \sum_{j = 1}^k h_j^{-1}}\\ $$ Multiplying by the gradient, we can get the ith component as: $$(H^{-1} g)_i = \frac{g_i-c}{h_i}$$ where $c = \frac{\sum_{j=1}^k g_j/h_j}{z^{-1} +\sum_{j = 1}^{k} j_j^{-1}}$ ### Part 4 Optimization In this project, we use variational EM algorithm for LDA model to find the value of parameter $\alpha$ and $\beta$, to maximize the marginal log likelihood. In general, there are two parts that needed to be optimized. 1. E-step: For each document d, calculate the optimizing values of the variational parameters : $\gamma_d^\ast$ and $Phi_d^\ast$. 2. M-step: Based on the results of E-step, update $\alpha$ and $\beta$. In the previous part(the plain version), we have optimized the M-step. More specifically, we update $\alpha$ through the Newton-Raphson methods for a Hessian with special structure, which is mentioned in the paper and decrease the time complexity from $O(N^3)$ to linearity. In this part, our main goal is to optimize the E-step. There are two processes here: optimize $\gamma_d^\ast$ and $Phi_d^\ast$, then calculate the statistics for M-step. Method we use: - Vectorization: in `Estep_singedoc()` function, usd matrix to avoid the use of for loop. - JIT compilation: for `accumulate_Phi()` and `Estep()` function. We also tried the cython method, but unfortunately it didn't improve the code performance here. ``` import numpy as np import pandas as pd import gensim import numba from nltk.stem import WordNetLemmatizer from nltk import PorterStemmer from scipy.special import digamma, polygamma ``` #### Original version ``` def Estep_original(doc, alpha, beta, k, N_d, max_iter = 50): ''' E step for a document, which calculate the posterior parameters. beta and alpha is coming from previous iteration. Return Phi and gamma of a document. 
''' gamma_old = [alpha[i] + N_d/k for i in range(k)] row_index = list(doc.keys()) word_count = np.array(list(doc.values())) for i in range(max_iter): # Update Phi Phi = np.zeros((N_d, k)) for i in range(N_d): for j in range(k): Phi[i,j] = beta[row_index[i],j]*np.exp(digamma(gamma_old[j])) Phi[i,:] = Phi[i,:]/np.sum(Phi[i,:]) # Update gamma Phi_sum = np.zeros(k) for j in range(k): z = 0 for i in range(N_d): z += Phi[i,j] * word_count[i] Phi_sum[j] = z gamma_new = alpha + Phi_sum # converge or not if(i>0) and (convergence(gamma_new, gamma_old)): break else: gamma_old = gamma_new.copy() return gamma_new, Phi def accumulate_Phi_original(beta, Phi, doc): ''' This function accumulates the effect of Phi_new from all documents after e step. beta is V*k matrix. Phi is N_d * k matrix. Return updated beta. ''' row_index = list(doc.keys()) word_count = list(doc.values()) for i in range(len(row_index)): beta[row_index[i],:] = word_count[i] * Phi[i,:] return beta ``` #### Opitimized version ``` def Estep_singedoc(doc, alpha, beta, k, N_d, max_iter = 50): ''' E step for a document, which calculate the posterior parameters. beta and alpha is coming from previous iteration. Return Phi and gamma of a document. ''' gamma_old = alpha + np.ones(k) * N_d/k row_index = list(doc.keys()) word_count = np.array(list(doc.values())) for i in range(max_iter): # Update Phi Phi_exp = np.exp(digamma(gamma_old)) Phi = beta[row_index,:] @ np.diag(Phi_exp) Phi_new = normalization_row(Phi) # Update gamma Phi_sum = Phi_new.T @ word_count[:,None] # k-dim gamma_new = alpha + Phi_sum.T[0] # Converge or not if (i>0) & convergence(gamma_new, gamma_old): break else: gamma_old = gamma_new.copy() return gamma_new, Phi_new @numba.jit(cache = True) def accumulate_Phi(beta, Phi, doc): ''' This function accumulates the effect of Phi_new from all documents after e step. beta is V*k matrix. Phi is N_d * k matrix. Return updated beta. 
''' beta[list(doc.keys()),:] += np.diag(list(doc.values())) @ Phi return beta @numba.jit(cache = True) def Estep(doc, alpha_old, beta_old, beta_new, gamma_matrix, k, N_d, M): ''' Calculate $\gamma$ and $\Phi$ for all documents. ''' for i in range(M): gamma, Phi = Estep_singedoc(doc[i], alpha_old, beta_old, k, N_d[i]) beta_new = accumulate_Phi(beta_new, Phi, doc[i]) gamma_matrix[i,:] = gamma return beta_new, gamma_matrix # Some helpful functions for comparison def convergence(new, old, epsilon = 1.0e-3): ''' Check convergence. ''' return np.all(np.abs(new - old)) < epsilon def normalization_row(x): ''' Normaize a matrix by row. ''' return x/np.sum(x,1)[:,None] def initializaiton(k, V): ''' Initialize alpha and beta. alpha is a k-dim vector. beta is V*k matrix. ''' np.random.seed(12345) alpha = np.random.uniform(size = k) alpha_output = alpha/np.sum(alpha) beta_output = np.random.dirichlet(alpha_output, V) return alpha_output, beta_output def lemmatize_stemming(text): ''' Lenmmatize and stem the text. ''' return PorterStemmer().stem(WordNetLemmatizer().lemmatize(text, pos='v')) def preprocess(text): ''' Preprocess the text. ''' result = [] for token in gensim.utils.simple_preprocess(text): if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3: result.append(lemmatize_stemming(token)) return result ``` #### Comparison ``` # Load the data and preprocess it. 
data = pd.read_csv('data/articles.csv', error_bad_lines=False) document = data[['content']].iloc[:50,:].copy() document['index'] = document.index processed_docs = document['content'].map(preprocess) vocabulary = gensim.corpora.Dictionary(processed_docs) #vocabulary.filter_extremes(no_below=5, no_above=0.1, keep_n=100) bow_corpus = [vocabulary.doc2bow(doc) for doc in processed_docs] doc = [dict(bow) for bow in bow_corpus] N_d = [len(d) for d in doc] V = len(vocabulary) M = len(doc) k = 3 ``` - Original Version ``` %%timeit alpha0, beta0 = initializaiton(k, V) beta = beta0 gamma_matrix_origin = np.zeros((M, k)) for i in range(M): gamma, Phi = Estep_original(doc[i], alpha0, beta0, k, N_d[i]) beta = accumulate_Phi_original(beta, Phi, doc[i]) gamma_matrix_origin[i,:] = gamma ``` - Optimized Version ``` %%timeit alpha0, beta0 = initializaiton(k, V) beta = beta0 gamma_matrix_opt = np.zeros((M, k)) beta_new, gamma_matrix_opt = Estep(doc, alpha0, beta0, beta, gamma_matrix_opt, k, N_d, M) ``` - Conclusion We use 50 documents here, and set up k=3. After opitimizaion, the running time decreased from 15.2 to 221 ms. the increase percentage is nearly 100%. ### Part 5: Applications to simulated data sets In this part, we use our model on a simulated data set. Given $\alpha$, $\beta$, k, M, V, and N_d by ourselves, the data process is described in Part 2. The corresponding code and result is in the file Test-Simulated.ipynb. The real value of $\alpha$ is [0.15, 0.35, 0.5]. The results of the simulated shows that, , the $\hat{\alpha}$ is [0.49, 0.34, 0.16]. We find that the estiamted value is able to approximate the true value. We also calculate the average difference between $\beta$ and $\hat{\beta}$, which is equals to 0.0059 and is acceptable. In conclusion, the LDA method is able to capture the true value of $\alpha$ and $\beta$ if we run the code for enough time and start with a "good" point. ### Part 6: Real data set In this part, we implent our algorithm on two real dataset. 
We preprocess the datasets before using them. The operations include: splitting the sentences into words, lemmatizing, removing stop words, creating the vocabulary and establishing the corpus.

- Dataset in Paper

In the original paper, the authors used 16,000 documents from a subset of the TREC AP corpus (Harman, 1992). It is not easy to get the TREC dataset since we need to sign an individual agreement and ask for approval from NIST. Instead, we downloaded the sample data on [Blei's webpage](http://www.cs.columbia.edu/~blei/lda-c/). This sample is just a subset of the data that the authors used in the paper, so we cannot get the same result.

![image](Figures/example1.png)

The topic words from some of the resulting multinomial distributions $p(w|z)$ are illustrated above. These distributions seem to capture some hidden topics in the corpus. For example, "million", "market", "stock", "company" are common words in a topic like "economy", and "president", "reagan", "statement" and "troop" are common words in a topic like "politics".

- Another Dataset

This dataset is named "All the news" and it comes from [kaggle](https://www.kaggle.com/snapcrack/all-the-news). The dataset contains articles from the New York Times, Breitbart, CNN, Business Insider, the Atlantic, Fox News and so on. The original dataset has three csv files, but we just use the first 1000 rows in the second file.

![image](Figures/example 2.png)

Similar to the previous dataset, the LDA model captures some hidden topics in the corpus. For example, words like "space", "planet", "earth" and "universe" are common in the astronomy area.

In conclusion, our package works well. The LDA model is able to capture the hidden topics in the corpus and to provide reasonable insights to us, which is useful for text classification and collaborative filtering.
### Part 7 Comparative Analysis with Competing Algorihtms In this part, we compare the LDA method with two competing algorithm: Latent Semantic Indexing (LSI) and Hierarchical Dirichlet process(HDP). We still use the "All the news" dataset to evaluate the performance of the algorithm. ``` df = pd.read_csv('data/articles.csv') document = df[['content']].copy() document['index'] = document.index processed_docs = document['content'].map(preprocess) vocabulary = gensim.corpora.Dictionary(processed_docs) bow_corpus = [vocabulary.doc2bow(doc) for doc in processed_docs] ``` #### LDA vs LSI ``` from gensim.models import LdaModel from gensim.models import LsiModel ``` - Compare speed ``` %timeit LsiModel(bow_corpus, 30, id2word = vocabulary) %timeit LdaModel(bow_corpus, 30, id2word = vocabulary) ``` - Compare result ``` lsamodel = LsiModel(bow_corpus, 30, id2word = vocabulary) ldamodel = LdaModel(bow_corpus, 30, id2word = vocabulary) for idx, topic in ldamodel.print_topics(-1): if idx<5: print('Topic: {} \nWords: {}'.format(idx, topic))\ for idx, topic in lsamodel.print_topics(-1): if idx<5: print('Topic: {} \nWords: {}'.format(idx, topic)) ``` The result above shows that the LSI algorithm is even faster, since it implement a SVD decompisition to reduce the dimension of the input. Howvever, the LDA method implement a variational EM algorithm or gibbs sampling, which require a lot of iteration to make the estimated value of every document converge, and thus is time consuming. As for the result, we find that all the coefficients in LDA is positive, but some of the coefficients in LSI is negative. In conclusion, the speed of LSI algotithm is significantly faster than the LDA algorithm. In the case that we need to use cross-validation to choose the topic number, LSI is more effective. However, the components of the topic in LSI method are arbitrarily positive/negative, which it is difficult to interpret. Moreover, LSI is unable to capture the multiple meanings of words. 
#### LDA vs HDP One character of the LDA algorithm is that we need to specify the number of topic. However, in most cases, we do not know the exactly topic numbers. Cross validatio is a method to deal with this problem. However, in the previous part, we have shown that the LDA algorithm is less effective, using cross validation is time consuming. HDP algorithm is a natural nonparametric generalization of Latent Dirichlet allocation, where the number of topics can be unbounded and learnt from data, so we don't need to select the topic numbers. ``` from gensim.models import HdpModel ``` - Compare speed ``` %timeit HdpModel(bow_corpus, vocabulary) %timeit LdaModel(bow_corpus, 30, id2word = vocabulary) ``` - Compare results ``` hdp = HdpModel(bow_corpus, vocabulary) for idx, topic in hdp.print_topics(-1): if idx<5: print('Topic: {} \nWords: {}'.format(idx, topic)) ``` Although the speed the LDA algorithm is faster, if we want to implement cross validaiton to ensure the topic numbers, we have to run the model serveal time, and thus the total time is longer than the HDP algorithm. In this case, HDP is better. However, compared the results from two algorithm, it is obvious that they are different. Sometimes it is difficult to interpret the result of HDP algorithm. What's more, if the previous experience tells us what the topic number is, the LDA model is more effective. ### Part 8 Discussion Based on the performance of our algorithm on the real dataset and the dataset in paper, our algorithm does fulfill the need that divide the document into different topics and explore the words occurring in that topic of different weight.<br> LDA can be used in a variety of purposes: - Clustering: The topic in the clustering center, and the document associate with multer clusters. Clustering is very helpful in organizing and summarizing article collections. - Feature generation: LDA can generate features for other machine learning algorithm. 
As mentioned before, LDA generates topics for each document, and these K topics can be treated as K features, and these features can be used to predict as in logistic regression and decision tree. - Dimension Reduction: LDA provides a topic distribution which can be seen as a concise summary of the article.Comparing articles in this dimensionally reduced feature space is more meaningful than in the feature space of the original vocabulary. Even though the LDA performs well in our dataset, it does have some limitations. For example, the number of topics $k$ is fixed and must be known ahead of time. Also, Since the Dirichlet topic distribution cannot capture correlations, the LDA can only capture uncorrelated topics. Finally, the LDA algorithm is based on the assumption of BoW, which assumes words are exchangeable, and does not consider sentence sequence. <br> To overcome the limitation, extend LDA to the distributions on the topic variables are elaborated. For example, arranging the topics in a time series, so that it relax the full exchangeability assumption to one of partial exchangeability. ### Part 9 References/bibliography [1] Blei, David M., Andrew Y. Ng, and Michael I. Jordan. "Latent dirichlet allocation." the Journal of machine Learning research 3 (2003): 993-1022.
github_jupyter
``` # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # AI Platform (Unified) SDK: Data Labeling ## Installation Install the latest (preview) version of AI Platform (Unified) SDK. ``` ! pip3 install -U google-cloud-aiplatform --user ``` Install the Google *cloud-storage* library as well. ``` ! pip3 install google-cloud-storage ``` ### Restart the Kernel Once you've installed the AI Platform (Unified) SDK and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages. ``` import os if not os.getenv("AUTORUN"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` ## Before you begin ### GPU run-time *Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** ### Set up your GCP project **The following steps are required, regardless of your notebook environment.** 1. [Select or create a GCP project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project) 3. [Enable the AI Platform APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component) 4. 
[Google Cloud SDK](https://cloud.google.com/sdk) is already installed in AI Platform Notebooks. 5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. ``` PROJECT_ID = "[your-project-id]" #@param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID ``` #### Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for AI Platform (Unified). We recommend when possible, to choose the region closest to you. - Americas: `us-central1` - Europe: `europe-west4` - Asia Pacific: `asia-east1` You cannot use a Multi-Regional Storage bucket for training with AI Platform. Not all regions provide support for all AI Platform services. For the latest support per region, see [Region support for AI Platform (Unified) services](https://cloud.google.com/ai-platform-unified/docs/general/locations) ``` REGION = 'us-central1' #@param {type: "string"} ``` #### Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial. ``` from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ``` ### Authenticate your GCP account **If you are using AI Platform Notebooks**, your environment is already authenticated. Skip this step. 
*Note: If you are on an AI Platform notebook and run the cell, the cell knows to skip executing the authentication steps.* ``` import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your Google Cloud account. This provides access # to your Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on AI Platform, then don't execute this code if not os.path.exists('/opt/deeplearning/metadata/env_version'): if 'google.colab' in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this tutorial in a notebook locally, replace the string # below with the path to your service account key and run this cell to # authenticate your Google Cloud account. else: %env GOOGLE_APPLICATION_CREDENTIALS your_path_to_credentials.json # Log in to your account on Google Cloud ! gcloud auth login ``` ### Create a Cloud Storage bucket **The following steps are required, regardless of your notebook environment.** This tutorial is designed to use training data that is in a public Cloud Storage bucket and a local Cloud Storage bucket for your batch predictions. You may alternatively use your own training data that you have stored in a local Cloud Storage bucket. Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets. ``` BUCKET_NAME = "[your-bucket-name]" #@param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]": BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP ``` **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ``` ! gsutil mb -l $REGION gs://$BUCKET_NAME ``` Finally, validate access to your Cloud Storage bucket by examining its contents: ``` ! gsutil ls -al gs://$BUCKET_NAME ``` ### Set up variables Next, set up some variables used throughout the tutorial. 
### Import libraries and define constants #### Import AI Platform (Unified) SDK Import the AI Platform (Unified) SDK into our Python environment. ``` import os import sys import time from google.cloud.aiplatform import gapic as aip from google.protobuf import json_format from google.protobuf.struct_pb2 import Value from google.protobuf.struct_pb2 import Struct from google.protobuf.json_format import MessageToJson from google.protobuf.json_format import ParseDict ``` #### AI Platform (Unified) constants Setup up the following constants for AI Platform (Unified): - `API_ENDPOINT`: The AI Platform (Unified) API service endpoint for dataset, model, job, pipeline and endpoint services. - `API_PREDICT_ENDPOINT`: The AI Platform (Unified) API service endpoint for prediction. - `PARENT`: The AI Platform (Unified) location root path for dataset, model and endpoint resources. ``` # API Endpoint API_ENDPOINT = "{0}-aiplatform.googleapis.com".format(REGION) # AI Platform (Unified) location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION ``` #### AutoML constants Next, setup constants unique to AutoML image classification datasets and training: - Dataset Schemas: Tells the managed dataset service which type of dataset it is. - Data Labeling (Annotations) Schemas: Tells the managed dataset service how the data is labeled (annotated). - Dataset Training Schemas: Tells the managed pipelines service the task (e.g., classification) to train the model for. 
``` # Image Dataset type IMAGE_SCHEMA = "google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml" # Image Labeling type IMPORT_SCHEMA_IMAGE_CLASSIFICATION = "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_classification_single_label_io_format_1.0.0.yaml" # Image labeling task LABELING_SCHEMA_IMAGE = "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml" ``` ## Clients The AI Platform (Unified) SDK works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the server (AI Platform). You will use several clients in this tutorial, so set them all up upfront. - Dataset Service for managed datasets. - Job Service for batch jobs and custom training. ``` # client options same for all services client_options = {"api_endpoint": API_ENDPOINT} def create_dataset_client(): client = aip.DatasetServiceClient( client_options=client_options ) return client def create_job_client(): client = aip.JobServiceClient( client_options=client_options ) return client clients = {} clients["dataset"] = create_dataset_client() clients["job"] = create_job_client() for client in clients.items(): print(client) import tensorflow as tf LABELING_FILES = [ "https://raw.githubusercontent.com/googleapis/python-aiplatform/master/samples/snippets/resources/daisy.jpg" ] IMPORT_FILE = "gs://" + BUCKET_NAME + '/labeling.csv' with tf.io.gfile.GFile(IMPORT_FILE, 'w') as f: for lf in LABELING_FILES: ! wget {lf} | gsutil cp {lf.split("/")[-1]} gs://{BUCKET_NAME} f.write("gs://" + BUCKET_NAME + "/" + lf.split("/")[-1] + "\n") ! 
gsutil cat $IMPORT_FILE ``` *Example output*: ``` gs://migration-ucaip-trainingaip-20210303215432/daisy.jpg ``` ## Create a dataset ### [projects.locations.datasets.create](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.datasets/create) #### Request ``` DATA_SCHEMA = IMAGE_SCHEMA dataset = { "display_name": "labeling_" + TIMESTAMP, "metadata_schema_uri": "gs://" + DATA_SCHEMA } print(MessageToJson( aip.CreateDatasetRequest( parent=PARENT, dataset=dataset ).__dict__["_pb"]) ) ``` *Example output*: ``` { "parent": "projects/migration-ucaip-training/locations/us-central1", "dataset": { "displayName": "labeling_20210303215432", "metadataSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml" } } ``` #### Call ``` request = clients["dataset"].create_dataset( parent=PARENT, dataset=dataset ) ``` #### Response ``` result = request.result() print(MessageToJson(result.__dict__["_pb"])) ``` *Example output*: ``` { "name": "projects/116273516712/locations/us-central1/datasets/1165112889535627264", "displayName": "labeling_20210303215432", "metadataSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml", "labels": { "aiplatform.googleapis.com/dataset_metadata_schema": "IMAGE" }, "metadata": { "dataItemSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/dataitem/image_1.0.0.yaml" } } ``` ``` # The full unique ID for the dataset dataset_id = result.name # The short numeric ID for the dataset dataset_short_id = dataset_id.split('/')[-1] print(dataset_id) ``` ### [projects.locations.datasets.import](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.datasets/import) #### Request ``` LABEL_SCHEMA = IMPORT_SCHEMA_IMAGE_CLASSIFICATION import_config = { "gcs_source": { "uris": [IMPORT_FILE] }, "import_schema_uri": LABEL_SCHEMA } print(MessageToJson( aip.ImportDataRequest( name=dataset_short_id, import_configs=[import_config] 
).__dict__["_pb"]) ) ``` *Example output*: ``` { "name": "1165112889535627264", "importConfigs": [ { "gcsSource": { "uris": [ "gs://migration-ucaip-trainingaip-20210303215432/labeling.csv" ] }, "importSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_classification_single_label_io_format_1.0.0.yaml" } ] } ``` #### Call ``` request = clients["dataset"].import_data( name=dataset_id, import_configs=[import_config] ) ``` #### Response ``` result = request.result() print(MessageToJson(result.__dict__["_pb"])) ``` *Example output*: ``` {} ``` ### Create data labeling specialist pool If you do not have access to labeling services, execute this section. ``` # add client for specialist pool clients["specialist_pool"] = aip.SpecialistPoolServiceClient( client_options=client_options ) ``` ### [projects.locations.specialistPools.create](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.specialistPools/create) #### Request In this part, you will replace [your-email-address] with your email address. This makes you the specialist and recipient of the labeling request. ``` EMAIL = "[your-email-address]" specialist_pool = { "name": "labeling_" + TIMESTAMP, # The resource name of the SpecialistPool. 
"display_name": "labeling_" + TIMESTAMP, # user-defined name of the SpecialistPool "specialist_manager_emails": [EMAIL] } print(MessageToJson( aip.CreateSpecialistPoolRequest( parent=PARENT, specialist_pool=specialist_pool ).__dict__["_pb"]) ) ``` *Example output*: ``` { "parent": "projects/migration-ucaip-training/locations/us-central1", "specialistPool": { "name": "labeling_20210303215432", "displayName": "labeling_20210303215432", "specialistManagerEmails": [ "dev@fourteen33.com" ] } } ``` #### Call ``` request = clients["specialist_pool"].create_specialist_pool( parent=PARENT, specialist_pool=specialist_pool ) ``` #### Response ``` result = request.result() print(MessageToJson(result.__dict__["_pb"])) ``` *Example output*: ``` { "name": "projects/116273516712/locations/us-central1/specialistPools/1167839678372511744" } ``` ``` specialist_name = result.name specialist_id = specialist_name.split("/")[-1] print(specialist_name) ``` ## Create data labeling job ### [projects.locations.dataLabelingJobs.create](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.dataLabelingJobs/create) ``` # create placeholder file for valid PDF file with instruction for data labeling ! 
echo "this is instruction" >> instruction.txt | gsutil cp instruction.txt gs://$BUCKET_NAME ``` #### Request ``` LABLEING_SCHEMA = LABELING_SCHEMA_IMAGE INSTRUCTION_FILE = "gs://" + BUCKET_NAME + "/instruction.txt" inputs = json_format.ParseDict({"annotation_specs": ["rose"]}, Value()) data_labeling_job = { "display_name": "labeling_" + TIMESTAMP, "datasets": [dataset_id], "labeler_count": 1, "instruction_uri": INSTRUCTION_FILE, "inputs_schema_uri": LABLEING_SCHEMA, "inputs": inputs, "annotation_labels": { "aiplatform.googleapis.com/annotation_set_name": "data_labeling_job_specialist_pool" }, "specialist_pools": [specialist_name] } print(MessageToJson( aip.CreateDataLabelingJobRequest( parent=PARENT, data_labeling_job=data_labeling_job ).__dict__["_pb"]) ) ``` *Example output*: ``` { "parent": "projects/migration-ucaip-training/locations/us-central1", "dataLabelingJob": { "displayName": "labeling_20210303215432", "datasets": [ "projects/116273516712/locations/us-central1/datasets/1165112889535627264" ], "labelerCount": 1, "instructionUri": "gs://migration-ucaip-trainingaip-20210303215432/instruction.txt", "inputsSchemaUri": "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml", "inputs": { "annotation_specs": [ "rose" ] }, "annotationLabels": { "aiplatform.googleapis.com/annotation_set_name": "data_labeling_job_specialist_pool" }, "specialistPools": [ "projects/116273516712/locations/us-central1/specialistPools/1167839678372511744" ] } ``` #### Call ``` request = clients["job"].create_data_labeling_job( parent=PARENT, data_labeling_job=data_labeling_job ) ``` #### Response ``` print(MessageToJson(request.__dict__["_pb"])) ``` *Example output*: ``` { "name": "projects/116273516712/locations/us-central1/dataLabelingJobs/3830883229125050368", "displayName": "labeling_20210303215432", "datasets": [ "projects/116273516712/locations/us-central1/datasets/1165112889535627264" ], "labelerCount": 1, "instructionUri": 
"gs://migration-ucaip-trainingaip-20210303215432/instruction.txt", "inputsSchemaUri": "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml", "inputs": { "annotationSpecs": [ "rose" ] }, "state": "JOB_STATE_PENDING", "createTime": "2021-03-03T21:55:31.239049Z", "updateTime": "2021-03-03T21:55:31.239049Z" } ``` ``` labeling_task_name = request.name print(labeling_task_name) ``` ### [projects.locations.dataLabelingJobs.get](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.dataLabelingJobs/get) #### Call ``` request = clients["job"].get_data_labeling_job( name=labeling_task_name ) ``` #### Response ``` print(MessageToJson(request.__dict__["_pb"])) ``` *Example output*: ``` { "name": "projects/116273516712/locations/us-central1/dataLabelingJobs/3830883229125050368", "displayName": "labeling_20210303215432", "datasets": [ "projects/116273516712/locations/us-central1/datasets/1165112889535627264" ], "labelerCount": 1, "instructionUri": "gs://migration-ucaip-trainingaip-20210303215432/instruction.txt", "inputsSchemaUri": "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml", "inputs": { "annotationSpecs": [ "rose" ] }, "state": "JOB_STATE_PENDING", "createTime": "2021-03-03T21:55:31.239049Z", "updateTime": "2021-03-03T21:55:31.239049Z", "specialistPools": [ "projects/116273516712/locations/us-central1/specialistPools/1167839678372511744" ] } ``` ### [projects.locations.dataLabelingJobs.cancel](https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.dataLabelingJobs/cancel) #### Call ``` request = clients["job"].cancel_data_labeling_job( name=labeling_task_name ) ``` #### Response ``` print(request) ``` *Example output*: ``` None ``` ``` while True: response = clients["job"].get_data_labeling_job(name=labeling_task_name) if response.state == aip.JobState.JOB_STATE_CANCELLED: print("Labeling job CANCELED") break else: 
print("Canceling labeling job:", response.state) time.sleep(60) ``` # Cleaning up To clean up all GCP resources used in this project, you can [delete the GCP project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial. ``` delete_dataset = True delete_job = True delete_specialist_pool = True delete_bucket = True # Delete the dataset using the AI Platform (Unified) fully qualified identifier for the dataset try: if delete_dataset: clients['dataset'].delete_dataset(name=dataset_id) except Exception as e: print(e) # Delete the labeling job using the AI Platform (Unified) fully qualified identifier for the dataset try: if delete_job: request = clients["job"].delete_data_labeling_job(name=labeling_task_name) except Exception as e: print(e) # Delete the specialist pool using the AI Platform (Unified) fully qualified identifier for the dataset try: if delete_specialist_pool: clients["specialist_pool"].delete_specialist_pool(name=specialist_name) except Exception as e: print(e) if delete_bucket and 'BUCKET_NAME' in globals(): ! gsutil rm -r gs://$BUCKET_NAME ```
github_jupyter
# Exploration of Quora dataset ``` import sys sys.path.append("..") import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use("dark_background") # comment out if using light Jupyter theme dtypes = {"qid": str, "question_text": str, "target": int} train = pd.read_csv("../data/train.csv", dtype=dtypes) test = pd.read_csv("../data/test.csv", dtype=dtypes) ``` ## 1. A first glance ``` train.head() print("There are {} questions in train and {} in test".format(train.shape[0], test.shape[0])) print("Target value is binary (values: {})".format(set(train["target"].unique()))) print("Number of toxic questions in training data is {} (proportion: {}).".format(train["target"].sum(), train["target"].mean())) ``` ## 2. A closer look at the questions ### 2.1 Question length (characters) ``` train["text_length"] = train["question_text"].str.len() train["text_length"].describe() ``` Most questions are relatively short, i.e., less than 100 characters. There are some exceptions, however, with a maximum of more than a thousand. Let's see how many characters we should consider. ``` for length in [100, 150, 200, 250, 300, 350, 500]: num = np.sum(train["text_length"] > length) print("There are {} questions ({}%) with more than {} characters." .format(num, np.round(num / len(train) * 100, 2), length)) ``` The number of questions with more than 250 characters is already small and with more than 300 negligible. We can cut the questions at 300 or even just remove them. Would there be a difference between the length of toxic and sincere questions? 
``` def split_on_target(data): toxic = data[data["target"] == 1] sincere = data[data["target"] == 0] return sincere, toxic sincere, toxic = split_on_target(train) def plot_density_plots(sincere_data, toxic_data, column, xlim=(0, 300), bin_size=5): fig, axes = plt.subplots(1, 2, figsize=(12, 5)) axes[0] = sns.distplot(sincere_data[column], ax=axes[0], bins=np.arange(xlim[0], xlim[1], bin_size)) axes[0].set_title("Sincere questions") axes[1] = sns.distplot(toxic_data[column], ax=axes[1], bins=np.arange(xlim[0], xlim[1], bin_size)) axes[1].set_title("Toxic questions") if xlim is not None: for ax in axes: ax.set_xlim(xlim[0], xlim[1]) plt.suptitle("Comparison of {} between sincere and toxic questions".format(column)) plt.show() plot_density_plots(sincere, toxic, "text_length") ``` Toxic questions seem to have a higher chance of having somewhat more characters, although the medians seem to be more or less the same. The numbers confirm: ``` pd.concat([sincere["text_length"].describe(), toxic["text_length"].describe()], axis=1) ``` ### 2.2 Question length (words) A similar analysis can be done based on the number of _words_ per question, rather than the number of characters. To do this properly, we should probably first remove symbols and punctuation, but let's take a quick look. ``` train["words"] = train["question_text"].apply(lambda x: len(x.split(" "))) sincere, toxic = split_on_target(train) plot_density_plots(sincere, toxic, "words", xlim=(0, 60), bin_size=2) ``` The same conclusion seems to hold for the number of words. It is, thus, useful to include the question size as a feature in our models. Also, it seems that there are not many questions with more than 50 or 60 words: ``` for n in [50, 55, 60]: print("{} questions with more than {} words.".format(np.sum(train["words"] > n), n)) ```
github_jupyter
Someone asked how to generate outputs to use with [LibFFM](https://github.com/guestwalk/libffm). So all I do is to use pandas cuts for the numerics to turn them into categories. Feel free to try using them as straight numerics if you wish. I have tried to make it as generic as possible so you can use it on other competitions going forward! ``` import math import numpy as np import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') test.insert(1,'target',0) print(train.shape) print(test.shape) x = pd.concat([train,test]) x = x.reset_index(drop=True) unwanted = x.columns[x.columns.str.startswith('ps_calc_')] x.drop(unwanted,inplace=True,axis=1) features = x.columns[2:] categories = [] for c in features: trainno = len(x.loc[:train.shape[0],c].unique()) testno = len(x.loc[train.shape[0]:,c].unique()) print(c,trainno,testno) ``` OK so let us turn the big boys into categories ``` x.loc[:,'ps_reg_03'] = pd.cut(x['ps_reg_03'], 50,labels=False) x.loc[:,'ps_car_12'] = pd.cut(x['ps_car_12'], 50,labels=False) x.loc[:,'ps_car_13'] = pd.cut(x['ps_car_13'], 50,labels=False) x.loc[:,'ps_car_14'] = pd.cut(x['ps_car_14'], 50,labels=False) x.loc[:,'ps_car_15'] = pd.cut(x['ps_car_15'], 50,labels=False) test = x.loc[train.shape[0]:].copy() train = x.loc[:train.shape[0]].copy() #Always good to shuffle for SGD type optimizers train = train.sample(frac=1).reset_index(drop=True) train.drop('id',inplace=True,axis=1) test.drop('id',inplace=True,axis=1) train.head() test.head() ``` All our parameters are categories - if you want to try numerics please scale them first! 
``` categories = train.columns[1:] numerics = [] currentcode = len(numerics) catdict = {} catcodes = {} for x in numerics: catdict[x] = 0 for x in categories: catdict[x] = 1 noofrows = train.shape[0] noofcolumns = len(features) with open("alltrainffm.txt", "w") as text_file: for n, r in enumerate(range(noofrows)): if((n%100000)==0): print('Row',n) datastring = "" datarow = train.iloc[r].to_dict() datastring += str(int(datarow['target'])) for i, x in enumerate(catdict.keys()): if(catdict[x]==0): datastring = datastring + " "+str(i)+":"+ str(i)+":"+ str(datarow[x]) else: if(x not in catcodes): catcodes[x] = {} currentcode +=1 catcodes[x][datarow[x]] = currentcode elif(datarow[x] not in catcodes[x]): currentcode +=1 catcodes[x][datarow[x]] = currentcode code = catcodes[x][datarow[x]] datastring = datastring + " "+str(i)+":"+ str(int(code))+":1" datastring += '\n' text_file.write(datastring) noofrows = test.shape[0] noofcolumns = len(features) with open("alltestffm.txt", "w") as text_file: for n, r in enumerate(range(noofrows)): if((n%100000)==0): print('Row',n) datastring = "" datarow = test.iloc[r].to_dict() datastring += str(int(datarow['target'])) for i, x in enumerate(catdict.keys()): if(catdict[x]==0): datastring = datastring + " "+str(i)+":"+ str(i)+":"+ str(datarow[x]) else: if(x not in catcodes): catcodes[x] = {} currentcode +=1 catcodes[x][datarow[x]] = currentcode elif(datarow[x] not in catcodes[x]): currentcode +=1 catcodes[x][datarow[x]] = currentcode code = catcodes[x][datarow[x]] datastring = datastring + " "+str(i)+":"+ str(int(code))+":1" datastring += '\n' text_file.write(datastring) ``` Once you have built the libffm just use * ./ffm-train alltrainffm.txt * ./ffm-predict alltestffm.txt alltrainffm.txt.model output.txt We haven't shuffled test to we can just create the submission as follows ``` # sub = pd.read_csv('../input/sample_submission.csv') # outputs = pd.read_csv('output.txt',header=None) # outputs.columns = ['target'] # sub.target = 
outputs.target.ravel() # sub.to_csv('libffmsubmission.csv',index=False) ```
github_jupyter
# Normalizing Flows Overview Normalizing Flows is a rich family of distributions. They were described by [Rezende and Mohamed](https://arxiv.org/abs/1505.05770), and their experiments proved the importance of studying them further. Some extensions like that of [Tomczak and Welling](https://arxiv.org/abs/1611.09630) made partially/full rank Gaussian approximations for high dimensional spaces computationally tractable. This notebook reveals some tips and tricks for using normalizing flows effectively in PyMC3. ``` %matplotlib inline from collections import Counter import matplotlib.pyplot as plt import numpy as np import pymc3 as pm import seaborn as sns import theano import theano.tensor as tt pm.set_tt_rng(42) np.random.seed(42) ``` ## Theory Normalizing flows is a series of invertible transformations on an initial distribution. $$z_K = f_K \circ \dots \circ f_2 \circ f_1(z_0) $$ In this case, we can compute a tractable density for the flow. $$\ln q_K(z_K) = \ln q_0(z_0) - \sum_{k=1}^{K}\ln \left|\frac{\partial f_k}{\partial z_{k-1}}\right|$$ Here, every $f_k$ is a parametric function with a well-defined determinant. The transformation used is up to the user; for example, the simplest flow is an affine transform: $$z = loc(scale(z_0)) = \mu + \sigma * z_0 $$ In this case, we get a mean field approximation if $z_0 \sim \mathcal{N}(0, 1)$ ## Flow Formulas In PyMC3 there are flexible ways to define flows with formulas. There are currently 5 types defined: * Loc (`loc`): $z' = z + \mu$ * Scale (`scale`): $z' = \sigma * z$ * Planar (`planar`): $z' = z + u * \tanh(w^T z + b)$ * Radial (`radial`): $z' = z + \beta (\alpha + ||z-z_r||)^{-1}(z-z_r)$ * Householder (`hh`): $z' = H z$ Formulae can be composed as a string, e.g. `'scale-loc'`, `'scale-hh*4-loc'`, `'planar*10'`. Each step is separated with `'-'`, and repeated flows are defined with `'*'` in the form of `'<flow>*<#repeats>'`. 
Flow-based approximations in PyMC3 are based on the `NormalizingFlow` class, with corresponding inference classes named using the `NF` abbreviation (analogous to how `ADVI` and `SVGD` are treated in PyMC3). Concretely, an approximation is represented by: ``` pm.NormalizingFlow ``` While an inference class is: ``` pm.NFVI ``` ## Flow patterns Composing flows requires some understanding of the target output. Flows that are too complex might not converge, whereas if they are too simple, they may not accurately estimate the posterior. Let's start simply: ``` with pm.Model() as dummy: N = pm.Normal("N", shape=(100,)) ``` ### Mean Field connectivity Let's apply the transformation corresponding to the mean-field family to begin with: ``` pm.NormalizingFlow("scale-loc", model=dummy) ``` ### Full Rank Normal connectivity We can get a full rank model with dense covariance matrix using **householder flows** (hh). One `hh` flow adds exactly one rank to the covariance matrix, so for a full rank matrix we need `K=ndim` householder flows. hh flows are volume-preserving, so we need to change the scaling if we want our posterior to have unit variance for the latent variables. After we specify the covariance with a combination of `'scale-hh*K'`, we then add location shift with the `loc` flow. We now have a full-rank analog: ``` pm.NormalizingFlow("scale-hh*100-loc", model=dummy) ``` A more interesting case is when we do not expect a lot of interactions within the posterior. In this case, where our covariance is expected to be sparse, we can constrain it by defining a *low rank* approximation family. This has the additional benefit of reducing the computational cost of approximating the model. ``` pm.NormalizingFlow("scale-hh*10-loc", model=dummy) ``` Parameters can be initialized randomly, using the `jitter` argument to specify the scale of the randomness. 
``` pm.NormalizingFlow("scale-hh*10-loc", model=dummy, jitter=0.001) # LowRank ``` ### Planar and Radial Flows * Planar (`planar`): $z' = z + u * \tanh(w^T z + b)$ * Radial (`radial`): $z' = z + \beta (\alpha + ||z-z_r||)^{-1}(z-z_r)$ Planar flows are useful for splitting the incoming distribution into two parts, which allows multimodal distributions to be modeled. Similarly, a radial flow changes density around a specific reference point. ## Simulated data example There were 4 potential functions illustrated in the [original paper](https://arxiv.org/abs/1505.05770), which we can replicate here. Inference can be unstable in multimodal cases, but there are strategies for dealing with them. First, let's specify the potential functions: ``` def w1(z): return tt.sin(2.0 * np.pi * z[0] / 4.0) def w2(z): return 3.0 * tt.exp(-0.5 * ((z[0] - 1.0) / 0.6) ** 2) def w3(z): return 3.0 * (1 + tt.exp(-(z[0] - 1.0) / 0.3)) ** -1 def pot1(z): z = z.T return 0.5 * ((z.norm(2, axis=0) - 2.0) / 0.4) ** 2 - tt.log( tt.exp(-0.5 * ((z[0] - 2.0) / 0.6) ** 2) + tt.exp(-0.5 * ((z[0] + 2.0) / 0.6) ** 2) ) def pot2(z): z = z.T return 0.5 * ((z[1] - w1(z)) / 0.4) ** 2 + 0.1 * tt.abs_(z[0]) def pot3(z): z = z.T return -tt.log( tt.exp(-0.5 * ((z[1] - w1(z)) / 0.35) ** 2) + tt.exp(-0.5 * ((z[1] - w1(z) + w2(z)) / 0.35) ** 2) ) + 0.1 * tt.abs_(z[0]) def pot4(z): z = z.T return -tt.log( tt.exp(-0.5 * ((z[1] - w1(z)) / 0.4) ** 2) + tt.exp(-0.5 * ((z[1] - w1(z) + w3(z)) / 0.35) ** 2) ) + 0.1 * tt.abs_(z[0]) z = tt.matrix("z") z.tag.test_value = pm.floatX([[0.0, 0.0]]) pot1f = theano.function([z], pot1(z)) pot2f = theano.function([z], pot2(z)) pot3f = theano.function([z], pot3(z)) pot4f = theano.function([z], pot4(z)) def contour_pot(potf, ax=None, title=None, xlim=5, ylim=5): grid = pm.floatX(np.mgrid[-xlim:xlim:100j, -ylim:ylim:100j]) grid_2d = grid.reshape(2, -1).T cmap = plt.get_cmap("inferno") if ax is None: _, ax = plt.subplots(figsize=(12, 9)) pdf1e = np.exp(-potf(grid_2d)) contour = 
ax.contourf(grid[0], grid[1], pdf1e.reshape(100, 100), cmap=cmap) if title is not None: ax.set_title(title, fontsize=16) return ax fig, ax = plt.subplots(2, 2, figsize=(12, 12)) ax = ax.flatten() contour_pot( pot1f, ax[0], "pot1", ) contour_pot(pot2f, ax[1], "pot2") contour_pot(pot3f, ax[2], "pot3") contour_pot(pot4f, ax[3], "pot4") fig.tight_layout() ``` ## Reproducing first potential function ``` from pymc3.distributions.dist_math import bound def cust_logp(z): # return bound(-pot1(z), z>-5, z<5) return -pot1(z) with pm.Model() as pot1m: pm.DensityDist("pot1", logp=cust_logp, shape=(2,)) ``` ### NUTS Let's use NUTS first, just to have a look at how good its approximation is. > Note you may need to rerun the model a couple of times, as the sampler/estimator might not fully explore the function due to multimodality. ``` pm.set_tt_rng(42) np.random.seed(42) with pot1m: trace = pm.sample( 1000, init="auto", cores=2, start=[dict(pot1=np.array([-2, 0])), dict(pot1=np.array([2, 0]))], ) dftrace = pm.trace_to_dataframe(trace) sns.jointplot(dftrace.iloc[:, 0], dftrace.iloc[:, 1], kind="kde") ``` ### Normalizing flows As a first (naive) try with flows, we will keep things simple: Let's use just 2 planar flows and see what we get: ``` with pot1m: inference = pm.NFVI("planar*2", jitter=1) ## Plotting starting distribution dftrace = pm.trace_to_dataframe(inference.approx.sample(1000)) sns.jointplot(dftrace.iloc[:, 0], dftrace.iloc[:, 1], kind="kde"); ``` #### Tracking gradients It is illustrative to track gradients as well as parameters. In this setup, different sampling points can give different gradients because a single sampled point tends to collapse to a mode. 
Here are the parameters of the model: ``` inference.approx.params ``` We also require an objective: ``` inference.objective(nmc=None) ``` Theano can be used to calculate the gradient of the objective with respect to the parameters: ``` with theano.configparser.change_flags(compute_test_value="off"): grads = tt.grad(inference.objective(None), inference.approx.params) grads ``` If we want to keep track of the gradient changes during the inference, we wrap them in a pymc3 callback: ``` from collections import OrderedDict, defaultdict from itertools import count @theano.configparser.change_flags(compute_test_value="off") def get_tracker(inference): numbers = defaultdict(count) params = inference.approx.params grads = tt.grad(inference.objective(None), params) names = ["%s_%d" % (v.name, next(numbers[v.name])) for v in inference.approx.params] return pm.callbacks.Tracker( **OrderedDict( [(name, v.eval) for name, v in zip(names, params)] + [("grad_" + name, v.eval) for name, v in zip(names, grads)] ) ) tracker = get_tracker(inference) tracker.whatchdict inference.fit(30000, obj_optimizer=pm.adagrad_window(learning_rate=0.01), callbacks=[tracker]) dftrace = pm.trace_to_dataframe(inference.approx.sample(1000)) sns.jointplot(dftrace.iloc[:, 0], dftrace.iloc[:, 1], kind="kde") plt.plot(inference.hist); ``` As you can see, the objective history is not very informative here. This is where the gradient tracker can be more informative. 
``` # fmt: off trackername = ['u_0', 'w_0', 'b_0', 'u_1', 'w_1', 'b_1', 'grad_u_0', 'grad_w_0', 'grad_b_0', 'grad_u_1', 'grad_w_1', 'grad_b_1'] # fmt: on def plot_tracker_results(tracker): fig, ax = plt.subplots(len(tracker.hist) // 2, 2, figsize=(16, len(tracker.hist) // 2 * 2.3)) ax = ax.flatten() # names = list(tracker.hist.keys()) names = trackername gnames = names[len(names) // 2 :] names = names[: len(names) // 2] pairnames = zip(names, gnames) def plot_params_and_grads(name, gname): i = names.index(name) left = ax[i * 2] right = ax[i * 2 + 1] grads = np.asarray(tracker[gname]) if grads.ndim == 1: grads = grads[:, None] grads = grads.T params = np.asarray(tracker[name]) if params.ndim == 1: params = params[:, None] params = params.T right.set_title("Gradient of %s" % name) left.set_title("Param trace of %s" % name) s = params.shape[0] for j, (v, g) in enumerate(zip(params, grads)): left.plot(v, "-") right.plot(g, "o", alpha=1 / s / 10) left.legend([name + "_%d" % j for j in range(len(names))]) right.legend([gname + "_%d" % j for j in range(len(names))]) for vn, gn in pairnames: plot_params_and_grads(vn, gn) fig.tight_layout() plot_tracker_results(tracker); ``` Inference **is often unstable**, some parameters are not well fitted as they poorly influence the resulting posterior. In a multimodal setting, the dominant mode might well change from run to run. ### Going deeper We can try to improve our approximation by adding flows; in the original paper they used both 8 and 32. Let's try using 8 here. ``` with pot1m: inference = pm.NFVI("planar*8", jitter=1.0) dftrace = pm.trace_to_dataframe(inference.approx.sample(1000)) sns.jointplot(dftrace.iloc[:, 0], dftrace.iloc[:, 1], kind="kde"); ``` We can try for a more robust fit by allocating more samples to `obj_n_mc` in `fit`, which controls the number of Monte Carlo samples used to approximate the gradient. 
``` inference.fit( 25000, obj_optimizer=pm.adam(learning_rate=0.01), obj_n_mc=100, callbacks=[pm.callbacks.CheckParametersConvergence()], ) dftrace = pm.trace_to_dataframe(inference.approx.sample(1000)) sns.jointplot(dftrace.iloc[:, 0], dftrace.iloc[:, 1], kind="kde") ``` This is a noticeable improvement. Here, we see that flows are able to characterize the multimodality of a given posterior, but as we have seen, they are hard to fit. The initial point of the optimization matters in general for the multimodal case. ### MCMC vs NFVI Let's use another potential function, and compare the sampling using NUTS to what we get with NF: ``` def cust_logp(z): return -pot4(z) with pm.Model() as pot_m: pm.DensityDist("pot_func", logp=cust_logp, shape=(2,)) with pot_m: traceNUTS = pm.sample(3000, tune=1000, target_accept=0.9, cores=2) formula = "planar*10" with pot_m: inference = pm.NFVI(formula, jitter=0.1) inference.fit(25000, obj_optimizer=pm.adam(learning_rate=0.01), obj_n_mc=10) traceNF = inference.approx.sample(5000) fig, ax = plt.subplots(1, 3, figsize=(18, 6)) contour_pot(pot4f, ax[0], "Target Potential Function") ax[1].scatter(traceNUTS["pot_func"][:, 0], traceNUTS["pot_func"][:, 1], c="r", alpha=0.05) ax[1].set_xlim(-5, 5) ax[1].set_ylim(-5, 5) ax[1].set_title("NUTS") ax[2].scatter(traceNF["pot_func"][:, 0], traceNF["pot_func"][:, 1], c="b", alpha=0.05) ax[2].set_xlim(-5, 5) ax[2].set_ylim(-5, 5) ax[2].set_title("NF with " + formula); %load_ext watermark %watermark -n -u -v -iv -w ```
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import scipy.spatial.distance as dist import collections import time import warnings warnings.filterwarnings('ignore') from sklearn.cluster import KMeans from numba import jit, vectorize, float64, int64 @jit def jit_kmeans_pp(data, k, weights): first_random = np.random.choice(data.shape[0], 1) C = data[first_random, :] for i in range(k-1): cdist = (jit_distance(data, C))**2 cdist_min = np.min(cdist, axis = 1)* weights prob = cdist_min/np.sum(cdist_min) new_center = np.random.choice(data.shape[0],1, p=prob) C = np.vstack([C, data[new_center,:]]) return C @jit def jit_get_weight(C, data): weights=np.zeros(C.shape[0]) cdist = (jit_distance(data,C))**2 min_cdist = np.argmin(cdist, axis = 1) count = collections.Counter(min_cdist) weights = list(collections.OrderedDict(sorted(count.items(), key=lambda x: x[0])).values()) weights=np.array(weights)/sum(weights) return weights @jit def jit_kmeans_II(data, k, l, max_iter=10000): first_random = np.random.choice(data.shape[0], 1) C = data[first_random, :] cdist = (jit_distance(data, C))**2 cdist_min = np.min(cdist, axis = 1) cost_phi = np.sum(cdist_min) for i in range(int(round(np.log(cost_phi)))): cdist = (jit_distance(data, C))**2 cdist_min = np.min(cdist, axis = 1) prob = cdist_min * l/np.sum(cdist_min) for j in range(data.shape[0]): if np.random.uniform() <= prob[j] and data[j,:] not in C: C = np.vstack([C, data[j,:]]) weights= get_weight(C, data) return jit_kmeans_pp(C, k, weights) ``` 1st real world data: Housing data Reference :http://ichrome.com/blogs/archives/221 ``` housing = np.loadtxt("../data/housing.data.txt") #the sixth column is average number of rooms per dwelling room = housing[:,5].reshape(-1,1) #the last column is Median value of owner-occupied homes in $1000's value = housing[:,-1].reshape(-1,1) data = housing[:,(5,-1)] def cdist_kmeans(data, k, centroids, max_iter=10000): for i in range(max_iter): cdist = (dist.cdist(data, 
centroids))**2 labels = np.argmin(cdist, axis=1) update_centroids = np.zeros(centroids.shape) for j in range(k): # check if the centroid is the closest to some data point if sum(labels == j) != 0: update_centroids[j] = np.mean(data[labels ==j], axis=0) else: # if not, leave the lone centroid unmoved update_centroids[j] = centroids[j] if np.allclose(update_centroids, centroids): print("Algorithm converged after", i, "iterations.") return ({"Centroids": centroids, "Labels": labels}) else: centroids = update_centroids print("Warning: maximum number of iterations reached. Failed to converge.") return centroids def cdist_kmeans_pp(data, k, weights): first_random = np.random.choice(data.shape[0], 1) C = data[first_random, :] for i in range(k-1): cdist = (dist.cdist(data, C))**2 cdist_min = np.min(cdist, axis = 1)* weights prob = cdist_min/np.sum(cdist_min) new_center = np.random.choice(data.shape[0],1, p=prob) C = np.vstack([C, data[new_center,:]]) return C def cdist_get_weight(C, data): weights=np.zeros(C.shape[0]) cdist = (dist.cdist(data,C))**2 min_cdist = np.argmin(cdist, axis = 1) count = collections.Counter(min_cdist) weights = list(collections.OrderedDict(sorted(count.items(), key=lambda x: x[0])).values()) weights=np.array(weights)/sum(weights) return weights def cdist_kmeans_II(data, k, l, max_iter=10000): first_random = np.random.choice(data.shape[0], 1) C = data[first_random, :] cdist = (dist.cdist(data, C))**2 cdist_min = np.min(cdist, axis = 1) cost_phi = np.sum(cdist_min) for i in range(int(round(np.log(cost_phi)))): cdist = (dist.cdist(data, C))**2 cdist_min = np.min(cdist, axis = 1) prob = cdist_min * l/np.sum(cdist_min) for j in range(data.shape[0]): if np.random.uniform() <= prob[j] and data[j,:] not in C: C = np.vstack([C, data[j,:]]) weights= cdist_get_weight(C, data) return cdist_kmeans_pp(C, k, weights) def navie_result(data, k, l): #find the initial center c = np.random.choice(np.arange(len(data)), k) #base c0 = np.empty([k, data.shape[1]]) c1 = 
data[c,:] c2 = cdist_kmeans_pp(data, k, 1) c3 = cdist_kmeans_II(data, k, l, max_iter = 10000) initial_centroid = np.array([c0, c1, c2, c3]) label = np.empty([data.shape[0]]) final_centroid = np.empty([k, data.shape[1]]) for i in np.arange(1, 4):#three algorithm #kmeans_label = KMeans(n_clusters = k, init = initial_centroid[i], n_init = 1).fit_predict(data) output = cdist_kmeans(data,k ,initial_centroid[i]) kmeans_label = output["Labels"] label = np.vstack([label, kmeans_label]) kmeans_centroid = output["Centroids"] final_centroid = np.dstack((final_centroid, kmeans_centroid)) return {"Initial_centroid":initial_centroid, "label":label, "Final_centroidtroid" :final_centroid } dict = navie_result(data, 5, 10) def test_cluster(ini_cen, fin_cen, label, data, title): plt.scatter(data[:, 0], data[:, 1], c = label) for i in range(np.unique(label).size): if i > 0: plt.scatter(ini_cen[i,0],ini_cen[i,1],marker = '+', c = 'red', s= 100) plt.scatter(fin_cen[i,0],fin_cen[i,1],marker = '*', c = 'blue', s= 100) else: plt.scatter(ini_cen[i,0],ini_cen[i,1],marker = '+', c = 'red', s= 100, label = "Initial Cluster") plt.scatter(fin_cen[i,0],fin_cen[i,1],marker = '*', c = 'blue', s= 100, label = "Final Cluster") plt.legend(loc='best', prop={'size':15}) plt.title(title) title = ["k-means", "k-means++", "k-means||"] plt.figure(1, figsize=(20, 15)) for i in np.arange(1, 4): plt.subplot(220 + i) test_cluster(dict['Initial_centroid'][i], dict['Final_centroidtroid'][:,:,i], dict['label'][i],data, title[i - 1]) plt.show() ``` 2nd real world data: spam ``` spam = pd.read_csv("data/spambase.data", header=None) spam_np = np.asarray(spam) d = spam.apply(np.ptp, axis = 0).to_dict() sorted(d.items(), key = lambda x: x[1])[:5] dict1 = navie_result(spam_np, 5, 10) def test_cluster(ini_cen, fin_cen, label, data, title): plt.scatter(data[:, 10], data[:, 46], c = label) for i in range(np.unique(label).size): if i > 0: plt.scatter(ini_cen[i,10],ini_cen[i,46],marker = '+', c = 'red', s= 100) 
plt.scatter(fin_cen[i,10],fin_cen[i,46],marker = '*', c = 'blue', s= 100) else: plt.scatter(ini_cen[i,10],ini_cen[i,46],marker = '+', c = 'red', s= 100, label = "Initial Cluster") plt.scatter(fin_cen[i,10],fin_cen[i,46],marker = '*', c = 'blue', s= 100, label = "Final Cluster") plt.legend(loc='best', prop={'size':15}) plt.title(title) title = ["k-means", "k-means++", "k-means||"] plt.figure(1, figsize=(20, 15)) for i in np.arange(1, 4): plt.subplot(220 + i) test_cluster(dict1['Initial_centroid'][i], dict1['Final_centroidtroid'][:,:,i], dict1['label'][i],spam_np, title[i - 1]) plt.show() ```
github_jupyter
# 2D Advection-Diffusion equation in this notebook we provide a simple example of the DeepMoD algorithm and apply it on the 2D advection-diffusion equation. ``` import numpy as np import pandas as pd from scipy.io import loadmat from deepymod.DeepMoD import DeepMoD from deepymod.library_functions import library_2Din_1Dout from deepymod.utilities import library_matrix_mat, print_PDE import matplotlib.pyplot as plt plt.style.use('seaborn-notebook') np.random.seed(42) # setting seed for randomisation ``` ## Prepare the data Next, we prepare the dataset. ``` data = loadmat('data/Advection_diffusion.mat') usol = np.real(data['Expression1']) usol= usol.reshape((51,51,61,4)) x_v= usol[:,:,:,0] y_v = usol[:,:,:,1] t_v = usol[:,:,:,2] u_v = usol[:,:,:,3] ``` Next we plot the dataset for three different time-points ``` fig, axes = plt.subplots(ncols=3, figsize=(15, 4)) im0 = axes[0].contourf(x_v[:,:,0], y_v[:,:,0], u_v[:,:,0], cmap='coolwarm') axes[0].set_xlabel('x') axes[0].set_ylabel('y') axes[0].set_title('t = 0') im1 = axes[1].contourf(x_v[:,:,10], y_v[:,:,10], u_v[:,:,10], cmap='coolwarm') axes[1].set_xlabel('x') axes[1].set_title('t = 10') im2 = axes[2].contourf(x_v[:,:,20], y_v[:,:,20], u_v[:,:,20], cmap='coolwarm') axes[2].set_xlabel('x') axes[2].set_title('t= 20') fig.colorbar(im1, ax=axes.ravel().tolist()) plt.show() ``` We flatten it to give it the right dimensions for feeding it to the network: ``` X = np.transpose((t_v.flatten(),x_v.flatten(), y_v.flatten())) y = u_v.reshape((u_v.size, 1)) ``` We select the noise level we add to the data-set ``` noise_level = 0.1 y_noisy = y + noise_level * np.std(y) * np.random.randn(y.size, 1) ``` Select the number of samples: ``` number_of_samples = 2000 idx = np.random.permutation(y.size) X_train = X[idx, :][:number_of_samples] y_train = y_noisy[idx, :][:number_of_samples] ``` ## Configure the neural network Next we define the architecture and strength of the $L_1$ penalty for the neural network. 
Note that in this example have three input channels in this example: {x,y,t} ``` config = {'layers': [3, 20, 20, 20, 20, 20, 1], 'lambda': 10**-6} ``` DeepMoD accepts any arbitrary library function and any options for it can be given through *the library_config*. The library function for this example accepts a maximum order for the polynomial and derivative terms. DeepMoD also needs to know the total number of terms upfront. We can calculate that by making a list of the polynomial and derivative terms and getting all the terms by feeding them into the library_matrix_mat function. Its output will be used later to print the found PDE. ``` u = ['1', 'u'] du = ['1', 'u_{x}', 'u_{y}','u_{xx}', 'u_{yy}','u_{xy}'] coeffs_list = library_matrix_mat(u, du) print(coeffs_list) library_config = {'total_terms': 12, 'deriv_order': 2, 'poly_order': 1} ``` Next we set the training options. Usually we only change the maximum iterations and the grad_tol, which sets the convergence criterion; ``` train_opts = {'max_iterations': 50000, 'grad_tol':10**-7, 'learning_rate': 0.002, 'beta1': 0.99, 'beta2': 0.999, 'epsilon': 10**-8} ``` The last configuration we need to fill is the ouput_opts. It contains an output_directory and X_predict field. We've build a custom tensorboard so you can follow the progress of the run. Output_directory sets where the files are saved, then simply run `tensorboard --logdir $[OUTPUT_DIRECTORY]` in a terminal to open tensorboard. It shows the value of the coefficients, scaled coefficients and all possible costs. Note that the runs are timestamped in output_directory, so you'll have to add it! We can also use the output of tensorboard to analyze deepmod after. We show this below. The last option is X_predict. As DeepMoD also denoises the data, use this option to denoise some dataset X after DeepMoD has converged. 
``` output_opts = {'output_directory': 'output/AD/', 'X_predict': X} ``` ## Run DeepMoD We can now run DeepMoD using all the options we have set and the training data. We also need to specify which library function we wish to use. You can build any library you want and just pass is through this command: ``` sparse_vectors, denoised = DeepMoD(X_train, y_train, config, library_2Din_1Dout, library_config, train_opts, output_opts) ``` Show final result: ``` u = ['1', 'u'] du = ['1', 'u_{x}', 'u_{y}','u_{xx}', 'u_{yy}','u_{xy}'] coeffs_list = library_matrix_mat(u, du) print('Inferred equation:') print_PDE(sparse_vectors[0], coeffs_list, PDE_term='u_t') ``` Plot the 'Ground truth', 'Noisy' and 'Reconstructed/Denoised' solution ``` X_predict = X[(X[:,0]==5.0) | (X[:,0]==9.0) | (X[:,0]==7.0), :] data_dict = {'x_grid': X[:,1],'y_grid': X[:,2], 't_grid': X[:,0], 'ground_truth': np.squeeze(y), 'noisy': np.squeeze(y_noisy)} df = pd.DataFrame(data_dict) for key in df: df[key] = np.squeeze(df[key]) data = pd.DataFrame(df) data_dict_denoised = {'x_grid': X[:,1],'y_grid': X[:,2], 't_grid': X[:,0], 'denoised': np.squeeze(denoised)} df_dn = pd.DataFrame(data_dict_denoised) for key in df_dn: df_dn[key] = np.squeeze(df_dn[key]) data_denoised = pd.DataFrame(df_dn) time1 = 7.0 vmin = np.min(data[data['t_grid'] == time1]['ground_truth']) vmax = np.max(data[data['t_grid'] == time1]['ground_truth']) fig, axes = plt.subplots(ncols=3, figsize=(15, 4)) im1 = axes[0].contourf(pd.pivot_table(data[data['t_grid'] == time1],index='y_grid', columns='x_grid', values='ground_truth'), cmap='coolwarm',vmin=vmin, vmax=vmax) axes[0].set_xlabel('x') axes[0].set_ylabel('y') axes[0].set_title('Ground truth') im2 = axes[1].contourf(pd.pivot_table(data[data['t_grid'] == time1],index='y_grid', columns='x_grid', values='noisy'), cmap='coolwarm', vmin=vmin, vmax=vmax) axes[1].set_xlabel('x') axes[1].set_title('Noisy') im3 = axes[2].contourf(pd.pivot_table(data_denoised[data_denoised['t_grid'] == 
time1],index='y_grid', columns='x_grid', values='denoised'), cmap='coolwarm', vmin=vmin, vmax=vmax) axes[2].set_xlabel('x') axes[2].set_title('Sampled') fig.colorbar(im1, ax=axes.ravel().tolist()) plt.show() ```
github_jupyter
## Read Data ``` import pandas as pd from pathlib import Path import pickle from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import LabelEncoder import bisect import numpy as np input_filepath = '../data/interim/' output_filepath = '../data/processed/' # cols BINARY_COLS = Path.cwd().joinpath(input_filepath).joinpath('binary-cols.pickle') CATEGORICAL_COLS = Path.cwd().joinpath(input_filepath).joinpath('categorical-cols.pickle') CONTINUOUS_COLS = Path.cwd().joinpath(input_filepath).joinpath('continuous-cols.pickle') TARGET_COL = Path.cwd().joinpath(input_filepath).joinpath('target-col.pickle') BINARY_COLS_OUT = Path.cwd().joinpath(output_filepath).joinpath('binary-cols.pickle') CATEGORICAL_COLS_OUT = Path.cwd().joinpath(output_filepath).joinpath('categorical-cols.pickle') CONTINUOUS_COLS_OUT = Path.cwd().joinpath(output_filepath).joinpath('continuous-cols.pickle') TARGET_COL_OUT = Path.cwd().joinpath(output_filepath).joinpath('target-col.pickle') # data TRAIN_CSV = Path.cwd().joinpath(input_filepath).joinpath('train.csv') VAL_CSV = Path.cwd().joinpath(input_filepath).joinpath('val.csv') TEST_CSV = Path.cwd().joinpath(input_filepath).joinpath('test.csv') TRAIN_CSV_OUT = Path.cwd().joinpath(output_filepath).joinpath('train.csv') VAL_CSV_OUT = Path.cwd().joinpath(output_filepath).joinpath('val.csv') TEST_CSV_OUT = Path.cwd().joinpath(output_filepath).joinpath('test.csv') # metadata BINARY_ENCODERS = Path.cwd().joinpath(output_filepath).joinpath('binary-encoders.pickle') CATEGORICAL_ENCODERS = Path.cwd().joinpath(output_filepath).joinpath('categorical-encoders.pickle') TARGET_ENCODERS = Path.cwd().joinpath(output_filepath).joinpath('target-encoders.pickle') CONTINUOUS_SCALERS = Path.cwd().joinpath(output_filepath).joinpath('continuous-scalers.pickle') def read_obj(path): with open(path, 'rb') as f: return pickle.load(f) return None binary_cols = read_obj(BINARY_COLS) categorical_cols = read_obj(CATEGORICAL_COLS) continuous_cols = 
read_obj(CONTINUOUS_COLS) target_col = read_obj(TARGET_COL) train = pd.read_csv(TRAIN_CSV) val = pd.read_csv(VAL_CSV) test = pd.read_csv(TEST_CSV) ``` ## Typify ``` train[continuous_cols] = train[continuous_cols].astype('float32') val[continuous_cols] = val[continuous_cols].astype('float32') test[continuous_cols] = test[continuous_cols].astype('float32') train[categorical_cols] = train[categorical_cols].astype('str').astype('category') val[categorical_cols] = val[categorical_cols].astype('str').astype('category') test[categorical_cols] = test[categorical_cols].astype('str').astype('category') train[binary_cols] = train[binary_cols].astype('str').astype('category') val[binary_cols] = val[binary_cols].astype('str').astype('category') test[binary_cols] = test[binary_cols].astype('str').astype('category') train[target_col] = train[target_col].astype('str').astype('category') val[target_col] = val[target_col].astype('str').astype('category') test[target_col] = test[target_col].astype('str').astype('category') ``` ## Fill Data ``` train[continuous_cols] = train[continuous_cols].fillna(0) val[continuous_cols] = val[continuous_cols].fillna(0) test[continuous_cols] = test[continuous_cols].fillna(0) ``` ## Normalize ``` def normalize(df, cols, scalers=None): if None is scalers: scalers = dict() for col in cols: if col not in scalers: scalers[col] = StandardScaler(with_mean=True, with_std=True) scalers[col].fit(df[col].values.reshape(-1,1)) scaler = scalers[col] df[col] = scaler.transform(df[col].values.reshape(-1,1)) return df, scalers train, scalers = normalize(train, continuous_cols) val, _ = normalize(val, continuous_cols, scalers) test, _ = normalize(test, continuous_cols, scalers) train[continuous_cols].head() ``` ## Label Encode ``` def labelencode(df, cols, encoders=None, unknown_value='UNK'): if None is encoders: encoders = dict() for col in cols: if col not in encoders: le = LabelEncoder() le.fit(df[col].values) # add unknown val to cats cats = le.classes_.tolist() 
bisect.insort_left(cats, unknown_value) # redefine cats on le le.classes_ = np.asarray(cats) encoders[col] = le le = encoders[col] df[col] = df[col].map(lambda x: unknown_value if x not in le.classes_ else x) df[col] = le.transform(df[col].values) return df, encoders train, label_encoders = labelencode(train, categorical_cols) val, _ = labelencode(val, categorical_cols, label_encoders) test, _ = labelencode(test, categorical_cols, label_encoders) train[categorical_cols].head() ``` ## One-Hot Encode ``` # todo train, ohe_encoders = labelencode(train, binary_cols) val, _ = labelencode(val, binary_cols, ohe_encoders) test, _ = labelencode(test, binary_cols, ohe_encoders) train[binary_cols].head() ``` ## Label Encode Targets ``` train, target_encoders = labelencode(train, [target_col]) val, _ = labelencode(val, [target_col], target_encoders) test, _ = labelencode(test, [target_col], target_encoders) train[target_col].head() ``` ## Persist Data and Metadata ``` def pickle_obj(path, obj): with open(path, 'wb') as f: pickle.dump(obj, f) # cols pickle_obj(BINARY_COLS_OUT, binary_cols) pickle_obj(CATEGORICAL_COLS_OUT, categorical_cols) pickle_obj(CONTINUOUS_COLS_OUT, continuous_cols) pickle_obj(TARGET_COL_OUT, target_col) # metadata pickle_obj(BINARY_ENCODERS, ohe_encoders) pickle_obj(CATEGORICAL_ENCODERS, label_encoders) pickle_obj(TARGET_ENCODERS, target_encoders) pickle_obj(CONTINUOUS_SCALERS, scalers) # data train.to_csv(TRAIN_CSV_OUT, index=False) val.to_csv(VAL_CSV_OUT, index=False) test.to_csv(TEST_CSV_OUT, index=False) ```
github_jupyter
# FloPy ## MODPATH example This notebook demonstrates how to create and run forward and backward tracking with MODPATH. The notebooks also shows how to create subsets of pathline and endpoint information, plot MODPATH results on ModelMap objects, and export endpoints and pathlines as shapefiles. ``` import sys import shutil import os import glob import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt # run installed version of flopy or add local path try: import flopy except: fpth = os.path.abspath(os.path.join("..", "..")) sys.path.append(fpth) import flopy print(sys.version) print("numpy version: {}".format(np.__version__)) print("matplotlib version: {}".format(mpl.__version__)) print("pandas version: {}".format(pd.__version__)) print("flopy version: {}".format(flopy.__version__)) if not os.path.isdir("data"): os.makedirs("data", exist_ok=True) ``` ### Copy modflow datasets to scratch directory ``` mffiles = glob.glob(os.path.join("..", "data", "mp6", "EXAMPLE.*")) for f in mffiles: print(f) shutil.copy(f, os.path.join("data", os.path.split(f)[-1])) ``` ### Load MODFLOW model ``` model_ws = "data" m = flopy.modflow.Modflow.load("EXAMPLE.nam", model_ws=model_ws) m.get_package_list() nrow, ncol, nlay, nper = m.nrow_ncol_nlay_nper nrow, ncol, nlay, nper m.dis.steady.array m.write_input() hdsfile = flopy.utils.HeadFile(os.path.join(model_ws, "EXAMPLE.HED")) hdsfile.get_kstpkper() hds = hdsfile.get_data(kstpkper=(0, 2)) ``` ### Plot RIV bc and head results ``` plt.imshow(hds[4, :, :]) plt.colorbar(); fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(1, 1, 1, aspect="equal") mapview = flopy.plot.PlotMapView(model=m, layer=4) quadmesh = mapview.plot_ibound() linecollection = mapview.plot_grid() riv = mapview.plot_bc("RIV", color="g", plotAll=True) quadmesh = mapview.plot_bc("WEL", kper=1, plotAll=True) contour_set = mapview.contour_array( hds, levels=np.arange(np.min(hds), np.max(hds), 0.5), colors="b" ) 
plt.clabel(contour_set, inline=1, fontsize=14); ``` ## Create forward particle tracking simulation where particles are released at the top of each cell in layer 1 * specifying the recharge package in ```create_mpsim``` releases a single particle on iface=6 of each top cell * start the particles at begining of per 3, step 1, as in example 3 in MODPATH6 manual ``` mp = flopy.modpath.Modpath6( modelname="ex6", exe_name="mp6", modflowmodel=m, model_ws="data", dis_file=m.name + ".DIS", head_file=m.name + ".hed", budget_file=m.name + ".bud", ) mpb = flopy.modpath.Modpath6Bas( mp, hdry=m.lpf.hdry, laytyp=m.lpf.laytyp, ibound=1, prsity=0.1 ) # start the particles at begining of per 3, step 1, as in example 3 in MODPATH6 manual # (otherwise particles will all go to river) sim = mp.create_mpsim( trackdir="forward", simtype="pathline", packages="RCH", start_time=(2, 0, 1.0), ) mp.write_input() mp.run_model(silent=False) ``` ### Read in the endpoint file and plot particles that terminated in the well ``` fpth = os.path.join("data", "ex6.mpend") epobj = flopy.utils.EndpointFile(fpth) well_epd = epobj.get_destination_endpoint_data(dest_cells=[(4, 12, 12)]) # returns record array of same form as epobj.get_all_data() well_epd[0:2] fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(1, 1, 1, aspect="equal") mapview = flopy.plot.PlotMapView(model=m, layer=2) quadmesh = mapview.plot_ibound() linecollection = mapview.plot_grid() riv = mapview.plot_bc("RIV", color="g", plotAll=True) quadmesh = mapview.plot_bc("WEL", kper=1, plotAll=True) contour_set = mapview.contour_array( hds, levels=np.arange(np.min(hds), np.max(hds), 0.5), colors="b" ) plt.clabel(contour_set, inline=1, fontsize=14) mapview.plot_endpoint(well_epd, direction="starting", colorbar=True); ``` ### Write starting locations to a shapefile ``` fpth = os.path.join("data", "starting_locs.shp") print(type(fpth)) epobj.write_shapefile( well_epd, direction="starting", shpname=fpth, mg=m.modelgrid ) ``` ### Read in the pathline 
file and subset to pathlines that terminated in the well ``` # make a selection of cells that terminate in the well cell = (4, 12, 12) pthobj = flopy.utils.PathlineFile(os.path.join("data", "ex6.mppth")) well_pathlines = pthobj.get_destination_pathline_data(dest_cells=[(4, 12, 12)]) ``` ### Plot the pathlines that terminate in the well and the starting locations of the particles ``` fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(1, 1, 1, aspect="equal") mapview = flopy.plot.PlotMapView(model=m, layer=2) quadmesh = mapview.plot_ibound() linecollection = mapview.plot_grid() riv = mapview.plot_bc("RIV", color="g", plotAll=True) quadmesh = mapview.plot_bc("WEL", kper=1, plotAll=True) contour_set = mapview.contour_array( hds, levels=np.arange(np.min(hds), np.max(hds), 0.5), colors="b" ) plt.clabel(contour_set, inline=1, fontsize=14) mapview.plot_endpoint(well_epd, direction="starting", colorbar=True) # for now, each particle must be plotted individually # (plot_pathline() will plot a single line for recarray with multiple particles) # for pid in np.unique(well_pathlines.particleid): # modelmap.plot_pathline(pthobj.get_data(pid), layer='all', colors='red'); mapview.plot_pathline(well_pathlines, layer="all", colors="red"); ``` ### Write pathlines to a shapefile ``` # one line feature per particle pthobj.write_shapefile( well_pathlines, direction="starting", shpname=os.path.join("data", "pathlines.shp"), mg=m.modelgrid, ) # one line feature for each row in pathline file # (can be used to color lines by time or layer in a GIS) pthobj.write_shapefile( well_pathlines, one_per_particle=False, shpname=os.path.join("data", "pathlines_1per.shp"), mg=m.modelgrid, ) ``` ## Replace WEL package with MNW2; create backward tracking simulation using particles released at MNW well ``` model_ws = "data" m2 = flopy.modflow.Modflow.load( "EXAMPLE.nam", model_ws=model_ws, exe_name="mf2005" ) m2.get_package_list() m2.nrow_ncol_nlay_nper m2.wel.stress_period_data.data node_data = 
np.array( [ (3, 12, 12, "well1", "skin", -1, 0, 0, 0, 1.0, 2.0, 5.0, 6.2), (4, 12, 12, "well1", "skin", -1, 0, 0, 0, 0.5, 2.0, 5.0, 6.2), ], dtype=[ ("k", int), ("i", int), ("j", int), ("wellid", object), ("losstype", object), ("pumploc", int), ("qlimit", int), ("ppflag", int), ("pumpcap", int), ("rw", float), ("rskin", float), ("kskin", float), ("zpump", float), ], ).view(np.recarray) stress_period_data = { 0: np.array( [(0, "well1", -150000.0)], dtype=[("per", int), ("wellid", object), ("qdes", float)], ) } m2.name = "Example_mnw" m2.remove_package("WEL") mnw2 = flopy.modflow.ModflowMnw2( model=m2, mnwmax=1, node_data=node_data, stress_period_data=stress_period_data, itmp=[1, -1, -1], ) m2.get_package_list() ``` ### Write and run MODFLOW ``` m2.write_input() m2.run_model(silent=False) ``` ### Create new MODPATH object ``` mp = flopy.modpath.Modpath6( modelname="ex6mnw", exe_name="mp6", modflowmodel=m2, model_ws="data", dis_file=m.name + ".DIS", head_file=m.name + ".hds", budget_file=m.name + ".cbc", ) mpb = flopy.modpath.Modpath6Bas( mp, hdry=m2.lpf.hdry, laytyp=m2.lpf.laytyp, ibound=1, prsity=0.1 ) sim = mp.create_mpsim(trackdir="backward", simtype="pathline", packages="MNW2") mp.write_input() mp.run_model(silent=False) ``` ### Read in results and plot ``` pthobj = flopy.utils.PathlineFile(os.path.join("data", "ex6mnw.mppth")) epdobj = flopy.utils.EndpointFile(os.path.join("data", "ex6mnw.mpend")) well_epd = epdobj.get_alldata() well_pathlines = ( pthobj.get_alldata() ) # returns a list of recarrays; one per pathline fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(1, 1, 1, aspect="equal") mapview = flopy.plot.PlotMapView(model=m2, layer=2) quadmesh = mapview.plot_ibound() linecollection = mapview.plot_grid() riv = mapview.plot_bc("RIV", color="g", plotAll=True) quadmesh = mapview.plot_bc("MNW2", kper=1, plotAll=True) contour_set = mapview.contour_array( hds, levels=np.arange(np.min(hds), np.max(hds), 0.5), colors="b" ) plt.clabel(contour_set, inline=1, 
fontsize=14) mapview.plot_pathline( well_pathlines, travel_time="<10000", layer="all", colors="red" ); ```
github_jupyter
# Resample Data ## Pandas Resample You've learned about bucketing to different periods of time like Months. Let's see how it's done. We'll start with an example series of days. ``` import numpy as np import pandas as pd dates = pd.date_range('10/10/2018', periods=11, freq='D') close_prices = np.arange(len(dates)) close = pd.Series(close_prices, dates) close ``` Let's say we want to bucket these days into 3 day periods. To do that, we'll use the [DataFrame.resample](https://pandas.pydata.org/pandas-docs/version/0.21/generated/pandas.DataFrame.resample.html) function. The first parameter in this function is a string called `rule`, which is a representation of how to resample the data. This string representation is made using an offset alias. You can find a list of them [here](http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases). To create 3 day periods, we'll set `rule` to "3D". ``` close.resample('3D') ``` This returns a `DatetimeIndexResampler` object. It's an intermediate object similar to the `GroupBy` object. Just like group by, it breaks the original data into groups. That means, we'll have to apply an operation to these groups. Let's make it simple and get the first element from each group. ``` close.resample('3D').first() ``` You might notice that this is the same as `.iloc[::3]` ``` close.iloc[::3] ``` So, why use the `resample` function instead of `.iloc[::3]` or the `groupby` function? The `resample` function shines when handling time and/or date specific tasks. In fact, you can't use this function if the index isn't a [time-related class](https://pandas.pydata.org/pandas-docs/version/0.21/timeseries.html#overview). ``` try: # Attempt resample on a series without a time index pd.Series(close_prices).resample('W') except TypeError: print('It threw a TypeError.') else: print('It worked.') ``` One of the resampling tasks it can help with is resampling on periods, like weeks. Let's resample `close` from it's days frequency to weeks. 
We'll use the "W" offset alias, which stands for Weeks. ``` pd.DataFrame({ 'days': close, 'weeks': close.resample('W').first()}) ``` The weeks offset considers the start of a week on a Monday. Since 2018-10-10 is a Wednesday, the first group only looks at the first 5 items. There are offsets that handle more complicated problems like filtering for Holidays. For now, we'll only worry about resampling for days, weeks, months, quarters, and years. The frequency you want the data to be in, will depend on how often you'll be trading. If you're making trade decisions based on reports that come out at the end of the year, we might only care about a frequency of years or months. ## OHLC Now that you've seen how Pandas resamples time series data, we can apply this to Open, High, Low, and Close (OHLC). Pandas provides the [`Resampler.ohlc`](https://pandas.pydata.org/pandas-docs/version/0.21.0/generated/pandas.core.resample.Resampler.ohlc.html#pandas.core.resample.Resampler.ohlc) function that will convert any resampling frequency to OHLC data. Let's get the Weekly OHLC. ``` close.resample('W').ohlc() ``` Can you spot a potential problem with that? It has to do with resampling data that has already been resampled. We're getting the OHLC from close data. If we want OHLC data from already resampled data, we should resample the first price from the open data, resample the highest price from the high data, etc.. To get the weekly closing prices from `close`, you can use the [`Resampler.last`](https://pandas.pydata.org/pandas-docs/version/0.21.0/generated/pandas.core.resample.Resampler.last.html#pandas.core.resample.Resampler.last) function. ``` close.resample('W').last() ``` ## Quiz Implement `days_to_weeks` function to resample OHLC price data to weekly OHLC price data. You can find more Resampler functions [here](https://pandas.pydata.org/pandas-docs/version/0.21.0/api.html#id44) for calculating high and low prices. 
``` import quiz_tests def days_to_weeks(open_prices, high_prices, low_prices, close_prices): """Converts daily OHLC prices to weekly OHLC prices. Parameters ---------- open_prices : DataFrame Daily open prices for each ticker and date high_prices : DataFrame Daily high prices for each ticker and date low_prices : DataFrame Daily low prices for each ticker and date close_prices : DataFrame Daily close prices for each ticker and date Returns ------- open_prices_weekly : DataFrame Weekly open prices for each ticker and date high_prices_weekly : DataFrame Weekly high prices for each ticker and date low_prices_weekly : DataFrame Weekly low prices for each ticker and date close_prices_weekly : DataFrame Weekly close prices for each ticker and date """ open_prices_weekly = open_prices.resample('W').first() high_prices_weekly = high_prices.resample('W').max() low_prices_weekly = low_prices.resample('W').min() close_prices_weekly = close_prices.resample('W').last() return open_prices_weekly, high_prices_weekly, low_prices_weekly, close_prices_weekly quiz_tests.test_days_to_weeks(days_to_weeks) ```
github_jupyter
# Getting Started: Sensitivity Analysis To start analyzing tree diversity, it's important to check if how sensistive your dataset is to the number of trees in the city center geographic boundary. The two diversity (entropy) indices used below are both impacted by the number of members in a sample, so this notebook walks through some basic cleaning and then iterates through counts of trees to generate plots suggested where the diversity indices reach a reasonable equilibrium. ``` # library import import pandas as pd import geopandas as gpd import matplotlib.pyplot as plt import descartes from tqdm import tqdm import treeParsing as tP ``` # Import Tree Inventory and City Center Boundary Import your tree data and city center boundary data below. These data may use any geospatial data format (SHP, Geojson, Geopackage) and should be in the same coordinate projection. Your tree data will need the following columns: * Point geographic location * Diameter at breast height (DBH) * Tree Scientific Name * Tree Genus Name * Tree Family Name Your city center geography simply needs to be a single, dissolved geometry representing your city center area. ``` ### Enter the path to your data below ### tree_data_path = 'example_data/trees_paris.gpkg' tree_data = gpd.read_file(tree_data_path) tree_data.plot() ### Enter the path to your data below ### city_center_boundary_path = 'example_data/paris.gpkg' city_center = gpd.read_file(city_center_boundary_path) city_center.plot() ``` # Clean Data and Calculate Basal Area To start, we need to remove features missing data and remove the top quantile of data. Removing any missing data and the top quantile helps remove erroneous entries that are too large or too small than what we would expect. If your data has already been cleaned, feel free to skip the second cell below. 
``` ### Enter your column names here ### scientific_name_column = 'Scientific' genus_name_column = 'genus' family_name_column = 'family' diameter_breast_height_column = 'DBH' ### Ignore if data is already cleaned ### # Exclude Data Missing DBH tree_data = tree_data[tree_data[diameter_breast_height_column]>0] # Exclude data larger than the 99th quantile (often erroneously large) tree_data = tree_data[tree_data[diameter_breast_height_column]<=tree_data.quantile(0.99).DBH] # Calculate Basal Area basal_area_column = 'BA' tree_data[basal_area_column] = tree_data[diameter_breast_height_column]**2 * 0.00007854 ``` # Generate Shannon and Simpson Indices for Count and Basal Area This analysis will sample a random number of trees up to your total number of trees. By default, it will take 10 samples at each count, but a higher number will yield more accurate results. ``` ### For more accurate results increase the iterations_per_count ### If this notebook is running too slowly on your computer, reduce the iterations_per_count iterations_per_count = 10 increment_exponent = 1.01 # Generate counts for Sample Size i = 1 sample_size_list = [] while i < len(tree_data): sample_size_list.append(round(i)) i = (i+1)**increment_exponent sample_size_list.append(len(tree_data)) # List of results entropy_data = [] # Loop through increasing sizes of tree samples until you hit the size of the tree data for count in tqdm(sample_size_list): # Repeat for the number of iterations for i in range(0,iterations_per_count): # Sample the trees sample = tree_data.sample(count) # Derive counts, areas, for species, genus, and family species_count = sample[[scientific_name_column, basal_area_column]].groupby(scientific_name_column).count().reset_index() species_area = sample[[scientific_name_column, basal_area_column]].groupby(scientific_name_column).sum().reset_index() genus_count = sample[[genus_name_column, basal_area_column]].groupby(genus_name_column).count().reset_index() genus_area = 
sample[[genus_name_column, basal_area_column]].groupby(genus_name_column).sum().reset_index() family_count = sample[[family_name_column, basal_area_column]].groupby(family_name_column).count().reset_index() family_area = sample[[family_name_column, basal_area_column]].groupby(family_name_column).sum().reset_index() # Calculate Percentages by count and area species_count["Pct"] = species_count[basal_area_column]/sum(species_count[basal_area_column]) species_area["Pct"] = species_area[basal_area_column]/sum(species_area[basal_area_column]) genus_count["Pct"] = genus_count[basal_area_column]/sum(genus_count[basal_area_column]) genus_area["Pct"] = genus_area[basal_area_column]/sum(genus_area[basal_area_column]) family_count["Pct"] = family_count[basal_area_column]/sum(family_count[basal_area_column]) family_area["Pct"] = family_area[basal_area_column]/sum(family_area[basal_area_column]) # Calculate Shannon Indices species_shannon_count = tP.ShannonEntropy(list(species_count["Pct"])) species_shannon_area = tP.ShannonEntropy(list(species_area["Pct"])) genus_shannon_count = tP.ShannonEntropy(list(genus_count["Pct"])) genus_shannon_area = tP.ShannonEntropy(list(genus_area["Pct"])) family_shannon_count = tP.ShannonEntropy(list(family_count["Pct"])) family_shannon_area = tP.ShannonEntropy(list(family_area["Pct"])) # Calculate Simpson Indices species_simpson_count = tP.simpson_di(list(species_count[scientific_name_column]), list(species_count[basal_area_column])) species_simpson_area = tP.simpson_di(list(species_area[scientific_name_column]),list(species_area[basal_area_column])) genus_simpson_count = tP.simpson_di(list(genus_count[genus_name_column]), list(genus_count[basal_area_column])) genus_simpson_area = tP.simpson_di(list(genus_area[genus_name_column]), list(genus_area[basal_area_column])) family_simpson_count = tP.simpson_di(list(family_count[family_name_column]), list(family_count[basal_area_column])) family_simpson_area = 
tP.simpson_di(list(family_area[family_name_column]), list(family_area[basal_area_column])) # Append to results entropy_data.append({ 'count': count, 'species_simpson_count': species_simpson_count, 'species_simpson_area': species_simpson_area, 'genus_simpson_count': genus_simpson_count, 'genus_simpson_area': genus_simpson_area, 'family_simpson_count': family_simpson_count, 'family_simpson_area': family_simpson_area, 'species_shannon_count': species_shannon_count, 'species_shannon_area': species_shannon_area, 'genus_shannon_count': genus_shannon_count, 'genus_shannon_area': genus_shannon_area, 'family_shannon_count': family_shannon_count, 'family_shannon_area': family_shannon_area }) # Load results into a Dataframe and get median vlalues results_df = pd.DataFrame(entropy_data) median_df = results_df.groupby('count').median().reset_index() median_df.head() ``` # Chart Shannon Index by Basal Area Our results are now loaded into a DataFrame. The below functions and cells will map out each combination (Shannon Index and Simpson Index by Area and Count). Each plot will include a vertical line indicating the count of trees in your tree inventory contained within your city center geography. If the vertical line falls after the slope of the species, genus, and family lines have leveled off, it is reasonable to continueto the second notebook, which conducts the analysis. 
```
# Number of inventory trees that fall inside the city center boundary
# (spatial join of tree points against the city-center polygon).
city_center_tree_count = len(gpd.sjoin(tree_data, city_center))

plt.rcParams["figure.figsize"] = [14, 7]

def generateFigure(df, index, measure):
    """Scatter-plot a diversity index vs. random sample size.

    df: DataFrame with a 'count' column and '<level>_<index>_<measure>'
        columns for level in species/genus/family.
    index: 'shannon' or 'simpson' (selects columns and y-axis limits).
    measure: 'area' or 'count' (selects columns; used in labels).
    """
    x = df['count']
    y = df[f'species_{index}_{measure}']
    y2 = df[f'genus_{index}_{measure}']
    y3 = df[f'family_{index}_{measure}']
    axes = plt.gca()
    # x-axis extends past the city-center count so the vertical marker is visible
    axes.set_xlim([1,city_center_tree_count*4])
    # Shannon is unbounded above (capped at 4.5 for display); Simpson is in [0, 1]
    if index == "shannon":
        axes.set_ylim([0,4.5])
    else:
        axes.set_ylim([0,1])
    plt.xscale('symlog')
    plt.xlabel('Random Sample Size (Log Scale)')
    plt.ylabel(f'{index.capitalize()} Index ({measure.capitalize()})')
    plt.scatter(x, y, s=30, c='red', alpha=1, marker="x", label='Species')
    plt.scatter(x, y2, s=30, c='orange', alpha=1, marker="+", label='Genus')
    plt.scatter(x, y3, s=30, c='green', alpha=1, marker="_", label='Family')
    plt.legend(loc="upper left")
    # Vertical marker at the number of trees in the city center
    plt.axvline(x=city_center_tree_count, linewidth=1, color='black')
    plt.text(city_center_tree_count+1000,0.1,'Trees in City Center',horizontalalignment='left', rotation=90)
    plt.title(f'{index.capitalize()} Index by {measure.capitalize()}')
    plt.show()

generateFigure(median_df, 'shannon', 'area')
generateFigure(median_df, 'shannon', 'count')
generateFigure(median_df, 'simpson', 'area')
generateFigure(median_df, 'simpson', 'count')
```

___

All looks good? Continue to Notebook 2: City Center Diversity Analysis
github_jupyter
# Python for scientific computing > Marcos Duarte > Laboratory of Biomechanics and Motor Control ([http://pesquisa.ufabc.edu.br/bmclab/](http://pesquisa.ufabc.edu.br/bmclab/)) > Federal University of ABC, Brazil The [Python programming language](https://www.python.org/) with [its ecosystem for scientific programming](https://scipy.org/) has features, maturity, and a community of developers and users that makes it the ideal environment for the scientific community. This talk will show some of these features and usage examples. ## Computing as a third kind of Science Traditionally, science has been divided into experimental and theoretical disciplines, but nowadays computing plays an important role in science. Scientific computation is sometimes related to theory, and at other times to experimental work. Hence, it is often seen as a new third branch of science. <figure><img src="https://raw.githubusercontent.com/jrjohansson/scientific-python-lectures/master/images/theory-experiment-computation.png" width=300 alt="theory-experiment-computation"/></figure> Figure from [J.R. Johansson](http://nbviewer.jupyter.org/github/jrjohansson/scientific-python-lectures/blob/master/Lecture-0-Scientific-Computing-with-Python.ipynb). ## The lifecycle of a scientific idea ``` from IPython.display import Image Image(filename='../images/lifecycle_FPerez.png', width=600) # image from Fernando Perez ``` ## About Python [[Python documentation](http://www.python.org/doc/essays/blurb/)] *Python is a programming language that lets you work more quickly and integrate your systems more effectively. You can learn to use Python and see almost immediate gains in productivity and lower maintenance costs* [[python.org](http://python.org/)]. - *Python is an interpreted, object-oriented, high-level programming language with dynamic semantics. 
Its high-level built in data structures, combined with dynamic typing and dynamic binding, make it very attractive for Rapid Application Development, as well as for use as a scripting or glue language to connect existing components together*. - *Python's simple, easy to learn syntax emphasizes readability and therefore reduces the cost of program maintenance. Python supports modules and packages, which encourages program modularity and code reuse*. - Python is free and open source. ## About Python [[Python documentation](http://www.python.org/doc/essays/blurb/)] - *Often, programmers fall in love with Python because of the increased productivity it provides. Since there is no compilation step, the edit-test-debug cycle is incredibly fast. Debugging Python programs is easy: a bug or bad input will never cause a segmentation fault. Instead, when the interpreter discovers an error, it raises an exception. When the program doesn't catch the exception, the interpreter prints a stack trace.* - A source level debugger allows inspection of local and global variables, evaluation of arbitrary expressions, setting breakpoints, stepping through the code a line at a time, and so on. The debugger is written in Python itself, testifying to Python's introspective power. On the other hand, often the quickest way to debug a program is to add a few print statements to the source: the fast edit-test-debug cycle makes this simple approach very effective.* ## Glossary for the Python technical characteristics I - Programming language: a formal language designed to communicate instructions to a computer. A sequence of instructions that specifies how to perform a computation is called a program. - Interpreted language: a program in an interpreted language is executed or interpreted by an interpreter program. This interpreter executes the program source code, statement by statement. 
- Compiled language: a program in a compiled language is first explicitly translated by the user into a lower-level machine language executable (with a compiler) and then this program can be executed. - Python interpreter: an interpreter is the computer program that executes the program. The most-widely used implementation of the Python programming language, referred as CPython or simply Python, is written in C (another programming language, which is lower-level and compiled). - High-level: a high-level programming language has a strong abstraction from the details of the computer and the language is independent of a particular type of computer. A high-level programming language is closer to human languages than to the programming language running inside the computer that communicate instructions to its hardware, the machine language. The machine language is a low-level programming language, in fact, the lowest one. - Object-oriented programming: a programming paradigm that represents concepts as "objects" that have data fields (attributes that describe the object) and associated procedures known as methods. - Semantics and syntax: the term semantics refers to the meaning of a language, as opposed to its form, the syntax. - Static and dynamic semantics: static and dynamic refer to the point in time at which some programming element is resolved. Static indicates that resolution takes place at the time a program is written. Dynamic indicates that resolution takes place at the time a program is executed. - Static and dynamic typing and binding: in dynamic typing, the type of the variable (e.g., if it is an integer or a string or a different type of element) is not explicitly declared, it can change, and in general is not known until execution time. In static typing, the type of the variable must be declared and it is known before the execution time. 
- Rapid Application Development: a software development methodology that uses minimal planning in favor of rapid prototyping. - Scripting: the writing of scripts, small pieces of simple instructions (programs) that can be rapidly executed. ## Glossary for the Python technical characteristics II - Glue language: a programming language for writing programs to connect software components (including programs written in other programming languages). - Modules and packages: a module is a file containing Python definitions (e.g., functions) and statements. Packages are a way of structuring Python’s module namespace by using “dotted module names”. For example, the module name A.B designates a submodule named B in a package named A. To be used, modules and packages have to be imported in Python with the import function. Namespace is a container for a set of identifiers (names), and allows the disambiguation of homonym identifiers residing in different namespaces. For example, with the command `import math`, we will have all the functions and statements defined in this module in the namespace '`math.`', for example, `math.pi` is the $\pi$ constant and `math.cos()`, the cosine function. - Program modularity and code reuse: the degree that programs can be compartmentalized (divided in smaller programs) to facilitate program reuse. - Source or binary form: source refers to the original code of the program (typically in a text format) which would need to be compiled to a binary form (not anymore human readable) to be able to be executed. - Major platforms: typically refers to the main operating systems (OS) in the market: Windows (by Microsoft), Mac OSX (by Apple), and Linux distributions (such as Debian, Ubuntu, Mint, etc.). Mac OSX and Linux distros are derived from, or heavily inspired by, another operating system called Unix. - Edit-test-debug cycle: the typical cycle in the life of a programmer; write (edit) the code, run (test) it, and correct errors or improve it (debug). 
The read–eval–print loop (REPL) is another related term. - Segmentation fault: an error in a program that is generated by the hardware which notifies the operating system about a memory access violation. - Exception: an error in a program detected during execution is called an exception and the Python interpreter raises a message about this error (an exception is not necessarily fatal, i.e., does not necessarily terminate or break the program). - Stack trace: information related to what caused the exception describing the line of the program where it occurred with a possible history of related events. - Source level debugger: Python has a module (named pdb) for interactive source code debugging. - Local and global variables: refers to the scope of the variables. A local variable is defined inside a function and typically can be accessed (it exists) only inside that function unless declared as global. ## About Python Python is also the name of the software with the most-widely used implementation of the language (maintained by the [Python Software Foundation](http://www.python.org/psf/)). This implementation is written mostly in the *C* programming language and it is nicknamed CPython. So, the following phrase is correct: download Python *(the software)* to program in Python *(the language)* because Python *(both)* is great! ## Python The origin of the name for the Python language in fact is not because of the big snake, the author of the Python language, Guido van Rossum, named the language after Monty Python, a famous British comedy group in the 70's. By coincidence, the Monty Python group was also interested in human movement science: ``` from IPython.display import YouTubeVideo YouTubeVideo('iV2ViNJFZC8', width=480, height=360, rel=0) ``` ## Why Python and not 'X' (put any other language here) Python is not the best programming language for all needs and for all people. There is no such language. 
Now, if you are doing scientific computing, chances are that Python is perfect for you because (and might also be perfect for lots of other needs): - Python is free, open source, and cross-platform. - Python is easy to learn, with readable code, well documented, and with a huge and friendly user community. - Python is a real programming language, able to handle a variety of problems, easy to scale from small to huge problems, and easy to integrate with other systems (including other programming languages). - Python code is not the fastest but Python is one the fastest languages for programming. It is not uncommon in science to care more about the time we spend programming than the time the program took to run. But if code speed is important, one can easily integrate in different ways a code written in other languages (such as C and Fortran) with Python. - The Jupyter Notebook is a versatile tool for programming, data visualization, plotting, simulation, numeric and symbolic mathematics, and writing for daily use. ## Popularity of Python for teaching ``` from IPython.display import IFrame IFrame('http://cacm.acm.org/blogs/blog-cacm/176450-python-is-now-the-most-popular-' + 'introductory-teaching-language-at-top-us-universities/fulltext', width='100%', height=450) ``` ## Python ecosystem for scientific computing (main libraries) - [Python](https://www.python.org/) of course (the CPython distribution): a free, open source and cross-platform programming language that lets you work more quickly and integrate your systems more effectively. - [Numpy](http://numpy.scipy.org): fundamental package for scientific computing with a N-dimensional array package. - [Scipy](http://scipy.org/scipylib/index.html): numerical routines for scientific computing. - [Matplotlib](http://matplotlib.org): comprehensive 2D Plotting. - [Sympy](http://sympy.org): symbolic mathematics. - [Pandas](http://pandas.pydata.org/): data structures and data analysis tools. 
- [IPython](http://ipython.org): provides a rich architecture for interactive computing with powerful interactive shell, kernel for Jupyter, support for interactive data visualization and use of GUI toolkits, flexible embeddable interpreters, and high performance tools for parallel computing. - [Jupyter Notebook](https://jupyter.org/): web application that allows you to create and share documents that contain live code, equations, visualizations and explanatory text. - [Statsmodels](http://statsmodels.sourceforge.net/): to explore data, estimate statistical models, and perform statistical tests. - [Scikit-learn](http://scikit-learn.org/stable/): tools for data mining and data analysis (including machine learning). - [Pillow](http://python-pillow.github.io/): Python Imaging Library. - [Spyder](https://code.google.com/p/spyderlib/): interactive development environment with advanced editing, interactive testing, debugging and introspection features. ## The Jupyter Notebook The Jupyter Notebook App is a server-client application that allows editing and running notebook documents via a web browser. The Jupyter Notebook App can be executed on a local desktop requiring no Internet access (as described in this document) or installed on a remote server and accessed through the Internet. Notebook documents (or “notebooks”, all lower case) are documents produced by the Jupyter Notebook App which contain both computer code (e.g. python) and rich text elements (paragraph, equations, figures, links, etc...). Notebook documents are both human-readable documents containing the analysis description and the results (figures, tables, etc..) as well as executable documents which can be run to perform data analysis. [Try Jupyter Notebook in your browser](https://try.jupyter.org/). 
## Jupyter Notebook and IPython kernel architectures <figure><img src="./../images/jupyternotebook.png" width=800 alt="Jupyter Notebook and IPython kernel architectures"/></figure> ## Installing the Python ecosystem **The easy way** The easiest way to get Python and the most popular packages for scientific programming is to install them with a Python distribution such as [Anaconda](https://www.continuum.io/anaconda-overview). In fact, you don't even need to install Python in your computer, you can run Python for scientific programming in the cloud using [python.org](https://www.python.org/shell/), [pythonanywhere](https://www.pythonanywhere.com/), or [repl.it](https://repl.it/languages/python3). **The hard way** You can download Python and all individual packages you need and install them one by one. In general, it's not that difficult, but it can become challenging and painful for certain big packages heavily dependent on math, image visualization, and your operating system (i.e., Microsoft Windows). ## Anaconda Go to the [*Anaconda* website](https://www.anaconda.com/download/) and download the appropriate version for your computer (but download Anaconda3! for Python 3.x). The file is big (about 500 MB). [From their website](https://docs.anaconda.com/anaconda/install/): **Linux Install** In your terminal window type and follow the instructions: ``` bash Anaconda3-4.4.0-Linux-x86_64.sh ``` **OS X Install** For the graphical installer, double-click the downloaded .pkg file and follow the instructions For the command-line installer, in your terminal window type and follow the instructions: ``` bash Anaconda3-4.4.0-MacOSX-x86_64.sh ``` **Windows** Double-click the .exe file to install Anaconda and follow the instructions on the screen ## Miniconda A variation of *Anaconda* is [*Miniconda*](http://conda.pydata.org/miniconda.html) (Miniconda3 for Python 3.x), which contains only the *Conda* package manager and Python. 
Once *Miniconda* is installed, you can use the `conda` command to install any other packages and create environments, etc. # My current installation ``` # pip install version_information %load_ext version_information %version_information numpy, scipy, matplotlib, sympy, pandas, ipython, jupyter ``` ## IDE for Python You might want an Integrated Development Environment (IDE) for programming in Python. See [Top 5 Python IDEs For Data Science](https://www.datacamp.com/community/tutorials/data-science-python-ide#gs.mN_Wu0M) for possible IDEs. Soon there will be a new IDE for scientific computing with Python: [JupyterLab](https://github.com/jupyterlab/jupyterlab), developed by the Jupyter team. See [this video about JupyterLab](https://channel9.msdn.com/Events/PyData/Seattle2017/BRK11). ## To learn about Python There is a lot of good material in the Internet about Python for scientific computing, some of them are: - [How To Think Like A Computer Scientist](http://openbookproject.net/thinkcs/python/english3e/) or [the interactive edition](https://runestone.academy/runestone/static/thinkcspy/index.html) (book) - [Python Scientific Lecture Notes](http://scipy-lectures.github.io/) (lecture notes) - [A Whirlwind Tour of Python](https://github.com/jakevdp/WhirlwindTourOfPython) (tutorial/book) - [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/) (tutorial/book) - [Lectures on scientific computing with Python](https://github.com/jrjohansson/scientific-python-lectures#lectures-on-scientific-computing-with-python) (lecture notes) ## More examples of Jupyter Notebooks Let's run stuff from: - [https://github.com/demotu/BMC](https://github.com/demotu/BMC) - [A gallery of interesting Jupyter Notebooks](https://github.com/jupyter/jupyter/wiki/A-gallery-of-interesting-Jupyter-Notebooks) ## Questions? 
- https://www.reddit.com/r/learnpython/ - https://stackoverflow.com/questions/tagged/python - https://www.reddit.com/r/Python/ - https://python-forum.io/ ``` Image(data='http://imgs.xkcd.com/comics/python.png') import this ```
github_jupyter
``` import os from tinytag import TinyTag, TinyTagException from sklearn.neighbors import NearestNeighbors from collections import defaultdict from keras.models import load_model import librosa from collections import Counter import multiprocessing from tqdm import tqdm from keras.models import Model import numpy as np import sounddevice as sd MUSIC_ROOT = '/Users/douwe/Songs/' mp3s = [] for root, subdirs, files in os.walk(MUSIC_ROOT): for fn in files: if fn.endswith('.mp3'): mp3s.append(os.path.join(root, fn)) len(mp3s) TO_SKIP = {'Podcast', 'Books & Spoken'} def process_mp3(path): try: tag = TinyTag.get(path) if tag.genre in TO_SKIP: return None except TinyTagException: print('error') return None signal, sr = librosa.load(path, res_type='kaiser_fast', offset=30, duration=30) try: melspec = librosa.feature.melspectrogram(signal, sr=sr).T[:1280,] if len(melspec) != 1280: return None except ValueError: return None return {'path': path, 'melspecs': np.asarray(np.split(melspec, 10)), 'tag': tag} songs = [process_mp3(path) for path in tqdm(mp3s[:1000])] songs = [song for song in songs if song] inputs = [] for song in songs: inputs.extend(song['melspecs']) inputs = np.array(inputs) inputs.shape cnn_model = load_model('zoo/15/song_classify.h5') vectorize_model = Model(inputs=cnn_model.input, outputs=cnn_model.layers[-4].output) vectors = vectorize_model.predict(inputs) vectors.shape nbrs = NearestNeighbors(n_neighbors=10, algorithm='ball_tree').fit(vectors) def most_similar_songs(song_idx): distances, indices = nbrs.kneighbors(vectors[song_idx * 10: song_idx * 10 + 10]) c = Counter() for row in indices: for idx in row[1:]: c[idx // 10] += 1 return c.most_common() song_idx = 7 print(songs[song_idx]['path']) print('---') for idx, score in most_similar_songs(song_idx)[:5]: print(songs[idx]['path'], score) print('') duration = 30 # seconds fs = 22050 myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=1) myrecording.shape sd.play(myrecording, samplerate=fs) 
myrecording.min() signal, sr = librosa.load('/Users/douwe/Dropbox/Apps/Hi-Q Recordings/recording-20180219-162112.mp3', res_type='kaiser_fast', offset=0, duration=30) melspec = librosa.feature.melspectrogram(signal, sr=sr).T[:1280,] melspecs = np.asarray(np.split(melspec, 10)) melspecs.shape recorded_vectors = vectorize_model.predict(melspecs) distances, indices = nbrs.kneighbors(recorded_vectors) c = Counter() for row in indices: for idx in row[1:]: c[idx // 10] += 1 for idx, _ in c.most_common(): print(songs[idx]['path']) signal, sr = librosa.load('/Users/douwe/Dropbox/Apps/Hi-Q Recordings/recording-20180219-162112.mp3', res_type='kaiser_fast', offset=0, duration=30) sd.play(signal.flatten(), samplerate=sr) ```
github_jupyter
# Solver - Tutorial ## Non colliding fiber models An important component of nerve fibers is that they are 3d objects. Therefore, they should not overlap each other. To achieve this, an [algorithm](https://arxiv.org/abs/1901.10284) was developed based on collision checking of conical objects. A conical object is defined by two neighboring points in the fiber array, i.e. fiber[i] and fiber[i+1]. The class `solver` checks a given fiber model for collisions and resolves these collisions iteratively by small displacements. To account for the flexibility of fibers, they are continuously divided into segments. These segments are modeled geometrically as cones. A parallel implementation of an octree is used to run the collision detection algorithm between these cones. ## General imports First, we prepair all necesarry modules and defining a function to euqalice all three axis of an 3d plot. ``` import fastpli.model.solver import fastpli.model.sandbox import fastpli.io import os import numpy as np import matplotlib.pyplot as plt np.random.seed(42) def set_3d_axes_equal(ax): x_limits = ax.get_xlim3d() y_limits = ax.get_ylim3d() z_limits = ax.get_zlim3d() x_range = abs(x_limits[1] - x_limits[0]) x_middle = np.mean(x_limits) y_range = abs(y_limits[1] - y_limits[0]) y_middle = np.mean(y_limits) z_range = abs(z_limits[1] - z_limits[0]) z_middle = np.mean(z_limits) plot_radius = 0.5 * max([x_range, y_range, z_range]) ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius]) ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius]) ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius]) def plot_fiber_bundles(fbs, colors): fig = plt.figure() ax = fig.add_subplot(1, 1, 1, projection='3d') for fb, c in zip(fbs, colors): for f in fb: plt.plot(f[:,0],f[:,1],f[:,2], c) set_3d_axes_equal(ax) ``` ## Prepairing Models and defining bounding conditions The [fiber bundles](https://github.com/3d-pli/fastpli/wiki/FiberBundles) are prepaired as shown in the sandbox 
examples/tutorials. Additionally each fiber will get a random radius. Two crossing fiber bundle (x and y) are prepaiered in this manor. ### Note - Take note that matplotlib does not check z-buffering. Therefore each new plotted line is on top of the lines before. That why the second fiber bundle (red) seems to be on top of the first one (blue). - Also not be showing here. The solver class provides an OpenGL visualization tool `solver.draw_scene()` which is not shown in this notebook since. The example file `examples/solver.py` and [wiki](https://github.com/3d-pli/fastpli/wiki/Solver) shows its capabilities. ``` solver = fastpli.model.solver.Solver() fiber_bundle_trj_x = [[-150, 0, 0], [150, 0, 0]] fiber_bundle_trj_y = [[0, -150, 0], [0, 150, 0]] population = fastpli.model.sandbox.seeds.triangular_circle(20, 6) fiber_radii = np.random.uniform(2.0, 10.0, population.shape[0]) fiber_bundle_x = fastpli.model.sandbox.build.bundle(fiber_bundle_trj_x, population, fiber_radii) fiber_radii = np.random.uniform(2.0, 10.0, population.shape[0]) fiber_bundle_y = fastpli.model.sandbox.build.bundle(fiber_bundle_trj_y, population, fiber_radii) fiber_bundles = [fiber_bundle_x, fiber_bundle_y] plot_fiber_bundles(fiber_bundles, ['b', 'r']) plt.show() ``` ## Running solver The solver algorithm splits each fiber into almost equal fiber segments allowing to seperate the model more naturally. The mean length of this segments is controlled via `solver.obj_mean_length`. Since the fiber segments will move in each step of the algorithm, the curviture of the fibers can increase quite fast. To limit this a maximal curviture radii of the fibers can be set via `solver.obj_min_radius`. This means that a "circle" of point `p_i-1, p_i` and `p_i+1` is limited by a lower value. Is the value exceeded, the betroffende fiber segments are corrected slightly. If all conditions are fullfiled, the output is marked as solved and the model can be used for further processing. 
``` # run solver solver.fiber_bundles = fiber_bundles solver.obj_min_radius = 10 solver.obj_mean_length = 30 N = 1000 for i in range(N): solved = solver.step() if solved: break print(f'{i/N*100:.2f}%', end='\r') print(f'solved: {i}, {solver.num_obj}/{solver.num_col_obj}') plot_fiber_bundles(solver.fiber_bundles, ['b', 'r']) plt.show() ``` ## Saving The resulting configuration can be save in a `.dat` file or `.h5` (HDF5) file wich is supported via this toolbox. ``` fastpli.io.fiber_bundles.save('output.dat', solver.fiber_bundles, mode='w') ``` ## Additiona manipulations A trick to allow for more randomness is to apply more varrity to the fiber models at the beginning of the solver alrogithm. However since the boundry conditions i. e. curviture and mean fiber segment length is usually not set when initializing the models, one can apply the boundry conditions to the currently set models inside the solver object.And can then be afterward manipulated ``` # run solver solver.fiber_bundles = fiber_bundles solver.obj_min_radius = 10 solver.obj_mean_length = 30 solver.apply_boundary_conditions(n_max=10) print(fiber_bundles[0][0].shape) print(solver.fiber_bundles[0][0].shape) fbs = solver.fiber_bundles for i, fb in enumerate(fbs): for j, _ in enumerate(fb): fbs[i][j][:,:3] += np.random.uniform(-10,10,(fbs[i][j].shape[0],3)) fbs[i][j][:,3] *= np.random.uniform(0.5,2,(fbs[i][j].shape[0])) plot_fiber_bundles(fbs, ['b', 'r']) plt.show() N = 1000 solver.fiber_bundles = fbs for i in range(N): solved = solver.step() if solved: break print(f'{i/N*100:.2f}%', end='\r') print(f'solved: {i}, {solver.num_obj}/{solver.num_col_obj}') plot_fiber_bundles(solver.fiber_bundles, ['b', 'r']) plt.show() ``` ## Orientation histogram ``` import fastpli.analysis _, axs = plt.subplots(1,2, subplot_kw=dict(projection='polar'), figsize=(10,5)) pcs=[None, None] phi, theta = fastpli.analysis.orientation.fiber_bundles(fiber_bundles) _, _, _, pcs[0] = fastpli.analysis.orientation.histogram(phi, theta, 
ax=axs[0], n_phi=60, n_theta=30, weight_area=False) phi, theta = fastpli.analysis.orientation.fiber_bundles(solver.fiber_bundles) _, _, _, pcs[1] = fastpli.analysis.orientation.histogram(phi, theta, ax=axs[1], n_phi=60, n_theta=30, weight_area=False) for ax, pc in zip(axs, pcs): cbar = plt.colorbar(pc, ax=ax) cbar.ax.set_title('#') ax.set_rmax(90) ax.set_rticks(range(0, 90, 10)) ax.set_rlabel_position(22.5) ax.set_yticklabels([]) ax.grid(True) plt.show() ```
github_jupyter
``` import pandas as pd import numpy as np import urllib.request from zipfile import ZipFile from re import compile from pathlib import Path from shutil import rmtree deis_data = Path('deis_data') deis_data.mkdir(parents=True, exist_ok=True) def get_deis_death_url(): datapattern = compile('http.*DEFUNCIONES_FUENTE_DEIS_2016_2021.*zip') with urllib.request.urlopen('https://deis.minsal.cl/wp-admin/admin-ajax.php?action=wp_ajax_ninja_tables_public_action&table_id=2889&target_action=get-all-data&default_sorting=manual_sort') as f: return datapattern.search(f.read().decode().replace(',','\n')).group().replace('\\', '') def get_csv_deis(): url = get_deis_death_url() urllib.request.urlretrieve(url, 'deis_data/' + url.split('/')[-1]) with ZipFile('deis_data/' + url.split('/')[-1], 'r') as zip_ref: zip_ref.extractall('deis_data') return url.split('/')[-1][:-3] def annos(row): edad = row['EDAD_CANT'] tipo = row['EDAD_TIPO'] if tipo == 1: return edad elif tipo == 2: return edad/12 elif tipo == 3: return edad/365 elif tipo == 4: return edad/365*24 else: return np.nan deis_csv = 'deis_data/' + get_csv_deis() + 'csv' # INE - Proyección base 2017 piramide = { '<=19': 4_988_470, '20-29': 3_046_000, '30-39': 3_120_583, '40-49': 2_658_453, '50-59': 2_392_614, '60-69': 1_857_879, '70-79': 1_046_294, '>=80': 568_070, } columnas = ['ANO_DEF', 'FECHA_DEF', 'GLOSA_SEXO', 'EDAD_TIPO', 'EDAD_CANT', 'CODIGO_COMUNA_RESIDENCIA', 'GLOSA_COMUNA_RESIDENCIA', 'GLOSA_REG_RES', 'DIAG1', 'CAPITULO_DIAG1', 'GLOSA_CAPITULO_DIAG1', 'CODIGO_GRUPO_DIAG1', 'GLOSA_GRUPO_DIAG1', 'CODIGO_CATEGORIA_DIAG1', 'GLOSA_CATEGORIA_DIAG1', 'CODIGO_SUBCATEGORIA_DIAG1', 'GLOSA_SUBCATEGORIA_DIAG1', 'DIAG2', 'CAPITULO_DIAG2', 'GLOSA_CAPITULO_DIAG2', 'CODIGO_GRUPO_DIAG2', 'GLOSA_GRUPO_DIAG2', 'CODIGO_CATEGORIA_DIAG2', 'GLOSA_CATEGORIA_DIAG2', 'CODIGO_SUBCATEGORIA_DIAG2', 'GLOSA_SUBCATEGORIA_DIAG2'] deis = pd.read_csv(deis_csv, sep=';', parse_dates=[1], header=None, names=columnas, index_col=False) 
deis.set_index(['FECHA_DEF'], inplace=True) deis.sort_index(inplace=True) # CODIGO_CATEGORIA_DIAG1 U07 > covid19 rmtree(deis_data) deis['EDAD_ANOS'] = deis.apply(annos, axis = 1) bins = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 999] bins_10s = [0, 10, 20, 30, 40, 50, 60, 70, 80, 999] labels = ['00-04', '05-09', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54', '55-59', '60-64', '65-69', '70-74', '75-79', '80+'] labels_10s = ['00-09', '10-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-79', '80+'] deis['agerange'] = pd.cut(deis.EDAD_ANOS, bins, labels=labels, include_lowest=True, right=False) deis['agerange_10s'] = pd.cut(deis.EDAD_ANOS, bins_10s, labels=labels_10s, include_lowest=True, right=False) # deis_gruped = pd.pivot_table(deis.loc[(deis['CODIGO_CATEGORIA_DIAG1'] == 'U07')], values=['EDAD_CANT'], index=['FECHA_DEF'], # columns=['agerange'], aggfunc='count')['EDAD_CANT'] # deis_gruped = deis_gruped.resample('D').asfreq().fillna(0) # deis_gruped defunciones_deis_genero_edad = pd.pivot_table(deis.loc[(deis['CODIGO_CATEGORIA_DIAG1'] == 'U07')], values=['EDAD_CANT'], index=['FECHA_DEF'], columns=['GLOSA_SEXO', 'agerange'], aggfunc='count')['EDAD_CANT'] defunciones_deis_genero_edad.columns.names = ['Sexo', 'Edad'] defunciones_deis_genero_edad.index.name = 'Fecha' defunciones_deis_genero_edad = defunciones_deis_genero_edad.asfreq('D').fillna(0) defunciones_deis_genero_edad_10s = pd.pivot_table(deis.loc[(deis['CODIGO_CATEGORIA_DIAG1'] == 'U07')], values=['EDAD_CANT'], index=['FECHA_DEF'], columns=['GLOSA_SEXO', 'agerange_10s'], aggfunc='count')['EDAD_CANT'] defunciones_deis_genero_edad_10s.columns.names = ['Sexo', 'Edad'] defunciones_deis_genero_edad_10s.index.name = 'Fecha' defunciones_deis_genero_edad_10s = defunciones_deis_genero_edad_10s.asfreq('D').fillna(0) defunciones_deis_genero_edad_10s.sum() import plotly.express as px import plotly.graph_objects as go from plotly.subplots import make_subplots # 
px.line(deis_gruped) deis_gruped.sum(axis=1).plot(figsize=(16,9)) casos = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto16/CasosGeneroEtario_std.csv') tr_edad = { '00 - 04 años': '00-04', '05 - 09 años': '05-09', '10 - 14 años': '10-14', '15 - 19 años': '15-19', '20 - 24 años': '20-24', '25 - 29 años': '25-29', '30 - 34 años': '30-34', '35 - 39 años': '35-39', '40 - 44 años': '40-44', '45 - 49 años': '45-49', '50 - 54 años': '50-54', '55 - 59 años': '55-59', '60 - 64 años': '60-64', '65 - 69 años': '65-69', '70 - 74 años': '70-74', '75 - 79 años': '75-79', '80 y más años': '80+' } tr_sex ={ 'M': 'Hombre', 'F': 'Mujer' } casos['Grupo de edad'].replace(tr_edad, inplace=True) casos['Sexo'].replace(tr_sex, inplace=True) casos['Fecha'] = pd.to_datetime(casos['Fecha']) casos_genero_edad = pd.pivot_table(casos, values=['Casos confirmados'], index=['Fecha'], columns=['Sexo', 'Grupo de edad'])['Casos confirmados'] # hay un dia que no hubieron nuevos casos, borrando casos_genero_edad = casos_genero_edad.drop(pd.Timestamp('2020-10-02')) casos_genero_edad = casos_genero_edad.resample('D').interpolate('quadratic') casos_genero_edad = casos_genero_edad.diff().fillna(0) casos_genero_edad.columns.names = ['Sexo', 'Edad'] # casos_genero_edad.index.name = 'Fecha' casos_genero_edad_10s = casos_genero_edad.copy() i = 0 for sex in tr_sex.values(): for ages in labels_10s: if ages == '80+': # casos_genero_edad_10s[sex, ages] = casos_genero_edad_10s[sex]['80+'] continue # print(casos_genero_edad_10s[sex].columns[i:i+2]) # print(casos_genero_edad_10s[sex][casos_genero_edad_10s[sex].columns[i:i+2]].sum()) casos_genero_edad_10s[sex, ages] = casos_genero_edad_10s[sex][casos_genero_edad_10s[sex].columns[i:i+2]].sum(axis=1) i += 2 i = 0 casos_genero_edad_10s.drop(columns=list(tr_edad.values())[:-1], inplace=True, level=1) casos_genero_edad_10s = casos_genero_edad_10s.sort_index(axis=1) casos_genero_edad.sum() casos_genero_edad_10s 
defunciones_deis_edad = defunciones_deis_genero_edad.sum().reset_index().groupby(['Edad']).sum() casos_edad = casos_genero_edad.sum().reset_index().groupby(['Edad']).sum() cfr_edad = defunciones_deis_edad/casos_edad cfr_edad.rename(columns={0: 'CFR'}, inplace=True) cfr_edad = cfr_edad.reset_index() overall_ing = 'General' cfr_edad['Sexo'] = overall_ing cfr = ((defunciones_deis_genero_edad.sum()/casos_genero_edad.sum())) cfr.rename('CFR', inplace=True) cfr = cfr.reset_index() cfr_total_total = defunciones_deis_genero_edad.sum().sum()/casos_genero_edad.sum().sum() cfr_total_hombre = defunciones_deis_genero_edad['Hombre'].sum().sum()/casos_genero_edad['Hombre'].sum().sum() cfr_total_mujer = defunciones_deis_genero_edad['Mujer'].sum().sum()/casos_genero_edad['Mujer'].sum().sum() cfr = cfr.append({'Sexo': 'Hombre', 'Edad': overall_ing, 'CFR': cfr_total_hombre}, ignore_index=True) cfr = cfr.append({'Sexo': 'Mujer', 'Edad': overall_ing, 'CFR': cfr_total_mujer}, ignore_index=True) cfr = cfr.append({'Sexo': overall_ing, 'Edad': overall_ing, 'CFR': cfr_total_total}, ignore_index=True) cfr = cfr.append(cfr_edad, ignore_index=True) fig_cfr = px.bar( cfr, x='Edad', y='CFR', color='Sexo', barmode='group', color_discrete_sequence=Wong ) fig_cfr.update_xaxes(type='category') fig_cfr.update_layout(hovermode='x') fig_cfr.update_traces( hovertemplate="<br>".join([ # "Día: %{x}", "%{y:.2%}", ]) ) fig_cfr.update_layout( template='plotly_white', yaxis_tickformat = '.1%', font=dict( size=14, ), title='Tasa de letalidad por COVID19 en Chile (Case Fatality Rate, CFR)' ) fig_cfr.add_layout_image( dict( source="https://i2.wp.com/dlab.cl/wp-content/uploads/2016/08/LogoWebDlab.png", xref="paper", yref="paper", x=1, y=1.05, sizex=0.2, sizey=0.2, xanchor="right", yanchor="bottom" ) ) fig_cfr defunciones_deis_edad_10s = defunciones_deis_genero_edad_10s.sum().reset_index().groupby(['Edad']).sum() casos_edad_10s = casos_genero_edad_10s.sum().reset_index().groupby(['Edad']).sum() cfr_edad_10s = 
defunciones_deis_edad_10s/casos_edad_10s cfr_edad_10s.rename(columns={0: 'CFR'}, inplace=True) cfr_edad_10s = cfr_edad_10s.reset_index() overall_ing = 'General' cfr_edad_10s['Sexo'] = overall_ing cfr_10s = ((defunciones_deis_genero_edad_10s.sum()/casos_genero_edad_10s.sum())) cfr_10s.rename('CFR', inplace=True) cfr_10s = cfr_10s.reset_index() cfr_total_total_10s = defunciones_deis_genero_edad_10s.sum().sum()/casos_genero_edad_10s.sum().sum() cfr_total_hombre_10s = defunciones_deis_genero_edad_10s['Hombre'].sum().sum()/casos_genero_edad_10s['Hombre'].sum().sum() cfr_total_mujer_10s = defunciones_deis_genero_edad_10s['Mujer'].sum().sum()/casos_genero_edad_10s['Mujer'].sum().sum() cfr_10s = cfr_10s.append({'Sexo': 'Hombre', 'Edad': overall_ing, 'CFR': cfr_total_hombre_10s}, ignore_index=True) cfr_10s = cfr_10s.append({'Sexo': 'Mujer', 'Edad': overall_ing, 'CFR': cfr_total_mujer_10s}, ignore_index=True) cfr_10s = cfr_10s.append({'Sexo': overall_ing, 'Edad': overall_ing, 'CFR': cfr_total_total_10s}, ignore_index=True) cfr_10s = cfr_10s.append(cfr_edad_10s, ignore_index=True) Wong = ['#000000', '#E69F00', '#56B4E9', '#009E73', '#F0E442', '#0072B2', '#D55E00', '#CC79A7'] fig_cfr_10s = px.bar( cfr_10s, x='Edad', y='CFR', color='Sexo', barmode='group', color_discrete_sequence=Wong ) fig_cfr_10s.update_xaxes(type='category') fig_cfr_10s.update_layout(hovermode='x') fig_cfr_10s.update_traces( hovertemplate="<br>".join([ # "Día: %{x}", "%{y:.2%}", ]) ) fig_cfr_10s.update_layout( template='plotly_white', yaxis_tickformat = '.1%', font=dict( size=14, ), title='Tasa de letalidad por COVID19 en Chile (Case Fatality Rate, CFR)' ) fig_cfr_10s.add_layout_image( dict( source="https://i2.wp.com/dlab.cl/wp-content/uploads/2016/08/LogoWebDlab.png", xref="paper", yref="paper", x=1, y=1.0, sizex=0.2, sizey=0.2, xanchor="right", yanchor="bottom" ) ) fig_cfr_10s.update_layout( updatemenus=[ dict( buttons=[ dict(label="Lineal", method="relayout", args=[{"yaxis.type": "linear"}]), 
dict(label="Logaritmico", method="relayout", args=[{"yaxis.type": "log"}]), ])], font=dict(size=11) ) fig_cfr_10s cfr_10s ```
github_jupyter
# Demo of the LCS package ``` #preamble import os, sys import pandas as pd import numpy as np import random import pickle ``` ## Import Package ``` # how to import the packaes from Rulern.LCSModule import LCS # the core library from Rulern.RuleModule import Rule # this is only needed if you create your own rules ``` ## Load Pre-Trained Models (Back-up) ``` # #how to load the models using pickle # with open("Eval/LCSvsNN/28072020-bool/"+"0cv_model_LCS.obj", 'rb') as f: # model = pickle.load(f) # #show example rules form a trained model # # print(b.history) # for rule in model.rules: # if rule.fitness > 1.0: # filter out all the bad rules # print(rule,rule.fitness) # print rule and rule fittness ``` ## Generating data (swap with your own data) ``` # generate data i 0 - 9 are the input bits and o0-4 are the output # replce this with your own data set and data wrangling operations # the LCS package can work with dataframes, arrays or numpy arrays def gen_rand_in_out(arr_len = 10): input = [] for i in range(arr_len): input.append(random.choice([1,0])) output = np.array(input[0:int(arr_len/2)]) | np.array(input[int(arr_len/2):arr_len]) # logical or of the first and last five bits return np.append(input,output) print(gen_rand_in_out()) df = [] np_samples = 1000 for i in range(np_samples): df.append(gen_rand_in_out()) df = pd.DataFrame(np.array(df).reshape(np_samples,15),columns = ["i0","i1","i2","i3","i4","i5","i6","i7","i8","i9","o0","o1","o2","o3","o4"]) print(df) ``` ## Initialise an LCS model (recommended order of operations) See Appendix B, Table B.1 for a summary of the model parameters ``` # initialise LCS # recommended order of parameter initialisation def init_LCS(): lcs = LCS((10,1),(5,1),max_pop = 100) #input and output shapes as well as the max population lcs.input_names = ["i0","i1","i2","i3","i4","i5","i6","i7","i8","i9"] # column names of the input lcs.output_names = ["o0","o1","o2","o3","o4"] # column names of the outputs lcs.initGA() # initialise genetic 
algorithms lcs.covering_threshold = 5 # set a covering threshold - how may rules must match a data instance lcs.GA.interval = 0 # the range interval if range antecedents are enabled lcs.GA.sigma = 0.0 # sigma of the spread of genetic mutations of the rule values lcs.GA.max_ranges = 0 # max number of ranges a rule can have = > i1 > 0.5 and i1 < 1.0 lcs.GA.max_attribute_comp = 0 # max number of attribute comparisons a rule can have = > i0 >= i1 lcs.GA.max_comp = 1 # max number of attribute comparisons to a cosntant a rule can have = > i0 >= 0.5 lcs.GA.max_output_attributes = 0 # max number of ouput atributes excl. bias => i1*0.5 + i2*0.5 lcs.fitness_weights =[1,0,1] # weights on the fitness function c1, c2 and c3 in the report lcs.GA.input_template = df[["i0","i1","i2","i3","i4","i5","i6","i7","i8","i9"]].iloc[[0]] # template on an input frame lcs.purge_threshold = 1.0 # purge threshold lcs.type = "Multi-Class" # this by default is "continous" but can be a classifer if it is a a single-classifer return lcs lcs = init_LCS() # initialise LCS X_test = df[lcs.input_names] # get input data ``` ## How to add your own rules ``` rules = [] # how to add manual rules for an or operation for i in range(5): ant_dict = { "i"+str(i):[[0],["=="],[1]] # antecedent dictionary structure } con_dict = { # consequent dictionary structure "out_var":"o"+str(i), "vars":{}, "bias":1} rules.append(Rule("USER"+str(i),ant_dict,con_dict,seq_len = 1)) # name, antecedent, consequent, sequence length (def. 
1) for i in range(5): ant_dict = { "i"+str(i+5):[[0],["=="],[1]] } con_dict = { "out_var":"o"+str(i), "vars":{}, "bias":1} rules.append(Rule("USER"+str(i+5),ant_dict,con_dict,seq_len = 1)) # initalise each rules parameters, if a rule does not have stats, it will not contribute to a classifcation for rule in rules: rule.fitness = 2 rule.correctness = 100 rule.matched = 100 rule.evaluated = 100 lcs.rules.append(rule) for rule in lcs.rules: if rule.fitness > 1.0: # filter out all the bad rules print(rule) ``` ## Evaluate inputs ``` # evaluate input data results,activations = lcs.evaluate_data(X_test) ``` ### Show results ``` print(results[0:10].apply(np.ceil).astype("int"),activations[0:10]) #print the prediction and activations for each row y_test= df[lcs.output_names] print(y_test.iloc[0:10]) # print the true value for comparison ``` ## How to train your own LCS model ``` #how to train your own LCS # initialise new LCS instance lcs = init_LCS() # initialise new LCS instance lcs.LearningClassifierSystem(X_test.iloc[0:100],y_test.iloc[0:100],mutation_frq = 10,verberose = True,eval = [X_test,y_test],epochs = 10) results,activations = lcs.evaluate_data(X_test) for rule in lcs.rules: #if rule.fitness > 0: # filter out all the bad rules print(rule,rule.fitness) # show system classfications, recommeded to use ceil for muticlass models outputs print(results[10:20].apply(np.ceil).astype("int"),activations[0:10]) #print the prediction and activations for each row y_test= df[lcs.output_names] print(y_test.iloc[10:20]) # print the true value for comparison ```
github_jupyter
# Hyper-parameter Tuning of Machine Learning (ML) Models ### Code for Classification Problems #### `Dataset Used:` MNIST dataset #### `Machine Learning Algorithms Used:` * Random Forest (RF) * Support Vector Machine (SVM) * K-Nearest Neighbor (KNN) * Artificial Neural Network (ANN) #### `Hyper-parameter Tuning Algorithms Used:` * Grid Search * Random Search * Bayesian Optimization with Gaussian Processes (BO-GP) * Bayesian Optimization with Tree-structured Parzen Estimator (BO-TPE) --- ``` # Importing required libraries import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline import scipy.stats as stats from sklearn import datasets from sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import classification_report,confusion_matrix,accuracy_score from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC ``` #### Loading MNIST Dataset The Modified National Institute of Standards and Technology (MNIST) database is a large database of handwritten digits that is commonly used by people who want to try learning techniques and pattern recognition methods on real-world data while spending minimal effort on preprocessing and formatting. It has a training set of 60,000 examples, and a test set of 10,000 examples. It is a subset of a larger set available from NIST. The digits have been size-normalized and centered in a fixed-size image. The scikit-learn `load_digits` subset used in this notebook has 1,797 records and 64 columns. 
For more details about the dataset click here: [Details-1](http://yann.lecun.com/exdb/mnist/), [Details-2](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits/) ``` # Loading the dataset X, y = datasets.load_digits(return_X_y=True) datasets.load_digits() ``` ### Baseline Machine Learning Models: Classifier with default Hyper-parameters ### `Random Forest` ``` # Random Forest (RF) with 3-fold cross validation RF_clf = RandomForestClassifier() RF_clf.fit(X,y) RF_scores = cross_val_score(RF_clf, X, y, cv = 3,scoring = 'accuracy') print("Accuracy (RF): "+ str(RF_scores.mean())) ``` ### `Support Vector Machine` ``` # Support Vector Machine (SVM) SVM_clf = SVC(gamma='scale') SVM_clf.fit(X,y) SVM_scores = cross_val_score(SVM_clf, X, y, cv = 3,scoring = 'accuracy') print("Accuracy (SVM): "+ str(SVM_scores.mean())) ``` ### `K-Nearest Neighbor` ``` # K-Nearest Neighbor (KNN) KNN_clf = KNeighborsClassifier() KNN_clf.fit(X,y) KNN_scores = cross_val_score(KNN_clf, X, y, cv = 3,scoring='accuracy') print("Accuracy (KNN):"+ str(KNN_scores.mean())) ``` ### `Artificial Neural Network` ``` # Artificial Neural Network (ANN) from keras.models import Sequential, Model from keras.layers import Dense, Input from keras.wrappers.scikit_learn import KerasClassifier from keras.callbacks import EarlyStopping def ann_model(optimizer = 'sgd',neurons = 32,batch_size = 32,epochs = 50,activation = 'relu',patience = 5,loss = 'categorical_crossentropy'): model = Sequential() model.add(Dense(neurons, input_shape = (X.shape[1],), activation = activation)) model.add(Dense(neurons, activation = activation)) model.add(Dense(10,activation='softmax')) model.compile(optimizer = optimizer, loss = loss) early_stopping = EarlyStopping(monitor = "loss", patience = patience) history = model.fit(X, pd.get_dummies(y).values, batch_size = batch_size, epochs=epochs, callbacks = [early_stopping], verbose=0) return model ANN_clf = KerasClassifier(build_fn = 
ann_model, verbose = 0) ANN_scores = cross_val_score(ANN_clf, X, y, cv = 3,scoring = 'accuracy') print("Accuracy (ANN):"+ str(ANN_scores.mean())) ``` ### Hyper-parameter Tuning Algorithms ### `1] Grid Search` ``` from sklearn.model_selection import GridSearchCV ``` ### `Random Forest` ``` # Random Forest (RF) RF_params = { 'n_estimators': [10, 20, 30], 'max_depth': [15,20,25,30,50], "criterion":['gini','entropy'] } RF_clf = RandomForestClassifier(random_state = 1) RF_grid = GridSearchCV(RF_clf, RF_params, cv = 3, scoring = 'accuracy') RF_grid.fit(X, y) print(RF_grid.best_params_) print("Accuracy (RF): "+ str(RF_grid.best_score_)) ``` ### `Support Vector Machine` ``` # Support Vector Machine (SVM) SVM_params = { 'C': [1, 10, 20, 50, 100], "kernel":['linear','poly','rbf','sigmoid'] } SVM_clf = SVC(gamma='scale') SVM_grid = GridSearchCV(SVM_clf, SVM_params, cv = 3, scoring = 'accuracy') SVM_grid.fit(X, y) print(SVM_grid.best_params_) print("Accuracy:"+ str(SVM_grid.best_score_)) ``` ### `K-Nearest Neighbor` ``` #K-Nearest Neighbor (KNN) KNN_params = { 'n_neighbors': [2, 4, 6, 8] } KNN_clf = KNeighborsClassifier() KNN_grid = GridSearchCV(KNN_clf, KNN_params, cv = 3, scoring = 'accuracy') KNN_grid.fit(X, y) print(KNN_grid.best_params_) print("Accuracy:"+ str(KNN_grid.best_score_)) ``` ### `Artificial Neural Network` ``` # Artificial Neural Network (ANN) ANN_params = { 'optimizer': ['adam','sgd'], 'activation': ['relu','tanh'], 'batch_size': [16,32], 'neurons':[16,32], 'epochs':[30,50], 'patience':[3,5] } ANN_clf = KerasClassifier(build_fn = ann_model, verbose = 0) ANN_grid = GridSearchCV(ANN_clf, ANN_params, cv = 3,scoring = 'accuracy') ANN_grid.fit(X, y) print(ANN_grid.best_params_) print("Accuracy (ANN): "+ str(ANN_grid.best_score_)) ``` ### `2] Random Search` ``` from sklearn.model_selection import RandomizedSearchCV from random import randrange as sp_randrange from scipy.stats import randint as sp_randint ``` ### `Random Forest` ``` # Random Forest (RF) RF_params = 
{ 'n_estimators': sp_randint(10,100), 'max_depth': sp_randint(5,50), "criterion":['gini','entropy'] } RF_clf = RandomForestClassifier(random_state = 1) RF_Random = RandomizedSearchCV(RF_clf, param_distributions = RF_params, n_iter = 20,cv = 3,scoring = 'accuracy') RF_Random.fit(X, y) print(RF_Random.best_params_) print("Accuracy (RF):"+ str(RF_Random.best_score_)) ``` ### `Support Vector Machine` ``` # Support Vector Machine(SVM) SVM_params = { 'C': stats.uniform(1,50), "kernel":['poly','rbf'] } SVM_clf = SVC(gamma='scale') SVM_Random = RandomizedSearchCV(SVM_clf, param_distributions = SVM_params, n_iter = 20,cv = 3,scoring = 'accuracy') SVM_Random.fit(X, y) print(SVM_Random.best_params_) print("Accuracy (SVM): "+ str(SVM_Random.best_score_)) ``` ### `K-Nearest Neighbor` ``` # K-Nearest Neighbor (KNN) KNN_params = {'n_neighbors': range(1,20)} KNN_clf = KNeighborsClassifier() KNN_Random = RandomizedSearchCV(KNN_clf, param_distributions = KNN_params,n_iter = 10,cv = 3,scoring = 'accuracy') KNN_Random.fit(X, y) print(KNN_Random.best_params_) print("Accuracy (KNN): "+ str(KNN_Random.best_score_)) ``` ### `Artificial Neural Network` ``` # Artificial Neural Network (ANN) ANN_params = { 'optimizer': ['adam','sgd'], 'activation': ['relu','tanh'], 'batch_size': [16,32], 'neurons':sp_randint(10,100), 'epochs':[30,50], 'patience':sp_randint(5,20) } ANN_clf = KerasClassifier(build_fn = ann_model, verbose = 0) ANN_Random = RandomizedSearchCV(ANN_clf, param_distributions = ANN_params, n_iter = 10,cv = 3,scoring = 'accuracy') ANN_Random.fit(X, y) print(ANN_Random.best_params_) print("Accuracy (ANN): "+ str(ANN_Random.best_score_)) ``` ### `3] Bayesian Optimization with Gaussian Process (BO-GP)` ``` from skopt import Optimizer from skopt import BayesSearchCV from skopt.space import Real, Categorical, Integer ``` ### `Random Factor` ``` #Random Forest (RF) RF_params = { 'n_estimators': Integer(10,100), 'max_depth': Integer(5,50), "criterion":['gini','entropy'] } RF_clf = 
RandomForestClassifier(random_state = 1) RF_Bayes = BayesSearchCV(RF_clf, RF_params,cv = 3,n_iter = 20, n_jobs = -1,scoring = 'accuracy') RF_Bayes.fit(X, y) print(RF_Bayes.best_params_) print("Accuracy (RF): "+ str(RF_Bayes.best_score_)) ``` ### `Support Vector Machine` ``` # Support Vector Machine (SVM) SVM_params = { 'C': Real(1,50), "kernel":['poly','rbf'] } SVM_clf = SVC(gamma = 'scale') SVM_Bayes = BayesSearchCV(SVM_clf, SVM_params,cv = 3,n_iter = 20, n_jobs = -1,scoring = 'accuracy') SVM_Bayes.fit(X, y) print(SVM_Bayes.best_params_) print("Accuracy (SVM): "+ str(SVM_Bayes.best_score_)) ``` ### `K-Nearest Neighbor` ``` # K-Nearest Neighbor (KNN) KNN_params = {'n_neighbors': Integer(1,20),} KNN_clf = KNeighborsClassifier() KNN_Bayes = BayesSearchCV(KNN_clf, KNN_params,cv = 3,n_iter = 10, n_jobs = -1,scoring = 'accuracy') KNN_Bayes.fit(X, y) print(KNN_Bayes.best_params_) print("Accuracy (KNN): "+ str(KNN_Bayes.best_score_)) ``` ### `Artificial Neural Network` ``` # Artificial Neural Network (ANN) ANN_params = { 'optimizer': ['adam','sgd'], 'activation': ['relu','tanh'], 'batch_size': [16,32], 'neurons':Integer(10,100), 'epochs':[30,50], 'patience':Integer(5,20) } ANN_clf = KerasClassifier(build_fn = ann_model, verbose = 0) ANN_Bayes = BayesSearchCV(ANN_clf, ANN_params,cv = 3,n_iter = 10, scoring = 'accuracy') ANN_Bayes.fit(X, y) print(ANN_Bayes.best_params_) print("Accuracy (ANN): "+ str(ANN_Bayes.best_score_)) ``` ### `4] Bayesian Optimization with Tree-structured Parzen Estimator (BO-TPE)` ``` from sklearn.model_selection import StratifiedKFold from hyperopt import hp, fmin, tpe, STATUS_OK, Trials ``` ### `Random Forest` ``` # Random Forest (RF) def RF_fun(params): params = { 'n_estimators': int(params['n_estimators']), 'max_features': int(params['max_features']), "criterion":str(params['criterion']) } RF_clf = RandomForestClassifier(**params) RF_score = cross_val_score(RF_clf, X, y, cv = StratifiedKFold(n_splits = 3),scoring = 'accuracy').mean() return 
{'loss':-RF_score, 'status': STATUS_OK } RF_space = { 'n_estimators': hp.quniform('n_estimators', 10, 100, 1), "max_features":hp.quniform('max_features', 1, 32, 1), "criterion":hp.choice('criterion',['gini','entropy']) } RF_best = fmin(fn = RF_fun, space = RF_space, algo = tpe.suggest, max_evals = 20) print("Estimated optimum (RF): " +str(RF_best)) ``` ### `Support Vector Machine` ``` # Support Vector Machine (SVM) def SVM_fun(params): params = { 'C': abs(float(params['C'])), "kernel":str(params['kernel']) } SVM_clf = SVC(gamma ='scale', **params) SVM_score = cross_val_score(SVM_clf, X, y, cv = StratifiedKFold(n_splits = 3), scoring ='accuracy').mean() return {'loss':-SVM_score, 'status': STATUS_OK } SVM_space = { 'C': hp.normal('C', 0, 50), "kernel":hp.choice('kernel',['poly','rbf']) } SVM_best = fmin(fn = SVM_fun, space = SVM_space, algo = tpe.suggest, max_evals = 20) print("Estimated optimum (SVM): "+str(SVM_best)) ``` ### `K-Nearest Neighbor` ``` # K-Nearest Neighbor (KNN) def KNN_fun(params): params = {'n_neighbors': abs(int(params['n_neighbors'])) } KNN_clf = KNeighborsClassifier(**params) KNN_score = cross_val_score(KNN_clf, X, y, cv = StratifiedKFold(n_splits=3), scoring='accuracy').mean() return {'loss':-KNN_score, 'status': STATUS_OK } KNN_space = {'n_neighbors': hp.quniform('n_neighbors', 1, 20, 1)} KNN_best = fmin(fn = KNN_fun, space = KNN_space, algo = tpe.suggest, max_evals = 10) print("Estimated optimum (KNN): "+str(KNN_best)) ``` ### `Artificial Neural Network` ``` # Artificial Neural Network (ANN) def ANN_fun(params): params = { "optimizer":str(params['optimizer']), "activation":str(params['activation']), 'batch_size': abs(int(params['batch_size'])), 'neurons': abs(int(params['neurons'])), 'epochs': abs(int(params['epochs'])), 'patience': abs(int(params['patience'])) } ANN_clf = KerasClassifier(build_fn = ann_model,**params, verbose = 0) ANN_score = -np.mean(cross_val_score(ANN_clf, X, y, cv=3, scoring = "accuracy")) return {'loss':ANN_score, 
'status': STATUS_OK } ANN_space = { "optimizer":hp.choice('optimizer',['adam','rmsprop','sgd']), "activation":hp.choice('activation',['relu','tanh']), 'batch_size': hp.quniform('batch_size', 16, 32, 16), 'neurons': hp.quniform('neurons', 10, 100, 10), 'epochs': hp.quniform('epochs', 30, 50, 10), 'patience': hp.quniform('patience', 5, 20, 5), } ANN_best = fmin(fn = ANN_fun, space = ANN_space, algo = tpe.suggest, max_evals = 10) print("Estimated optimum (ANN): "+str(ANN_best)) ``` ---
github_jupyter
# Exploring MNIST Manifolds ** November 2017 ** ** Andrew Riberio @ [AndrewRib.com](http://www.andrewrib.com) ** Pg 158 of the [Deep Learning Book](http://www.deeplearningbook.org/), "In the case of images, we can certainly think of many possible transformations that allow us to trace out a manifold in image space: we can gradually dim or brighten the lights, gradually move or rotate objects in the image, gradually alter the colors on the surfaces of objects, and so forth. Multiple manifolds are likely involved in most applications." In this notebook we will explore manifolds in the MNIST dataset by modeling the transformations which represent their traversal. Which manifolds describe the variation in the data? Are some transformations over-represented? I.e. do we have a manifold bias? How would we quantify this? We wish to explore these questions here. Resources * [Visualizing MNIST: An Exploration of Dimensionality Reduction](http://colah.github.io/posts/2014-10-Visualizing-MNIST/) * [A Beginner’s Guide to Eigenvectors, PCA, Covariance and Entropy](https://deeplearning4j.org/eigenvector) * [PCA Tutorial](https://strata.uga.edu/software/pdf/pcaTutorial.pdf) ** Note: ** This notebook contains interactive elements and certain LaTeX snippets that will not render in GitHub markdown. You must run this notebook in your local Jupyter environment for the interactive elements, or use the URL of this repo with the online NBViewer if you only wish to render the LaTeX. 
## Libraries ``` # Visualization from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt #Interactive Components from ipywidgets import interact # Dataset Operations and Linear Algebra import pandas as pd import numpy as np import math from scipy import stats # Machine Learning from sklearn.cluster import KMeans from sklearn.decomposition import PCA # MNIST Dataset from tensorflow.examples.tutorials.mnist import input_data ``` ## Gathering data from files into a dictionary numpy arrays. ``` mnist = input_data.read_data_sets("MNIST_data/", one_hot=False) unique, counts = np.unique(mnist.train.labels, return_counts=True) sortedCount = sorted(dict(zip(unique, counts)).items(), key=lambda x: x[1],reverse=True) sortedCountLabels = [i[0] for i in sortedCount] sortedCountFreq = [i[1] for i in sortedCount] # TODO: Make more efficient. # First we will zip the training labels with the training images dataWithLabels = zip(mnist.train.labels, mnist.train.images) # Now let's turn this into a dictionary where subsets of the images in respect # to digit class are stored via the corresponding key. # Init dataDict with keys [0,9] and empty lists. digitDict = {} for i in range(0,10): digitDict[i] = [] # Assign a list of image vectors to each corresponding digit class index. for i in dataWithLabels: digitDict[i[0]].append(i[1]) # Convert the lists into numpy matricies. (could be done above, but I claim ignorace) for i in range(0,10): digitDict[i] = np.matrix(digitDict[i]) print("Digit {0} matrix shape: {1}".format(i,digitDict[i].shape)) #nImgs = digitDict[9].shape[0] #avgImg = np.dot(digitDict[9].T, np.ones((nImgs,1)))/nImgs ``` ## Approach 1: PCA With principle components analysis, we can explore sources of variability in the MNIST dataset. The aim is to use this notion of variablity to describe the transformations found in the data. Once we can model the transformations found in the data, we can use them to describe various manifolds in MNIST. 
Let's begin by using this interactive section to explore the first two principle components as vectors plotted over each digit with respect to each digit class. ``` def pcaVectOnIMG(dataset,elmIndex): X_r = PCA(n_components=2).fit(dataset) pcaVect = X_r.transform(dataset[elmIndex]) origin = [[14], [14]] plt.figure(figsize=(5,5)) plt.imshow(dataset[elmIndex].reshape(28,28),cmap='gray') plt.quiver(*origin, pcaVect[:,0], pcaVect[:,1], color=['r','b','g'], scale=10) plt.show() z = lambda elmIndex=0,digit=1 :pcaVectOnIMG(digitDict[digit],elmIndex) # Will error for some values of elmIndex. interact( z, elmIndex=[0,8000],digit=[0,9]) ``` I hope you noticed how the first two principle components as a vector did a good job showing the left or right lean bias of the 1's; however, when a 1 is stretched or shrunk in the virtical direction, variablility not captured by the first two principle components essentially add noise and reduce the signifigance of where the vector is pointing. Here's an example where the vector does a great job at showing the variablity. ``` pcaVectOnIMG(digitDict[1],1340) ``` Here's an example where our principle component vector does not do a good job at showing the variability. ``` pcaVectOnIMG(digitDict[1],2313) ``` ### Experiment 1.1: Plotting the space of PCA Vectors The following interactive section is designed to explore the space different principle component vectors. Explore how different principle component vectors in R2 describes the variability in the data. 
``` def plotPCAVectors(data,componentIndexVec=[0,1],nComponents=120,filterDensity=50): n = data.shape[0] meanDigit = np.dot(data.T, np.ones((n,1)))/n data = data[0::filterDensity] X_r = PCA(n_components=nComponents).fit(data).transform(data) print("fIndex: the first principle component in the vector") print("sIndex: the second principle component in the vector") print("digit: which digit class we are exploring.") plt.figure(figsize=(6,6)) ax = plt.gca() origin = [[14], [14]] # origin point plt.imshow(meanDigit.reshape(28,28),cmap='gray') plt.quiver(*origin, X_r[:,componentIndexVec[0]], X_r[:,componentIndexVec[1]], color=['r','b','g'], scale=13) plt.show() z = lambda fIndex=0,sIndex=1,digit=1:plotPCAVectors(digitDict[digit],[fIndex,sIndex]) interact(z,fIndex=[0,119],sIndex=[0,119],digit=[0,9]) ``` To obtain vectors which best show the variability in R2, we would need to find particular principle component vectors and combine them with vector mathematics. I will take a side step to explore what the magnitude of these vectors mean for the underlying images. ### Experiment 1.2: Magnitude of PCA Vectors ``` def pcaR2Vects(dataset): return PCA(n_components=2).fit(dataset).transform(dataset) #pcaR2Vects(digitDict[1]) def dataWithPCA_R2Vects(dataset): pcaVects = pcaR2Vects(dataset) print(dataset.shape) print(pcaVects.shape) return zip(dataset,pcaVects) def R2Norm(vects): return np.linalg.norm( pcaR2Vects(ds), 2 , axis= 1) ds = digitDict[1] vectMag = list(zip(list(range(0,ds.shape[0],1)),R2Norm( pcaR2Vects(ds) ))) rs = sorted(vectMag, key=lambda x: x[1]) nCases = len(rs) def tst1(elm=0): plt.imshow(ds[rs[elm][0]].reshape(28,28),cmap='gray') plt.show() interact(tst1,elm=[0,20]) interact(tst1,elm=[100,200]) ``` ## Approach 2: MNIST Statistics In this section we will do things like explore the co-variance matrix of the digits. ## Approach 3: K-Means Manifold Clustering Here we will work within digit classes. 
def meanDigitClipped(digitClass):
    """Per-pixel mean image of a digit class, clipped to [1e-5, 1].

    ``digitClass`` is an (n_samples, n_pixels) array; the result is an
    (n_pixels, 1) column vector of mean pixel intensities.  The lower
    clip keeps every pixel strictly positive so downstream visualisation
    never sees an exactly-zero intensity.
    """
    sampleCount = digitClass.shape[0]
    # Multiplying by a column of ones sums each pixel across all samples.
    summed = np.dot(digitClass.T, np.ones((sampleCount, 1)))
    return np.clip(summed / sampleCount, 0.00001, 1)
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # 케라스를 사용한 분산 훈련 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/distribute/keras"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/tutorials/distribute/keras.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/tutorials/distribute/keras.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub) 소스 보기</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/tutorials/distribute/keras.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다. 이 번역에 개선할 부분이 있다면 [tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n/) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다. 문서 번역이나 리뷰에 참여하려면 [docs-ko@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로 메일을 보내주시기 바랍니다. ## 개요 `tf.distribute.Strategy` API는 훈련을 여러 처리 장치들로 분산시키는 것을 추상화한 것입니다. 
기존의 모델이나 훈련 코드를 조금만 바꾸어 분산 훈련을 할 수 있게 하는 것이 분산 전략 API의 목표입니다. 이 튜토리얼에서는 `tf.distribute.MirroredStrategy`를 사용합니다. 이 전략은 동기화된 훈련 방식을 활용하여 한 장비에 있는 여러 개의 GPU로 그래프 내 복제를 수행합니다. 다시 말하자면, 모델의 모든 변수를 각 프로세서에 복사합니다. 그리고 각 프로세서의 그래디언트(gradient)를 [올 리듀스(all-reduce)](http://mpitutorial.com/tutorials/mpi-reduce-and-allreduce/)를 사용하여 모읍니다. 그다음 모아서 계산한 값을 각 프로세서의 모델 복사본에 적용합니다. `MirroredStategy`는 텐서플로에서 기본으로 제공하는 몇 가지 분산 전략 중 하나입니다. 다른 전략들에 대해서는 [분산 전략 가이드](../../guide/distributed_training.ipynb)를 참고하십시오. ### 케라스 API 이 예는 모델과 훈련 루프를 만들기 위해 `tf.keras` API를 사용합니다. 직접 훈련 코드를 작성하는 방법은 [사용자 정의 훈련 루프로 분산 훈련하기](training_loops.ipynb) 튜토리얼을 참고하십시오. ## 필요한 패키지 가져오기 ``` # 텐서플로와 텐서플로 데이터셋 패키지 가져오기 !pip install tensorflow-gpu==2.0.0-rc1 import tensorflow_datasets as tfds import tensorflow as tf tfds.disable_progress_bar() import os ``` ## 데이터셋 다운로드 MNIST 데이터셋을 [TensorFlow Datasets](https://www.tensorflow.org/datasets)에서 다운로드받은 후 불러옵니다. 이 함수는 `tf.data` 형식을 반환합니다. `with_info`를 `True`로 설정하면 전체 데이터에 대한 메타 정보도 함께 불러옵니다. 이 정보는 `info` 변수에 저장됩니다. 여기에는 훈련과 테스트 샘플 수를 비롯한 여러가지 정보들이 들어있습니다. ``` datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True) mnist_train, mnist_test = datasets['train'], datasets['test'] ``` ## 분산 전략 정의하기 분산과 관련된 처리를 하는 `MirroredStrategy` 객체를 만듭니다. 이 객체가 컨텍스트 관리자(`tf.distribute.MirroredStrategy.scope`)도 제공하는데, 이 안에서 모델을 만들어야 합니다. ``` strategy = tf.distribute.MirroredStrategy() print('장치의 수: {}'.format(strategy.num_replicas_in_sync)) ``` ## 입력 파이프라인 구성하기 다중 GPU로 모델을 훈련할 때는 배치 크기를 늘려야 컴퓨팅 자원을 효과적으로 사용할 수 있습니다. 기본적으로는 GPU 메모리에 맞추어 가능한 가장 큰 배치 크기를 사용하십시오. 이에 맞게 학습률도 조정해야 합니다. ``` # 데이터셋 내 샘플의 수는 info.splits.total_num_examples 로도 # 얻을 수 있습니다. num_train_examples = info.splits['train'].num_examples num_test_examples = info.splits['test'].num_examples BUFFER_SIZE = 10000 BATCH_SIZE_PER_REPLICA = 64 BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync ``` 픽셀의 값은 0~255 사이이므로 [0-1 범위로 정규화](https://en.wikipedia.org/wiki/Feature_scaling)해야 합니다. 
def decay(epoch):
    """Step learning-rate schedule used by the LearningRateScheduler callback.

    Returns 1e-3 for epochs 0-2, 1e-4 for epochs 3-6, and 1e-5 from
    epoch 7 onwards.
    """
    if epoch < 3:
        return 1e-3
    if epoch < 7:
        return 1e-4
    return 1e-5
``` model.fit(train_dataset, epochs=12, callbacks=callbacks) ``` 아래에서 볼 수 있듯이 체크포인트가 저장되고 있습니다. ``` # 체크포인트 디렉터리 확인하기 !ls {checkpoint_dir} ``` 모델의 성능이 어떤지 확인하기 위하여, 가장 최근 체크포인트를 불러온 후 테스트 데이터에 대하여 `evaluate`를 호출합니다. 평소와 마찬가지로 적절한 데이터셋과 함께 `evaluate`를 호출하면 됩니다. ``` model.load_weights(tf.train.latest_checkpoint(checkpoint_dir)) eval_loss, eval_acc = model.evaluate(eval_dataset) print('평가 손실: {}, 평가 정확도: {}'.format(eval_loss, eval_acc)) ``` 텐서보드 로그를 다운로드받은 후 터미널에서 다음과 같이 텐서보드를 실행하여 훈련 결과를 확인할 수 있습니다. ``` $ tensorboard --logdir=path/to/log-directory ``` ``` !ls -sh ./logs ``` ## SavedModel로 내보내기 플랫폼에 무관한 SavedModel 형식으로 그래프와 변수들을 내보냅니다. 모델을 내보낸 후에는, 전략 범위(scope) 없이 불러올 수도 있고, 전략 범위와 함께 불러올 수도 있습니다. ``` path = 'saved_model/' tf.keras.experimental.export_saved_model(model, path) ``` `strategy.scope` 없이 모델 불러오기. ``` unreplicated_model = tf.keras.experimental.load_from_saved_model(path) unreplicated_model.compile( loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) eval_loss, eval_acc = unreplicated_model.evaluate(eval_dataset) print('평가 손실: {}, 평가 정확도: {}'.format(eval_loss, eval_acc)) ``` `strategy.scope`와 함께 모델 불러오기. ``` with strategy.scope(): replicated_model = tf.keras.experimental.load_from_saved_model(path) replicated_model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) eval_loss, eval_acc = replicated_model.evaluate(eval_dataset) print ('평가 손실: {}, 평가 정확도: {}'.format(eval_loss, eval_acc)) ``` ### 예제와 튜토리얼 케라스 적합/컴파일과 함께 분산 전략을 쓰는 예제들이 더 있습니다. 1. `tf.distribute.MirroredStrategy`를 사용하여 학습한 [Transformer](https://github.com/tensorflow/models/blob/master/official/nlp/transformer/transformer_main.py) 예제. 2. `tf.distribute.MirroredStrategy`를 사용하여 학습한 [NCF](https://github.com/tensorflow/models/blob/master/official/recommendation/ncf_keras_main.py) 예제. [분산 전략 가이드](../../guide/distributed_training.ipynb#examples_and_tutorials)에 더 많은 예제 목록이 있습니다. 
## 다음 단계 * [분산 전략 가이드](../../guide/distributed_training.ipynb)를 읽어보세요. * [사용자 정의 훈련 루프를 사용한 분산 훈련](training_loops.ipynb) 튜토리얼을 읽어보세요. Note: `tf.distribute.Strategy`은 현재 활발히 개발 중입니다. 근시일내에 예제나 튜토리얼이 더 추가될 수 있습니다. 한 번 사용해 보세요. [깃허브 이슈](https://github.com/tensorflow/tensorflow/issues/new)를 통하여 피드백을 주시면 감사하겠습니다.
github_jupyter
# Electronic structure ## Introduction The molecular Hamiltonian is $$ \mathcal{H} = - \sum_I \frac{\nabla_{R_I}^2}{M_I} - \sum_i \frac{\nabla_{r_i}^2}{m_e} - \sum_I\sum_i \frac{Z_I e^2}{|R_I-r_i|} + \sum_i \sum_{j>i} \frac{e^2}{|r_i-r_j|} + \sum_I\sum_{J>I} \frac{Z_I Z_J e^2}{|R_I-R_J|} $$ Because the nuclei are much heavier than the electrons they do not move on the same time scale and therefore, the behavior of nuclei and electrons can be decoupled. This is the Born-Oppenheimer approximation. Therefore, one can first tackle the electronic problem with nuclear coordinate entering only as parameters. The energy levels of the electrons in the molecule can be found by solving the non-relativistic time independent Schroedinger equation, $$ \mathcal{H}_{\text{el}} |\Psi_{n}\rangle = E_{n} |\Psi_{n}\rangle $$ where $$ \mathcal{H}_{\text{el}} = - \sum_i \frac{\nabla_{r_i}^2}{m_e} - \sum_I\sum_i \frac{Z_I e^2}{|R_I-r_i|} + \sum_i \sum_{j>i} \frac{e^2}{|r_i-r_j|}. $$ In particular the ground state energy is given by: $$ E_0 = \frac{\langle \Psi_0 | H_{\text{el}} | \Psi_0 \rangle}{\langle \Psi_0 | \Psi_0 \rangle} $$ where $\Psi_0$ is the ground state of the system. However, the dimensionality of this problem grows exponentially with the number of degrees of freedom. To tackle this issue we would like to prepare $\Psi_0$ on a quantum computer and measure the Hamiltonian expectation value (or $E_0$) directly. So how do we do that concretely? ## The Hartree-Fock initial state A good starting point for solving this problem is the Hartree-Fock (HF) method. This method approximates a N-body problem into N one-body problems where each electron evolves in the mean-field of the others. Classically solving the HF equations is efficient and leads to the exact exchange energy but does not include any electron correlation. Therefore, it is usually a good starting point to start adding correlation. 
The Hamiltonian can then be re-expressed in the basis of the solutions of the HF method, also called Molecular Orbitals (MOs): $$ \hat{H}_{elec}=\sum_{pq} h_{pq} \hat{a}^{\dagger}_p \hat{a}_q + \frac{1}{2} \sum_{pqrs} h_{pqrs} \hat{a}^{\dagger}_p \hat{a}^{\dagger}_q \hat{a}_r \hat{a}_s $$ with the 1-body integrals $$ h_{pq} = \int \phi^*_p(r) \left( -\frac{1}{2} \nabla^2 - \sum_{I} \frac{Z_I}{R_I- r} \right) \phi_q(r) $$ and 2-body integrals $$ h_{pqrs} = \int \frac{\phi^*_p(r_1) \phi^*_q(r_2) \phi_r(r_2) \phi_s(r_1)}{|r_1-r_2|}. $$ The MOs ($\phi_u$) can be occupied or virtual (unoccupied). One MO can contain 2 electrons. However, in what follows we actually work with Spin Orbitals which are associated with a spin up ($\alpha$) of spin down ($\beta$) electron. Thus Spin Orbitals can contain one electron or be unoccupied. We now show how to concretely realise these steps with Qiskit. Qiskit is interfaced with different classical codes which are able to find the HF solutions. Interfacing between Qiskit and the following codes is already available: * Gaussian * Psi4 * PyQuante * PySCF In the following we set up a PySCF driver, for the hydrogen molecule at equilibrium bond length (0.735 angstrom) in the singlet state and with no charge. ``` from qiskit_nature.drivers import UnitsType, Molecule from qiskit_nature.drivers.second_quantization import PySCFDriver molecule = Molecule(geometry=[['H', [0., 0., 0.]], ['H', [0., 0., 0.735]]], charge=0, multiplicity=1) driver = PySCFDriver(molecule = molecule, unit=UnitsType.ANGSTROM, basis='sto3g') ``` For further information about the drivers see https://qiskit.org/documentation/nature/apidocs/qiskit_nature.drivers.second_quantization.html ## The mapping from fermions to qubits <img src="aux_files/jw_mapping.png" width="500"> The Hamiltonian given in the previous section is expressed in terms of fermionic operators. 
To encode the problem into the state of a quantum computer, these operators must be mapped to spin operators (indeed the qubits follow spin statistics). There exist different mapping types with different properties. Qiskit already supports the following mappings: * The Jordan-Wigner 'jordan_wigner' mapping (über das paulische äquivalenzverbot. In The Collected Works of Eugene Paul Wigner (pp. 109-129). Springer, Berlin, Heidelberg (1993)). * The Parity 'parity' (The Journal of chemical physics, 137(22), 224109 (2012)) * The Bravyi-Kitaev 'bravyi_kitaev' (Annals of Physics, 298(1), 210-226 (2002)) The Jordan-Wigner mapping is particularly interesting as it maps each Spin Orbital to a qubit (as shown on the Figure above). Here we set up the Electronic Structure Problem to generate the Second quantized operator and a qubit converter that will map it to a qubit operator. ``` from qiskit_nature.problems.second_quantization import ElectronicStructureProblem from qiskit_nature.converters.second_quantization import QubitConverter from qiskit_nature.mappers.second_quantization import JordanWignerMapper, ParityMapper es_problem = ElectronicStructureProblem(driver) second_q_op = es_problem.second_q_ops() print(second_q_op[0]) ``` If we now transform this Hamiltonian for the given driver defined above we get our qubit operator: ``` qubit_converter = QubitConverter(mapper=JordanWignerMapper()) qubit_op = qubit_converter.convert(second_q_op[0]) print(qubit_op) ``` In the minimal (STO-3G) basis set 4 qubits are required. We can reduce the number of qubits by using the Parity mapping, which allows for the removal of 2 qubits by exploiting known symmetries arising from the mapping. ``` qubit_converter = QubitConverter(mapper = ParityMapper(), two_qubit_reduction=True) qubit_op = qubit_converter.convert(second_q_op[0], num_particles=es_problem.num_particles) print(qubit_op) ``` This time only 2 qubits are needed. 
Now that the Hamiltonian is ready, it can be used in a quantum algorithm to find information about the electronic structure of the corresponding molecule. Check out our tutorials on Ground State Calculation and Excited States Calculation to learn more about how to do that in Qiskit! ``` import qiskit.tools.jupyter %qiskit_version_table %qiskit_copyright ```
github_jupyter
``` #The first cell is just to align our markdown tables to the left vs. center %%html <style> table {float:left} </style> ``` # Python Dictionaries ## Student Notes *** ## Learning Objectives In this lesson you will: 1. Learn the fundamentals of dictionaries in Python 2. Work with dictionaries in Python 3. Access data that is stored in a dictionary data structure 4. Analyze data that is stored in dictionaries ## Modules covered in this lesson: >- `pprint`, used to "pretty print" a dictionary's values ## Links to topics and functions: >- <a id='Lists'></a>[Dictionary Notes](#Initial-Notes-on-Dictionaries) >- <a id='methods'></a>[Dictionary methods](#Methods) >- <a id='pretty'></a>[Pretty Print with pprint](#pprint) >- <a id='sort'></a>[Sorting Dictionaries](#Sorting) >- <a id='lambda1'></a>[lambda Function intro](#lambda) >- <a id='analytics'></a>[Analytics with Dictionaries](#Analytics-with-Dictionaries) >- <a id='markdown'></a>[Markdown Exec Summary](#Markdown) >>- This is a handy markdown install that allows us to create nicely formatted reports within jupyter >- <a id='HW'></a> [Homework](#Homework) ### References: Sweigart(2015, pp. 105-121) #### Don't forget about the Python visualizer tool: http://pythontutor.com/visualize.html#mode=display ## Dictionary Methods and New Functions covered in this lesson: |Dict Methods | Functions | |:-----------: |:----------:| |keys() | pprint() | |values() | pformat() | |items() | | |get() | | |setdefault() | | # Initial Notes on Dictionaries >- Dictionaries offer us a way to store and organize data in Python programs much like a database >>- `List Definition`: a *dictionary* is a data structure that allows for storage of almost any data type for indexes >>- *Dictionaries* use a *key* vs an index as in lists to make *key-value* pairs >>- Unlike lists, the items are unordered meaning there is no "first" item like we see with a list at index 0. 
>>>- Because dictionaries are unordered we can't slice them like we do with lists >>- However, because we can use virtually any value as a key we have much more flexibility in how we can organize our data >>- The key-value pairs in a dictionary are similar to how databases are used to store and organize data >>- Dictionaries start with a `{` and end with a `}` >>- Dictionaries can be nested within other dictionaries # When do we typically use dictionaries? >- When you want to map (associate) some value to another >>- For example, states full name to abbreviation: states = {'Oregon': 'OR'} >>- Or customers of a company: customers = {'fName':'Micah','lName':'McGee', 'email':'micah.mcgee@colorado.edu'} >- Dictionaries can be used when we need to "look up" a value ('Micah') from another value ('fName') >>- We can can think of dictionaries as "look up" tables ## What are the main difference between lists and dictionaries? >- A list is an ordered list of items that we can access and slice by the index numbers >- A dictionary is used for matching some items (keys) to other items (values) #### Let's work through some examples to get familiar with dictionaries ### Another way to get values with the `get()` method ### What if we want to add a city key with a value to our customers dictionary? ### Can we add integer key values? ### Note: end of video 1 # Methods ## Some common dictionary methods ### How can we print all the values in a dictionary? ### How can we print all the keys in a dictionary? ### How about printing out the `key:value` pairs? ### Another way to print out `key:value` pairs ### How do we check if a key or value is already in a dictionary? ### If a key in a dictionary doesn't have a value what can we do so we don't get error codes? 
>- The `setdefault()` method is used to set a default value for a key so that all keys will have a value ## An example of why using `setdefault()` comes in handy >- We will write a short program to count the number of occurrences for each letter in a given string #### Commented out code for the previous example ``` #Define a string and put any thing in it text = "I wonder how many times each letter comes up in this short text string" #Define an empty dictionary to store our key (letter) and values (counts of letters) count = {} #Write for loop to iterate through our string and count the letters #This for loop "builds" our count dictionary for letter in text: #Here we are defining our key variable, letter if letter != ' ': #This is here to exclude our spaces count.setdefault(letter,0) #We will see what not having the default value does in the next example count[letter] = count[letter] + 1 print(count) ``` #### And here is why we set a default value using `setdefault` >- Note the error code that is returned when we run the next cell ## Let's a look at the previous program in the visualizer tool http://pythontutor.com/visualize.html#mode=display ### Note: end of video 2 # `pprint` ## Now, how do we get our dictionary of counted letters to print in an easier to read format? >- "Pretty" printing using the pprint module and its functions # Sorting ## We can sort dictionaries using the `sorted()` function >- The general syntax for `sorted()` is: sorted(*iterable*, key = *key*, reverse=*reverse*) where, >>- *iterable* is the sequence to sort: list, dictionary, tuple, etc. >>- *key* is optional and represents a function to execute which decides the order. Default is None >>- *reverse* is optional where False will sort ascending and True will sort descending. 
Default is False ### Sort by keys ### Sort by values using a `lambda` function in the *key* argument >- Here we will introduce `lambda` functions >- `lambda` functions are small anonymous functions which can take any number of arguments but can only have one expression >>- The general syntax is: lambda *arguments* : *expression* >- Usually lambda functions are used inside of other functions ### `lambda` #### Some quick examples using `lambda` functions 1. Using a lambda to add 10 to any number passed in 2. Using a lambda to multiple two numbers 3. Using a lambda to add three numbers ### Now back to our example of sorting a dictionary by the values #### Sort in descending order #### Note: the `sorted()` function did not change our dictionary in place >- If we want to store the sorted dictionary we would need to assign a new dictionary variable ### Note: end of video 3 # Analytics with Dictionaries ### Let's do some analytics on our `count3` dictionary >- Q: How many unique letters were in our text3 string? >- Q: How many total letters were in our text3 string? >- Q: What is the average number of occurrences of letters in our text3 string? After answering these questions print out a message in a full sentences describing the results #### How many unique letters were in our `text3` string? #### How many total letters were in our `text3` string? #### What is the average number of occurrences of letters in the `text3` string? #### Good analytics never ends with simple output or tables but with a written report/statement >- So lets write a summary statement for our findings ### Note: End of video 4 ## Dictionaries with lists embedded in them >- We will create a dictionary to store product prices >>- The general format of our dictionary will be record number (as the key) >>- The list will store product type, product brand, and price data #### What is the value of the 3rd item in the dictionary `products`? #### Why is a list the value returned from the previous cell? 
#### What is the value of the 6th item in the dictionary? #### How many total products are in the products dictionary? (pretend you can't count them manually) ### Q: How do we return values of a list that is embedded in a dictionary? #### What is the price of the 5th item in the dictionary? #### Return the price of the 3rd item in the dictionary #### Return the item type of the 4th item in products #### Return the brand of the 2nd item in products ### Now write out what was going on in the previous cells: 1. First, we list the dictionary name: `products` 2. Next, the first value in brackets refers to the key value to look up in `products` 3. Finally, the second value in brackets refers to the index number to look up in the embedded list >- On your own, write out what using the syntax `products[5][2]` tells Python to do ### What could our product dictionary look like in a database for a company? |prodID | prodType | prodBrand | prodPrice | |:-----------: |:----------:|:---------:|:----------| |1 | TV | TCL |200 | |2 | PC | HP |500 | |3 | TV | Visio |250 | |4 | Fridge | Samsung |1000 | |5 | TV | LG |850 | ### Note: End of video 5 ## Let's do some analytics for the company that sells items from products ### First, analytics always starts with questions so let's write some 1. How many total products do we have? 2. Whats the total of all prices? 3. What is the average price all products? 4. What is the average price of TVs? #### How many total products do we have? #### What is the total of all prices? #### What is the average price of all products rounded to 2 decimals? #### To answer product specific questions like `Q4` we need to do a bit more >- Let's break that question into subquestions >>- How many total TVs are in products? >>- What is the total price of the TVs? >>- Then what is the average price of all TVs? #### First, how many total TVs are there? #### Next, what is the total price of all TVs? #### Now, we can find average price for all TVs? 
## Ok, we got the answer in multiple steps but can we do this in one cell? >- Let's use the individual cells we used above to help us answer our question in one cell ### But we aren't done yet... analytics doesn't stop at simple output ### Note: End of video 6 ## We could also create a TV only price list and then analyze the list data #### What is our max TV price? #### What is our average TV price? ## Our product pricing example in one code cell >- Run this code through the Python tutor to help see how the code works >- http://pythontutor.com/visualize.html#mode=display ### Note: End of video 7 # Build a dictionary using a for loop ## Task: create a dictionary where, >- The keys are integers from 1-5 >- The values are multiples of 10 starting at 10 # Markdown ## A better way to print data using markdown cells >- Follow the steps below to install a module that will allow you to make nicely formatted summary reports ## We can describe and print our results in a better format using markdown cells To be able to do this we have to install some notebook extensions using the Anaconda shell 1. If you have installed Anaconda on your machine then... 2. Search for "Anaconda Powershell prompt" >- On Macs you would use your terminal 3. Open up the Anaconda Powershell and type the following commands >- `pip install jupyter_contrib_nbextensions` >- `jupyter contrib nbextension install --user` >- `jupyter nbextension enable python-markdown/main` 4. 
After everything installs on your machine, you will need to reload Anaconda and Jupyter
github_jupyter
# COVID-19 comparison using Pie charts Created by (c) Shardav Bhatt on 17 June 2020 # 1. Introduction Jupyter Notebook Created by Shardav Bhatt Data (as on 16 June 2020) References: 1. Vadodara: https://vmc.gov.in/coronaRelated/covid19dashboard.aspx 2. Gujarat: https://gujcovid19.gujarat.gov.in/ 3. India: https://www.mohfw.gov.in/ 4. Other countries and World: https://www.worldometers.info/coronavirus/ In this notebook, I have considered data of COVID-19 cases at Local level and at Global level. The aim is to determine whether there is a significance difference between Global scenario and Local scenario of COVID-19 cases or not. The comparison is done using Pie charts for active cases, recovered cases and deaths. # 2. Importing necessary modules ``` import matplotlib.pyplot as plt import pandas as pd import numpy as np import datetime ``` # 3. Extracting data from file ``` date = str(np.array(datetime.datetime.now())) data = pd.read_csv('data_17June.csv') d = data.values row = np.zeros((d.shape[0],d.shape[1]-2)) for i in range(d.shape[0]): row[i] = d[i,1:-1] ``` # 4. Creating a funtion to print % in Pie chart ``` def func(pct, allvals): absolute = int(round(pct/100.*np.sum(allvals))) return "{:.1f}% ({:d})".format(pct, absolute) ``` # 5. Plot pre-processing ``` plt.close('all') date = str(np.array(datetime.datetime.now())) labels = 'Infected', 'Recovered', 'Died' fs = 20 C = ['lightskyblue','lightgreen','orange'] def my_plot(i): fig, axs = plt.subplots() axs.pie(row[i], autopct=lambda pct: func(pct, row[i]), explode=(0, 0.1, 0), textprops=dict(color="k", size=fs-2), colors = C, radius=1.5) axs.legend(labels, fontsize = fs-4, bbox_to_anchor=(1.1,1)) figure_title = str(d[i,0])+': '+str(d[i,-1])+' cases on '+date plt.text(1, 1.2, figure_title, horizontalalignment='center', fontsize=fs, transform = axs.transAxes) plt.show() print('\n') ``` # 6. Local scenario of COVID-19 cases ``` for i in range(4): my_plot(i) ``` # My Observations: 1. 
Death rate in Vadodara city is less compared to state and nation. Death rate of Gujarat is almost double compared to the nation. Death rate of India is less compared to the global death rate. 2. Recovery rates of Vadodara and Gujarat are higher compared to national and global recovery rate. The recovery rate of India and of World are similar. 3. Proportion of active cases in Vadodara and Gujarat is lower compared to national and global active cases. Proportion of active cases of India and world are similar. # 7. Global scenario of COVID-19 cases ``` for i in range(4,d.shape[0]): my_plot(i) ``` # Observations: 1. Russia, Chile, Turkey, Peru have comparatively lower death rate i.e. below 3%. Mexico, Italy and France have comparatively higher death rate i.e. above 10%. 2. Germany, Chile, Turkey, Iran, Italy, Mexico have recovery rate above 75%. These countries are comming out of danger. 3. Russia, India, Peru, Brazil, France, USA have have recovery rate below 53%. These countries needs to recover fast. 4. Proportion of active cases is least in Germany, Italy, Turkey is least
github_jupyter
## In this notebook we are going to Predict the Growth of Apple Stock using Linear Regression Model and CRISP-DM. ``` #importing the libraries import numpy as np import pandas as pd from sklearn import metrics %matplotlib inline import matplotlib.pyplot as plt import math from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split ``` # Data Understanding The data is already processed to price-split values so it is easy to analysis but we are creating new tables to optimize our model ``` #importing Price Split Data data = pd.read_csv('prices-split-adjusted.csv') data #checking data for null values data.isnull().sum() ``` #### There are no null values in the Data table we are going to create # Data Preprocessing Creating Table for a specific Stock ``` #Initializing the Dataset for the Stock to be Analysized data = data.loc[(data['symbol'] == 'AAPL')] data = data.drop(columns=['symbol']) data = data[['date', 'open', 'close', 'low', 'volume', 'high']] data #Number of rows and columns we are working with data.shape ``` Ploting the closing price of the Stock ``` plt.scatter(data.date, data.close, color='blue') plt.xlabel("Time") plt.ylabel("Close") plt.show() ``` ### Here we can see that the Stock is growing in Long-Term with multiple medium downfalls So it is good for Long-term investing ``` #For plotting against time data['date'] = pd.to_datetime(data.date) #Plot for close values on each date data['close'].plot(figsize=(16, 8)) ``` # Linear Regression Here we are going to use LR to make simple prediction of the stock value change. We are checking for accuracy on a particular Stock. 
``` x1 = data[['open', 'high', 'low', 'volume']] y1 = data['close'] #Making test and train datasets x1_train, x1_test, y1_train, y1_test = train_test_split(x1, y1, random_state = 0) x1_train.shape x1_test.shape #Initializing LinearRegression regression = LinearRegression() regression.fit(x1_train, y1_train) print(regression.coef_) print(regression.intercept_) predicted = regression.predict(x1_test) #Predictions for Stock values print(x1_test) predicted.shape ``` # Evaluation of the model Making table for Actual price and Predicted Price ``` dframe = pd.DataFrame(y1_test, predicted) dfr = pd.DataFrame({'Actual_Price':y1_test, 'Predicted_Price':predicted}) print(dfr) #Actual values vs Predicted Values dfr.head(10) from sklearn.metrics import confusion_matrix, accuracy_score #Regression Score Analysis regression.score(x1_test, y1_test) print('Mean Absolute Error:', metrics.mean_absolute_error(y1_test, predicted)) print('Mean Squared Error:', metrics.mean_squared_error(y1_test, predicted)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y1_test, predicted))) x2 = dfr.Actual_Price.mean() y2 = dfr.Predicted_Price.mean() Accuracy1 = x2/y2*100 print("The accuracy of the model is " , Accuracy1) ``` # Deploying the model by visualization ### Plotting Actual Close values vs Predicted Values in LR Model ``` plt.scatter(dfr.Actual_Price, dfr.Predicted_Price, color='red') plt.xlabel("Actual Price") plt.ylabel("Predicted Price") plt.show() ``` We can see that using simple Linear Regression on a Scalar and Linear entity as Stock Price over a period of time gives a simple and straight line. Stating that the stock is growing over time. So now we are somewhat confident in investing in this stock. To better understand next we are using LSTM model.
github_jupyter
# Beginner's Python—Session Two Finance/Economics Answers ## Analysing blue-chip stocks Figures 1, 2, 3, and 4 below illustrate the stock price time series of four different tech companies (Apple, Facebook, Amazon and Netflix) over the last year. We will use this data to derive some insights on these stocks during the aforementioned period. ``` # Run this cell to get stock data as lists. DO NOT MODIFY THE CODE ON THIS CELL import pandas as pd import matplotlib.pyplot as plt import seaborn as sns standard_palette = list(sns.color_palette("muted")) url = 'https://raw.githubusercontent.com/warwickdatascience/beginners-python/master/session-two/subject_questions/session_two_files/' aapl = pd.read_csv( url + "AAPL.csv").rename(columns={"Open": "Price ($USD)", "Date":"Time"}) fb = pd.read_csv(url + "FB.csv").rename(columns={"Open": "Price ($USD)", "Date":"Time"}) amzn = pd.read_csv(url + "AMZN.csv").rename(columns={"Open": "Price ($USD)", "Date":"Time"}) nflx = pd.read_csv(url + "NFLX.csv").rename(columns={"Open": "Price ($USD)", "Date":"Time"}) datasets = {"AAPL": aapl,"FB": fb,"AMZN": amzn,"NFLX": nflx} aapl_prices = aapl["Price ($USD)"].tolist() fb_prices = fb["Price ($USD)"].tolist() amzn_prices = amzn["Price ($USD)"].tolist() nflx_prices = nflx["Price ($USD)"].tolist() i = 1 fig = plt.figure(figsize=(10,8)) for key in datasets: dataset = datasets[key] fig.add_subplot(2,2,i, title= "Figure {}: ".format(i) + key + " stock price (Oct.2019 - Oct.2020)") ax = sns.lineplot(data=dataset, y="Price ($USD)", x="Time", color=standard_palette[i-1]) ax = ax.xaxis.set_tick_params(bottom=False, labelbottom=False) ax i+=1 plt.tight_layout() plt.show() ``` To make this analysis easier, we assinged the price data to the following lists: - `aapl_prices` - `fb_prices` - `amzn_prices` - `nflx_prices` Did all companies record the same number of stock price observations? Find the number of observations (given by the length of the price lists) for each company and print the results below. 
``` print("Number of observations for AAPL:", len(aapl_prices)) print("Number of observations for FB:",len(fb_prices)) print("Number of observations for AMZN:",len(amzn_prices)) print("Number of observations for NFLX:",len(nflx_prices)) ``` What was the average price of Facebook stocks during the last year? Calculate your answer in the cell below, round it to two decimal places, assign it to variable `fb_avg_price_1` and print out the result. (Hint: You should use the sum(), len(), and round() functions). ``` # Calculating sum of FB prices and number of observations sum_fb_prices = sum(fb_prices) n = len(fb_prices) # Computing average price fb_avg_price_1 = sum_fb_prices/n # Rounding and printing average price fb_avg_price_1 = round(fb_avg_price_1, 2) print("Average FB stock price over the last year: $", fb_avg_price_1) ``` What was the maximum price of an Amazon stock during the last year? Calculate and print your answer below, rounding to two decimal places. ``` max_price = max(amzn_prices) max_price = round(max_price,2) print("Maximum AMZN stock price over the last year: $", max_price) ``` What was the minimum price of Apple stocks during the last year? Calculate and print the answer below, rounding to two decimal places. ``` min_price = min(aapl_prices) min_price = round(min_price,2) print("Minimum AAPL stock price over the last year: $", min_price) ``` What range of values did the price of Netflix stocks have during the last year? Calculate and print the answer below. (Hint: Is it a problem if you use 'range' as a variable name? Why?) ``` rng = max(nflx_prices) - min(nflx_prices) rng = round(rng, 2) print("NFLX stock prices over the past year had a range of $", rng) ``` *Using 'range' as a variable name will not raise an error, but it is bad practice: 'range' is a built-in function, and reassigning the name shadows the built-in, so you could no longer call range() later in your code. 
The same would apply to using 'max' and 'min' as variable names, since reassigning those names would shadow the built-in functions we used above, max() and min()* Most financial analysts predict that Facebook stocks will open tomorrow with a price of $265 USD. You, however, know that Facebook will announce its acquisition of a hot new startup that will likely boost its revenue stream significantly. Do you think the opening FB stock price will be higher or lower than the forecast given? In the cell below, ask the user to input their prediction for the opening FB price tomorrow, assign it to the variable `fb_pred`, and print its datatype WITHOUT using a print statement. ``` fb_pred = input("What is your prediction for the FB stock price tomorrow (in $USD)?") type(fb_pred) ``` *Higher revenues are desirable in a company (all else constant) hence, according to economic intuition, this unannounced and unknown piece of news would increase demand for FB stocks, resulting in an increase in its price. Therefore, it would be reasonable to predict a higher FB stock price than the one forecasted by financial analysts, given this additional information.* The variable `fb_pred` is non-numerical, which means that we won't be able to use it in further calculations. Cast this variable as float-type and add it to the list `fb_prices`. (Hint: To add this variable you will need to use the .append() function. An example of how this is done is shown in the cell below) ``` # Example use of the .append() function to add items at the end of a list my_list = [1,2,3,4,5,6] print("This is my list at the start: ", my_list) # Adding 7 to my list my_list.append(7) print("This is my list after adding 7 at the end of it: ", my_list) # Adding 99 to my list my_list.append(99) print("This is my list after adding 99 at the end of it: ", my_list) fb_pred = float(fb_pred) fb_prices.append(fb_pred) ``` Taking your prediction to be true, compute the new average Facebook stock price in the cell below. 
Round your answer to two decimal places and print it. Did your prediction affect the FB average price? Compute the difference between these two averages, rounding again to two decimal places. (Hint: Use variable `fb_avg_price_1` from code cell 3, where you computed the initial average FB price) ``` sum_fb_prices = sum(fb_prices) avg_price = sum_fb_prices/ len(fb_prices) avg_price = round(avg_price,2) print("The new average FB stock price is: $", avg_price) delta_avg_price = (avg_price - fb_avg_price_1) delta_avg_price = round(delta_avg_price, 2) print("The change in the average FB stock price given my prediction is: $", delta_avg_price) ```
github_jupyter
# COVID-19 evolution in French departments #### <br> Visualize evolution of the number of people hospitalized in French departments due to COVID-19 infection ``` %load_ext lab_black %matplotlib inline from IPython.display import HTML import requests import zipfile import io from datetime import timedelta, date import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable import pandas as pd import geopandas as gpd import contextily as ctx from PIL import Image ``` #### <br> COVID data are open data from the French open data portal data.gouv.fr: https://www.data.gouv.fr/fr/datasets/donnees-relatives-a-lepidemie-du-covid-19/ ``` url_dep = "http://osm13.openstreetmap.fr/~cquest/openfla/export/departements-20140306-5m-shp.zip" covid_url = ( "https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7" ) filter_dep = ["971", "972", "973", "974", "976"] # only metropolitan France figsize = (15, 15) tile_zoom = 7 frame_duration = 500 # in ms ``` #### <br> Load French departements data into a GeoPandas GeoSeries #### More information on these geographical open data can be found here: https://www.data.gouv.fr/fr/datasets/contours-des-departements-francais-issus-d-openstreetmap/ ``` local_path = "tmp/" r = requests.get(url_dep) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall(path=local_path) filenames = [ y for y in sorted(z.namelist()) for ending in ["dbf", "prj", "shp", "shx"] if y.endswith(ending) ] dbf, prj, shp, shx = [filename for filename in filenames] fr = gpd.read_file(local_path + shp) # + encoding="utf-8" if needed fr.crs = "epsg:4326" # {'init': 'epsg:4326'} met = fr.query("code_insee not in @filter_dep") met.set_index("code_insee", inplace=True) met = met["geometry"] ``` #### <br> Load the map tile with contextily ``` w, s, e, n = met.total_bounds bck, ext = ctx.bounds2img(w, s, e, n, zoom=tile_zoom, ll=True) ``` #### <br> Plot function to save image at a given date (title) ``` def save_img(df, 
title, img_name, vmin, vmax): gdf = gpd.GeoDataFrame(df, crs={"init": "epsg:4326"}) gdf_3857 = gdf.to_crs(epsg=3857) # web mercator f, ax = plt.subplots(figsize=figsize) ax.imshow( bck, extent=ext, interpolation="sinc", aspect="equal" ) # load background map divider = make_axes_locatable(ax) cax = divider.append_axes( "right", size="5%", pad=0.1 ) # GeoPandas trick to adjust the legend bar gdf_3857.plot( column="hosp", # Number of people currently hospitalized ax=ax, cax=cax, alpha=0.75, edgecolor="k", legend=True, cmap=matplotlib.cm.get_cmap("magma_r"), vmin=vmin, vmax=vmax, ) ax.set_axis_off() ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) ax.set_title(title, fontsize=25) plt.savefig(img_name, bbox_inches="tight") # pad_inches=-0.1 to remove border plt.close(f) ``` #### <br> Load COVID data into a pandas DataFrame ``` cov = pd.read_csv(covid_url, sep=";", index_col=2, parse_dates=True,) cov = cov.query("sexe == 0") # sum of male/female cov = cov.query("dep not in @filter_dep") cov.dropna(inplace=True) cov.head() ``` #### <br> Add geometry data to COVID DataFrame ``` cov["geometry"] = cov["dep"].map(met) ``` #### <br> Parse recorded days and save one image for each day ``` def daterange(date1, date2): for n in range(int((date2 - date1).days) + 1): yield date1 + timedelta(n) ``` #### Create the folder img at the root of the notebook ``` vmax = cov.hosp.max() for i, dt in enumerate(daterange(cov.index.min(), cov.index.max())): title = dt.strftime("%d-%b-%Y") df = cov.query("jour == @dt") df = df.drop_duplicates(subset=["dep"], keep="first") img_name = "img/" + str(i) + ".png" save_img(df, title, img_name, 0, vmax) ``` #### <br> Compile images in animated gif ``` frames = [] for i, dt in enumerate(daterange(cov.index.min(), cov.index.max())): name = "img/" + str(i) + ".png" frames.append(Image.open(name)) frames[0].save( "covid.gif", format="GIF", append_images=frames[1:], save_all=True, duration=frame_duration, loop=0, ) from IPython.display 
import HTML HTML("<img src='covid.gif'>") ```
github_jupyter
# **Demos of MultiResUNet models implemented on the CelebAMaskHQ dataset** In this notebook, we display demos of the models tested using the mechanisms mentioned in [MultiResUNet.ipynb](https://drive.google.com/file/d/1H26uaN10rU2V7MnX8vRdE3J0ZMoO7wq2/view?usp=sharing). This demo should work irrespective of any access issues to the author's drive. If any errors in downloading models may occur, kindly notify me. We use gdown to install the JSON files (per model) and their weights for testing, and user input image is utilised in order to provide output. Note : Due to Colab's anomalous nature with matplotlib.pyplot, we will store the final images within the machine. PS : If you would like to see the results for all the demo networks, utilise the 'Run All' feature in the Runtime submenu. Run the snippet below in order to download all the models and their corresponding weights: You can download the models by going through the links mentioned in the comments below- ``` import os import cv2 import numpy as np from tqdm import tqdm import matplotlib.pyplot as plt from keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate, BatchNormalization, Activation, add from keras.models import Model, model_from_json from keras.optimizers import Adam from keras.layers.advanced_activations import ELU, LeakyReLU from keras.utils.vis_utils import plot_model from keras import backend as K from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report !pip install gdown # The testing model - # https://drive.google.com/file/d/1-0H74nlLkTnnvSkMG-MPuhuX70LXBOlo/view?usp=sharing # https://drive.google.com/file/d/1--fVnHrfDpujmdX9OWT6PVmNQjQ1mPX8/view?usp=sharing # The F10k model - # https://drive.google.com/file/d/1-GhqkzttGHAkGi0r5XEgowVg8wLcgmY1/view?usp=sharing # https://drive.google.com/file/d/1-CPthO3qPHE_IeykqyG_bndf9UuQbHAO/view?usp=sharing # The FD10 model - # 
https://drive.google.com/file/d/1yhWML6lThGv_MSGkUOuVhkoD7H-u6gwm/view?usp=sharing # https://drive.google.com/file/d/12S277zHGFN9YPKcX7M7hEwxkokvT5bNt/view?usp=sharing # For the test model : !gdown --id 1-0H74nlLkTnnvSkMG-MPuhuX70LXBOlo --output modelP5.json !gdown --id 1--fVnHrfDpujmdX9OWT6PVmNQjQ1mPX8 --output modelW.h5 # For the F10k model : !gdown --id 1-GhqkzttGHAkGi0r5XEgowVg8wLcgmY1 --output modelP5f10.json !gdown --id 1-CPthO3qPHE_IeykqyG_bndf9UuQbHAO --output modelWf10.h5 # For the FD10 model : !gdown --id 1yhWML6lThGv_MSGkUOuVhkoD7H-u6gwm --output modelP5FD.json !gdown --id 12S277zHGFN9YPKcX7M7hEwxkokvT5bNt --output modelWFD.h5 !ls ``` ### Image input and pre processing : Run the cell below in order to upload the required images to be tested. Accepts .jpg format images. ``` from google.colab import files img_arr = [] uploaded = files.upload() for fn in uploaded.keys(): print('User uploaded file "{name}" with length {length} bytes'.format( name=fn, length=len(uploaded[fn]))) if(fn.split('.')[-1]=='jpg'or fn.split('.')[-1]=='jpeg'): img = cv2.imread(fn, cv2.IMREAD_COLOR) img_arr.append(cv2.resize(img,(256, 192), interpolation = cv2.INTER_CUBIC)) else: print(fn+' is not of the valid format.') img_loaded = img_arr img_arr = np.array(img_arr) img_arr = img_arr / 255 print('Number of images uploaded : '+str(len(img_arr))) img_names = list(uploaded.keys()) ``` ## Boilerplate code to run model : This code provides outputs in the format(image-mask-imagewithmask). Your results will be stored under the Files Section (on the left side of the website) in the folder specified by the output during runtime. In order to allow for automatic downloading of the images, just uncomment the ``` #files.download('results_'+model_json.split('.')[0]+'/result_'+str(img_names[i].split('.')[0])+'.png') ``` section of the code below. 
(NOTE : This feature works only for **Google Chrome** users) ``` from google.colab import files from keras.models import model_from_json def RunModel( model_json, model_weights, image_array, img_names, img_loaded): try: os.makedirs('results_'+model_json.split('.')[0]) except: pass print('Your results will be stored under the Files Section in the folder '+'results_'+model_json.split('.')[0]) # load json and create model json_file = open(model_json, 'r') loaded_model_json = json_file.read() json_file.close() loaded_model = model_from_json(loaded_model_json) # load weights into new model loaded_model.load_weights(model_weights) print("Loaded model from disk") loaded_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) # Used in order to ease naming i = 0 yp = loaded_model.predict(x=np.array(image_array), batch_size=1, verbose=1) yp = np.round(yp,0) for ip_img in image_array: # Modification of mask in order to mimic sample output t = yp[i] a = np.concatenate((t,t),axis = 2) b = np.concatenate((a,t),axis = 2) b = b * 255 plt.figure(figsize=(20,10)) plt.subplot(1,3,1) plt.imshow(cv2.cvtColor(np.array(img_loaded[i]), cv2.COLOR_BGR2RGB)) plt.title('Input') plt.subplot(1,3,2) plt.imshow(yp[i].reshape(yp[i].shape[0],yp[i].shape[1])) plt.title('Prediction') plt.subplot(1,3,3) plt.imshow(cv2.cvtColor(cv2.addWeighted(np.uint8(img_loaded[i]), 0.5, np.uint8(b), 0.5, 0.0),cv2.COLOR_BGR2RGB)) plt.title('Prediction with mask') plt.savefig('results_'+model_json.split('.')[0]+'/result_'+str(img_names[i].split('.')[0])+'.png',format='png') plt.close() # Uncomment the line below to allow automatic downloading #files.download('results_'+model_json.split('.')[0]+'/result_'+str(img_names[i].split('.')[0])+'.png') i += 1 ``` # **Model F10k:** This model has been trained on a 80%-20% split amongst the first 10,000 images of the dataset. 
* Number of epochs: 20 * Time taken : [1:29:40, 267.74s/it] * Jacard Index(final) : 0.8452393049122288 * Dice Coefficient(final) : 0.9139967587791317 * Accuracy(final) : 99.80 % ``` RunModel('modelP5f10.json','modelWf10.h5', img_arr, img_names, img_loaded) ``` # **Model FD10:** The dataset for this model has been split twice: 1. Three sets of 10,000 images each. 2. Each set trained on a 80%-20% split. Each split of the data was used to train the model for 10 epochs each. The split was performed in order to compensate for RAM bottlenecks in our system. * Number of epochs: 10+10+10 = 30 * Time taken : [2:29:04, 2958.94s/it] * Jacard Index(final) : 0.8331437322988224 * Dice Coefficient(final) : 0.9071035040844939 * Accuracy(final) : 99.70 % ``` RunModel('modelP5FD.json','modelWFD.h5',img_arr,img_names,img_loaded) ``` # **EXTRA : Base Testing Model** **This model returns only the left eye segmented as a mask and was made for TESTING PURPOSES ONLY** This model has been trained on a 80%-20% split amongst the first 250 images of the dataset, and was used in order to test the original model mentioned in the paper. * Number of epochs: 10 * Time taken : [0:20:40] * Jacard Index(final) : 0.39899180087352404 * Dice Coefficient(final) : 0.495551362130639337 * Accuracy(final) : 99.80 % **NOTE : THIS MODEL IS SIMPLY A PRECURSOR TO OUR ACTUAL MODELS MENTIONED ABOVE AND SHOULD NOT BE CONSIDERED AS FEASIBLE FOR ANY ASPECTS** ``` RunModel('modelP5.json','modelW.h5',img_arr,img_names,img_loaded) ```
github_jupyter
# 阅读笔记 ** 作者:方跃文 ** ** Email: fyuewen@gmail.com ** ** 时间:始于2017年9月12日, 结束写作于 ** ** 第二章笔记始于2017年9月12日,第一阶段结束语2017年9月28日晚(剩余两个分析案例)** # 第二章 引言 ** 时间: 2017年9月12日 ** 尽管数据处理的目的和领域都大不相同,但是利用python数据处理时候基本都需要完成如下几个大类的任务: 1) 与外界进行数据交互 2) 准备:对数据进行清理、修整、规范化、重塑、切片切块 3) 转换:对数据集做一些数学和统计运算以产生新的数据集,e.g. 根据分组变量对一个大表进行聚合 4) 建模和计算:将数据跟统计模型、机器学习算法联系起来 5) 展示:创建交换式的或者静态的图片或者文字摘要。 ## 来自bit.ly的1.usa.gov数据 ch02中的usagov_bitly_data2012-03-16-1331923249.txt是bit.ly网站收集到的每小时快照数据。文件中的格式为JavaScript Object Notation (JSON)——一种常用的web数据格式。例如如果我们只读取某个文件中的第一行,那么所看到的结果是下面这样: ``` path = "./pydata-book/ch02/usagov_bitly_data2012-03-16-1331923249.txt" open(path).readline() print(path) print(type(path)) import json datach02= [json.loads(line) for line in open(path)] ``` python有许多内置或第三方模块可以将JSON字符转换成python字典对象。这里,我将使用json模块及其loads函数逐行加载已经下载好的数据文件: ``` import json path = "./pydata-book/ch02/usagov_bitly_data2012-03-16-1331923249.txt" records = [json.loads(line) for line in open(path)] ``` 上面最后一行表达式,叫做“列表推导式 list comprehension”。这是一种在一组字符串(或一组别的对象)上执行一条相同操作(如json.loads)的简洁方式。在一个打开的文件句柄上进行迭代即可获得一个由行组成的序列。现在,records对象就成为一组python字典了。 ``` records[0] records[0]['tz'] ``` ### 用纯Python代码对时区进行排序 ** 时间: 2017年9月26日 ** 现在,我们想对时区进行计数,处理的方法有多种。 我们首先考虑的是利用**列表推导式**取出一组时区: ``` time_zones = [rec['tz'] for rec in records] ``` 然而我们发现上面这个出现了‘tz'的keyerror,这是因为并不是所有记录里面都有tz这个字段,为了让程序判断出来,我们需要加上if语句,即 ``` time_zones = [i['tz'] for i in records if 'tz' in i] time_zones[:2] time_zones = [rec['tz'] for rec in records if 'tz' in rec] time_zones[:10] ``` 我们从上面可以看到,的确有些时区字段是空的。此处,为了对时区进行计算,介绍两种办法。 ** 第一种,只使用python标准库** ``` #这种方法是在遍历时区的过程中将计数值保留在字典中: def get_counts(sequence): counts = {} for x in sequence: if x in counts: counts[x] += 1 else: counts[x] = 1 return counts #今天回头看这段代码发现看的不是很明白,特别是我在下面这个cell中, #利用了上述的代码,发现这个结果让人看了有点费解。 #11th Jan. 2018 def get_counts(sequence): counts = {} for x in sequence: if x in counts: counts[x] += 1 else: counts[x] = 1 return counts sequence1={1,23,434,53,23,24} a=get_counts(sequence1) a[23] #11th Jan. 
2018 ``` 非常了解Python标准库的话,可以将上述代码写得更加精简: ``` from collections import defaultdict def get_counts2(sequence): counts = defaultdict(int) #所有的值都会被初始化为0 for x in sequence: counts[x] += 1 return counts ``` 上述两种写法中,都将代码写到了函数中。这样的做法,是为了代码段有更高的可重要性,方便对时区进行处理。此处我们只需要将时区 time_zones 传入即可: ``` def get_counts(sequence): counts = {} for x in sequence: if x in counts: counts[x] += 1 else: counts[x] = 1 return counts counts = get_counts(time_zones) counts['America/New_York'] len(time_zones) ``` 如果要想得到前10位的时区及其计数值,我们需要用到一些有关字典的处理技巧: ``` def top_counts(count_dict, n =10): value_key_pairs = [(count, tz) for tz, count in count_dict.items()] value_key_pairs.sort() return value_key_pairs[-n:] top_counts(counts) ``` 我们还可在python标准库中找到collections.Counter类,它能使这个任务更加简单: ``` from collections import Counter counts = Counter(time_zones) counts.most_common(10) ``` **第二种,用pendas对时区进行计数** *DataFrame* 是pendas中最重要的数据结构,它用于将数据表示为一个表格。从一组原始记录中创建DataFrames是很简单的: ``` from pandas import DataFrame, Series import pandas as pd; import numpy as np frame = DataFrame(records) frame frame['tz'][:10] ``` 这里frame的输出形式是摘要试图(summary view),主要是用于较大的DataFrame对象。frame['tz']所返回的Series对象有一个value_counts方法,该方法可以让我们得到所需的信息: ``` tz_counts = frame['tz'].value_counts() tz_counts[:10] ``` 现在,我们想用matplotlib为这段数据生成一张图片。为此,我们先给记录中未知或缺失的时区天上一个替代值。fillna 函数可以替换缺失值(NA),而未知值(空字符串)可以通过布尔型数据索引加以替换: ``` clean_tz = frame['tz'].fillna('Missing') clean_tz[clean_tz == ''] = 'Unknown' tz_counts = clean_tz.value_counts() tz_counts[:10] ``` 利用tz_counts对象的plot方法,我们开得到一张水平条形图: ``` %matplotlib inline tz_counts[:10].plot(kind='barh', rot=0) ``` 我们还可以对这种数据进行很多的处理。比如说,a字段含有执行URL短缩操作的浏览器、设备、应用程序的相关信息: ``` frame['a'][1] frame['a'][50] frame['a'][51] ``` 将这些“agent"字符串(即浏览器的USER——AGENT)中的所有信息都解析出来是一件挺枯燥的工作。不过我们只要掌握了python内置的字符串函数和正则表达式,事情就好办许多了。 比如,我们可以将这种字符串的第一节(与浏览器大致呼应)分离出来并得到另外一份用户行为摘要: ``` results = Series([x.split()[0] for x in frame.a.dropna()]) results[:5] results.value_counts()[:8] ``` 
现在假设我们想按Windows和非Windows用户对时区统计信息进行分解。为了简单,我们假定只要agent字符串中包含有"Windows"就认为该用户为Windows用户。由于有的agent确实,我们首先将它们从数据中移除: ``` cframe = frame[frame.a.notnull()] ``` 接下来,根据a值计算出各行是否是Windows: ``` operating_system = np.where(cframe['a'].str.contains('Windows'), 'Windows','Not Windows') operating_system[:5] #注意这句代码执行后的输出跟原书不同 ``` 接下来可以根据时区和新的到的操作系统列表对数据进行分组了: ``` by_tz_os = cframe.groupby(['tz', operating_system]) ``` 然后通过size对分组结果进行计数(类似于上面的value_counts函数),并利用unstack对计数结果进行重塑: ``` agg_counts = by_tz_os.size().unstack().fillna(0) agg_counts[:10] ``` 最后我们来选取最常出现的时区。为了达到这个目的,我们根据agg_counts中的行数构造了一个间接索引数组: ``` #用于按升序排列 indexer = agg_counts.sum(1).argsort() indexer[:10] ``` 然后我们通过过take按照这个舒徐截取了最后的10行: ``` count_subset = agg_counts.take(indexer)[-10:] count_subset ``` 这里可以生成一张条形图。我们将使用stacked = True来生成一张堆积条形图: ``` %matplotlib inline normed_subset = count_subset.div(count_subset.sum(1), axis=0) normed_subset.plot(kind='barh', stacked = True) ``` 这里所用到的所有方法都会在本书后续的章节中详细讲解。(我觉得这句话作者应该早点讲,害的我一直不敢继续读下去,原来这只是一个长长的说明案例啊) ## MovieLens 1M数据集 GroupLens Research 采集了从上世纪九十年代到本世纪初MovieLens用户提供的电影评分数据。这些数据中包括电影评分、电影元数据(风格和年代)以及用户的人口学统计数据(性别年龄等)。基于**机器学习算法**的推荐系统一般都会对此类数据感兴趣。虽然这本书不会详细介绍机器学习技术,不会可以让我们学习如何对数据进行切片切块以满足需求。 MovieLens 1M数据集包含了来自6000名用户对4000部电影的100万条评分数据。它分为三个表:评分、用户信息和电源信息。可以通过pandas.read_table将各个表读到一个pandas DataFrame对象中: ``` import pandas as pd unames = ['user_id', 'gender', 'age', 'occupation', 'zip'] users = pd.read_table('pydata-book/ch02/movielens/users.dat', sep='::', header=None, names = unames) rnames = ['user_id', 'movie_id', 'rating', 'timestamp'] ratings = pd.read_table('pydata-book/ch02/movielens/ratings.dat', sep='::', header=None, names = rnames) mnames = ['movie_id', 'title', 'genres'] movies = pd.read_table('pydata-book/ch02/movielens/movies.dat', sep='::', header=None, names = mnames) users[:5] ratings[:5] movies[:5] ratings[:10] ``` 注意,数据和职业是以编码形式给出的。他们的具体含义请参考该数据集的README文件。 
分析散步在三个表中的数据不是件轻松的事情。假设我们想根据性别和年龄计算某电影的平均得分,如果将所有数据都合并为一个表中的话问题就简单多了。我们先用pandas的merge函数将ratings跟users合并到一起,然后再将movies合并进去。**pandas会根据列明的重叠情况推断出哪些列是合并(或连接)键** ``` data = pd.merge(pd.merge(ratings, users), movies) data[:10] ``` 现在我们就可以根据任意个域用户或电源属性对评分数据进行聚合操作了。为了按性别计算每部电源的平均分,我们可以使用pivot_table方法: ``` #书中原文的代码是 mean_ratings = data.pivot_table('rating', rows='title', cols='gender',aggfunc='mean') ``` 显然上面执行通不过,从错误信息看根本没有‘rows’这个参数的功能。我本来想放弃这个代码了,但是还是留了个心眼,去Google了一下,发现去年有人讨论了这个问题:[stackoverflow地址](https://stackoverflow.com/questions/35318269/typeerror-pivot-table-got-an-unexpected-keyword-argument-rows)。 解决方案是 >将 >mean_ratings = data.pivot_table('rating', rows='title', cols='gender', aggfunc='mean') >改成 >mean_ratings = data.pivot_table('rating', index='title', columns='gender', aggfunc='mean') 原因是: > 书中的代码是旧的并且已经被移除了的语法。 ``` mean_ratings = data.pivot_table('rating', index='title', columns='gender', aggfunc='mean') mean_ratings[:5] ``` 上述操作产生了另一个DataFrame,其内容为电源平均得分,行作为电影名。列标为性别。现在,我们打算过滤掉评分数据不够250条的电影。为了达到这个目的,可以先对title进行分组,然后利用size()得到的一个含有各个电影分组大小的Series对象: ``` ratings_by_title = data.groupby('title').size() ratings_by_title[0:10] active_titles = ratings_by_title.index[ratings_by_title >= 250] active_titles ``` 上述所得到的索引中含有评分数据大于250条的电影名称,然后我们就可以据此从前面的mean_ratings中选取所需的行了: ``` mean_ratings = mean_ratings.ix[active_titles] #书中原文用了mean_ratings.ix 但是ix其实已经被弃用了 mean_ratings = mean_ratings.loc[active_titles] mean_ratings ``` 为了了解女性观众最喜欢的电源,我们可以对F列降序: ``` top_female_ratings = mean_ratings.sort_index(by='F', ascending=False) top_female_ratings = mean_ratings.sort_values(by='F', ascending=False) top_female_ratings[:10] ``` ### 计算评分分歧 假设我们想要找出男性和女性观众分歧最大的电影。一个半法是给mean_ratings加上一个用于存放平均得分之差的列,并对它进行排序: ``` mean_ratings['diff'] = mean_ratings['M'] - mean_ratings['F'] ``` 按‘diff'排序即可得到分歧最大且女性观众更喜欢的电影: ``` sorted_by_diff = mean_ratings.sort_index(by = 'diff') sorted_by_diff = mean_ratings.sort_values(by='diff') sorted_by_diff[:15] ``` 对排序结果反序并取出10行,得到的就是男性更喜欢的电影啦: ``` 
sorted_by_diff[::-1][:15] ``` 如果只想找出分歧最大的电影并且不考虑性别因素,则可以计算得分数据的方差或者标准差: ``` #根据电影名称分组的得分数据的标准差 rating_std_by_title = data.groupby('title')['rating'].std() #根据active_title 进行过滤 rating_std_by_title = rating_std_by_title.loc[active_titles] #根据值对Series进行降序排列 rating_std_by_title.order(ascending=False)[:10] #上一个书中源代码中的order已经被弃用。最新版的可以使用sort_values rating_std_by_title.sort_values(ascending=False)[:10] ``` 作者按: > 可能你已经注意到了,电影分类是以“|”分隔符给出的。如果想对电源的分类进行分析的话,就需要先将其转换成更有用的形式才行。本书后续章节将给出处理方法,到时还需用到这个数据。
github_jupyter
# References ## Custom Code **Doesnt Work** ``` import csv from itertools import combinations def read_data(file_loc='/content/GroceryStoreDataSet.csv'): trans = dict() with open(file_loc) as f: filedata = csv.reader(f, delimiter=',') count = 0 for line in filedata: count += 1 trans[count] = list(set(line)) return trans def frequence(items_lst, trans, check=False): items_counts = dict() for i in items_lst: temp_i = {i} if check: temp_i = set(i) for j in trans.items(): if temp_i.issubset(set(j[1])): if i in items_counts: items_counts[i] += 1 else: items_counts[i] = 1 return items_counts def support(items_counts, trans): support = dict() total_trans = len(trans) for i in items_counts: support[i] = items_counts[i]/total_trans return support def association_rules(items_grater_then_min_support): rules = [] dict_rules = {} for i in items_grater_then_min_support: dict_rules = {} if type(i) != type(str()): i = list(i) temp_i = i[:] for j in range(len(i)): k = temp_i[j] del temp_i[j] dict_rules[k] = temp_i temp_i = i[:] rules.append(dict_rules) temp = [] for i in rules: for j in i.items(): if type(j[1]) != type(str()): temp.append({tuple(j[1])[0]: j[0]}) else: temp.append({j[1]: j[0]}) rules.extend(temp) return rules def confidence(associations, d, min_confidence): ans = {} for i in associations: for j in i.items(): if type(j[0]) == type(str()): left = {j[0]} else: left = set(j[0]) if type(j[1]) == type(str()): right = {j[1]} else: right = set(j[1]) for k in d: if type(k) != type(str()): if left.union(right) - set(k) == set(): up = d[k] if len(right) == len(set(k)) and right - set(k) == set(): down = d[k] else: if len(right) >= len({k}): if right - {k} == set(): down = d[k] elif len(right) <= len({k}): if {k} - right == set(): down = d[k] if up/down >= min_confidence: ans[tuple(left)[0]] = right, up/down, up, down print(ans) def main(min_support, min_confidence, file_loc): trans = read_data() number_of_trans = [len(i) for i in trans.values()] items_lst = set() 
itemcount_track = list() for i in trans.values(): for j in i: items_lst.add(j) store_item_lst = list(items_lst)[:] items_grater_then_min_support = list() items_counts = frequence(items_lst, trans) itemcount_track.append(items_counts) items_grater_then_min_support.append({j[0]:j[1] for j in support(items_counts, trans).items() if j[1]>min_support}) for i in range(2, max(number_of_trans)+1): item_list = combinations(items_lst, i) items_counts = frequence(item_list, trans, check=True) itemcount_track.append(items_counts) if list({j[0]:j[1] for j in support(items_counts, trans).items() if j[1]>min_support}.keys()) != []: items_grater_then_min_support.append({j[0]:j[1] for j in support(items_counts, trans).items() if j[1]>min_support}) d = {} {d.update(i) for i in itemcount_track} associations = association_rules(items_grater_then_min_support[len(items_grater_then_min_support)-1]) associations_grater_then_confidene = confidence(associations, d, min_confidence) main(0.01, 0.7, 'GroceryStoreDataSet.csv') ``` ## Library based ``` !pip install apyori import apyori from apyori import apriori # Creating Sample Transactions transactions = [ ['Milk', 'Bread', 'Saffron'], ['Milk', 'Saffron'], ['Bread', 'Saffron','Wafer'], ['Bread','Wafer'], ] # Generating association rules Rules = list(apyori.apriori(transactions, min_support=0.5, min_confidence=0.5)) # Extracting rules from the object for i in range(len(Rules)): LHS=list(Rules[i][2][0][0]) RHS=list(Rules[i][2][0][1]) support=Rules[i][1] confidence=Rules[i][2][0][2] lift=Rules[i][2][0][3] print("LHS:",LHS,"--","RHS:",RHS) print("Support:",support) print("Confidence:",confidence) print("Lift:",lift) print(10*"----") !pip install apyori import numpy as np # linear algebra import pandas as pd # data processing import plotly.express as px import apyori from apyori import apriori data = pd.read_csv("/content/Groceries_dataset.csv") data.head() print("Top 10 frequently sold products(Tabular Representation)") x = 
data['itemDescription'].value_counts().sort_values(ascending=False)[:10] fig = px.bar(x= x.index, y= x.values) fig.update_layout(title_text= "Top 10 frequently sold products (Graphical Representation)", xaxis_title= "Products", yaxis_title="Count") fig.show() rules = apriori(data, min_support = 0.00030, min_confidence = 0.05, min_lift = 3, max_length = 2, target = "rules") association_results = list(rules) print(association_results[0]) for item in association_results: pair = item[0] items = [x for x in pair] print("Rule : ", items[0], " -> " + items[1]) print("Support : ", str(item[1])) print("Confidence : ",str(item[2][0][2])) print("Lift : ", str(item[2][0][3])) print("=============================") ```
github_jupyter
'''
ID3 Algorithm
Muskan Pandey
'''
import csv
import math
import os


def load_csv_to_header_data(filename):
    """Read a CSV file and return a dict with 'header', 'rows' and the
    name<->column-index maps used throughout this module."""
    fpath = os.path.join(os.getcwd(), filename)
    # Fix: the original opened the file without ever closing it;
    # newline='' is the mode the csv module documents for reader input.
    with open(fpath, newline='') as csv_file:
        all_row = [r for r in csv.reader(csv_file)]

    headers = all_row[0]
    idx_to_name, name_to_idx = get_header_name_to_idx_maps(headers)

    data = {
        'header': headers,
        'rows': all_row[1:],
        'name_to_idx': name_to_idx,
        'idx_to_name': idx_to_name
    }
    return data


def get_header_name_to_idx_maps(headers):
    """Return (idx_to_name, name_to_idx) lookup dicts for a header list."""
    name_to_idx = {}
    idx_to_name = {}
    for i, header in enumerate(headers):
        name_to_idx[header] = i
        idx_to_name[i] = header
    return idx_to_name, name_to_idx


def project_columns(data, columns_to_project):
    """Return a copy of `data` restricted to `columns_to_project`.

    Fix: rows are copied element-wise before cells are deleted, so the
    caller's original row lists are no longer mutated as a side effect.
    """
    data_h = list(data['header'])
    data_r = [list(r) for r in data['rows']]

    all_cols = list(range(0, len(data_h)))
    columns_to_project_ix = [data['name_to_idx'][name] for name in columns_to_project]
    columns_to_remove = [cidx for cidx in all_cols if cidx not in columns_to_project_ix]

    # Delete from the right so earlier indices stay valid.
    for delc in sorted(columns_to_remove, reverse=True):
        del data_h[delc]
        for r in data_r:
            del r[delc]

    idx_to_name, name_to_idx = get_header_name_to_idx_maps(data_h)

    return {'header': data_h,
            'rows': data_r,
            'name_to_idx': name_to_idx,
            'idx_to_name': idx_to_name}


def get_uniq_values(data):
    """Map each attribute name to the set of distinct values in its column."""
    idx_to_name = data['idx_to_name']

    val_map = {}
    for att_name in idx_to_name.values():
        val_map[att_name] = set()

    for data_row in data['rows']:
        for idx, att_name in idx_to_name.items():
            # Bug fix: the original guarded this with
            # `if val not in val_map.keys()`, which compares a cell VALUE
            # against the attribute-NAME keys and silently dropped any value
            # that happened to equal a column name. Every observed value
            # must be recorded unconditionally.
            val_map[att_name].add(data_row[idx])
    return val_map


def get_class_labels(data, target_attribute):
    """Count occurrences of each value of `target_attribute` in the rows."""
    col_idx = data['name_to_idx'][target_attribute]
    labels = {}
    for r in data['rows']:
        val = r[col_idx]
        labels[val] = labels.get(val, 0) + 1
    return labels


def entropy(n, labels):
    """Shannon entropy (in bits) of a label-count dict over n samples."""
    ent = 0
    for label in labels.keys():
        p_x = labels[label] / n
        ent += - p_x * math.log(p_x, 2)
    return ent


def partition_data(data, group_att):
    """Split rows into sub-datasets keyed by their value of `group_att`."""
    partitions = {}
    partition_att_idx = data['name_to_idx'][group_att]
    for row in data['rows']:
        row_val = row[partition_att_idx]
        if row_val not in partitions.keys():
            partitions[row_val] = {
                'name_to_idx': data['name_to_idx'],
                'idx_to_name': data['idx_to_name'],
                'rows': list()
            }
        partitions[row_val]['rows'].append(row)
    return partitions


def avg_entropy_w_partitions(data, splitting_att, target_attribute):
    """Weighted average entropy of the target after splitting on splitting_att.

    Returns (avg_entropy, partitions) so the caller can reuse the split.
    """
    data_rows = data['rows']
    n = len(data_rows)
    partitions = partition_data(data, splitting_att)

    avg_ent = 0
    for partition_key in partitions.keys():
        partitioned_data = partitions[partition_key]
        partition_n = len(partitioned_data['rows'])
        partition_labels = get_class_labels(partitioned_data, target_attribute)
        partition_entropy = entropy(partition_n, partition_labels)
        # Weight each partition's entropy by its share of the rows.
        avg_ent += partition_n / n * partition_entropy

    return avg_ent, partitions


def most_common_label(labels):
    """Return the label with the highest count."""
    return max(labels, key=lambda k: labels[k])


def id3(data, uniqs, remaining_atts, target_attribute):
    """Recursively build an ID3 decision tree.

    Returns either {'label': ...} for a leaf, or
    {'attribute': ..., 'nodes': {value: subtree}} for an internal node.
    """
    labels = get_class_labels(data, target_attribute)

    node = {}

    # Pure node: every row carries the same class label.
    if len(labels.keys()) == 1:
        node['label'] = next(iter(labels.keys()))
        return node

    # No attributes left to split on: fall back to the majority class.
    if len(remaining_atts) == 0:
        node['label'] = most_common_label(labels)
        return node

    n = len(data['rows'])
    ent = entropy(n, labels)

    max_info_gain = None
    max_info_gain_att = None
    max_info_gain_partitions = None

    # Pick the attribute with the largest information gain.
    for remaining_att in remaining_atts:
        avg_ent, partitions = avg_entropy_w_partitions(data, remaining_att, target_attribute)
        info_gain = ent - avg_ent
        if max_info_gain is None or info_gain > max_info_gain:
            max_info_gain = info_gain
            max_info_gain_att = remaining_att
            max_info_gain_partitions = partitions

    if max_info_gain is None:
        node['label'] = most_common_label(labels)
        return node

    node['attribute'] = max_info_gain_att
    node['nodes'] = {}

    remaining_atts_for_subtrees = set(remaining_atts)
    remaining_atts_for_subtrees.discard(max_info_gain_att)

    for att_value in uniqs[max_info_gain_att]:
        if att_value not in max_info_gain_partitions.keys():
            # Value never observed in this subset: predict the majority class.
            node['nodes'][att_value] = {'label': most_common_label(labels)}
            continue
        partition = max_info_gain_partitions[att_value]
        node['nodes'][att_value] = id3(partition, uniqs, remaining_atts_for_subtrees, target_attribute)

    return node


def pretty_print_tree(root):
    """Print the tree as a set of human-readable IF ... THEN rules."""
    stack = []
    rules = set()

    def traverse(node, stack, rules):
        if 'label' in node:
            stack.append(' THEN ' + node['label'])
            rules.add(''.join(stack))
            stack.pop()
        elif 'attribute' in node:
            ifnd = 'IF ' if not stack else ' AND '
            stack.append(ifnd + node['attribute'] + ' EQUALS ')
            for subnode_key in node['nodes']:
                stack.append(subnode_key)
                traverse(node['nodes'][subnode_key], stack, rules)
                stack.pop()
            stack.pop()

    traverse(root, stack, rules)
    # sorted() makes the printout deterministic (a set has no stable order).
    print(os.linesep.join(sorted(rules)))


def main():
    """Train an ID3 tree on tennis.csv and print its decision rules."""
    config = {'data_file': 'tennis.csv',
              'data_mappers': [],
              'data_project_columns': ['Outlook', 'Temperature', 'Humidity', 'Windy', 'PlayTennis'],
              'target_attribute': 'PlayTennis'}

    data = load_csv_to_header_data(config['data_file'])
    data = project_columns(data, config['data_project_columns'])

    target_attribute = config['target_attribute']
    remaining_attributes = set(data['header'])
    remaining_attributes.remove(target_attribute)

    uniqs = get_uniq_values(data)

    root = id3(data, uniqs, remaining_attributes, target_attribute)
    pretty_print_tree(root)


if __name__ == "__main__":
    main()
github_jupyter
WNixalo 2018/2/11 17:51 [Homework No.1](https://github.com/fastai/numerical-linear-algebra/blob/master/nbs/Homework%201.ipynb) ``` %matplotlib inline import numpy as np import torch as pt import matplotlib.pyplot as plt plt.style.use('seaborn') ``` ## 1. --- 1. Consider the polynomial $p(x) = (x-2)^9 = x^9 - 18x^8 + 144x^7 - 672x^6 + 2016x^5 - 4032x^4 + 5376x^3 - 4608x^2 + 2304x - 512$ a. Plot $p(x)$ for $x=1.920,\,1.921,\,1.922,\ldots,2.080$ evaluating $p$ via its coefficients $1,\,,-18,\,144,\ldots$ b. Plot the same plot again, now evaluating $p$ via the expression $(x-2)^9$. c. Explain the difference. *(The numpy method linspace will be useful for this)* ``` def p(x, mode=0): if mode == 0: return x**9 - 18*x**8 + 144*x**7 - 672*x**6 + 2016*x**5 - 4032*x**4 + 5376*x**3 - 4608*x**2 + 2304*x - 512 else: return (x-2)**9 ``` WNx: *wait, what does it mean to evaluate a function by its coefficients? How is that different than just evaluating it?* --> *does she mean to ignore the exponents? Because that would make* ***b.*** make more sense.. I .. think.* ``` # Signature: np.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None) np.linspace(1.92, 2.08, num=161) # np.arange(1.92, 2.08, 0.001) start = 1.92 stop = 2.08 num = int((stop-start)/0.001 + 1) # =161 x = np.linspace(start, stop, num) def p_cœf(x): return x - 18*x + 144*x - 672*x + 2016*x - 4032*x + 5376*x - 4608*x + 2304*x - 512 def p_cœf_alt(x): return p(x,0) def p_ex9(x): return p(x,1) ``` WNx: *huh.. 
this is a thing.* ```Init signature: np.vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, signature=None)``` ``` vec_pcf = np.vectorize(p_cœf) vec_pcf_alt = np.vectorize(p_cœf_alt) vec_px9 = np.vectorize(p_ex9) y_cf = vec_pcf(x) y_cf_alt = vec_pcf_alt(x) y_x9 = vec_px9(x) y = p(x) ``` **a**, **b**: ``` fig = plt.figure(1, figsize=(12,12)) ax = fig.add_subplot(3,3,1) ax.set_title('Coefficients') ax.plot(y_cf) ax = fig.add_subplot(3,3,2) ax.set_title('$(x - 2)^2$') ax.plot(y_x9) ax = fig.add_subplot(3,3,3) ax.set_title('$p(x)$') ax.plot(y) ax = fig.add_subplot(3,3,4) ax.set_title('Coefficients (Alternate)') ax.plot(y_cf_alt) ax = fig.add_subplot(3,3,5) ax.set_title('All') # ax.plot(y_cf) ax.plot(y_x9) ax.plot(y_cf_alt) ax.plot(y); ``` WNx: *I think my original interpretation of what "evaluate p by its coefficients" meant was wrong, so I'm leaving it out of the final "All" plot, it just drowns everything else out.* **c:** WNx: $p(x) = (x-2)^9$ is the 'general' version of the Coefficient interpretation of $p$. It captures the overall trend of $p$ without all the detail. Kind of an average -- gives you the overall picture of what's going on. For instance you'd compresss signal $p$ to its $(x-2)^9$ form, instead of saving its full coeff form. ## 2. --- 2\. How many different double-precision numbers are there? Express your answer using powers of 2 WNx: $2^{64} - (2^{53} - 2^0$) for IEEE 754 64-bit Double. See: [Quora Link](https://www.quora.com/How-many-distinct-numbers-can-be-represented-as-double-precision) ## 3. --- 3\. Using the updated [Numbers Every Programmer Should Know](https://people.eecs.berkeley.edu/~rcs/research/interactive_latency.html), how much longer does a main memory reference take than an L1 cache look-up? How much longer does a disk seek take than a main memory reference? ``` 3e-3/1e-7 ``` Main memory reference takes **100x** longer than an L1 cache lookup. Disk seek takes **30,000x** longer than a main memory reference. 
L1 cache: `1e-9`s. MMRef: `1e-7`s. DS: `3e-3`s ## 4. --- 4\. From the Halide Video, what are 4 ways to traverse a 2d array? WNx: **Scanline Order**: Sequentially in Y, within that: Sequentially in X. (row-maj walk) (or): Transpose X&Y and do a Column-Major Traversal. (walk down cols first) **Serial Y, Vectorize X by n**: walk down x in increments (vectors) **Parallel Y, Vectorize X by n**: distribute scanlines into parallel threads Split X & Y by tiles (**Tile-Traversal**). Split X by n, Y by n. Serial Y_outer, Serial X_outer, Serial Y_inner, Serial X_inner See: [Halide Video section](https://youtu.be/3uiEyEKji0M?t=318) ## 5. --- 5\. Using the animations --- ([source](https://www.youtube.com/watch?v=3uiEyEKji0M)), explain what the benefits and pitfalls of each approach. Green squares indicate that a value is being read; red indicates a value is being written. Your answers should be longer in length (give more detail) than just two words. WNx: 1) Parallelizable across scanlines. Entire input computed before output computation. \ Poor Locality. Loading is slow and limited by system memory bandwidth. By the time the `blurred in y` stage goes to read some intermediate data, it's probably been evicted from cache. 2) Parallelizable across scanlines. Locality. \ Redundant Computation. Each point in `blurred in x` is recomputed 3 times. 3) Locality & No redundant computation. \ Serial Dependence --> Poor Parallelism. Introduction of a serial dependence in the scanlines of the output. Relying on having to compute scanline `N-1` before computing scanline `N`. We ca ## 6. --- 6\. Prove that if $A = Q B Q^T$ for some orthogonal matrix $Q$, the $A$ and $B$ have the same singular values. Orthogonal Matrix: $Q^TQ = QQ^T = I \iff Q^T = Q^{-1}$ So.. if you put matrix $B$ in between $Q$ and $Q^T$, what your doing is performing a transformation on $B$ and then performing the inverse of that transformation on it. ie: Returning $B$ to what it was originally. 
$\Rightarrow$ if $B$ is ultimately unchanged and $A=QBQ^T$ then $A=B$ (or at least same sing.vals?) This -- seems to me -- an inherent property of the orthogonal matrix $Q$. **edit**: ahhh, Singluar Values are not just the values of a matrix. Like Eigen Values, they tell something special about it [Mathematics StackEx link](https://math.stackexchange.com/questions/127500/what-is-the-difference-between-singular-value-and-eigenvalue) ``` ### some tests: # Q is I Q = np.eye(3) A = np.random.randint(-10,10,(3,3)) A Q@A@Q.T # random orthogonal matrix Q # ref: https://stackoverflow.com/a/38426572 from scipy.stats import ortho_group Q = ortho_group.rvs(dim=3) ``` WNx: gonna have to do SVD to find the singular values of $A$. Then make a matrix $B$ st. $A=QBQ^T$. *Then* check that A.σ == B.σ. [C.Mellon U. page on SVD](https://www.cs.cmu.edu/~venkatg/teaching/CStheory-infoage/book-chapter-4.pdf) From the [Lesson 2 notebook](https://github.com/fastai/numerical-linear-algebra/blob/master/nbs/2.%20Topic%20Modeling%20with%20NMF%20and%20SVD.ipynb), I think I'll start with $B$ and compute $A$ acc. to the eqn, then check σ's of both. Aha. So `σ` is `s` is `S`. The diagonal matrix of singular values. Everyone's using different names for the same thing. *bastards*. ``` # setting A & B B = np.random.randint(-100,100,(3,3)) A = Q@B@Q.T Ua, sa, Va = np.linalg.svd(A, full_matrices=False) Ub, sb, Vb = np.linalg.svd(B, full_matrices=False) # sa & sb are the singular values of A and B np.isclose(sa, sb) sa, sb ``` Woohoo! ## 7. --- 7\. What is the *stochastic* part of *stochastic gradient descent*? WNx: *Stochastic* refers to computing the gradient on random mini-batches of the input data.
github_jupyter
# Chapter 6: Physiological and Psychological State Detection in IoT # Use Case 1: Human Activity Recognition (HAR) # Model: LSTM # Step 1: Download Dataset ``` import pandas as pd import numpy as np import pickle import matplotlib.pyplot as plt from scipy import stats import tensorflow as tf import seaborn as sns from pylab import rcParams from sklearn import metrics from sklearn.model_selection import train_test_split %matplotlib inline sns.set(style='whitegrid', palette='muted', font_scale=1.5) rcParams['figure.figsize'] = 14, 8 RANDOM_SEED = 42 ``` # Have a quick look at the data ``` columns = ['user','activity','timestamp', 'x-axis', 'y-axis', 'z-axis'] df = pd.read_csv('data/WISDM_ar_v1.1_raw.txt', header = None, names = columns) df = df.dropna() df.head() ``` # Step 2: Data Exploration The columns we will be most interested in are activity, x-axis, y-axis and z-axis. # Activity-wise data distribution of the dataset ``` df['activity'].value_counts().plot(kind='bar', title='Training data by activity type', color='g'); ``` # Activiy Data Exploration ``` def plot_activity(activity, df): data = df[df['activity'] == activity][['x-axis', 'y-axis', 'z-axis']][:200] axis = data.plot(subplots=True, figsize=(16, 12), title=activity) for ax in axis: ax.legend(loc='lower left', bbox_to_anchor=(1.0, 0.5)) plot_activity("Sitting", df) plot_activity("Standing", df) plot_activity("Walking", df) plot_activity("Jogging", df) ``` # Step 3: Data preprocessing Generally, LSTM model expects fixed-length sequences as training data. As we have seen above 200 time steps contain enough information to distinguish the activities. We use that value to preprocess the dataset. 
``` N_TIME_STEPS = 200 N_FEATURES = 3 step = 20 segments = [] labels = [] for i in range(0, len(df) - N_TIME_STEPS, step): xs = df['x-axis'].values[i: i + N_TIME_STEPS] ys = df['y-axis'].values[i: i + N_TIME_STEPS] zs = df['z-axis'].values[i: i + N_TIME_STEPS] label = stats.mode(df['activity'][i: i + N_TIME_STEPS])[0][0] segments.append([xs, ys, zs]) labels.append(label) np.array(segments).shape ``` # Reshape the array/tensor standard form Let's transform it into sequences of 200 rows, each containing x, y and z. Also, apply a one-hot encoding to our labels. ``` # Reshaping segments reshaped_segments = np.asarray(segments, dtype= np.float32).reshape(-1, N_TIME_STEPS, N_FEATURES) labels = np.asarray(pd.get_dummies(labels), dtype = np.float32) # Inspect the reshaped_segments reshaped_segments.shape labels[0] # Datasplit for training and test X_train, X_test, y_train, y_test = train_test_split( reshaped_segments, labels, test_size=0.2, random_state=RANDOM_SEED) len(X_train) len(X_test) ``` # Step 4: Training Model # Model Building Our model contains 2 fully-connected and 2 LSTM layers (stacked on each other) with 64 units each. 
```
N_CLASSES = 6        # one output unit per activity class
N_HIDDEN_UNITS = 64  # width of the hidden projection and of each LSTM cell

# Function for model building
def create_LSTM_model(inputs):
    """Build the 2-layer stacked-LSTM classifier graph (TF1 static RNN).

    inputs: float32 tensor of shape [batch, N_TIME_STEPS, N_FEATURES]
            (see the placeholder created below).
    Returns the un-normalised logits tensor of shape [batch, N_CLASSES].
    """
    W = {
        'hidden': tf.Variable(tf.random_normal([N_FEATURES, N_HIDDEN_UNITS])),
        'output': tf.Variable(tf.random_normal([N_HIDDEN_UNITS, N_CLASSES]))
    }
    biases = {
        # mean=1.0 starts the hidden biases positive, in the ReLU's active region
        'hidden': tf.Variable(tf.random_normal([N_HIDDEN_UNITS], mean=1.0)),
        'output': tf.Variable(tf.random_normal([N_CLASSES]))
    }

    # [batch, time, features] -> [time, batch, features], then flatten so one
    # dense layer can be applied to every time step at once.
    X = tf.transpose(inputs, [1, 0, 2])
    X = tf.reshape(X, [-1, N_FEATURES])
    hidden = tf.nn.relu(tf.matmul(X, W['hidden']) + biases['hidden'])
    # static_rnn expects a Python list of N_TIME_STEPS tensors,
    # each of shape [batch, N_HIDDEN_UNITS].
    hidden = tf.split(hidden, N_TIME_STEPS, 0)

    # Stack 2 LSTM layers
    lstm_layers = [tf.contrib.rnn.BasicLSTMCell(N_HIDDEN_UNITS, forget_bias=1.0) for _ in range(2)]
    lstm_layers = tf.contrib.rnn.MultiRNNCell(lstm_layers)

    outputs, _ = tf.contrib.rnn.static_rnn(lstm_layers, hidden, dtype=tf.float32)

    # Get output for the last time step
    lstm_last_output = outputs[-1]

    # Classify from the final time step's hidden state only.
    return tf.matmul(lstm_last_output, W['output']) + biases['output']

# Create placeholder for the model
tf.reset_default_graph()

X = tf.placeholder(tf.float32, [None, N_TIME_STEPS, N_FEATURES], name="input")
Y = tf.placeholder(tf.float32, [None, N_CLASSES])

# Call the model function
pred_Y = create_LSTM_model(X)

# The "y_" op name is referenced later as output_node_names when the
# graph is frozen for export.
pred_softmax = tf.nn.softmax(pred_Y, name="y_")

# L2 Regularisation
L2_LOSS = 0.0015

# Penalise all trainable variables (LSTM kernels included, not just W/biases).
l2 = L2_LOSS * \
    sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = pred_Y, labels = Y)) + l2

# Define optimiser & accuracy
LEARNING_RATE = 0.0025

optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(loss)

# Accuracy = fraction of samples whose argmax prediction matches the one-hot label.
correct_pred = tf.equal(tf.argmax(pred_softmax, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))
```

# Training

Training could take time, will depend on your computing resources.
``` N_EPOCHS = 50 BATCH_SIZE = 1024 saver = tf.train.Saver() history = dict(train_loss=[], train_acc=[], test_loss=[], test_acc=[]) sess=tf.InteractiveSession() sess.run(tf.global_variables_initializer()) train_count = len(X_train) for i in range(1, N_EPOCHS + 1): for start, end in zip(range(0, train_count, BATCH_SIZE), range(BATCH_SIZE, train_count + 1,BATCH_SIZE)): sess.run(optimizer, feed_dict={X: X_train[start:end], Y: y_train[start:end]}) _, acc_train, loss_train = sess.run([pred_softmax, accuracy, loss], feed_dict={ X: X_train, Y: y_train}) _, acc_test, loss_test = sess.run([pred_softmax, accuracy, loss], feed_dict={ X: X_test, Y: y_test}) history['train_loss'].append(loss_train) history['train_acc'].append(acc_train) history['test_loss'].append(loss_test) history['test_acc'].append(acc_test) if i != 1 and i % 10 != 0: continue print(f'epoch: {i} test accuracy: {acc_test} loss: {loss_test}') predictions, acc_final, loss_final = sess.run([pred_softmax, accuracy, loss], feed_dict={X: X_test, Y: y_test}) print() print(f'final results: accuracy: {acc_final} loss: {loss_final}') #Store the model detail to disk. 
pickle.dump(predictions, open("predictions.p", "wb")) pickle.dump(history, open("history.p", "wb")) tf.train.write_graph(sess.graph_def, '.', './checkpoint/har_LSTM.pbtxt') saver.save(sess, save_path = "./checkpoint/har_LSTM.ckpt") sess.close() ``` # Step 5: Performance Evaluation ``` # Load the saved model detail for evaluation history = pickle.load(open("history.p", "rb")) predictions = pickle.load(open("predictions.p", "rb")) plt.figure(figsize=(12, 8)) plt.plot(np.array(history['train_loss']), "r--", label="Train loss") plt.plot(np.array(history['train_acc']), "b--", label="Train accuracy") plt.plot(np.array(history['test_loss']), "r-", label="Test loss") plt.plot(np.array(history['test_acc']), "b-", label="Test accuracy") plt.title("Training session's progress over Training Epochs") plt.legend(loc='upper right', shadow=True) plt.ylabel('Training Progress (Loss or Accuracy values)') plt.xlabel('Training Epoch') plt.ylim(0) plt.show() ``` # Confusion matrix ``` LABELS = ['Downstairs', 'Jogging', 'Sitting', 'Standing', 'Upstairs', 'Walking'] max_test = np.argmax(y_test, axis=1) max_predictions = np.argmax(predictions, axis=1) confusion_matrix = metrics.confusion_matrix(max_test, max_predictions) plt.figure(figsize=(16, 14)) sns.heatmap(confusion_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d"); plt.title("Confusion matrix") plt.ylabel('True Activity') plt.xlabel('Predicted Activity') plt.show(); ``` # # Step 6: Exporting the model Finally Model Exporting for IoT devices (Pi 3/Smartphones) ``` from tensorflow.python.tools import freeze_graph MODEL_NAME = 'har_LSTM' input_graph_path = 'checkpoint/' + MODEL_NAME+'.pbtxt' checkpoint_path = './checkpoint/' +MODEL_NAME+'.ckpt' restore_op_name = "save/restore_all" filename_tensor_name = "save/Const:0" output_frozen_graph_name = 'frozen_'+MODEL_NAME+'.pb' freeze_graph.freeze_graph(input_graph_path, input_saver="", input_binary=False, input_checkpoint=checkpoint_path, output_node_names="y_", 
restore_op_name="save/restore_all", filename_tensor_name="save/Const:0", output_graph=output_frozen_graph_name, clear_devices=True, initializer_nodes="") ```
github_jupyter
## Uncertainty estimation for regression We would demonstrate how to estimate the uncertainty for a regression task. In this case we treat uncertainty as a standard deviation for test data points. As an example dataset we take the kinemtic movement data from UCI database and would estimate the uncertainty prediction with log likelihood metric ``` %load_ext autoreload %autoreload 2 import numpy as np import torch from torch import nn from torch.utils.data import TensorDataset, DataLoader import torch.nn.functional as F from sklearn.metrics import accuracy_score import matplotlib.pyplot as plt from sklearn import metrics from sklearn.metrics import r2_score from alpaca.ue import MCDUE from alpaca.utils.datasets.builder import build_dataset from alpaca.utils.ue_metrics import ndcg, uq_ll from alpaca.ue.masks import BasicBernoulliMask, DecorrelationMask, LeverageScoreMask from alpaca.utils import model_builder import alpaca.nn as ann ``` ## Prepare the dataset The alpaca library has a few regression dataset provided (these datasets often used in the related scientific papers) ``` dataset = build_dataset('kin8nm', val_split=1_000) x_train, y_train = dataset.dataset('train') x_val, y_val = dataset.dataset('val') x_train.shape, y_val.shape train_ds = TensorDataset(torch.FloatTensor(x_train), torch.FloatTensor(y_train)) val_ds = TensorDataset(torch.FloatTensor(x_val), torch.FloatTensor(y_val)) train_loader = DataLoader(train_ds, batch_size=512) val_loader = DataLoader(val_ds, batch_size=512) ``` ## Let's build the simple model We'll replace common nn.Dropout layer with ann.Dropout from alpaca. Alpaca version allow to switch on the dropout during inference without worrying other "training" layers, like batch norm. 
``` class MLP(nn.Module): def __init__(self, input_size, base_size=64, dropout_rate=0., dropout_mask=None): super().__init__() self.net = nn.Sequential( nn.Linear(input_size, 4*base_size), nn.CELU(), nn.Linear(4*base_size, 2*base_size), ann.Dropout(dropout_rate, dropout_mask), nn.CELU(), nn.Linear(2*base_size, 1*base_size), ann.Dropout(dropout_rate, dropout_mask), nn.CELU(), nn.Linear(base_size, 1) ) def forward(self, x): return self.net(x) # Train model model = MLP(input_size=8, dropout_rate=0.1, dropout_mask=BasicBernoulliMask) ``` ## Train the model ``` criterion = nn.MSELoss() optimizer = torch.optim.Adam(model.parameters()) model.train() for epochs in range(100): for x_batch, y_batch in train_loader: # Train for one epoch predictions = model(x_batch) loss = criterion(predictions, y_batch) optimizer.zero_grad() loss.backward() optimizer.step() print('Train loss on last batch', loss.item()) # Check model effectiveness model.eval() x_batch, y_batch = next(iter(val_loader)) predictions = model(x_batch).detach().cpu().numpy() print('R2:', r2_score(predictions, y_batch)) ``` ## Estimate uncertainty We compare the log likelihood for constant prediction and monte-carlo uncertainty estimation ``` # Calculate uncertainty estimation estimator = MCDUE(model) predictions, estimations = estimator(x_batch) # Baseline const_std = np.std(y_val) errors = np.abs(predictions - y_batch.reshape((-1)).numpy()) score = uq_ll(errors, np.ones_like(errors) * const_std) print("Quality score for const std is ", score) model.train() estimator = MCDUE(model, nn_runs=100) predictions, estimations = estimator(x_batch) errors = np.abs(predictions - y_batch.reshape((-1)).numpy()) score = uq_ll(np.array(errors), predictions) print("Quality score for monte-carlo dropout is ", score) ```
github_jupyter
``` # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Training an image classification model with a `CustomJob` and getting online predictions <table align="left"> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/master/ai-platform-unified/notebooks/custom_job_image_classification_model_for_online_prediction.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/master/ai-platform-unified/notebooks/custom_job_image_classification_model_for_online_prediction.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> # Overview This tutorial demonstrates how to use the Vertex SDK for Python to train and deploy a custom image classification model for online prediction. ### Dataset The dataset used for this tutorial is the [cifar10 dataset](https://www.tensorflow.org/datasets/catalog/cifar10) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). The version of the dataset you will use is built into TensorFlow. The trained model predicts which type of class an image is from ten classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck. 
### Objective In this notebook, you will learn how to create a custom model from a Python script in a Docker container using the Vertex SDK for Python, and then do a prediction on the deployed model. You can alternatively create custom models from the command line using the `gcloud` command-line tool or online using the Google Cloud Console. The steps performed include: - Create a Vertex AI `CustomJob` resource for training a model. - Train the model. - Retrieve and load the model (artifacts). - View the model evaluation. - Upload the model as a Vertex AI `Model` resource. - Deploy the model to a serving `Endpoint` resource. - Make a prediction. - Undeploy the `Model`. ### Costs This tutorial uses billable components of Google Cloud: * Vertex AI * Cloud Storage Learn about [Vertex AI pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage pricing](https://cloud.google.com/storage/pricing). ## Installation Install the latest (preview) version of Vertex SDK for Python. ``` ! pip3 install -U google-cloud-aiplatform ``` Install the Google `cloud-storage` library as well. ``` ! pip3 install google-cloud-storage ``` Install the *pillow* library for creating test images. ``` ! pip3 install -U pillow ``` Install tensorflow to test the model after training ``` ! pip3 install tensorflow ``` ### Restart the Kernel Once you've installed the Vertex SDK for Python, you need to restart the notebook kernel so it can find the packages. ``` import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ``` ## Before you begin ### GPU runtime **Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select Runtime > Change runtime type > GPU** ### Set up your Google Cloud project **The following steps are required, regardless of your notebook environment.** 1. 
[Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project) 3. [Enable the Vertex AI API, Compute Engine API and Container Registry API.](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,containerregistry.googleapis.com) 4. The [Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebooks. 5. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. #### Project ID **If you don't know your project ID**, you might be able to get your project ID using `gcloud` command by executing the second cell below. ``` PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID ``` #### Region You can also change the `REGION` variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you. - Americas: `us-central1` - Europe: `europe-west4` - Asia Pacific: `asia-east1` You must not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. 
For the latest support per region, see [the Vertex AI locations documenation](https://cloud.google.com/vertex-ai/docs/general/locations) ``` REGION = "us-central1" # @param {type: "string"} ``` #### Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial. ``` from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ``` ### Authenticate your Google Cloud account **If you are using Google Cloud Notebooks**, your environment is already authenticated. Skip this step. ``` import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your Google Cloud account. This provides access # to your Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on Google Cloud Notebooks, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this tutorial in a notebook locally, replace the string # below with the path to your service account key and run this cell to # authenticate your Google Cloud account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS your_path_to_credentials.json ``` ### Create a Cloud Storage bucket **The following steps are required, regardless of your notebook environment.** In this tutorial, you upload a Python package containing your training code to a Cloud Storage bucket. Vertex AI runs the code from this package. In this tutorial, Vertex AI also saves the trained model that results from your job in the same bucket. You can then create a Vertex AI `Endpoint` based on this output in order to serve online predictions. 
Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets. ``` BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]": BUCKET_NAME = PROJECT_ID + "ucaip-custom-" + TIMESTAMP ``` **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ``` ! gsutil mb -l $REGION gs://$BUCKET_NAME ``` Finally, validate access to your Cloud Storage bucket by examining its contents: ``` ! gsutil ls -al gs://$BUCKET_NAME ``` ### Import libraries and define constants #### Import Vertex SDK for Python Import the Vertex SDK for Python into your Python environment. ``` import os import sys import time from google.cloud.aiplatform import gapic as aip from google.protobuf import json_format from google.protobuf.struct_pb2 import Value ``` #### Vertex AI constants Set some constants for Vertex AI: - `API_ENDPOINT`: The Vertex AI API service endpoint for the Job, Model, Endpoint, and Prediction services. - `PARENT`: The Vertex AI location root path for dataset, model and endpoint resources. ``` # API service endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION) # Vertex AI location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION ``` #### Hardware Accelerators Set the hardware accelerators (e.g., GPU), if any, for training and prediction. Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated the virtual machine (VM) instance. 
For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, specify:

    (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)

For GPU, available accelerators include:
 - aip.AcceleratorType.NVIDIA_TESLA_K80
 - aip.AcceleratorType.NVIDIA_TESLA_P100
 - aip.AcceleratorType.NVIDIA_TESLA_P4
 - aip.AcceleratorType.NVIDIA_TESLA_T4
 - aip.AcceleratorType.NVIDIA_TESLA_V100

Otherwise specify `(None, None)` to use a container image to run on a CPU.

*Note*: GPU-enabled TF releases before 2.3 will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, the workaround is to create your own Docker container image for TF 2.3 with GPU support.

```
TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)

DEPLOY_GPU, DEPLOY_NGPU = (None, None)
```

#### Container (Docker) image

Next, set the Docker container images for training and prediction.

- Set the variable `TF` to the Tensorflow version of the container image. For example, `2-1` would be version 2.1, and `1-15` would be version 1.15.
The following list shows some of the pre-built images available: - Tensorflow 1.15 - `gcr.io/cloud-aiplatform/training/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.1-15:latest` - `gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest` - Tensorflow 2.1 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-1:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest` - Tensorflow 2.2 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-2:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest` - XGBoost - `gcr.io/cloud-aiplatform/training/xgboost-cpu.1-1` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1` - Scikit-learn - `gcr.io/cloud-aiplatform/training/scikit-learn-cpu.0-23` - `gcr.io/cloud-aiplatform/prediction/scikit-learn-cpu.0-23` - Pytorch - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-4:latest` - `gcr.io/cloud-aiplatform/training/pytorch-gpu.1-4:latest` Vertex AI frequently adds new training and prediction container images. 
For the latest list, see [Pre-built containers for training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers) and [Pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) ``` TF = "2-1" if TF[0] == "2": if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf2-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf2-cpu.{}".format(TF) else: if TRAIN_GPU: TRAIN_VERSION = "tf-gpu.{}".format(TF) else: TRAIN_VERSION = "tf-cpu.{}".format(TF) if DEPLOY_GPU: DEPLOY_VERSION = "tf-gpu.{}".format(TF) else: DEPLOY_VERSION = "tf-cpu.{}".format(TF) TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION) DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION) print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU) print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU) ``` #### Machine Type Next, set the machine type to use for training and prediction. - Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for training and prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \] *Note: The following is not supported for training* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs *Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs* ``` MACHINE_TYPE = "n1-standard" VCPU = "4" TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Train machine type", TRAIN_COMPUTE) MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Deploy machine type", DEPLOY_COMPUTE) ``` # Tutorial Now you are ready to start creating your own custom model and training for CIFAR10. 
## Clients

The Vertex SDK for Python works as a client/server model. On your side, the Python script, you will create a client that sends requests and receives responses from the server -- Vertex AI.

You will use several clients in this tutorial, so set them all up upfront.

- Job Service for custom jobs.
- Model Service for managed models.
- Endpoint Service for deployment.
- Prediction Service for serving.

```
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}

predict_client_options = {"api_endpoint": API_ENDPOINT}


def create_job_client():
    client = aip.JobServiceClient(client_options=client_options)
    return client


def create_model_client():
    client = aip.ModelServiceClient(client_options=client_options)
    return client


def create_endpoint_client():
    client = aip.EndpointServiceClient(client_options=client_options)
    return client


def create_prediction_client():
    client = aip.PredictionServiceClient(client_options=predict_client_options)
    return client


clients = {}
clients["job"] = create_job_client()
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()

for client in clients.items():
    print(client)
```

## Prepare your `CustomJob` specification

Now that your clients are ready, your first step is to create a `CustomJob` specification for your custom training job.

To practice using the Job service, start by training an **empty job**. In other words, create a `CustomJob` specification that provisions resources for training a job, and initiate the job using the client library's Job service, but configure the `CustomJob` so it doesn't actually train an ML model. This lets you focus on understanding the basic steps.

Afterwards, you can create another `CustomJob` with a focus on adding the Python training package for training a CIFAR10 custom model.
### Define a container specification

Let's first start by defining a job name and then a container specification:

- `JOB_NAME`: A unique name for your custom training job. For convenience, append a timestamp to make the name unique.
- `MODEL_DIR`: A location in your Cloud Storage bucket for storing the model artifacts.
- `image_uri`: The location of the container image in Artifact Registry, Container Registry, or Docker Hub. This can be either a Google Cloud pre-built image or your own container image.
- `--model-dir`: A command-line parameter to the container indicating the location to store the model.

```
JOB_NAME = "custom_job_" + TIMESTAMP
MODEL_DIR = "gs://{}/{}".format(BUCKET_NAME, JOB_NAME)

CONTAINER_SPEC = {
    "image_uri": TRAIN_IMAGE,
    "args": ["--model-dir=" + MODEL_DIR],
}
```

### Define the worker pool specification

Next, you define the worker pool specification for your custom training job. This tells Vertex AI what type of VMs to provision for the training, and how many. For this tutorial, you will use a single instance (node).

- `replica_count`: The number of VMs to provision of this machine type.
- `machine_type`: The type of VM to provision -- e.g., n1-standard-8.
- `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU.
- `accelerator_count`: The number of accelerators.
- `container_spec`: The Docker container to install on the instances.

```
if TRAIN_GPU:
    machine_spec = {
        "machine_type": TRAIN_COMPUTE,
        "accelerator_type": TRAIN_GPU,
        "accelerator_count": TRAIN_NGPU,
    }
else:
    machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}

WORKER_POOL_SPEC = [
    {
        "replica_count": 1,
        "machine_spec": machine_spec,
        "container_spec": CONTAINER_SPEC,
    }
]
```

If you were doing distributed training, you would add a second machine description and set the replica count accordingly.
In the example below, the first machine description is the primary (coordinator), and the second ones are the machines the training is distributed to.

```
WORKER_POOL_SPEC=[
    {
        "replica_count": 1,
        "machine_spec": {
            "machine_type": "n1-standard-8"
        },
        "container_spec": CONTAINER_SPEC,
    },
    {
        "replica_count": 6,
        "machine_spec": {
            "machine_type": "n1-standard-8"
        },
        "container_spec": CONTAINER_SPEC
    }
]
```

### Assemble the job specification

Assemble the description for the `CustomJob` specification.

- `display_name`: The human readable name you assign to this `CustomJob`.
- `job_spec`: The specification for the `CustomJob`. Since this is an empty job, you only specified the resource requirements.

```
CUSTOM_JOB = {
    "display_name": JOB_NAME,
    "job_spec": {"worker_pool_specs": WORKER_POOL_SPEC},
}
```

## Train the model

Start the training of your custom training job on Vertex AI.

Use this helper function `create_custom_job`, which takes the following parameter:

- `custom_job`: The specification for the `CustomJob`.

The helper function calls the job client service's `create_custom_job` method, with the following parameters:

- `parent`: The Vertex AI location path to `Dataset`, `Model` and `Endpoint` resources.
- `custom_job`: The specification for the `CustomJob`.

You will display a handful of the fields returned in `response` object. The two that are of most interest are the following:

- `response.name`: The Vertex AI fully qualified identifier assigned to this `CustomJob`. Save this identifier for using in subsequent steps.
- `response.state`: The current state of the `CustomJob`.
```
def create_custom_job(custom_job):
    """Submit a CustomJob to Vertex AI and return its fully qualified name.

    Args:
        custom_job: The CustomJob specification (dict) to submit.
    """
    # Pass the function's own argument through to the service. (The original
    # cell referenced the global CUSTOM_JOB here, which silently ignored
    # whatever specification the caller passed in.)
    response = clients["job"].create_custom_job(parent=PARENT, custom_job=custom_job)
    print("name:", response.name)
    print("display_name:", response.display_name)
    print("state:", response.state)
    print("create_time:", response.create_time)
    print("update_time:", response.update_time)
    return response.name


# Save the job name
JOB_ID = create_custom_job(CUSTOM_JOB)
```

### List all `CustomJob` resources

Now that your `CustomJob` is running, get a list of all the `CustomJob` resources associated with your `PROJECT_ID`. This will probably be just one job, unless you've been running this tutorial multiple times or otherwise been using the Vertex AI job service.

Use the helper function `list_custom_jobs`, which calls the job client service's `list_custom_jobs` method. The response object is a list, where each element in the list is a separate job.

The `response` object for each `CustomJob` contains:

- `name`: The Vertex AI fully qualified identifier for your custom training job.
- `display_name`: The human readable name you assigned to your custom training job.
- `job_spec`: The job specification you provided for your custom training job.
- `state`: The current status of the `CustomJob`.
- `start_time`: When the custom training job was created.
- `end_time`: When the execution of the custom job ended.
- `update_time`: When the last time there was a status update to the `CustomJob`.

```
def list_custom_jobs():
    """Print each CustomJob resource under the project/region PARENT."""
    response = clients["job"].list_custom_jobs(parent=PARENT)
    # Print the individual job, not the whole paged response, on each
    # iteration. (The original printed `response` once per job.)
    for job in response:
        print(job)


list_custom_jobs()
```

### Get information on a custom job

Next, use this helper function `get_custom_job`, which takes the following parameter:

- `name`: The Vertex AI fully qualified identifier for the `CustomJob`.
The helper function gets the job information for just this job by calling the job client service's `get_custom_job` method, with the following parameter:

- `name`: The Vertex AI fully qualified identifier for the `CustomJob`.

Recall that you got the Vertex AI fully qualified identifier for the custom job in the `response.name` field when you called the `create_custom_job` method, and you saved the identifier in the variable `JOB_ID`.

```
def get_custom_job(name, silent=False):
    response = clients["job"].get_custom_job(name=name)
    if silent:
        return response

    print("name:", response.name)
    print("display_name:", response.display_name)
    print("state:", response.state)
    print("create_time:", response.create_time)
    print("update_time:", response.update_time)
    return response


get_custom_job(JOB_ID)
```

### View logs

In addition to the aforementioned state information on the job, you can also look at logs associated with the `CustomJob`.

#### View logs in the Vertex AI section of the Cloud Console

You can view logs in the Vertex AI section of the Cloud Console. The cell below will display a link. Paste the link in the address bar of another tab in your browser. It will display information about your job.

```
print(
    "https://console.cloud.google.com/ai/platform/locations/{region}/training/{job_id}?project={project_id}".format(
        region=REGION, job_id=JOB_ID.split("/")[-1], project_id=PROJECT_ID
    )
)
```

#### View logs in Cloud Logging

You can also view the logs in Cloud Logging. The cell below will display a link. Paste the link in the address bar of another tab in your browser. It will display logs for your job.

```
print(
    "https://console.cloud.google.com/logs/viewer?resource=ml_job%2Fjob_id%2F{job_id}&project={project_id}".format(
        job_id=JOB_ID.split("/")[-1], project_id=PROJECT_ID
    )
)
```

### Cancel a `CustomJob`

If you want, you can cancel your "empty" training job.
Use this helper function `cancel_job`, with the following parameter:

- `name`: The Vertex AI fully qualified identifier for your custom training job.

The helper function will call the job service client's `cancel_custom_job` method, with the following parameter:

- `name`: The Vertex AI fully qualified identifier for your custom training job.

Use a try/except around the call since it will throw an exception if the job is already completed (succeeded) -- which most likely it is.

```
def cancel_job(name):
    try:
        response = clients["job"].cancel_custom_job(name=name)
        print(response)
    except Exception as e:
        print(e)


time.sleep(10)
cancel_job(JOB_ID)
```

### Delete a custom job

Next, you can delete your "empty" training job.

Use the helper function `delete_job`, with the following parameter:

- `name`: The Vertex AI fully qualified identifier for your custom training job.

The helper function will call the job service client's `delete_custom_job` method, with the parameter:

- `name`: The Vertex AI fully qualified identifier for your custom training job.

Afterwards, you can verify that the job has been deleted by calling the method `get_custom_job` for the same job. Use a try/except around the call since it will throw an exception if the job is already deleted -- which most likely it is.

```
def delete_job(name):
    try:
        response = clients["job"].delete_custom_job(name=name)
        print("Delete", response)
    except Exception as e:
        print(e)
    try:
        response = clients["job"].get_custom_job(name=name)
    except Exception as e:
        print(e)


time.sleep(10)
delete_job(JOB_ID)
```

## Train a model - CIFAR10

Now that you have seen the basic steps for custom training, you will do a new custom job to train a model.

There are two ways you can train a custom model using a container image:

- **Use a Vertex AI pre-built container**. If you use a pre-built container, you will additionally specify a Python package to install into the container image.
This Python package contains your code for training a custom model.

- **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model.

In this tutorial, you will train a CIFAR10 model using a pre-built container.

You need to update the worker pool specification by adding a description for `python_package_spec`. This section will tell the custom job the Python training package to install and which Python module to invoke, along with command line arguments for the Python module.

The Python package specification contains the following fields:

- `executor_image_uri`: This is the Docker image which is configured for your custom training job. You will continue to use the same one we used earlier for demonstration.
- `package_uris`: This is a list of the locations (URIs) of your Python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual Python files or a zip (archive) of an entire package. In the latter case, the job service will unzip (unarchive) the contents into the Docker container.
- `python_module`: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking `trainer.task` -- note that the `.py` suffix is not appended.
- `args`: The command line arguments to pass to the corresponding python module. In this example, you will be passing:
  - `"--model-dir=" + MODEL_DIR` : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts:
    - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or
    - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`).
In this case, you tell the service the model artifact location in the job specification. - `"--epochs=" + EPOCHS`: The number of epochs for training. - `"--steps=" + STEPS`: The number of steps (batches) per epoch. - `"--distribute=" + TRAIN_STRATEGY"` : The training distribution strategy to use for single or distributed training. - `"single"`: single device. - `"mirror"`: all GPU devices on a single VM. - `"multi"`: all GPU devices on all VMs. ``` if TRAIN_GPU: machine_spec = { "machine_type": TRAIN_COMPUTE, "accelerator_type": TRAIN_GPU, "accelerator_count": TRAIN_NGPU, } else: machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0} if not TRAIN_NGPU or TRAIN_NGPU < 2: TRAIN_STRATEGY = "single" else: TRAIN_STRATEGY = "mirror" EPOCHS = 20 STEPS = 100 DIRECT = True if DIRECT: CMDARGS = [ "--model-dir=" + MODEL_DIR, "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), "--distribute=" + TRAIN_STRATEGY, ] else: CMDARGS = [ "--epochs=" + str(EPOCHS), "--steps=" + str(STEPS), "--distribute=" + TRAIN_STRATEGY, ] WORKER_POOL_SPEC = [ { "replica_count": 1, "machine_spec": machine_spec, "python_package_spec": { "executor_image_uri": TRAIN_IMAGE, "package_uris": ["gs://" + BUCKET_NAME + "/trainer_cifar.tar.gz"], "python_module": "trainer.task", "args": CMDARGS, }, } ] ``` ### Assemble a job specification Now assemble the description for the `CustomJob` specification: - `display_name`: The human-readable name you assign to this `CustomJob`. - `job_spec`: The specification for the `CustomJob`. - `base_output_directory`: This tells the service the Cloud Storage location where to save the model artifacts (when variable `DIRECT = False`). 
The service will then pass the location to the training script as the environment variable `AIP_MODEL_DIR`, and the path will be of the form: <output_uri_prefix>/model ``` if DIRECT: JOB_SPEC = {"worker_pool_specs": WORKER_POOL_SPEC} else: JOB_SPEC = { "worker_pool_specs": WORKER_POOL_SPEC, "base_output_directory": {"output_uri_prefix": MODEL_DIR}, } CUSTOM_JOB = {"display_name": JOB_NAME, "job_spec": JOB_SPEC} ``` ### Examine the training package #### Package layout Before you start the training, look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout. - PKG-INFO - README.md - setup.cfg - setup.py - trainer - \_\_init\_\_.py - task.py The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker container. The file `trainer/task.py` is the Python script for executing the `CustomJob`. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`). #### Package Assembly In the following cells, you will assemble the training package. ``` # Make folder for Python training script ! rm -rf custom ! mkdir custom # Add package information ! touch custom/README.md setup_cfg = "[egg_info]\n\ tag_build =\n\ tag_date = 0" ! echo "$setup_cfg" > custom/setup.cfg setup_py = "import setuptools\n\ # Requires TensorFlow Datasets\n\ setuptools.setup(\n\ install_requires=[\n\ 'tensorflow_datasets==1.3.0',\n\ ],\n\ packages=setuptools.find_packages())" ! echo "$setup_py" > custom/setup.py pkg_info = "Metadata-Version: 1.0\n\ Name: UNKNOWN\n\ Version: 0.0.0\n\ Summary: Demostration training script\n\ Home-page: www.google.com\n\ Author: Google\n\ Author-email: UNKNOWN\n\ License: Public\n\ Description: Demo\n\ Platform: Vertex AI" ! echo "$pkg_info" > custom/PKG-INFO # Make the training subfolder ! mkdir custom/trainer ! 
touch custom/trainer/__init__.py ``` #### Task.py contents In the next cell, you will write the contents of the training script task.py. In summary, the training script does the following: - Gets the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`. - Loads CIFAR10 dataset from TF Datasets (tfds). - Builds a simple ConvNet model using TF.Keras model API. - Compiles the model (`compile()`). - Sets a training distribution strategy according to the argument `args.distribute`. - Trains the model (`fit()`) with epochs and steps according to the arguments `args.epochs` and `args.steps` - Saves the trained model (`save(args.model_dir)`) to the specified model directory. ``` %%writefile custom/trainer/task.py # Single, Mirror and Multi-Machine Distributed Training for CIFAR-10 import tensorflow_datasets as tfds import tensorflow as tf from tensorflow.python.client import device_lib import argparse import os import sys tfds.disable_progress_bar() parser = argparse.ArgumentParser() parser.add_argument('--model-dir', dest='model_dir', default=os.getenv("AIP_MODEL_DIR"), type=str, help='Model dir.') parser.add_argument('--lr', dest='lr', default=0.01, type=float, help='Learning rate.') parser.add_argument('--epochs', dest='epochs', default=10, type=int, help='Number of epochs.') parser.add_argument('--steps', dest='steps', default=200, type=int, help='Number of steps per epoch.') parser.add_argument('--distribute', dest='distribute', type=str, default='single', help='distributed training strategy') args = parser.parse_args() print('Python Version = {}'.format(sys.version)) print('TensorFlow Version = {}'.format(tf.__version__)) print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found'))) print('DEVICES', device_lib.list_local_devices()) # Single Machine, single compute device if args.distribute == 'single': if tf.test.is_gpu_available(): strategy = 
tf.distribute.OneDeviceStrategy(device="/gpu:0") else: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") # Single Machine, multiple compute device elif args.distribute == 'mirror': strategy = tf.distribute.MirroredStrategy() # Multiple Machine, multiple compute device elif args.distribute == 'multi': strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() # Multi-worker configuration print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync)) # Preparing dataset BUFFER_SIZE = 10000 BATCH_SIZE = 64 def make_datasets_unbatched(): # Scaling CIFAR10 data from (0, 255] to (0., 1.] def scale(image, label): image = tf.cast(image, tf.float32) image /= 255.0 return image, label datasets, info = tfds.load(name='cifar10', with_info=True, as_supervised=True) return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat() # Build the Keras model def build_and_compile_cnn_model(): model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile( loss=tf.keras.losses.sparse_categorical_crossentropy, optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr), metrics=['accuracy']) return model # Train the model NUM_WORKERS = strategy.num_replicas_in_sync # Here the batch size scales up by number of workers since # `tf.data.Dataset.batch` expects the global batch size. GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE) with strategy.scope(): # Creation of dataset, and model building/compiling need to be within # `strategy.scope()`. 
model = build_and_compile_cnn_model() model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps) model.save(args.model_dir) ``` #### Store training script on your Cloud Storage bucket Next, package the training folder into a compressed tarball, and then store it in your Cloud Storage bucket. ``` ! rm -f custom.tar custom.tar.gz ! tar cvf custom.tar custom ! gzip custom.tar ! gsutil cp custom.tar.gz gs://$BUCKET_NAME/trainer_cifar.tar.gz ``` ### Train the model Start the training of your custom training job on Vertex AI. Use the helper function from a previous section, `create_custom_job`, which will return the Vertex AI fully qualified identifier assigned to the `CustomJob`. ``` # Save the job name JOB_ID = create_custom_job(CUSTOM_JOB) ``` ### Get information on a `CustomJob` Get the status on our custom training job for CIFAR10 using the helper function defined earlier `get_custom_job`. The job most likely will still be either PENDING or RUNNING. ``` response = get_custom_job(JOB_ID) ``` # Deployment Once your model is done training, you can calculate the actual time it took to train the model by subtracting `start_time` from `end_time`. For your model, you need to know the location of the SavedModel, which the Python script saved in your Cloud Storage bucket at `MODEL_DIR + '/saved_model.pb'`. ``` while True: response = get_custom_job(JOB_ID, True) if response.state != aip.JobState.JOB_STATE_SUCCEEDED: print("Training job has not completed:", response.state) model_path_to_deploy = None if response.state == aip.JobState.JOB_STATE_FAILED: break else: if not DIRECT: MODEL_DIR = MODEL_DIR + "/model" model_path_to_deploy = MODEL_DIR print("Training Time:", response.update_time - response.create_time) break time.sleep(60) print("model_to_deploy:", model_path_to_deploy) ``` ## Load the saved model Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. 
Load it from the Cloud Storage bucket in order to evaluate the model and get a prediction.

To load the model, use the `tf.keras.models.load_model()` method, passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`.

```
import tensorflow as tf

model = tf.keras.models.load_model(MODEL_DIR)
```

## Evaluate the model

Now, find out how good the model is.

### Load evaluation data

You will load the CIFAR10 test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the image data, and the corresponding labels.

You don't need the training data, so the code loads it as `(_, _)`.

Before you can run the data through evaluation, you need to preprocess it:

* x_test: Normalize (rescale) the pixel data by dividing each pixel by 255. This will replace each single byte integer pixel with a 32-bit floating point number between 0 and 1.
* y_test: The labels are already scalar (sparse). If you look back at the `compile()` step in the `trainer/task.py` script, you will find that it was compiled for sparse labels. Therefore you don't need to perform any preprocessing.

```
import numpy as np
from tensorflow.keras.datasets import cifar10

(_, _), (x_test, y_test) = cifar10.load_data()
x_test = (x_test / 255.0).astype(np.float32)
print(x_test.shape, y_test.shape)
```

### Evaluate the model

Evaluate how well the ConvNet model trained by the `CustomJob` performed. You probably see a result around 37%, which is not so good. However, this low performance is expected because you only trained for 20 epochs and 100 steps per epoch: see the `fit()` call in `task.py`.

```
model.evaluate(x_test, y_test)
```

## Upload the model for serving

Next, upload your TensorFlow model to Vertex AI in order to create a Vertex AI `Model` resource.
During upload, you need to define a serving function to convert data to the format your `Model` expects. If you send encoded data to Vertex AI, your serving function ensures that the data is decoded on the model server before it is passed as input to your `Model`.

### How does the serving function work

When you send a request to an online prediction server, the request is received by an HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. If you use one of Vertex AI's pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.

The serving function consists of two parts:

- `preprocessing function`:
  - Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph).
  - Performs the same preprocessing of the data that was done during training the underlying model; for example, normalizing input data.
- `post-processing function`:
  - Converts the model output to the format expected by the receiving application; for example, compressing the output.
  - Packages the output for the receiving application; for example, adding headings or making a JSON object.

Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.

One consideration to keep in mind when building serving functions for TensorFlow Keras models is that they run as static graphs. This means you cannot use TensorFlow graph operations that require a dynamic graph. If you do, you will get an error when you compile the serving function indicating that you are using an EagerTensor, which is not supported.
### Serving function for image data To pass images to the prediction service, you encode the compressed (for example, JPEG) image bytes using base64 -- which makes the content safe from modification while transmitting binary data over the network. Since this TensorFlow model expects input data as raw (uncompressed) bytes, you need to ensure that the base64-encoded data gets converted back to raw bytes before it is passed as input to the TensorFlow model. To resolve this, define a serving function (`serving_fn`) and attach it to the model as a preprocessing step. Add a `@tf.function` decorator so the serving function is fused to the underlying model (instead of upstream on a CPU). When you send a prediction or explanation request, the content of the request is base 64 decoded into a Tensorflow string (`tf.string`), which is passed to the serving function (`serving_fn`). The serving function preprocesses the `tf.string` into raw (uncompressed) numpy bytes (`preprocess_fn`) to match the input requirements of the model: - `io.decode_jpeg`- Decompresses the JPG image which is returned as a Tensorflow tensor with three channels (RGB). - `image.convert_image_dtype` - Changes integer pixel values to float 32. - `image.resize` - Resizes the image to match the input shape for the model. - `resized / 255.0` - Rescales (normalization) the pixel data between 0 and 1. At this point, the data can be passed to the model (`m_call`). 
``` CONCRETE_INPUT = "numpy_inputs" def _preprocess(bytes_input): decoded = tf.io.decode_jpeg(bytes_input, channels=3) decoded = tf.image.convert_image_dtype(decoded, tf.float32) resized = tf.image.resize(decoded, size=(32, 32)) rescale = tf.cast(resized / 255.0, tf.float32) return rescale @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def preprocess_fn(bytes_inputs): decoded_images = tf.map_fn( _preprocess, bytes_inputs, dtype=tf.float32, back_prop=False ) return { CONCRETE_INPUT: decoded_images } # User needs to make sure the key matches model's input m_call = tf.function(model.call).get_concrete_function( [tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)] ) @tf.function(input_signature=[tf.TensorSpec([None], tf.string)]) def serving_fn(bytes_inputs): images = preprocess_fn(bytes_inputs) prob = m_call(**images) return prob tf.saved_model.save( model, model_path_to_deploy, signatures={ "serving_default": serving_fn, }, ) ``` ## Get the serving function signature You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer. For your purpose, you need the signature of the serving function. When you send a prediction request as a HTTP request packet, the image data is base64-encoded, and the TensorFlow model takes numpy input. Th Vertex AI pre-built prediction container converts the base64-encoded data to image bytes, and your serving function converts image bytes to a numpy array. When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function. 
``` loaded = tf.saved_model.load(model_path_to_deploy) input_name = list( loaded.signatures["serving_default"].structured_input_signature[1].keys() )[0] print("Serving function input:", input_name) ``` ### Debugging the serving function Now that you've downloaded the model with the serving function, you can debug the serving function locally before deploying to a prediction server. #### Test instance Make a test instance in the form passed by the HTTP server in the server binary to the serving function. Start by taking an arbitrary raw (uncompressed) image from our test data (`x_test[0]`). When the request is sent to the prediction server, it will be sent as a non-preprocessed compressed JPEG image. - Reverse the preprocessing: `(x_test[0] * 255).astype(np.uint8)` - Compress into JPEG format: `cv2.imwrite('tmp.jpg', raw)` - Get the raw bytes: `bytes = tf.io.read_file('tmp.jpg')` - Convert the raw bytes to a tf.string: tf.convert_to_tensor( [bytes], tf.string) ``` from PIL import Image raw = (x_test[0] * 255).astype(np.uint8) image = Image.fromarray(raw) image.save("tmp.jpg") bytes = tf.io.read_file("tmp.jpg") tensor = tf.convert_to_tensor([bytes], tf.string) ``` #### Call the serving function Pass the data to the serving function and get the response. - Get the serving function from the fused model: `loaded.signatures["serving_default"]` - Run the serving function on the local CPU: `with tf.device("cpu:0")` - Invoke the serving function: `serving_function(tensor)` ``` serving_function = loaded.signatures["serving_default"] with tf.device("cpu:0"): response = serving_function(tensor) ``` Print the response. You will see in the response the probability values for the ten CIFAR10 classes. ``` print(response) ``` ### Upload the model Use this helper function `upload_model` to upload your model, stored in SavedModel format, to the model service, which will create a Vertex AI `Model` resource. 
Once you've done that, you can use the model in the same way as any other Vertex AI `Model` resource; for example, you can deploying to an `Endpoint` for serving predictions. The helper function takes the parameters: - `display_name`: A human readable name for the endpoint. - `image_uri`: The container image for the model deployment. - `model_uri`: The Cloud Storage path to our SavedModel artifact. For this tutorial, this is the Cloud Storage location where the `trainer/task.py` script saved the model, which we specified in the variable `MODEL_DIR`. The helper function uses the model client service's `upload_model` method, which takes the parameters: - `parent`: The Vertex AI location root path for `Dataset`, `Model` and `Endpoint` resources. - `model`: The specification for the Vertex AI `Model` resource. The `model` paramter is a dictionary object that consists of the following fields: - `display_name`: A human readable name for the model. - `metadata_schema_uri`: Since the model was built without a Vertex AI managed dataset, leave this blank (`''`). - `artificat_uri`: The Cloud Storage path where the model is stored in SavedModel format. - `container_spec`: This is the specification for the Docker container that will run on the `Endpoint` to serve predictions. Use the variable you set earlier: `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. Uploading a model into a Vertex AI `Model` resource returns a long-running operation, since it might take a few moments. You call `response.result()`, which is a synchronous call and will return when the Vertex AI `Model` resource is ready. The helper function returns the Vertex AI fully qualified identifier for the corresponding Vertex AI model instance `upload_model_response.model`. You will save the identifier for subsequent steps in the variable `model_to_deploy_name`. 
``` IMAGE_URI = DEPLOY_IMAGE def upload_model(display_name, image_uri, model_uri): model = { "display_name": display_name, "metadata_schema_uri": "", "artifact_uri": model_uri, "container_spec": { "image_uri": image_uri, "command": [], "args": [], "env": [{"name": "env_name", "value": "env_value"}], "ports": [{"container_port": 8080}], "predict_route": "", "health_route": "", }, } response = clients["model"].upload_model(parent=PARENT, model=model) print("Long running operation:", response.operation.name) upload_model_response = response.result(timeout=180) print("upload_model_response") print(" model:", upload_model_response.model) return upload_model_response.model model_to_deploy_name = upload_model( "cifar10-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy ) ``` ### List all models Now that your custom model is uploaded as a Vertex AI `Model`, get a list of all your `Model` resources. Use the following helper function, `list_models`. This helper function calls the Vertex AI model client service's `list_models` method, with the following parameter: - `parent`: The Vertex AI location root path for your `Dataset`, `Model`, and `Endpoint` resources. The response object from the call is a list, where each element is a Vertex AI `Model`. For each `Model`, you will see a few fields: - `name`: The Vertex AI unique identifier for the `Model`. - `display_name`: The human readable name assigned to the model. - `create_time`': Timestamp when the model resource was created. - `update_time`: Timestamp when the model resource was last updated. - `container`: The container image used for training the model. - `artifact_uri`': The Cloud Storage location of the model artifact. 
``` def list_models(): response = clients["model"].list_models(parent=PARENT) for model in response: print("name", model.name) print("display_name", model.display_name) print("create_time", model.create_time) print("update_time", model.update_time) print("container", model.container_spec.image_uri) print("artifact_uri", model.artifact_uri) print("\n") list_models() ``` ### Get model information Get the information for just your `Model`. Use the helper function `get_model`, with the following parameter: - `name`: The Vertex AI unique identifier for the `Model`. This helper function calls the Vertex AI model client service's `get_model` method, with the following parameter: - `name`: The Vertex AI unique identifier for the `Model`. ``` def get_model(name): response = clients["model"].get_model(name=name) print(response) get_model(model_to_deploy_name) ``` ### Create an `Endpoint` Use the helper function `create_endpoint` to create an `Endpoint` to deploy the `Model` to for serving predictions, with the following parameter: - `display_name`: A human readable name for the `Endpoint`. The helper function uses the endpoint client service's `create_endpoint` method, which takes the parameter: - `display_name`: A human readable name for the `Endpoint`. Creating an `Endpoint` returns a long-running operation, since it might take a few moments to provision the endpoint for serving. You will call `response.result()`, which is a synchronous call and will return when the `Endpoint` is ready. The helper function returns the Vertex AI fully qualified identifier for the `Endpoint` in the `response.name` field. 
``` ENDPOINT_NAME = "cifar10_endpoint-" + TIMESTAMP def create_endpoint(display_name): endpoint = {"display_name": display_name} response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint) print("Long running operation:", response.operation.name) result = response.result(timeout=300) print("result") print(" name:", result.name) print(" display_name:", result.display_name) print(" description:", result.description) print(" labels:", result.labels) print(" create_time:", result.create_time) print(" update_time:", result.update_time) return result.name endpoint_name = create_endpoint(ENDPOINT_NAME) ``` ### Prediction scaling You have several choices on scaling the VMs for handling your online prediction requests: - Single Instance: The online prediction requests are processed on a single VM. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one. - Manual Scaling: The online prediction requests are split across a fixed number of VMs that you manually specified. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of VMs to the same number of nodes. When a `Model` is first deployed to `Endpoint`, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them. - Auto Scaling: The online prediction requests are streamed across an initial number of VMs. Based on time and compute tradeoffs, Vertex AI might automatically scale your `DeployedModel` to provision and deprovision compute instances. - Set the minimum (`MIN_NODES`) to the initial number of VMs and the maximum (`MAX_NODES`) to the maximum number of VMs that you want the service to scale to. The minimum number of VMs corresponds to the field `min_replica_count` and the maximum number of VMs corresponds to the field `max_replica_count`. 
``` MIN_NODES = 1 MAX_NODES = 1 ``` ### Deploy model to the endpoint Use the helper function `deploy_model` to deploy the `Model` to the `Endpoint` you created for serving predictions, with the following parameters: - `model`: The Vertex AI fully qualified identifier of the `Model` to deploy. - `deployed_model_display_name`: A human readable name for the `DeployedModel`. - `endpoint`: The Vertex AI fully qualified `Endpoint` identifier to deploy the `Model` to. The helper function calls the endpoint client service's `deploy_model` method, which takes the following parameters: - `endpoint`: The Vertex AI fully qualified `Endpoint` identifier to deploy the `Model` to. - `deployed_model`: The requirements for deploying the `Model`. - `traffic_split`: Percent of the `Endpoint`'s traffic that you want to go to this `DeployedModel`. This is specified as a dictionary of one or more key/value pairs. - If only one model, then specify as **{ "0": 100 }**, where "0" refers to the new `DeployedModel` and 100 means 100% of the traffic. - If there are existing models on the endpoint, for which the traffic will be split, then specify as follows, where `model_id` is the model id of an existing model to the deployed endpoint. The percents must add up to 100. { "0": percent, model_id: percent, ... } The `deployed_model` parameter is specified as a Python dictionary with the minimum required fields: - `model`: The Vertex AI fully qualified identifier of the `Model` to deploy. - `display_name`: A human readable name for the `DeployedModel`. - `dedicated_resources`: This refers to how many VMs (replicas) are scaled for serving prediction requests. - `machine_spec`: The compute resources to provision for each VM. Use the variable you set earlier: `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. - `min_replica_count`: The number of VMs to initially provision, which you set earlier as the variable `MIN_NODES`. 
- `max_replica_count`: The maximum number of VMs to scale to, which you set earlier as the variable `MAX_NODES`. - `enable_container_logging`: This enables logging of container events, such as execution failures. The default is False. This is typically set to True when debugging the deployment and then set to False when deployed for production. Configuring the `traffic_split` to split traffic between multiple `DeployedModel` resources might be helpful in a situation like the following: Perhaps you already have a previous version of your model deployed in production as a `DeployedModel` called `v1`. You got better model evaluation on a new model, which you will deploy as `v2`, but you don't know for certain that it is really better until you deploy to production. So in the case of traffic split, you might want to deploy `v2` to the same `Endpoint` as `v1` but only allocate it 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision. 
``` DEPLOYED_NAME = "cifar10_deployed-" + TIMESTAMP def deploy_model( model, deployed_model_display_name, endpoint, traffic_split={"0": 100} ): if DEPLOY_GPU: machine_spec = { "machine_type": DEPLOY_COMPUTE, "accelerator_type": DEPLOY_GPU, "accelerator_count": DEPLOY_NGPU, } else: machine_spec = { "machine_type": DEPLOY_COMPUTE, "accelerator_count": 0, } deployed_model = { "model": model, "display_name": deployed_model_display_name, "dedicated_resources": { "min_replica_count": MIN_NODES, "max_replica_count": MAX_NODES, "machine_spec": machine_spec, }, } response = clients["endpoint"].deploy_model( endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split ) print("Long running operation:", response.operation.name) result = response.result() print("result") deployed_model = result.deployed_model print(" deployed_model") print(" id:", deployed_model.id) print(" model:", deployed_model.model) print(" display_name:", deployed_model.display_name) print(" create_time:", deployed_model.create_time) return deployed_model.id deployed_model_id = deploy_model(model_to_deploy_name, DEPLOYED_NAME, endpoint_name) ``` ### List all endpoints Get a list of all your `Endpoint` resources. Use the helper function `list_endpoints`. The helper function calls the endpoint client service's `list_endpoints` method. The returned response object is a list, with an element for each `Endpoint`. The helper function prints a few example fields for each `Endpoint`: - `name`: The Vertex AI identifier for the `Endpoint`. - `display_name`: The human readable name you assigned to the `Endpoint`. - `create_time`: When the `Endpoint` was created. - `deployed_models`: The `DeployedModel`s and associated information that are deployed to this `Endpoint`. 
``` def list_endpoints(): response = clients["endpoint"].list_endpoints(parent=PARENT) for endpoint in response: print("name:", endpoint.name) print("display name:", endpoint.display_name) print("create_time:", endpoint.create_time) print("deployed_models", endpoint.deployed_models) print("\n") list_endpoints() ``` ### Get information on this endpoint Get informationfor just your endpoint. Use the helper function `get_endpoint`, with the following parameter: - `name`: The Vertex AI unique identifier for the `Endpoint`. This helper function calls the endpoint client service's `get_endpoint` method, with the following parameter: - `name`: The Vertex AI unique identifier for the managed `Endpoint`. ``` def get_endpoint(name): response = clients["endpoint"].get_endpoint(name=name) print(response) get_endpoint(endpoint_name) ``` ## Make a prediction request Let's now get a prediction from the `Endpoint`. You will use an arbitrary image out of the test (holdout) portion of the dataset as a test image. ``` test_image = x_test[0] test_label = y_test[0] print(test_image.shape) ``` ### Prepare the request content You are going to send the CIFAR10 image as compressed JPG image, instead of the raw uncompressed bytes: - `cv2.imwrite`: Use openCV to write the uncompressed image to disk as a compressed JPEG image. - `tf.io.read_file`: Read the compressed JPG images back into memory as raw bytes. - `base64.b64encode`: Encode the raw bytes into a base64-encoded string. ``` import base64 from PIL import Image image = Image.fromarray((test_image * 255).astype(np.uint8)) image.save("tmp.jpg") bytes = tf.io.read_file("tmp.jpg") b64str = base64.b64encode(bytes.numpy()).decode("utf-8") ``` ### Send the prediction request To send a prediction request, use the helper function `predict_image`, which takes the following parameters: - `image`: The test image data as a numpy array. - `endpoint`: The Vertex AI fully qualified identifier for the `Endpoint`. 
def predict_image(image, endpoint, parameters_dict):
    """Send one base64-encoded image to the prediction service and print the result.

    Each instance must conform to the deployed model's prediction input
    schema: {input_name: {"b64": <base64-encoded JPEG bytes>}}.
    """
    instances = [
        json_format.ParseDict({input_name: {"b64": image}}, Value())
    ]
    response = clients["prediction"].predict(
        endpoint=endpoint, instances=instances, parameters=parameters_dict
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    print("predictions")
    # See gs://google-cloud-aiplatform/schema/predict/prediction/classification.yaml
    # for the format of the predictions.
    for prediction in response.predictions:
        print(" prediction:", prediction)


predict_image(b64str, endpoint_name, None)
``` def undeploy_model(deployed_model_id, endpoint): response = clients["endpoint"].undeploy_model( endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={} ) print(response) undeploy_model(deployed_model_id, endpoint_name) ``` # Cleaning up To clean up all Google Cloud resources used in this project, you can [delete the project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial: - Dataset - Model - Endpoint - Cloud Storage Bucket ``` delete_dataset = True delete_model = True delete_endpoint = True delete_bucket = True # Delete the dataset using the Vertex AI fully qualified identifier for the dataset try: if delete_dataset: clients["dataset"].delete_dataset(name=dataset["name"]) except Exception as e: print(e) # Delete the model using the Vertex AI fully qualified identifier for the model try: if delete_model: clients["model"].delete_model(name=model_to_deploy_name) except Exception as e: print(e) # Delete the endpoint using the Vertex AI fully qualified identifier for the endpoint try: if delete_endpoint: clients["endpoint"].delete_endpoint(name=endpoint_name) except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! gsutil rm -r gs://$BUCKET_NAME # Collect any unclaimed memory import gc gc.collect() ```
github_jupyter
``` from pathlib import Path import pandas as pd import seaborn as sns import matplotlib.pyplot as plt tests = pd.read_csv('tests.csv') utility = pd.read_csv('utility.csv') train = pd.read_csv('train.csv') welfare = pd.read_csv('welfare.csv') train.update(tests) # Insert Revenue and Regret from test_data into train_data #train_data.update(test_data) # Separate data without DP to make face plots more readable #dp_data = train_data.query('Noise > 0').query('Clip > 0') #no_dp_data = train_data.query('Noise == 0').query('Clip == 0') midx_util = pd.MultiIndex.from_frame(utility.query("Agent == 0")[['Report','Utility']]) # Find max over all misreports util = utility.query("Report > 0").groupby(['Iter','Agent','Noise','Clip','Exp'])['Utility'].max() # Transform to df with index/columns util_data = pd.DataFrame(pd.DataFrame(util).to_records()) # Find min revenue over all misreports tr = train.groupby(['Iter','Noise','Clip','Exp'])['Revenue'].min() # Transform to df with index/columns train_data = pd.DataFrame(pd.DataFrame(tr).to_records()) # Find max regret over all misreports reg = utility.query("Report > 0").groupby(['Iter','Agent','Noise','Clip','Exp'])['Regret'].max() # Transform to df with index/columns reg_data = pd.DataFrame(pd.DataFrame(reg).to_records()) # Find max regret over all misreports reg_s = train.groupby(['Iter','Noise','Clip','Exp'])['Regret'].max() # Transform to df with index/columns reg_sum_data = pd.DataFrame(pd.DataFrame(reg_s).to_records()) # Find min welfare over all misreports wel = welfare.groupby(['Iter','Noise','Clip','Exp'])['Welfare'].min() # Transform to df with index/columns wel_data = pd.DataFrame(pd.DataFrame(wel).to_records()) ``` # Welfare ``` w = sns.FacetGrid(wel_data, col='Noise', row='Clip', hue='Exp') w = w.map(plt.plot, 'Iter', 'Welfare') ``` # Revenue ``` rev = sns.FacetGrid(train_data, col='Noise', row='Clip', hue='Exp') rev = rev.map(plt.plot, 'Iter', 'Revenue') ``` # Sum Regret TODO: Find out wether more agents make regret 
development more smooth TODO: Is sum regret important? ## TODO: regret calc correct? no noise case should be worse ``` reg_sum = sns.FacetGrid(reg_sum_data, col='Noise', hue='Exp') reg_sum = reg_sum.map(plt.plot, 'Iter', 'Regret') ``` # Regret Note: More hidden layers, but small item size -> faster convergence ``` reg = sns.FacetGrid(reg_data, col='Noise', row='Agent', hue='Exp') reg = reg.map(plt.plot, 'Iter', 'Regret') ``` # Utility Agent 0 ``` ag0 = sns.FacetGrid(util_data.query('Agent == 0'), col='Noise', row='Clip', hue='Exp') ag0 = ag0.map(plt.plot, 'Iter', 'Utility') ``` # Utility All Agents TODO: Group Utils of Agent0 with util clusters of other agents ``` a = sns.FacetGrid(util_data, col='Noise', row='Agent', hue='Exp') a = a.map(plt.plot, 'Iter', 'Utility') ```
github_jupyter
``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt ``` # Supervised Learning Part b - Decision Trees and Forests (optional) Here we'll explore a class of algorithms based on decision trees. Decision trees are at their root extremely intuitive. They encode a series of "if" and "else" choices, similar to how a person might make a decision. However, which questions to ask, and how to proceed for each answer is entirely learned from the data. For example, if you wanted to create a guide to identifying an animal found in nature, you might ask the following series of questions: - Is the animal bigger or smaller than a meter long? + *bigger*: does the animal have horns? - *yes*: are the horns longer than ten centimeters? - *no*: is the animal wearing a collar + *smaller*: does the animal have two or four legs? - *two*: does the animal have wings? - *four*: does the animal have a bushy tail? and so on. This binary splitting of questions is the essence of a decision tree. One of the main benefit of tree-based models is that they require little preprocessing of the data. They can work with variables of different types (continuous and discrete) and are invariant to scaling of the features. Another benefit is that tree-based models are what is called "nonparametric", which means they don't have a fix set of parameters to learn. Instead, a tree model can become more and more flexible, if given more data. In other words, the number of free parameters grows with the number of samples and is not fixed, as for example in linear models. ## Decision Tree Regression A decision tree is a simple binary classification tree that is similar to nearest neighbor classification. 
def make_dataset(n_samples=100):
    """Generate a noisy 1-D regression dataset: y = sin(4x) + x + noise.

    Args:
        n_samples: Number of evenly spaced points on [-3, 3].

    Returns:
        Tuple (x, y) of 1-D numpy arrays of length n_samples.
    """
    rnd = np.random.RandomState(42)  # fixed seed for reproducibility
    x = np.linspace(-3, 3, n_samples)
    y_no_noise = np.sin(4 * x) + x
    y = y_no_noise + rnd.normal(size=len(x))
    return x, y


def plot_2d_separator(classifier, X, fill=False, ax=None, eps=None):
    """Plot the decision boundary of a fitted binary classifier on 2-D data.

    Uses decision_function when the classifier provides one, otherwise falls
    back to predict_proba. With fill=True the two class regions are shaded;
    otherwise only the boundary contour is drawn.

    Args:
        classifier: Fitted binary classifier with decision_function or
            predict_proba.
        X: 2-D array of shape (n_samples, 2) used to set the plot extent.
        fill: Shade the two regions instead of drawing only the boundary.
        ax: Matplotlib axes to draw on; defaults to the current axes.
        eps: Margin added around the data extent; defaults to X.std() / 2.
    """
    if eps is None:
        eps = X.std() / 2.
    x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
    y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
    xx = np.linspace(x_min, x_max, 100)
    yy = np.linspace(y_min, y_max, 100)
    X1, X2 = np.meshgrid(xx, yy)
    X_grid = np.c_[X1.ravel(), X2.ravel()]
    try:
        decision_values = classifier.decision_function(X_grid)
        levels = [0]
        fill_levels = [decision_values.min(), 0, decision_values.max()]
    except AttributeError:
        # no decision_function: use the probability of the positive class
        decision_values = classifier.predict_proba(X_grid)[:, 1]
        levels = [.5]
        fill_levels = [0, .5, 1]
    if ax is None:
        ax = plt.gca()
    if fill:
        ax.contourf(X1, X2, decision_values.reshape(X1.shape),
                    levels=fill_levels, colors=['blue', 'red'])
    else:
        ax.contour(X1, X2, decision_values.reshape(X1.shape),
                   levels=levels, colors="black")
    ax.set_xlim(x_min, x_max)
    ax.set_ylim(y_min, y_max)
    ax.set_xticks(())
    ax.set_yticks(())


# Guard the demo so importing this module does not trigger fitting/plotting.
if __name__ == "__main__":
    # Regression demo: fit a shallow decision tree to the noisy sine data.
    x, y = make_dataset()
    X = x.reshape(-1, 1)
    plt.xlabel('Feature X')
    plt.ylabel('Target y')
    plt.scatter(X, y)

    from sklearn.tree import DecisionTreeRegressor

    reg = DecisionTreeRegressor(max_depth=5)
    reg.fit(X, y)
    X_fit = np.linspace(-3, 3, 1000).reshape((-1, 1))
    y_fit_1 = reg.predict(X_fit)
    plt.plot(X_fit.ravel(), y_fit_1, color='blue', label="prediction")
    plt.plot(X.ravel(), y, '.k', label="training data")
    plt.legend(loc="best")

    # Classification demo: decision boundary of a depth-5 tree on two blobs.
    from sklearn.datasets import make_blobs
    from sklearn.model_selection import train_test_split
    from sklearn.tree import DecisionTreeClassifier

    X, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=100)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    clf = DecisionTreeClassifier(max_depth=5)
    clf.fit(X_train, y_train)
    plot_2d_separator(clf, X, fill=True)
    plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, s=60, alpha=.7)
    plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, s=60)
The maximum depth a tree can be grown at for this dataset is 8, at which point each leave only contains samples from a single class. This is known as all leaves being "pure." In the interactive plot below, the regions are assigned blue and red colors to indicate the predicted class for that region. The shade of the color indicates the predicted probability for that class (darker = higher probability), while yellow regions indicate an equal predicted probability for either class. ``` from figures import plot_tree_interactive plot_tree_interactive() ``` Decision trees are fast to train, easy to understand, and often lead to interpretable models. However, single trees often tend to overfit the training data. Playing with the slider above you might notice that the model starts to overfit even before it has a good separation between the classes. Therefore, in practice it is more common to combine multiple trees to produce models that generalize better. The most common methods for combining trees are random forests and gradient boosted trees. ## Random Forests Random forests are simply many trees, built on different random subsets (drawn with replacement) of the data, and using different random subsets (drawn without replacement) of the features for each split. This makes the trees different from each other, and makes them overfit to different aspects. Then, their predictions are averaged, leading to a smoother estimate that overfits less. ``` from figures import plot_forest_interactive plot_forest_interactive() ``` ___ ## Exercise Use a decision tree or random forests to create a classifier for the ``breast_cancer`` dataset. ___
github_jupyter
[User Struggles &lt;](10_Struggles.ipynb) | [&gt; Use of Special Features](12_Magic.ipynb) # What can we learn about API design for data science? There are a lot of different ways of spelling out functionality in APIs and some of them are painful, while others are highly usable. We may be able to learn things about API design by looking at what APIs people are using and how. We can help to design good APIs by advising the granularity questions (lots of small objects/functions, or a few with lots or arguments)? ## Results Summary: - Code cells - On average, a code cell is 10.37 lines long (median = 6). The longest cell is 40,759 lines long. - Variables - On average, there are 5.15 object definitions in a notebook. Median = 0.0. (Among notebooks with at least one object, Median = 10.0) - Parameters - Across all function calls, there are an average of 1.057 arguments per function. - On average, a call to a user defined function has 1.65 parameters. - On average, a call to a non user-defined function has 1.017 parameters. - This is a statistically significant difference. We are 95% confident that the true average number of parameters in user-defined function calls is between 0.62 and 0.64 higher than the average number of parameters in non user-defined function calls. - Functions - Across all function calls, there are an average of 1.13 arguments per function. ----- # Import Packages and Load Data ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import datetime import math from collections import deque import scipy.stats as st import ast import astpretty import pickle import re import os import load_data ``` Load dataframes ``` notebooks_temp = load_data.load_notebooks() repos_temp = load_data.load_repos() ``` Load aggregated dataframes. Code used to create them is in [aggregate.py](aggregate.py). 
``` all_objects_df_temp = load_data.load_objects() cell_stats_df_temp = load_data.load_cell_stats() cell_types_df_temp = load_data.load_cell_types() function_defs_df_temp = load_data.load_function_defs() function_use_df_temp = load_data.load_function_use() all_objects_df_temp = load_data.load_objects() errors_df_temp = load_data.load_errors() nb_imports_temp = load_data.load_nb_imports() ``` Load list of lines of code per code cell. ``` lines_per_code_cell = load_data.load_lines() ``` Load statuses. For some analysis we will remove files that couldn't be parsed with the python abstract syntax tree. ``` statuses_df_temp = load_data.load_statuses() ``` --- # Tidy Data ### Only looking at Python notebooks ``` notebooks = notebooks_temp.copy()[notebooks_temp.lang_name == 'python'].reset_index(drop=True) print("{0:,} ({1}%) of notebooks were written in Python. The remaining {2}% have been removed.".format( len(notebooks), round(100*len(notebooks)/len(notebooks_temp), 2), round(100 - 100*len(notebooks)/len(notebooks_temp), 2) )) ``` ### Needed for some analysis: limit to notebooks that could be parsed with Python ast ``` statuses_df = statuses_df_temp.copy()[statuses_df_temp.syntax == True].reset_index(drop=True) notebooks_ast = notebooks.copy()[notebooks.file.isin(statuses_df.file)].reset_index(drop=True) print("{0}% of python notebooks were able to be parsed by Python AST.".format( round(100*len(notebooks_ast)/len(notebooks), 2), round(100 - 100*len(notebooks_ast)/len(notebooks), 2) )) print("{0}% of python 3 notebooks were able to be parsed by Python AST.".format( round(100*len(notebooks_ast)/len(notebooks[[str(l).startswith('3') for l in notebooks.lang_version]&(notebooks.lang_name == 'python')]), 2), round(100 - 100*len(notebooks_ast)/len(notebooks[[str(l).startswith('3') for l in notebooks.lang_version]&(notebooks.lang_name == 'python')]), 2) )) ``` ### Update repos and aggregated dataframe to reflect notebooks in question All python notebooks not in ipynb 
checkpoints ``` cell_stats_df = cell_stats_df_temp.copy()[cell_stats_df_temp.file.isin(notebooks.file)] cell_types_df = cell_types_df_temp.copy()[cell_types_df_temp.file.isin(notebooks.file)] repos = repos_temp.copy()[repos_temp.repo_id.isin(notebooks.repo_id)] errors_df = errors_df_temp.copy()[errors_df_temp.file.isin(notebooks.file)] nb_imports = nb_imports_temp.copy()[nb_imports_temp.file.isin(nb_imports_temp.file)] ``` Python notebooks in ipynb checkpoints that were able to be parsed ``` function_defs_df = function_defs_df_temp.copy()[function_defs_df_temp.file.isin(notebooks_ast.file)] function_use_df = function_use_df_temp.copy()[function_use_df_temp.file.isin(notebooks_ast.file)] all_objects_df = all_objects_df_temp.copy()[all_objects_df_temp.file.isin(notebooks_ast.file)] ``` ### Delete temp dataframes to save space ``` del notebooks_temp del repos_temp del cell_stats_df_temp del cell_types_df_temp del function_defs_df_temp del function_use_df_temp del all_objects_df_temp del errors_df_temp ``` --- # Manipulate Data Add num_errors to errors dataframe ``` errors_df['num_errors'] = [len(e) for e in errors_df['error_names']] ``` Add num_objects column to objects dataframe ``` all_objects_df['num_objects'] = [len(obj) for obj in all_objects_df['objects']] ``` Group function definitions by notebook ``` function_defs_stats_df = function_defs_df.groupby('file')['function'].count().reset_index().merge( function_defs_df.groupby('file')['parameters'].sum().reset_index(), on = 'file' ) ``` --- # Visualizations and Statistics ## How long are code cells? ``` pd.Series(lines_per_code_cell).aggregate(['mean','median','min','max']) plt.hist(lines_per_code_cell, bins = range(50), color = 'teal') plt.xlim(0,50) plt.xlabel('Lines of Code') plt.ylabel('Number of Cells') plt.title('Code Cell Length') plt.show() ``` On average, code cells have 10.30 lines of code. The typical code cell has 6 lines of code (median). ## What is a typical number of objects in a notebook? 
Calculate summary statistics for the number of objects in each notebook. Only consider 'name' assigments as objects. Setting the value in a list or data frame (subscript) and altering the attributes of an object (attribute) should not count as object assignments ``` mean_objs = all_objects_df.num_objects.mean() median_objs = all_objects_df.num_objects.median() median_objs_with = all_objects_df[all_objects_df!=0].num_objects.median() print('On average, among notebooks that were able to be parsed with Python abstract syntax tree, there are {0} object definitions in a notebook. Median = {1}. (Among notebooks with at least one object, Median = {2})'.format( round(mean_objs, 2), median_objs, median_objs_with )) plt.hist(all_objects_df.num_objects, bins = 20, color='teal') plt.title('What is a typical number of objects in a notebook?') plt.xlabel('Number of Objects') plt.ylabel('Number of Notebooks') plt.yscale('log') plt.show() ``` ## How many functions are called? ``` function_use_df['unique_user_def'] = [len(set(user_def)) for user_def in function_use_df.user_def] function_use_df['unique_not_user_def'] = [len(set(not_user_def)) for not_user_def in function_use_df.not_user_def] function_use_df['unique'] = function_use_df['unique_user_def'] + function_use_df['unique_not_user_def'] print('There are an average of {0} unique functions called in each notebook (median = {1}).'.format( round(function_use_df.unique.mean(), 2), function_use_df.unique.median() )) print('There are an average of {0} unique user-defined functions called in each notebook (median = {1}).'.format( round(function_use_df.unique_user_def.mean(), 2), function_use_df.unique_user_def.median() )) print('There are an average of {0} unique not user-defined functions called in each notebook (median = {1}).'.format( round(function_use_df.unique_not_user_def.mean(), 2), function_use_df.unique_not_user_def.median() )) fig = plt.figure(figsize = (6, 3)) plt.subplot(1,2,1) 
plt.hist(function_use_df[function_use_df.unique_not_user_def < 100].unique_not_user_def, color = 'teal', bins = 20) plt.ylim(0, 600000) plt.title('Not user define functions') plt.ylabel('Number of notebooks') plt.xlabel('Number of functions') plt.subplot(1,2,2) plt.hist(function_use_df[function_use_df.unique_user_def < 100].unique_user_def, color = 'navy', bins = 20) plt.ylim(0, 600000) plt.yticks([],[]) plt.title('User defined functions') plt.xlabel('Number of functions') plt.tight_layout() plt.show() print("{0} ({1}%) notebooks have no user defined functions.".format( sum(function_use_df.unique_user_def == 0), round(100*sum(function_use_df.unique_user_def == 0)/len(function_use_df)) )) ``` ### Is number of functions used associated with number of errors in a notebook? ``` errors_funcs_df = errors_df[['file','num_errors']].merge(function_use_df[['file','unique','unique_user_def','parameters']], on = 'file') errors_funcs_df[['num_errors','unique']].corr() ``` The very weak correlation of 0.019 provides no evidence that the number of function calls in a notebook is associated with the number of errors in a notebook. ### Is number of functions defined associated with number of errors in a notebook? ``` errors_funcs_df[['num_errors','unique_user_def']].corr() ``` The very weak correlation of 0.01 provides no evidence that the number of user defined functions in a notebook is associated with the number of errors in a notebook. ### Is the average number of parameters associated wth the number of errors in a notebook? ``` errors_funcs_df['avg_params'] = [sum(p)/len(p) if len(p) > 0 else None for p in errors_funcs_df.parameters] errors_funcs_df[['num_errors','avg_params']].corr() ``` The very weak correlation of -0.004 provides no evidence that the average number of parameters of function calls in a notebook is associated with the number of errors in a notebook. ## How many arguments are typical to pass into functions? 
``` # 35 seconds all_params = load_data.flatten(function_use_df.parameters) print("Across all function calls, there are an average of {0} arguments per function.".format( round(pd.Series(all_params).mean(), 2) )) param_counts = pd.Series(all_params).value_counts().reset_index().rename( columns={'index':'Arguments',0:'Count'} ) plt.hist(pd.Series(all_params), bins = range(25), color = 'teal') plt.xlabel('Arguments') plt.xlim(-1, 20) plt.ylim(0, 35000000) plt.yticks(range(0, 35000000, 5000000), range(0, 35, 5)) plt.xticks(range(25)) plt.title('How many arguments are passed into functions?') plt.ylabel('Number of Function Calls\n(millions)') plt.show() ``` ### Parameters of user-defined functions #### Based on Definitions ``` # # 2 min # start = datetime.datetime.now() # function_defs_stats_df['avg_params'] = [ # row.parameters / row.function # if row.function != 0 else 0 # for _, row in function_defs_stats_df.iterrows() # ] # end = datetime.datetime.now() # print(end - start) # user_mean_params = function_defs_stats_df.avg_params.mean() # print("On average, a user defined function has {0} parameters.".format( # round(user_mean_params, 2) # )) plt.hist( function_defs_df.parameters, color = 'teal', bins = range(25) ) plt.xticks(range(25)) plt.xlim(-1, 20) plt.ylim(0, 2500000) plt.yticks(range(0, 2500000, 500000), pd.Series(range(0, 25, 5))/10) plt.title('How many arguments are in user defined functions?') plt.xlabel('Arguments') plt.ylabel('Number of Functions\n(millions)') plt.show() ``` [User Struggles &lt;](10_Struggles.ipynb) | [&gt; Use of Special Features](12_Magic.ipynb)
github_jupyter
# Fully Bayesian inference for generalized GP models with HMC *James Hensman, 2015-16* Converted to candlegp *Thomas Viehmann* It's possible to construct very flexible models with Gaussian processes by combining them with different likelihoods (sometimes called 'families' in the GLM literature). This makes inference of the GP intractable since the likelihood is not generally conjugate to the Gaussian process. The general form of the model is $$\theta \sim p(\theta)\\f \sim \mathcal {GP}(m(x; \theta),\, k(x, x'; \theta))\\y_i \sim p(y | g(f(x_i)))\,.$$ To perform inference in this model, we'll run MCMC using Hamiltonian Monte Carlo (HMC) over the function-values and the parameters $\theta$ jointly. Key to an effective scheme is rotation of the field using the Cholesky decomposition. We write $$\theta \sim p(\theta)\\v \sim \mathcal {N}(0,\, I)\\LL^\top = K\\f = m + Lv\\y_i \sim p(y | g(f(x_i)))\,.$$ Joint HMC over v and the function values is not widely adopted in the literature because of the difficulty in differentiating $LL^\top=K$. We've made this derivative available in tensorflow, and so application of HMC is relatively straightforward. ### Exponential Regression example The first illustration in this notebook is 'Exponential Regression'. The model is $$\theta \sim p(\theta)\\f \sim \mathcal {GP}(0, k(x, x'; \theta))\\f_i = f(x_i)\\y_i \sim \mathcal {Exp} (e^{f_i})$$ We'll use MCMC to deal with both the kernel parameters $\theta$ and the latent function values $f$. First, generate a data set. ``` import sys, os sys.path.append(os.path.join(os.getcwd(),'..')) import candlegp import candlegp.training.hmc import numpy import torch from torch.autograd import Variable from matplotlib import pyplot pyplot.style.use('ggplot') %matplotlib inline X = Variable(torch.linspace(-3,3,20,out=torch.DoubleTensor())) Y = Variable(torch.from_numpy(numpy.random.exponential(((X.data.sin())**2).numpy()))) ``` GPflow's model for fully-Bayesian MCMC is called GPMC.
It's constructed like any other model, but contains a parameter `V` which represents the centered values of the function. ``` #build the model k = candlegp.kernels.Matern32(1,ARD=False).double() + candlegp.kernels.Bias(1).double() l = candlegp.likelihoods.Exponential() m = candlegp.models.GPMC(X[:,None], Y[:,None], k, l) m ``` The `V` parameter already has a prior applied. We'll add priors to the parameters also (these are rather arbitrary, for illustration). ``` m.kern.kern_list[0].lengthscales.prior = candlegp.priors.Gamma(1., 1., ttype=torch.DoubleTensor) m.kern.kern_list[0].variance.prior = candlegp.priors.Gamma(1.,1., ttype=torch.DoubleTensor) m.kern.kern_list[1].variance.prior = candlegp.priors.Gamma(1.,1., ttype=torch.DoubleTensor) m.V.prior = candlegp.priors.Gaussian(0.,1., ttype=torch.DoubleTensor) m ``` Running HMC is as easy as hitting m.sample(). GPflow only has HMC sampling for the moment, and it's a relatively vanilla implementation (no NUTS, for example). There are two setting to tune, the step size (epsilon) and the maximum noumber of steps Lmax. Each proposal will take a random number of steps between 1 and Lmax, each of length epsilon. We'll use the `verbose` setting so that we can see the acceptance rate. 
``` # start near MAP opt = torch.optim.LBFGS(m.parameters(), lr=1e-2, max_iter=40) def eval_model(): obj = m() opt.zero_grad() obj.backward() return obj for i in range(50): obj = m() opt.zero_grad() obj.backward() opt.step(eval_model) if i%5==0: print(i,':',obj.data[0]) m res = candlegp.training.hmc.hmc_sample(m,500,0.2,burn=50, thin=10) xtest = torch.linspace(-4,4,100).double().unsqueeze(1) f_samples = [] for i in range(len(res[0])): for j,mp in enumerate(m.parameters()): mp.set(res[j+1][i]) f_samples.append(m.predict_f_samples(Variable(xtest), 5).squeeze(0).t()) f_samples = torch.cat(f_samples, dim=0) rate_samples = torch.exp(f_samples) pyplot.figure(figsize=(12, 6)) line, = pyplot.plot(xtest.numpy(), rate_samples.data.mean(0).numpy(), lw=2) pyplot.fill_between(xtest[:,0], numpy.percentile(rate_samples.data.numpy(), 5, axis=0), numpy.percentile(rate_samples.data.numpy(), 95, axis=0), color=line.get_color(), alpha = 0.2) pyplot.plot(X.data.numpy(), Y.data.numpy(), 'kx', mew=2) pyplot.ylim(-0.1, numpy.max(numpy.percentile(rate_samples.data.numpy(), 95, axis=0))) import pandas df = pandas.DataFrame(res[1:],index=[n for n,p in m.named_parameters()]).transpose() df[:10] df["kern.kern_list.1.variance"].apply(lambda x: x[0]).hist(bins=20) ``` # Sparse Version Do the same with sparse: ``` Z = torch.linspace(-3,3,5).double().unsqueeze(1) k2 = candlegp.kernels.Matern32(1,ARD=False).double() + candlegp.kernels.Bias(1).double() l2 = candlegp.likelihoods.Exponential() m2 = candlegp.models.SGPMC(X[:,None], Y[:,None], k2, l2, Z) m2.kern.kern_list[0].lengthscales.prior = candlegp.priors.Gamma(1., 1., ttype=torch.DoubleTensor) m2.kern.kern_list[0].variance.prior = candlegp.priors.Gamma(1.,1., ttype=torch.DoubleTensor) m2.kern.kern_list[1].variance.prior = candlegp.priors.Gamma(1.,1., ttype=torch.DoubleTensor) m2.V.prior = candlegp.priors.Gaussian(0.,1., ttype=torch.DoubleTensor) m2 # start near MAP opt = torch.optim.LBFGS(m2.parameters(), lr=1e-2, max_iter=40) def eval_model(): 
obj = m2() opt.zero_grad() obj.backward() return obj for i in range(50): obj = m2() opt.zero_grad() obj.backward() opt.step(eval_model) if i%5==0: print(i,':',obj.data[0]) m2 res = candlegp.training.hmc.hmc_sample(m,500,0.2,burn=50, thin=10) xtest = torch.linspace(-4,4,100).double().unsqueeze(1) f_samples = [] for i in range(len(res[0])): for j,mp in enumerate(m.parameters()): mp.set(res[j+1][i]) f_samples.append(m.predict_f_samples(Variable(xtest), 5).squeeze(0).t()) f_samples = torch.cat(f_samples, dim=0) rate_samples = torch.exp(f_samples) pyplot.figure(figsize=(12, 6)) line, = pyplot.plot(xtest.numpy(), rate_samples.data.mean(0).numpy(), lw=2) pyplot.fill_between(xtest[:,0], numpy.percentile(rate_samples.data.numpy(), 5, axis=0), numpy.percentile(rate_samples.data.numpy(), 95, axis=0), color=line.get_color(), alpha = 0.2) pyplot.plot(X.data.numpy(), Y.data.numpy(), 'kx', mew=2) pyplot.plot(m2.Z.get().data.numpy(),numpy.zeros(m2.num_inducing),'o') pyplot.ylim(-0.1, numpy.max(numpy.percentile(rate_samples.data.numpy(), 95, axis=0))) ```
github_jupyter
# Batch Normalization Training deep models is difficult and getting them to converge in a reasonable amount of time can be tricky. In this section, we describe batch normalization, one popular and effective technique that has been found to accelerate the convergence of deep nets and ([together with residual blocks, which we cover next](resnet.md)) has recently enabled practitioners to routinely train networks with over 100 layers. ## Training Deep Networks Let's review some of the practical challenges when training deep networks. 1. Data preprocessing often proves to be a crucial consideration for effective statistical modeling. Recall our application of deep networks to [predicting house prices](../chapter_deep-learning-basics/kaggle-house-price.md). In that example, we standardized our input features to each have a mean of *zero* and variance of *one*. Standardizing input data typically makes it easier to train models since parameters are a-priori at a similar scale. 1. For a typical MLP or CNN, as we train the model, the activations in intermediate layers of the network may assume different orders of magnitude (both across nodes in the same layer, and over time due to updating the model's parameters). The authors of the batch normalization technique postulated that this drift in the distribution of activations could hamper the convergence of the network. Intuitively, we might conjecture that if one layer has activation values that are 100x that of another layer, we might need to adjust learning rates adaptively per layer (or even per node within a layer). 1. Deeper networks are complex and easily capable of overfitting. This means that regularization becomes more critical. Empirically, we note that even with dropout, models can overfit badly and we might benefit from other regularization heuristics. 
In 2015, [Ioffe and Szegedy introduced Batch Normalization (BN)](https://arxiv.org/abs/1502.03167), a clever heuristic that has proved immensely useful for improving the reliability and speed of convergence when training deep models. In each training iteration, BN normalizes the activations of each hidden layer node (on each layer where it is applied) by subtracting its mean and dividing by its standard deviation, estimating both based on the current minibatch. Note that if our batch size was $1$, we wouldn't be able to learn anything because during training, every hidden node would take value $0$. However, with large enough minibatches, the approach proves effective and stable. In a nutshell, the idea in Batch Normalization is to transform the activation at a given layer from $\mathbf{x}$ to $$\mathrm{BN}(\mathbf{x}) = \mathbf{\gamma} \odot \frac{\mathbf{x} - \hat{\mathbf{\mu}}}{\hat\sigma} + \mathbf{\beta}$$ Here, $\hat{\mathbf{\mu}}$ is the estimate of the mean and $\hat{\mathbf{\sigma}}$ is the estimate of the variance. The result is that the activations are approximately rescaled to zero mean and unit variance. Since this may not be quite what we want, we allow for a coordinate-wise scaling coefficient $\mathbf{\gamma}$ and an offset $\mathbf{\beta}$. Consequently, the activations for intermediate layers cannot diverge any longer: we are actively rescaling them back to a given order of magnitude via $\mathbf{\mu}$ and $\sigma$. Intuitively, it is hoped that this normalization allows us to be more aggressive in picking large learning rates. To address the fact that in some cases the activations may actually *need* to differ from standardized data, BN also introduces scaling coefficients $\mathbf{\gamma}$ and an offset $\mathbf{\beta}$. In principle, we might want to use all of our training data to estimate the mean and variance. However, the activations corresponding to each example change each time we update our model.
To remedy this problem, BN uses only the current minibatch for estimating $\hat{\mathbf{\mu}}$ and $\hat\sigma$. It is precisely due to this fact that we normalize based only on the *current batch* that *batch normalization* derives its name. To indicate which minibatch $\mathcal{B}$ we draw this from, we denote the quantities with $\hat{\mathbf{\mu}}_\mathcal{B}$ and $\hat\sigma_\mathcal{B}$. $$\hat{\mathbf{\mu}}_\mathcal{B} \leftarrow \frac{1}{|\mathcal{B}|} \sum_{\mathbf{x} \in \mathcal{B}} \mathbf{x} \text{ and } \hat{\mathbf{\sigma}}_\mathcal{B}^2 \leftarrow \frac{1}{|\mathcal{B}|} \sum_{\mathbf{x} \in \mathcal{B}} (\mathbf{x} - \mathbf{\mu}_{\mathcal{B}})^2 + \epsilon$$ Note that we add a small constant $\epsilon > 0$ to the variance estimate to ensure that we never end up dividing by zero, even in cases where the empirical variance estimate might vanish by accident. The estimates $\hat{\mathbf{\mu}}_\mathcal{B}$ and $\hat{\mathbf{\sigma}}_\mathcal{B}$ counteract the scaling issue by using unbiased but noisy estimates of mean and variance. Normally we would consider this a problem. After all, each minibatch has different data, different labels and with it, different activations, predictions and errors. As it turns out, this is actually beneficial. This natural variation appears to act as a form of regularization, conferring benefits (as observed empirically) in mitigating overfitting. In other recent preliminary research, [Teye, Azizpour and Smith, 2018](https://arxiv.org/pdf/1802.06455.pdf) and [Luo et al, 2018](https://arxiv.org/pdf/1809.00846.pdf) relate the properties of BN to Bayesian Priors and penalties respectively. In particular, this sheds some light on the puzzle why BN works best for moderate sizes of minibatches in the range 50-100. We are now ready to take a look at how batch normalization works in practice. ## Batch Normalization Layers The batch normalization methods for fully-connected layers and convolutional layers are slightly different.
This is due to the dimensionality of the data generated by convolutional layers. We discuss both cases below. Note that one of the key differences between BN and other layers is that BN operates on a full minibatch at a time (otherwise it cannot compute the mean and variance parameters per batch). ### Fully-Connected Layers Usually we apply the batch normalization layer between the affine transformation and the activation function in a fully-connected layer. In the following, we denote by $\mathbf{u}$ the input and by $\mathbf{x} = \mathbf{W}\mathbf{u} + \mathbf{b}$ the output of the linear transform. This yields the following variant of BN: $$\mathbf{y} = \phi(\mathrm{BN}(\mathbf{x})) = \phi(\mathrm{BN}(\mathbf{W}\mathbf{u} + \mathbf{b}))$$ Recall that mean and variance are computed on the *same* minibatch $\mathcal{B}$ on which the transformation is applied. Also recall that the scaling coefficient $\mathbf{\gamma}$ and the offset $\mathbf{\beta}$ are parameters that need to be learned. They ensure that the effect of batch normalization can be neutralized as needed. ### Convolutional Layers For convolutional layers, batch normalization occurs after the convolution computation and before the application of the activation function. If the convolution computation outputs multiple channels, we need to carry out batch normalization for *each* of the outputs of these channels, and each channel has an independent scale parameter and shift parameter, both of which are scalars. Assume that there are $m$ examples in the mini-batch. On a single channel, we assume that the height and width of the convolution computation output are $p$ and $q$, respectively. We need to carry out batch normalization for $m \times p \times q$ elements in this channel simultaneously. While carrying out the standardization computation for these elements, we use the same mean and variance.
In other words, we use the means and variances of the $m \times p \times q$ elements in this channel rather than one per pixel. ### Batch Normalization During Prediction At prediction time, we might not have the luxury of computing offsets per batch—we might be required to make one prediction at a time. Secondly, the uncertainty in $\mathbf{\mu}$ and $\mathbf{\sigma}$, as arising from a minibatch are undesirable once we've trained the model. One way to mitigate this is to compute more stable estimates on a larger set for once (e.g. via a moving average) and then fix them at prediction time. Consequently, BN behaves differently during training and at test time (recall that dropout also behaves differently at train and test times). ## Implementation from Scratch Next, we will implement the batch normalization layer with `torch.Tensor` from scratch: ``` import sys sys.path.insert(0, '..') import d2l import torch import torch.nn as nn def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum): # Use torch.is_grad_enabled() to determine whether the current mode is training mode or # prediction mode if not torch.is_grad_enabled(): # If it is the prediction mode, directly use the mean and variance # obtained from the incoming moving average X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps) else: assert len(X.shape) in (2, 4) if len(X.shape) == 2: # When using a fully connected layer, calculate the mean and # variance on the feature dimension mean = X.mean(dim=0) var = ((X - mean) ** 2).mean(dim=0) else: # When using a two-dimensional convolutional layer, calculate the # mean and variance on the channel dimension (axis=1). 
Here we # need to maintain the shape of X, so that the broadcast operation # can be carried out later mean = X.mean(dim=(0, 2, 3), keepdim=True) var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True) # In training mode, the current mean and variance are used for the # standardization X_hat = (X - mean) / torch.sqrt(var + eps) # Update the mean and variance of the moving average moving_mean = momentum * moving_mean + (1.0 - momentum) * mean moving_var = momentum * moving_var + (1.0 - momentum) * var Y = gamma * X_hat + beta # Scale and shift return Y, moving_mean, moving_var ``` Now, we can customize a `BatchNorm` layer. This retains the scale parameter `gamma` and the shift parameter `beta` involved in gradient finding and iteration, and it also maintains the mean and variance obtained from the moving average, so that they can be used during model prediction. The `num_features` parameter required by the `BatchNorm` instance is the number of outputs for a fully-connected layer and the number of output channels for a convolutional layer. The `num_dims` parameter also required by this instance is 2 for a fully-connected layer and 4 for a convolutional layer. Besides the algorithm per se, also note the design pattern in implementing layers. Typically one defines the math in a separate function, say `batch_norm`. This is then integrated into a custom layer that mostly focuses on bookkeeping, such as moving data to the right device context, ensuring that variables are properly initialized, keeping track of the running averages for mean and variance, etc. That way we achieve a clean separation of math and boilerplate code. We have to specify the number of features throughout. 
``` class BatchNorm(nn.Module): def __init__(self, num_features, num_dims, **kwargs): super(BatchNorm, self).__init__(**kwargs) if num_dims == 2: shape = (1, num_features) else: shape = (1, num_features, 1, 1) # The scale parameter and the shift parameter involved in gradient # finding and iteration are initialized to 0 and 1 respectively self.gamma = nn.Parameter(torch.ones(shape)) self.beta = nn.Parameter(torch.zeros(shape)) # All the variables not involved in gradient finding and iteration are # initialized to 0 on the CPU self.moving_mean = torch.zeros(shape) self.moving_var = torch.zeros(shape) def forward(self, X): # If X is not on the CPU, copy moving_mean and moving_var to the # device where X is located if self.moving_mean.device != X.device: self.moving_mean = self.moving_mean.to(X.device) self.moving_var = self.moving_var.to(X.device) # Save the updated moving_mean and moving_var Y, self.moving_mean, self.moving_var = batch_norm( X, self.gamma, self.beta, self.moving_mean, self.moving_var, eps=1e-5, momentum=0.9) return Y ``` ## Use a Batch Normalization LeNet Next, we will modify the LeNet model in order to apply the batch normalization layer. We add the batch normalization layer after all the convolutional layers and after all fully-connected layers. As discussed, we add it before the activation layer. ``` class Flatten(nn.Module): def forward(self, input): return input.view(input.size(0), -1) net = nn.Sequential(nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(), nn.MaxPool2d(kernel_size=2, stride=2), Flatten(), nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(), nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(), nn.Linear(84, 10)) ``` Next we train the modified model, again on Fashion-MNIST. The code is virtually identical to that in previous steps. 
The main difference is the considerably larger learning rate. ``` lr, num_epochs, batch_size, device = 1, 5, 256, d2l.try_gpu() #Initialization of Weights def init_weights(m): if type(m) == nn.Linear or type(m) == nn.Conv2d: torch.nn.init.xavier_uniform_(m.weight) net.apply(init_weights) criterion = nn.CrossEntropyLoss() train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch5(net, train_iter, test_iter, criterion, num_epochs, batch_size, device, lr) ``` Let's have a look at the scale parameter `gamma` and the shift parameter `beta` learned from the first batch normalization layer. ``` list(net.children())[1].gamma.reshape((-1,)), list(net.children())[1].beta.reshape((-1,)) ``` ## Concise Implementation Compared with the `BatchNorm` class, which we just defined ourselves, the `_BatchNorm` class defined by the `nn.modules.batchnorm` model in Pytorch is easier to use. We have `nn.BatchNorm1d` and `nn.BatchNorm2d` for num_dims= 2 and 4 respectively. The number of features is to be passed as argument. Instead, these parameter values will be obtained automatically by delayed initialization. The code looks virtually identical (save for the lack of an explicit specification of the dimensionality of the features for the Batch Normalization layers). ``` net = nn.Sequential(nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(), nn.MaxPool2d(kernel_size=2, stride=2), Flatten(), nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(), nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(), nn.Linear(84, 10)) ``` Use the same hyper-parameter to carry out the training. Note that as usual, the Pytorch variant runs much faster since its code has been compiled to C++/CUDA vs our custom implementation, which must be interpreted by Python. 
``` lr, num_epochs, batch_size, device = 1, 5, 256, d2l.try_gpu() #Initialization of Weights def init_weights(m): if type(m) == nn.Linear or type(m) == nn.Conv2d: torch.nn.init.xavier_uniform_(m.weight) net.apply(init_weights) criterion = nn.CrossEntropyLoss() train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch5(net, train_iter, test_iter, criterion, num_epochs, batch_size, device, lr) ``` ## Controversy Intuitively, batch normalization is thought to somehow make the optimization landscape smoother. However, we must be careful to distinguish between speculative intuitions and true explanations for the phenomena that we observe when training deep models. Recall that we do not even know why simpler deep neural networks (MLPs and conventional CNNs) generalize so well. Despite dropout and L2 regularization, they remain too flexible to admit conventional learning-theoretic generalization guarantees. In the original paper proposing batch normalization, the authors, in addition to introducing a powerful and useful tool offered an explanation for why it works: by reducing *internal covariate shift*. Presumably by *internal covariate shift* the authors meant something like the intuition expressed above---the notion that the distribution of activations changes over the course of training. However there were two problems with this explanation: (1) This drift is very different from *covariate shift*, rendering the name a misnomer. (2) The explanation remains ill-defined (and thus unproven)---rendering *why precisely this technique works* an open question. Throughout this book we aim to convey the intuitions that practitioners use to guide their development of deep neural networks. However, it's important to separate these guiding heuristics from established scientific fact. Eventually, when you master this material and start writing your own research papers you will want to be clear to delineate between technical claims and hunches.
Following the success of batch normalization, its explanation via *internal covariate shift* became a hot topic that has been revisited several times both in the technical literature and in the broader discourse about how machine learning research ought to be presented. Ali Rahimi popularly raised this issue during a memorable speech while accepting a Test of Time Award at the NeurIPS conference in 2017 and the issue was revisited in a recent position paper on troubling trends in machine learning ([Lipton et al., 2018](https://arxiv.org/abs/1807.03341)). In the technical literature other authors ([Santurkar et al., 2018](https://arxiv.org/abs/1805.11604)) have proposed alternative explanations for the success of BN, some claiming that BN's success comes despite exhibiting behavior that is in some ways opposite to those claimed in the original paper. ## Summary * During model training, batch normalization continuously adjusts the intermediate output of the neural network by utilizing the mean and standard deviation of the mini-batch, so that the values of the intermediate output in each layer throughout the neural network are more stable. * The batch normalization methods for fully connected layers and convolutional layers are slightly different. * Like a dropout layer, batch normalization layers have different computation results in training mode and prediction mode. * Batch Normalization has many beneficial side effects, primarily that of regularization. On the other hand, the original motivation of reducing covariate shift seems not to be a valid explanation. ## Exercises 1. Can we remove the fully connected affine transformation before the batch normalization or the bias parameter in convolution computation? * Find an equivalent transformation that applies prior to the fully connected layer. * Is this reformulation effective? Why (not)? 1. Compare the learning rates for LeNet with and without batch normalization. * Plot the decrease in training and test error.
* What about the region of convergence? How large can you make the learning rate? 1. Do we need Batch Normalization in every layer? Experiment with it. 1. Can you replace Dropout by Batch Normalization? How does the behavior change? 1. Fix the coefficients `beta` and `gamma` (add the parameter `grad_req='null'` at the time of construction to avoid calculating the gradient), and observe and analyze the results. 1. Review the PyTorch documentation for `_BatchNorm` to see the other applications for Batch Normalization. 1. Research ideas - think of other normalization transforms that you can apply. Can you apply the probability integral transform? How about a full rank covariance estimate? ## References [1] Ioffe, S., & Szegedy, C. (2015). Batch normalization: Accelerating deep network training by reducing internal covariate shift. arXiv preprint arXiv:1502.03167.
github_jupyter
``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import category_encoders as ce from sklearn.impute import SimpleImputer from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error from sklearn.linear_model import LinearRegression from sklearn.linear_model import Ridge from sklearn.experimental import enable_iterative_imputer # now you can import normally from sklearn.impute from sklearn.impute import IterativeImputer # Merge train_features.csv & train_labels.csv train = pd.merge(pd.read_csv('https://raw.githubusercontent.com/JackRossProjects/Dengue-Fever-Predictive-Modeling/master/DengueFeverData/dengue_features_train.csv'), pd.read_csv('https://raw.githubusercontent.com/JackRossProjects/Dengue-Fever-Predictive-Modeling/master/DengueFeverData/dengue_labels_train.csv')) # Read test_features.csv & sample_submission.csv test = pd.read_csv('https://raw.githubusercontent.com/JackRossProjects/Dengue-Fever-Predictive-Modeling/master/DengueFeverData/dengue_features_test.csv') sample_submission = pd.read_csv('https://raw.githubusercontent.com/JackRossProjects/Dengue-Fever-Predictive-Modeling/master/DengueFeverData/submission_format.csv') train, val = train_test_split(train, test_size=len(test), random_state=42) sj_train = train.loc[train['city'] == 'sj'] iq_train = train.loc[train['city'] == 'iq'] sj_test = test.loc[test['city'] == 'sj'] iq_test = test.loc[test['city'] == 'iq'] sj_val = val.loc[val['city'] == 'sj'] iq_val = val.loc[val['city'] == 'iq'] fig, ax = plt.subplots() sns.lineplot(x='week_start_date', y='total_cases', data=sj_train, ax=ax, color='r', label='Total Cases (San Juan)') ax2 = ax.twinx() sns.lineplot(x='week_start_date', y='total_cases', data=iq_train, ax=ax2, color='black', label='Total Cases (Iquitos)') 
ax.set_xlabel('Time', fontsize=14) ax.set_ylabel('Total Cases (San Juan)', fontsize=18) ax2.set_ylabel('Total Cases (Iquitos)', fontsize=18) ax2.set_title('Total Dengue Fever Cases', fontsize=20) ax.legend(loc='upper left') #ax.legend(["Total Cases (San Juan)"], loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1) #ax2.legend(["Total Cases (Iquitos)"], loc='upper right', fancybox=True, framealpha=1, shadow=True, borderpad=1) ax.set_xticks([]) ax = plt.gca() plt.show(); from statsmodels.tsa.arima_model import ARIMA from statsmodels.tsa.stattools import adfuller import statsmodels.api as sm X = pd.read_csv("https://raw.githubusercontent.com/JackRossProjects/Dengue-Fever-Predictive-Modeling/master/DengueFeverData/dengue_features_train.csv") y = pd.read_csv("https://raw.githubusercontent.com/JackRossProjects/Dengue-Fever-Predictive-Modeling/master/DengueFeverData/dengue_labels_train.csv") #setting a datetime index y.index = pd.DatetimeIndex(X.week_start_date) #seperating the data for the 2 cities sj_cases = y.total_cases[y.city == 'sj'] iq_cases = y.total_cases[y.city == 'iq'] #month sampling sj_monthly = sj_cases.resample('M').sum() iq_monthly = iq_cases.resample('M').sum() fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(sj_monthly, lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(sj_monthly, lags=40, ax=ax2) ``` - AR: There is significant autocorrelation at lags upto 3 or even 4 - I: Periodicity occurs at every 12th month, which is expected - MA: There is a clear partial auto correlation until lag 3 ``` fig, ax = plt.subplots() sns.lineplot(x='weekofyear', y='total_cases', data=sj_train, ax=ax, color='r', label='Total Cases (San Juan)') ax2 = ax.twinx() sns.lineplot(x='weekofyear', y='total_cases', data=iq_train, ax=ax2, color='black', label='Total Cases (Iquitos)') ax.set_xlabel('Week', fontsize=14) ax.set_ylabel('Total Cases (San Juan)', fontsize=18) ax2.set_ylabel('Total 
Cases (Iquitos)', fontsize=18) ax2.set_title('Total Dengue Fever Cases (Per Week)', fontsize=20) ax.legend(loc='upper left') #ax.legend(["Total Cases (San Juan)"], loc='upper left', fancybox=True, framealpha=1, shadow=True, borderpad=1) #ax2.legend(["Total Cases (Iquitos)"], loc='upper right', fancybox=True, framealpha=1, shadow=True, borderpad=1) ax = plt.gca() plt.show(); ## SEASONS - I think seasonality could play into total cases of Dengue because the disease spreads from mosquitos # that thrive in hot/wet conditions. # The climate of Puerto Rico is tropical, hot all year round, with a hot and muggy season from May to October # and a relatively cool season from December to March, with November and April as intermediate months. # Peru has two seasons owing to its proximity to the equator. These are not traditionally known as summer and # winter, but as the rainy/wet season ('summer') which runs from December to March, and the dry season ('winter') # which runs from May to September. sj_train['month'] = pd.DatetimeIndex(sj_train['week_start_date']).month sj_test['month'] = pd.DatetimeIndex(sj_test['week_start_date']).month sj_val['month'] = pd.DatetimeIndex(sj_val['week_start_date']).month iq_val['month'] = pd.DatetimeIndex(iq_val['week_start_date']).month iq_train['month'] = pd.DatetimeIndex(iq_train['week_start_date']).month iq_test['month'] = pd.DatetimeIndex(iq_test['week_start_date']).month fig, ax = plt.subplots() sns.lineplot(x='month', y='total_cases', data=sj_train, ax=ax, color='r', label='Total Cases (San Juan)') ax2 = ax.twinx() sns.lineplot(x='month', y='total_cases', data=iq_train, ax=ax2, color='black', label='Total Cases (Iquitos)') ax.set_xlabel('Month', fontsize=14) ax.set_ylabel('Total Cases (San Juan)', fontsize=18) ax2.set_ylabel('Total Cases (Iquitos)', fontsize=18) ax2.set_title('Total Dengue Fever Cases (Per Month)', fontsize=20) ax.legend(loc='upper left') #ax.legend(["Total Cases (San Juan)"], loc='upper left', fancybox=True, 
framealpha=1, shadow=True, borderpad=1) #ax2.legend(["Total Cases (Iquitos)"], loc='upper right', fancybox=True, framealpha=1, shadow=True, borderpad=1) ax = plt.gca() plt.show(); # hot = 3, cool = 2, intermediate = 1 season = [] for value in sj_train["month"]: if value >= 5 and value < 11: season.append(3) elif value == 12 or value <= 3: season.append(2) else: season.append(1) sj_train["season"] = season season = [] for value in sj_test["month"]: if value >= 5 and value < 11: season.append(3) elif value == 12 or value <= 3: season.append(2) else: season.append(1) sj_test["season"] = season season = [] for value in sj_val["month"]: if value >= 5 and value < 11: season.append(3) elif value == 12 or value <= 3: season.append(2) else: season.append(1) sj_val["season"] = season # Rainy = 2, dry = 1 season = [] for value in iq_train["month"]: if value == 12 or value <= 4: season.append(2) else: season.append(1) iq_train["season"] = season season = [] for value in iq_test["month"]: if value == 12 or value <= 4: season.append(2) else: season.append(1) iq_test["season"] = season season = [] for value in iq_val["month"]: if value == 12 or value <= 4: season.append(2) else: season.append(1) iq_val["season"] = season iq_train # SJ - hot/muggy season = YYYY-05-DD through YYYY-10-DD # SJ - cool season = YYYY-12-DD through YYYY-03-DD # SJ - intermediate season = YYYY-11-DD AND NOT THROUGH YYYY-04-DD # IQ - rainy season = YYYY-12-DD through YYYY-03-DD # IQ - dry season = YYYY-05-DD through YYYY-09-DD sj_correlations = sj_train.corr() iq_correlations = iq_train.corr() sj_corr_heat = sns.heatmap(sj_correlations, cmap="Greens") plt.title('San Juan Feature Correlations'); (sj_correlations .total_cases .drop('total_cases') .sort_values(ascending=True) .plot .barh(title='San Juan Feature Importance Plot',color='red')); sj_train = sj_train.drop('year', axis=1) sj_test = sj_test.drop('year', axis=1) sj_val = sj_val.drop('year', axis=1) sj_train = sj_train.drop('reanalysis_tdtr_k', 
axis=1) sj_test = sj_test.drop('reanalysis_tdtr_k', axis=1) sj_val = sj_val.drop('reanalysis_tdtr_k', axis=1) iq_corr_heat = sns.heatmap(iq_correlations, cmap='Greens') plt.title('Iquitos Feature Correlations'); (iq_correlations .total_cases .drop('total_cases') .sort_values(ascending=True) .plot .barh(title='Iquitos Feature Importance Plot',color='red')); # Baselines target = 'total_cases' sj_features = sj_train.columns.drop([target]) sj_X_train = sj_train[sj_features] sj_y_train = sj_train[target] sj_X_val = sj_val[sj_features] sj_y_val = sj_val[target] target = 'total_cases' iq_features = iq_train.columns.drop([target]) iq_X_train = iq_train[iq_features] iq_y_train = iq_train[target] iq_X_val = iq_val[iq_features] iq_y_val = iq_val[target] sj_y_train_log = np.log1p(sj_y_train) iq_y_train_log = np.log1p(iq_y_train) sj_guess = sj_y_train.mean() sj_y_pred = [sj_guess] * len(sj_y_train) mae = mean_absolute_error(sj_y_train, sj_y_pred) print(f'Train Error: {mae:.2f} percentage points') iq_guess = iq_y_train.mean() iq_y_pred = [iq_guess] * len(iq_y_train) mae = mean_absolute_error(iq_y_train, iq_y_pred) print(f'Train Error: {mae:.2f} percentage points') # San Juan - Linear Regression Model sj_pipeline = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(strategy='most_frequent'), Ridge(normalize=True) ) # Fit on train, score on val sj_pipeline.fit(sj_X_train, sj_y_train) sj_y_pred0 = sj_pipeline.predict(sj_X_val) print('MAE', mean_absolute_error(sj_y_val, sj_y_pred0)) # Iquitos - Linear Regression Model iq_pipeline = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(strategy='median'), Ridge(normalize=True) ) # Fit on train, score on val iq_pipeline.fit(iq_X_train, iq_y_train) iq_y_pred0 = iq_pipeline.predict(iq_X_val) print('MAE', mean_absolute_error(iq_y_val, iq_y_pred0)) # San Juan - Random Forest Classifier rf = RandomForestRegressor(random_state = 42) sj_pipeline = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(strategy='most_frequent'), 
RandomForestRegressor(n_jobs=-1, random_state=42) ) sj_pipeline.fit(sj_X_train, sj_y_train) sj_y_pred = sj_pipeline.predict(sj_X_val) print('MAE', mean_absolute_error(sj_y_val, sj_y_pred)) # Iquitos - Random Forest Classifier iq_pipeline = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(strategy='most_frequent'), RandomForestRegressor(n_jobs=-1, random_state=42) ) # Fit on train, score on val iq_pipeline.fit(iq_X_train, iq_y_train) iq_y_pred = iq_pipeline.predict(iq_X_val) print('MAE', mean_absolute_error(iq_y_val, iq_y_pred)) sj_val1 = sj_val iq_val1 = iq_val sj_val1['true'] = sj_y_pred.astype(int) iq_val1['true'] = iq_y_pred0.astype(int) fig, ax = plt.subplots() sns.lineplot(x='week_start_date', y='total_cases', data=sj_val, ax=ax, color='r', label='Actual') sns.lineplot(x='week_start_date', y='true', data=sj_val1, ax=ax, color='black', label='Predicted') ax.set_xlabel('Time (1990-2007)', fontsize=14) ax.set_ylabel('Total Cases', fontsize=18) ax.set_title('Actual vs Predicted Cases of Dengue (San Juan)', fontsize=18) ax.legend(loc='upper right', fancybox=True, framealpha=1, shadow=True, borderpad=1) #ax2.legend(["Total Cases (Iquitos)"], loc='upper right', fancybox=True, framealpha=1, shadow=True, borderpad=1) ax.set_xticks([]) ax = plt.gca() plt.show(); fig, ax = plt.subplots() sns.lineplot(x='week_start_date', y='total_cases', data=iq_val, ax=ax, color='r', label='Actual') sns.lineplot(x='week_start_date', y='true', data=iq_val1, ax=ax, color='black', label='Predicted') ax.set_xlabel('Time (2000-2009)', fontsize=14) ax.set_ylabel('Total Cases', fontsize=18) ax.set_title('Actual vs Predicted Cases of Dengue (Iquitos)', fontsize=18) ax.legend(loc='upper right', fancybox=True, framealpha=1, shadow=True, borderpad=1) #ax2.legend(["Total Cases (Iquitos)"], loc='upper right', fancybox=True, framealpha=1, shadow=True, borderpad=1) ax.set_xticks([]) ax = plt.gca() plt.show(); import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import pandas as 
pd from tensorflow import keras from keras.layers.core import Dense, Dropout from keras.layers.recurrent import LSTM from keras.models import Sequential model = models.Sequential() model.add(LSTM(103, input_shape=(len(sj_train))) model.add(Dense(50, activation='relu')) model.add(Dense(1)) model.compile(loss='mae', optimizer='adam') ```
github_jupyter
# Co-refinement of multiple contrast DMPC datasets in *refnx* This Jupyter notebook demonstrates the utility of the *refnx* package for analysis of neutron reflectometry data. Specifically: - the co-refinement of three contrast variation datasets of a DMPC (1,2-dimyristoyl-sn-glycero-3-phosphocholine) bilayer measured at the solid-liquid interface with a common model - the use of the `LipidLeaflet` component to parameterise the model in terms of physically relevant parameters - the use of Bayesian Markov Chain Monte Carlo (MCMC) to investigate the posterior distribution of the curvefitting system. - the intrinsic usefulness of Jupyter notebooks to facilitate reproducible research in scientific data analysis <img src="DMPC.png"> The images produced in this notebook are used directly in production of the *refnx* paper. Jupyter notebooks are executable documents that can be distributed, enabling others to reproduce the data analysis contained in the document. The *refnx* documentation at https://refnx.readthedocs.io/en/latest/index.html can be consulted for further details. The first step in most Python scripts is to import modules and functions that are going to be used. ``` # use matplotlib for plotting %matplotlib inline import matplotlib.pyplot as plt import numpy as np import os.path import refnx, scipy # the analysis module contains the curvefitting engine from refnx.analysis import CurveFitter, Objective, Parameter, GlobalObjective, process_chain # the reflect module contains functionality relevant to reflectometry from refnx.reflect import SLD, ReflectModel, Structure, LipidLeaflet # the ReflectDataset object will contain the data from refnx.dataset import ReflectDataset ``` In order for the analysis to be exactly reproducible the same package versions must be used. The *conda* packaging manager, and *pip*, can be used to ensure this is the case.
``` # version numbers used in this analysis refnx.version.version, scipy.version.version ``` The `ReflectDataset` class is used to represent a dataset. They can be constructed by supplying a filename ``` data_d2o = ReflectDataset('c_PLP0016596.dat') data_d2o.name = "d2o" data_hdmix = ReflectDataset('c_PLP0016601.dat') data_hdmix.name = "hdmix" data_h2o = ReflectDataset('c_PLP0016607.dat') data_h2o.name = "h2o" ``` A `SLD` object is used to represent the Scattering Length Density of a material. It has `real` and `imag` attributes because the SLD is a complex number, with the imaginary part accounting for absorption. The units of SLD are $10^{-6} \mathring{A}^{-2}$ The `real` and `imag` attributes are `Parameter` objects. These `Parameter` objects contain the: parameter value, whether it allowed to vary, any interparameter constraints, and bounds applied to the parameter. The bounds applied to a parameter are probability distributions which encode the log-prior probability of the parameter having a certain value. ``` si = SLD(2.07 + 0j) sio2 = SLD(3.47 + 0j) # the following represent the solvent contrasts used in the experiment d2o = SLD(6.36 + 0j) h2o = SLD(-0.56 + 0j) hdmix = SLD(2.07 + 0j) # We want the `real` attribute parameter to vary in the analysis, and we want to apply # uniform bounds. The `setp` method of a Parameter is a way of changing many aspects of # Parameter behaviour at once. d2o.real.setp(vary=True, bounds=(6.1, 6.36)) d2o.real.name='d2o SLD' ``` The `LipidLeaflet` class is used to describe a single lipid leaflet in our interfacial model. A leaflet consists of a head and tail group region. Since we are studying a bilayer then inner and outer `LipidLeaflet`'s are required. ``` # Parameter for the area per molecule each DMPC molecule occupies at the surface. We # use the same area per molecule for the inner and outer leaflets. 
apm = Parameter(56, 'area per molecule', vary=True, bounds=(52, 65)) # the sum of scattering lengths for the lipid head and tail in Angstrom. b_heads = Parameter(6.01e-4, 'b_heads') b_tails = Parameter(-2.92e-4, 'b_tails') # the volume occupied by the head and tail groups in cubic Angstrom. v_heads = Parameter(319, 'v_heads') v_tails = Parameter(782, 'v_tails') # the head and tail group thicknesses. inner_head_thickness = Parameter(9, 'inner_head_thickness', vary=True, bounds=(4, 11)) outer_head_thickness = Parameter(9, 'outer_head_thickness', vary=True, bounds=(4, 11)) tail_thickness = Parameter(14, 'tail_thickness', vary=True, bounds=(10, 17)) # finally construct a `LipidLeaflet` object for the inner and outer leaflets. # Note that here the inner and outer leaflets use the same area per molecule, # same tail thickness, etc, but this is not necessary if the inner and outer # leaflets are different. inner_leaflet = LipidLeaflet(apm, b_heads, v_heads, inner_head_thickness, b_tails, v_tails, tail_thickness, 3, 3) # we reverse the monolayer for the outer leaflet because the tail groups face upwards outer_leaflet = LipidLeaflet(apm, b_heads, v_heads, outer_head_thickness, b_tails, v_tails, tail_thickness, 3, 0, reverse_monolayer=True) ``` The `Slab` Component represents a layer of uniform scattering length density of a given thickness in our interfacial model. Here we make `Slabs` from `SLD` objects, but other approaches are possible. ``` # Slab constructed from SLD object. 
sio2_slab = sio2(15, 3) sio2_slab.thick.setp(vary=True, bounds=(2, 30)) sio2_slab.thick.name = 'sio2 thickness' sio2_slab.rough.setp(vary=True, bounds=(0, 7)) sio2_slab.rough.name = name='sio2 roughness' sio2_slab.vfsolv.setp(0.1, vary=True, bounds=(0., 0.5)) sio2_slab.vfsolv.name = 'sio2 solvation' solv_roughness = Parameter(3, 'bilayer/solvent roughness') solv_roughness.setp(vary=True, bounds=(0, 5)) ``` Once all the `Component`s have been constructed we can chain them together to compose a `Structure` object. The `Structure` object represents the interfacial structure of our system. We create different `Structure`s for each contrast. It is important to note that each of the `Structure`s share many components, such as the `LipidLeaflet` objects. This means that parameters used to construct those components are shared between all the `Structure`s, which enables co-refinement of multiple datasets. An alternate way to carry this out would be to apply constraints to underlying parameters, but this way is clearer. Note that the final component for each structure is a `Slab` created from the solvent `SLD`s, we give those slabs a zero thickness. ``` s_d2o = si | sio2_slab | inner_leaflet | outer_leaflet | d2o(0, solv_roughness) s_hdmix = si | sio2_slab | inner_leaflet | outer_leaflet | hdmix(0, solv_roughness) s_h2o = si | sio2_slab | inner_leaflet | outer_leaflet | h2o(0, solv_roughness) ``` The `Structure`s created in the previous step describe the interfacial structure, these structures are used to create `ReflectModel` objects that know how to apply resolution smearing, scaling factors and background. 
``` model_d2o = ReflectModel(s_d2o) model_hdmix = ReflectModel(s_hdmix) model_h2o = ReflectModel(s_h2o) model_d2o.scale.setp(vary=True, bounds=(0.9, 1.1)) model_d2o.bkg.setp(vary=True, bounds=(-5e-7, 1e-6)) model_hdmix.bkg.setp(vary=True, bounds=(-5e-7, 1e-6)) model_h2o.bkg.setp(vary=True, bounds=(-5e-7, 1e-6)) ``` An `Objective` is constructed from a `ReflectDataset` and `ReflectModel`. Amongst other things `Objective`s can calculate chi-squared, log-likelihood probability, log-prior probability, etc. We then combine all the individual `Objective`s into a `GlobalObjective`. ``` objective_d2o = Objective(model_d2o, data_d2o) objective_hdmix = Objective(model_hdmix, data_hdmix) objective_h2o = Objective(model_h2o, data_h2o) global_objective = GlobalObjective([objective_d2o, objective_hdmix, objective_h2o]) ``` A `CurveFitter` object can perform least squares fitting, or MCMC sampling on the `Objective` used to construct it. ``` fitter = CurveFitter(global_objective, nwalkers=200) ``` We initialise the MCMC walkers by jittering around the best fit. Other modes of initialisation are possible: from a supplied covariance matrix, by sampling from the prior distributions, or by supplying known positions from an array. ``` # we seed the numpy random number generator to get reproducible numbers # during walker initialisation np.random.seed(1) fitter.initialise('jitter') ``` In MCMC sampling a burn in period is used to allow the workers to be more representative of the distribution they are sampling. Here we do a number of samples, then discard them. The last chain position is kept to provide a starting point for the 'production' run. 
``` # set random_state for reproducible pseudo-random number streams fitter.sample(1000, random_state=321); ``` The shape of the chain containing the samples is `(number_steps, number_walkers, number_parameters)` ``` print(fitter.chain.shape) ``` At the start of the sampling run the walkers in the MCMC ensemble probably won't distributed according the distribution they are sampling. We can discard, or burn, the initial steps. Let's have a look at the steps for a parameter (e.g. the area-per-molecule) to see if they've reached equilibrium (i.e. distributed around a mean). ``` for i in range(200): plt.plot(fitter.chain[:, i, 5].flat) ``` Although it's hard to tell from this graph it seems that ~500 steps is enough for equilibration, so let's discard these initial steps that acted as the burn-in period. ``` fitter.reset() ``` Now we do a production sampling run. In this example the total number of samples is the number of walkers (200 by default) multiplied by the number of steps: 8000 * 200 = 1 600 000. The sampling engine automatically makes full use of the total number of processing cores available to it, but this is specifiable. In addition MPI can be used, which make it useful for sampling on a cluster - MCMC is embarrassingly parallel. Samples can be saved to file as they are acquired, useful for checkpointing sampling state. ``` fitter.sample(8000, random_state=123); ``` However, successive steps are correlated to previous steps to some degree, and the chain should be thinned to ensure the samples are independent. Let's see how much we should thin by by looking at the autocorrelation of a parameter. ``` plt.plot(fitter.acf()[:, 5]) plt.xlim(0, 1000); ``` For the sampling done here thinning by 400 should be sufficient. ``` process_chain(global_objective, fitter.chain, nthin=400); ``` The sampling gives each varying parameter its own MCMC chain, which can be processed to give relevant statistics, or histogrammed, etc. 
The relationship between chains encodes the covariance of all the parameters. The chains are automatically processed to calculate the median of all the samples, and the half width of the [15.87, 84.13] percentiles. These two values are taken to be the 'fitted' parameter value, and its standard deviation. Each Parameter set to this median value, and given an `stderr` attribute. We can see those statistics by printing the objective. ``` print(global_objective) ``` Now let's see how the 'fitted' models compare to the data. We could use `global_objective.plot()`, but because we want to do a bit more tweaking for the graphics (such as vertical offsets) we're going to create the graph manually. We're also going to examine the spread in the posterior distribution. ``` hdmix_mult = 0.01 h2o_mult = 0.1 # the data plt.errorbar(data_d2o.x, data_d2o.y, data_d2o.y_err, label='$\mathregular{D_2O}$', ms=4, marker='o', lw=0, elinewidth=1) plt.errorbar(data_h2o.x, data_h2o.y * h2o_mult, data_h2o.y_err * h2o_mult, label='$\mathregular{H_2O}$', ms=4, marker='^', lw=0, elinewidth=1) plt.errorbar(data_hdmix.x, data_hdmix.y * hdmix_mult, data_hdmix.y_err * hdmix_mult, label='$\mathregular{HD_{mix}}$', ms=4, marker='^', lw=0, elinewidth=1) # the median of the posterior plt.plot(data_d2o.x, objective_d2o.generative(), color='r', zorder=20) plt.plot(data_hdmix.x, objective_hdmix.generative() * hdmix_mult, color='r', zorder=20) plt.plot(data_h2o.x, objective_h2o.generative() * h2o_mult, color='r', zorder=20) # plot the spread of the fits for the different datasets gen = global_objective.pgen(500) save_pars = np.copy(global_objective.parameters) for i in range(500): global_objective.setp(next(gen)) plt.plot(data_d2o.x, objective_d2o.generative(), color='k', alpha=0.02, zorder=10) plt.plot(data_hdmix.x, objective_hdmix.generative() * hdmix_mult, color='k', alpha=0.02, zorder=10) plt.plot(data_h2o.x, objective_h2o.generative() * h2o_mult, color='k', alpha=0.02, zorder=10) # put back the saved 
parameters global_objective.setp(save_pars) ax = plt.gca() ax.text(-0.04, 1e-11, 'a)') plt.legend() plt.yscale('log') plt.ylabel('Reflectivity') plt.xlabel('Q /$\AA^{-1}$') plt.ylim(1e-10, 2); plt.xlim(0.004, 0.3) plt.savefig('global_fit.pdf') ``` We can investigate the posterior distribution by a corner plot, this reveals interparameter covariances. ``` global_objective.corner(); plt.savefig('corner.pdf') ``` The variation in scattering length density profiles can be visualised by a little bit of processing. This enables one to see what range of SLD profiles are statistically possible. ``` saved_params = np.array(objective_d2o.parameters) z, median_sld = s_d2o.sld_profile() for pvec in objective_d2o.pgen(ngen=500): objective_d2o.setp(pvec) zs, sld = s_d2o.sld_profile() plt.plot(zs, sld, color='k', alpha=0.05) # put back saved_params objective_d2o.setp(saved_params) ax = plt.gca() ax.text(-50, -1.6, 'b)') plt.plot(z, median_sld, lw=2, color='r'); plt.ylabel('scattering length density / $10^{-6}\AA^{-2}$') plt.xlabel('distance / $\AA$') plt.savefig('d2o_sld_spread.pdf') ```
github_jupyter
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import datetime
from typing import Union

sns.set_theme(style="whitegrid")
```

## Analyze CS Data

```
df = pd.read_csv("data/cs.csv", index_col=0)


def vec_dt_replace(series, year=None, month=None, day=None):
    """Vectorized datetime ``replace``: rebuild a datetime Series, overriding
    whichever of year/month/day are given and keeping the other components."""
    return pd.to_datetime(
        {'year': series.dt.year if year is None else year,
         'month': series.dt.month if month is None else month,
         'day': series.dt.day if day is None else day})


# BUG FIX: take an explicit copy -- this slice is mutated later on (the
# 'is_int' column in the interview analysis below), and assigning into a view
# triggers SettingWithCopyWarning / may silently not stick.
df_1720 = df[(df['season'].isin(['F16', 'F17', 'F18', 'F19', 'F20']))].copy()
df.columns
```

### Make it easier to filter through programs using the decision, the institution, gre, gpa, etc.

```
def create_filter(df,
                  degree: str = None,
                  decisionfin: Union[str, list] = None,
                  institution: Union[str, list] = None,
                  gpa: bool = False,
                  gre: bool = False):
    """Build a boolean mask over ``df`` from the given criteria.

    degree      -- exact match on the 'degree' column.
    decisionfin -- substring (case-insensitive) or list of exact decisions.
    institution -- substring (case-insensitive) or list of exact institutions.
    gpa         -- if True, keep only rows with a usable GPA (present and <= 4).
    gre         -- if True, keep only rows with all three new-GRE scores present.
    """
    filt = [True] * len(df)
    if degree is not None:
        filt = (filt) & (df['degree'] == degree)
    if decisionfin is not None:
        if isinstance(decisionfin, str):
            filt = (filt) & (df['decisionfin'].str.contains(decisionfin, case=False))
        elif isinstance(decisionfin, list):
            filt = (filt) & (df['decisionfin'].isin(decisionfin))
    if institution is not None:
        if isinstance(institution, str):
            filt = (filt) & (df['institution'].str.contains(institution, case=False))
        elif isinstance(institution, list):
            filt = (filt) & (df['institution'].isin(institution))
    if gpa:
        filt = (filt) & (~df['gpafin'].isna()) & (df['gpafin'] <= 4)
    if gre:
        filt = (filt) & (~df['grev'].isna()) & (~df['grem'].isna()) & (~df['grew'].isna()) & (df['new_gre'])
    return filt
```

### Actual function that generates the images

```
def get_uni_stats(u_df, search: str = None, title: str = None, degree: str = 'MS', field: str = 'CS', hue='decisionfin'):
    """Render a 2x2 grid of plots (decision timeline, GPA histogram, GRE and
    AWA boxplots) for one institution/degree and save the figure to output/."""
    title = title if title is not None else search
    if degree not in ['MS', 'PhD', 'MEng', 'MFA', 'MBA', 'Other']:
        degree = 'MS'
    # Clean up the data a bit, this probably needs a lot more work
    # Maybe its own method, too
    u_df = u_df.copy()
    u_df = u_df[~u_df['decdate'].isna()]
    u_df.loc[:, 'year'] = u_df['decdate'].str[-4:].astype(int)
    u_df = u_df[(u_df['year'] > 2000) & (u_df['year'] < datetime.datetime.now().year)]
    # Normalize to 2020. 2020 is a good choice because it's recent AND it's a leap year
    u_df.loc[:, 'uniform_dates'] = vec_dt_replace(pd.to_datetime(u_df['decdate']), year=2020)
    # Get december dates to be from "2019" so Fall decisions that came in Dec come before the Jan ones.
    dec_filter = u_df['uniform_dates'] > datetime.datetime.strptime('2020-11-30', '%Y-%m-%d')
    u_df.loc[dec_filter, 'uniform_dates'] = vec_dt_replace(pd.to_datetime(u_df[dec_filter]['uniform_dates']), year=2019)
    # Trying to pick red/green colorblind-friendly colors
    flatui = ["#2eff71", "#ff0000", "#0000ff"]
    sns.set_palette(flatui)
    acc_patch = mpatches.Patch(color='#2eff7180')
    rej_patch = mpatches.Patch(color='#ff000080')
    int_patch = mpatches.Patch(color='#0000ff80')
    acc_line = mlines.Line2D([], [], color='#2eff71')
    rej_line = mlines.Line2D([], [], color='#ff0000')
    int_line = mlines.Line2D([], [], color='#0000ff')
    hue_order = ['Accepted', 'Rejected', 'Interview']
    if hue == 'status':
        hue_order = ['American', 'International', 'International with US Degree', 'Other']
    # This generates 4 graphs, so let's make it a 2x2 grid
    fig, ax = plt.subplots(2, 2)
    fig.set_size_inches(20, 20)
    # Timeline stats
    mscs_filt = create_filter(u_df, degree, ['Accepted', 'Rejected', 'Interview'], institution=search)
    # NOTE: deliberate *string* comparison -- '2020-06-00' lexicographically
    # means "strictly before June 2020".
    mscs_filt = (mscs_filt) & (u_df['uniform_dates'].astype(str) <= '2020-06-00')
    sns.histplot(data=u_df[mscs_filt], x='uniform_dates', hue=hue,
                 cumulative=True, discrete=False, element='step', fill=False,
                 hue_order=hue_order, ax=ax[0][0])
    locator = mdates.AutoDateLocator(minticks=3, maxticks=7)
    formatter = mdates.ConciseDateFormatter(locator)
    formatter.formats = ['%b',     # years
                         '%b',     # months
                         '%d',     # days
                         '%H:%M',  # hrs
                         '%H:%M',  # min
                         '%S.%f', ]  # secs
    # Hide the year
    formatter.zero_formats = ['%b',     # years
                              '%b',     # months
                              '%d',     # days
                              '%H:%M',  # hrs
                              '%H:%M',  # min
                              '%S.%f', ]  # secs
    # Hide the year
    formatter.offset_formats = ['',       # years
                                '',       # months
                                '%d',     # days
                                '%H:%M',  # hrs
                                '%H:%M',  # mins
                                '%S.%f', ]  # secs
    ax[0][0].xaxis.set_major_locator(locator)
    ax[0][0].xaxis.set_major_formatter(formatter)
    h, l = ax[0][0].get_legend_handles_labels()
    # Add frequency counts
    if h is not None and l is not None:
        if hue == 'decisionfin':
            counts = u_df[mscs_filt][hue].value_counts().reindex(hue_order)
            # BUG FIX: Series.iteritems() was removed in pandas 2.0;
            # .items() is the drop-in replacement.
            l = [f'{value} (n={count})' for value, count in counts.items()]
        ax[0][0].legend(handles=[acc_line, rej_line, int_line], labels=l, title="Decision")
    ax[0][0].set_xlabel("Date")
    ax[0][0].set_ylabel("Count")
    ax[0][0].set_title("Cumsum of decisions")
    # Get GPA stats
    mscs_filt = create_filter(u_df, degree, ['Accepted', 'Rejected'], institution=search, gpa=True)
    sns.histplot(data=u_df[mscs_filt], x='gpafin', hue=hue, hue_order=hue_order, bins=20, ax=ax[0][1])
    ax[0][1].set_xlabel("GPA")
    ax[0][1].set_ylabel("Count")
    ax[0][1].set_title("GPA Distribution")
    # Add frequency counts
    h, l = ax[0][1].get_legend_handles_labels()
    if h is not None and l is not None:
        if hue == 'decisionfin':
            counts = u_df[mscs_filt][hue].value_counts().reindex(hue_order)
            # BUG FIX: .items() instead of the removed .iteritems()
            l = [f'{value} (n={count})' for value, count in counts.items()]
        ax[0][1].legend(handles=[acc_patch, rej_patch], labels=l, title="Decision")
    # Get GRE stats
    mscs_filt = create_filter(u_df, degree, ['Accepted', 'Rejected', 'Interview'], institution=search, gre=True)
    dfq = u_df[mscs_filt][['grem', hue]]
    dfq = dfq.assign(gre_type='Quant')
    dfq.columns = ['score', hue, 'gre_type']
    dfv = u_df[mscs_filt][['grev', hue]]
    dfv = dfv.assign(gre_type='Verbal')
    dfv.columns = ['score', hue, 'gre_type']
    cdf = pd.concat([dfq, dfv])
    sns.boxplot(data=cdf, x='gre_type', y='score', hue=hue, linewidth=2.5, hue_order=hue_order, ax=ax[1][0])
    leg = ax[1][0].get_legend()
    if leg is not None:
        leg.set_title('Decision')
    ax[1][0].set_xlabel("GRE Section")
    ax[1][0].set_ylabel("Score")
    ax[1][0].set_title("GRE Score distribution")
    # Get GRE AWA stats
    mscs_filt = create_filter(u_df, degree, ['Accepted', 'Rejected', 'Interview'], institution=search, gre=True)
    sns.boxplot(data=u_df[mscs_filt], x=['AWA'] * len(u_df[mscs_filt]), y='grew', hue=hue, linewidth=2.5, hue_order=hue_order, ax=ax[1][1])
    leg = ax[1][1].get_legend()
    if leg is not None:
        leg.set_title('Decision')
    ax[1][1].set_xlabel("GRE Section")
    ax[1][1].set_ylabel("Score")
    ax[1][1].set_title("GRE AWA Score distribution")
    # Save file to output directory
    fig.suptitle(title + ', ' + field + ' ' + degree, size='xx-large')
    plt.savefig('output/' + title + '_' + field + ' ' + degree + '.png')
    fig  # echo the figure so the notebook renders it inline


get_uni_stats(df_1720, search='cornell university', title='Cornell University', degree='MS', field='CS')
```

## Other things you could analyze

For instance how many interviews per university, and thus know how likely it is that the interview process is a must if you wanna be accepted.

### Bad interview analysis

```
df_1720['is_int'] = 0
df_1720.loc[df_1720['decisionfin'] == 'Interview', 'is_int'] = 1
df_1720.groupby(by='institution').agg({'is_int': sum}).sort_values(by='is_int', ascending=False).head(10)
```

# Analyze other fields

```
hisdf = pd.read_csv("data/all.csv", index_col=0, low_memory=False)
hisdf.columns
get_uni_stats(hisdf, title='All Universities', degree='PhD', field='All')
```

## Answering Questions

### GPA Inflation

```
# BUG FIX: the previous astype(int, errors='ignore') silently leaves the
# column as strings (dtype object) when any value fails to parse, and the
# filter then compared against the *strings* '2009'/'2020' -- a mixed
# str/int comparison. Parse once to numbers (unparseable years -> NaN) and
# compare against integers instead.
hisdf['decyear'] = pd.to_numeric(hisdf['decdate'].str.slice(-4), errors='coerce')
hisdf = hisdf[(hisdf['decyear'] >= 2009) &
              (hisdf['decyear'] <= 2020) &
              (hisdf['status'].isin(['American', 'International with US Degree'])) &
              (hisdf['gpafin'] <= 4)]
gpadf = hisdf[~hisdf['decyear'].isnull()].groupby(by=['decyear']).agg({'gpafin': 'mean'})
fig, ax = plt.subplots()
sns.barplot(x=gpadf.index, y=gpadf['gpafin'], ax=ax)
ax.set_ylim([0, 4])
ax.set_xlabel("Year of Submission")
ax.set_ylabel("GPA Mean")
ax.set_title("GPA Behaviour over the Years")
plt.show()
fig.savefig("output/gpa_inflation.png")
```

### Do International Students Have Significantly Different Stats?

```
get_uni_stats(hisdf, title='All Universities by Status', degree='PhD', field='All', hue='status')
hisdf['major'].value_counts()
```
github_jupyter
# Creating a logistic regression to predict absenteeism ## Import the relevant libraries ``` # import the relevant libraries import pandas as pd import numpy as np ``` ## Load the data ``` # load the preprocessed CSV data data_preprocessed = pd.read_csv('Absenteeism_preprocessed.csv') # eyeball the data data_preprocessed.head() ``` ## Create the targets ``` # find the median of 'Absenteeism Time in Hours' data_preprocessed['Absenteeism Time in Hours'].median() # create targets for our logistic regression # they have to be categories and we must find a way to say if someone is 'being absent too much' or not # what we've decided to do is to take the median of the dataset as a cut-off line # in this way the dataset will be balanced (there will be roughly equal number of 0s and 1s for the logistic regression) # as balancing is a great problem for ML, this will work great for us # alternatively, if we had more data, we could have found other ways to deal with the issue # for instance, we could have assigned some arbitrary value as a cut-off line, instead of the median # note that what line does is to assign 1 to anyone who has been absent 4 hours or more (more than 3 hours) # that is the equivalent of taking half a day off # initial code from the lecture # targets = np.where(data_preprocessed['Absenteeism Time in Hours'] > 3, 1, 0) # parameterized code targets = np.where(data_preprocessed['Absenteeism Time in Hours'] > data_preprocessed['Absenteeism Time in Hours'].median(), 1, 0) # eyeball the targets targets # create a Series in the original data frame that will contain the targets for the regression data_preprocessed['Excessive Absenteeism'] = targets # check what happened # maybe manually see how the targets were created data_preprocessed.head() ``` ## A comment on the targets ``` # check if dataset is balanced (what % of targets are 1s) # targets.sum() will give us the number of 1s that there are # the shape[0] will give us the length of the targets array 
targets.sum() / targets.shape[0] # create a checkpoint by dropping the unnecessary variables # also drop the variables we 'eliminated' after exploring the weights data_with_targets = data_preprocessed.drop(['Absenteeism Time in Hours'],axis=1) # check if the line above is a checkpoint :) # if data_with_targets is data_preprocessed = True, then the two are pointing to the same object # if it is False, then the two variables are completely different and this is in fact a checkpoint data_with_targets is data_preprocessed # check what's inside data_with_targets.head() ``` ## Select the inputs for the regression ``` data_with_targets.shape # Selects all rows and all columns until 14 (excluding) data_with_targets.iloc[:,:14] # Selects all rows and all columns but the last one (basically the same operation) data_with_targets.iloc[:,:-1] # Create a variable that will contain the inputs (everything without the targets) unscaled_inputs = data_with_targets.iloc[:,:-1] ``` ## Standardize the data ``` # standardize the inputs # standardization is one of the most common preprocessing tools # since data of different magnitude (scale) can be biased towards high values, # we want all inputs to be of similar magnitude # this is a peculiarity of machine learning in general - most (but not all) algorithms do badly with unscaled data # a very useful module we can use is StandardScaler # it has much more capabilities than the straightforward 'preprocessing' method from sklearn.preprocessing import StandardScaler # we will create a variable that will contain the scaling information for this particular dataset # here's the full documentation: http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html # define scaler as an object absenteeism_scaler = StandardScaler() # import the libraries needed to create the Custom Scaler # note that all of them are a part of the sklearn package # moreover, one of them is actually the StandardScaler module, # so you can 
# imagine that the Custom Scaler is built on it
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler


# create the Custom Scaler class
class CustomScaler(BaseEstimator, TransformerMixin):
    """StandardScaler that scales only the given columns and leaves the rest
    of the data frame untouched, preserving the original column order."""

    # init or what information we need to declare a CustomScaler object
    # and what is calculated/declared as we do
    def __init__(self, columns, copy=True, with_mean=True, with_std=True):
        # scaler is nothing but a Standard Scaler object
        # BUG FIX: pass the options as *keyword* arguments -- scikit-learn
        # >= 1.2 makes estimator parameters keyword-only, so the previous
        # positional call StandardScaler(copy, with_mean, with_std) raises
        # TypeError on current versions.
        self.scaler = StandardScaler(copy=copy, with_mean=with_mean, with_std=with_std)
        # with some columns 'twist'
        self.columns = columns
        self.mean_ = None
        self.var_ = None

    # the fit method, which, again, is based on StandardScaler
    def fit(self, X, y=None):
        self.scaler.fit(X[self.columns], y)
        self.mean_ = np.mean(X[self.columns])
        self.var_ = np.var(X[self.columns])
        return self

    # the transform method which does the actual scaling
    def transform(self, X, y=None, copy=None):
        # record the initial order of the columns
        init_col_order = X.columns
        # scale all features that you chose when creating the instance of the class
        X_scaled = pd.DataFrame(self.scaler.transform(X[self.columns]), columns=self.columns)
        # declare a variable containing all information that was not scaled
        X_not_scaled = X.loc[:, ~X.columns.isin(self.columns)]
        # return a data frame which contains all scaled features and all 'not scaled' features
        # use the original order (that you recorded in the beginning)
        return pd.concat([X_not_scaled, X_scaled], axis=1)[init_col_order]


# check what are all columns that we've got
unscaled_inputs.columns.values

# choose the columns to scale
# we later augmented this code and put it in comments
# columns_to_scale = ['Month Value','Day of the Week', 'Transportation Expense', 'Distance to Work',
#                     'Age', 'Daily Work Load Average', 'Body Mass Index', 'Children', 'Pet']

# select the columns to omit
columns_to_omit = ['Reason_1', 'Reason_2', 'Reason_3', 'Reason_4', 'Education']

# create the columns to scale, based on the columns to omit
# use list comprehension to
iterate over the list columns_to_scale = [x for x in unscaled_inputs.columns.values if x not in columns_to_omit] # declare a scaler object, specifying the columns you want to scale absenteeism_scaler = CustomScaler(columns_to_scale) # fit the data (calculate mean and standard deviation); they are automatically stored inside the object absenteeism_scaler.fit(unscaled_inputs) # standardizes the data, using the transform method # in the last line, we fitted the data - in other words # we found the internal parameters of a model that will be used to transform data. # transforming applies these parameters to our data # note that when you get new data, you can just call 'scaler' again and transform it in the same way as now scaled_inputs = absenteeism_scaler.transform(unscaled_inputs) # the scaled_inputs are now an ndarray, because sklearn works with ndarrays scaled_inputs # check the shape of the inputs scaled_inputs.shape ``` ## Split the data into train & test and shuffle ### Import the relevant module ``` # import train_test_split so we can split our data into train and test from sklearn.model_selection import train_test_split ``` ### Split ``` # check how this method works train_test_split(scaled_inputs, targets) # declare 4 variables for the split x_train, x_test, y_train, y_test = train_test_split(scaled_inputs, targets, #train_size = 0.8, test_size = 0.2, random_state = 20) # check the shape of the train inputs and targets print (x_train.shape, y_train.shape) # check the shape of the test inputs and targets print (x_test.shape, y_test.shape) ``` ## Logistic regression with sklearn ``` # import the LogReg model from sklearn from sklearn.linear_model import LogisticRegression # import the 'metrics' module, which includes important metrics we may want to use from sklearn import metrics ``` ### Training the model ``` # create a logistic regression object reg = LogisticRegression() # fit our train inputs # that is basically the whole training part of the machine 
learning reg.fit(x_train,y_train) # assess the train accuracy of the model reg.score(x_train,y_train) ``` ### Manually check the accuracy ``` # find the model outputs according to our model model_outputs = reg.predict(x_train) model_outputs # compare them with the targets y_train # ACTUALLY compare the two variables model_outputs == y_train # find out in how many instances we predicted correctly np.sum((model_outputs==y_train)) # get the total number of instances model_outputs.shape[0] # calculate the accuracy of the model np.sum((model_outputs==y_train)) / model_outputs.shape[0] ``` ### Finding the intercept and coefficients ``` # get the intercept (bias) of our model reg.intercept_ # get the coefficients (weights) of our model reg.coef_ # check what were the names of our columns unscaled_inputs.columns.values # save the names of the columns in an ad-hoc variable feature_name = unscaled_inputs.columns.values # use the coefficients from this table (they will be exported later and will be used in Tableau) # transpose the model coefficients (model.coef_) and throws them into a df (a vertical organization, so that they can be # multiplied by certain matrices later) summary_table = pd.DataFrame (columns=['Feature name'], data = feature_name) # add the coefficient values to the summary table summary_table['Coefficient'] = np.transpose(reg.coef_) # display the summary table summary_table # do a little Python trick to move the intercept to the top of the summary table # move all indices by 1 summary_table.index = summary_table.index + 1 # add the intercept at index 0 summary_table.loc[0] = ['Intercept', reg.intercept_[0]] # sort the df by index summary_table = summary_table.sort_index() summary_table ``` ## Interpreting the coefficients ``` # create a new Series called: 'Odds ratio' which will show the.. 
odds ratio of each feature summary_table['Odds_ratio'] = np.exp(summary_table.Coefficient) # display the df summary_table # sort the table according to odds ratio # note that by default, the sort_values method sorts values by 'ascending' summary_table.sort_values('Odds_ratio', ascending=False) ```
github_jupyter
# <center> Step 1.1 Attempt to Import and Transform Data with PyCaret </center> #

In this notebook, I attempted to import, transform, and clean data using the setup() function in PyCaret. From this experiment, my conclusion is that PyCaret may not be a "one-stop shop" solution for data cleaning, data imputing, and creating calculated fields. Since my target variables are each calculated fields, I was unable to incorporate them into the raw dataset using methods in PyCaret. Other notebooks in this repository will illustrate how PyCaret is a very capable low-code modeling tool, once you have a processed, "clean" dataset. More information about the PyCaret library is available here: www.pycaret.org

```
#Import packages
import pandas as pd
import re
import glob
import datetime
import numpy as np
from pycaret.classification import *
```

### <center> Data Import </center> ###

```
#Import groups of customer data
appended_data = []
for file in glob.glob('Cust*'):
    data = pd.read_csv(file)
    appended_data.append(data)
cust_df = pd.concat(appended_data)
cust_df.head()

#Import groups of order data
appended_data = []
for file in glob.glob('Ord*'):
    data = pd.read_csv(file)
    appended_data.append(data)
order_df = pd.concat(appended_data)
order_df.head()

df = pd.merge(order_df, cust_df, how='inner', on = 'owner_no')
df.shape
df.nunique()
df = df.drop([
    'prod_season_desc',
    'postal_code',
    'state_desc',
    'Lifetime Giving'
    ], axis=1
)
df.dtypes
df.columns
df.geo_area_desc.value_counts()
df['OP Prelim Capacity'].value_counts()

#Attempt to setup data for PyCaret
df_1 = setup(
    data=df,
    target = 'first_cont_order',
    categorical_features = [
        'MOS_desc',
        'channel_desc',
        'delivery_desc',
    ],
    ordinal_features = {
        'geo_area_desc': [
            '1-Philadelphia City (20 mi.)',
            '2-Greater Philadelphia(70 mi.)',
            # BUG FIX: a missing comma after the next entry implicitly
            # concatenated it with '4-DC (20 mi.)' into one bogus category,
            # dropping the real '4-DC (20 mi.)' level from the ordering.
            '3-New York City (20 mi.)',
            '4-DC (20 mi.)',
            '5-NEC (140 mi. from Philly)',
            '6-NEC (210 mi. from Philly)',
            '7-USA Balance'
        ],
        'OP Prelim Capacity': ['U','X',1,2,3,4,5,6,7,8,9,10]
    },
    high_cardinality_features = [
        'order_dt',
        'tot_ticket_paid_amt',
        'tot_contribution_paid_amt',
        'First Order Date',
        'First Contribution Date',
        'LTV Tkt Value',
    ],
    date_features = [
        'First Order Date',
        'order_dt',
        'First Contribution Date'
    ],
    numeric_features = [
        'tot_ticket_paid_amt',
        'tot_contribution_paid_amt',
        'num_seats_pur',
        'first_cont_order',
        'first_cont_after',
        'LTV Tkt Value'
    ],
    combine_rare_levels = True,
    remove_multicollinearity = True,
    profile = True
)
```

In the above method, an error is thrown because we have no target in the raw dataset - all targets for this project stem from calculated features.
github_jupyter
## Instructions - Run all cells to initialize app (`Menu Bar > Kernel > Restart & Run All`) - Select a system ID with data picker, text input, or key selection (see below) - When ready, run SCSF algorithm by clicked red button (caution: this could take a few minutes to complete) - Re-run last cell of notebook (`view_ts` function) after SCSF algorithm completes to look at close up of of 5 days in data set ``` %load_ext autoreload %autoreload 2 %matplotlib notebook from sys import path path.append('..') from statistical_clear_sky.utilities.data_loading import load_results, load_sys from statistical_clear_sky.dataviewer import PointBrowser, view_ts df = load_results() ``` # Year over year degradation analysis: results data viewer The SCSF algorithm can be used to estimate the YOY degradation of a PV system from historical data, without the need for a physical system model. NREL also provides the [RdTools](https://www.nrel.gov/pv/rdtools.html) for estimating YOY degradation rates, which takes the classic approach, using a physical system model. Approximately 300 systems from around the US had their historical power data analysed by both tools. The applet presented here investigates these results, plotting the SCSF estimate versus the RdTools estimate for each system. When a point is selected in the top-left frame, the record for the two estimates is shown in the upper right. Each record has the following fields: - `rd`: the RdTools YOY degradation estimate - `deg`: the SCSF YOY degradation estimate - `difference`: the difference between the RdTools estimate and the SCSF estimate - `rd_range`: RdTools also provides quantile estimates on the YOY deg value. This is the P95 minus to P5 values. A larger range indicates larger uncertainty in the RdTools estimate. 
- `res-median`: the median of the residuals between the SCSF clear sky estimate and the raw data - `res-var`: the variance of the residuals between the SCSF clear sky estimate and the raw data - `res-L0norm`: L0 refers to the first column vector in the L matrix of the GLR model. If the model is a good fit for the data, we expect this vector to look similar to the first left singular vector of the SVD of the data matrix. This field is the norm of the residual between the L0 vector and the first left singular vector. If it is small, these two vectors are similar, and if it large, they are dissimilar. - `rd_low`: the P5 value of the RdTools estimate - `rd_high`: the P95 value of the RdTools estimate - `all-pass`: SCSF algorithm passed all three internal solver checks (should be `True` for all data presented here) - `fix-ts`: this is `True` if the SCSF initialization method detected and fixed a timestamp shift issue - `num-days`: the number of days available in that system's data set - `num-days-used`: the number of days selected by the SCSF algorithm - `use-frac`: the fraction of available days used by the algorithm In the data selector pane, the orange points have one or more of the three SCSF residual metrics (`res-median`, `res-var`, and `res-L0norm`) in the top fifth percentile over all the data (i.e., at least one of the metrics is an outlier). ### Data selection Individual systems may be selected by the following ways: - click a point in the upper left plot - enter a system ID number in the text box (applet pickes nearest used ID number) - cycle through systems in descending order (from currently selected) with `a` key - cycle through systems in ascending order (from currently selected) with `s` key Once a system ID is selected, the raw time series power data is retreived from GISMo's servers and displayed in the second row. This data is cached in memory after retrieval, making repeated queries speedier. 
Some interesting system IDs to check out: - `31195` : a very clean, well-behaved system - `29890` : jump in apparent capacity in first year, SCSF approach shows robustness - `15386` : apparent "recovery" in year 4 (look at days `1150` through `1155` in particular) - `21167` : time shift in data, automatically detected and corrected by SCSF - &nbsp;&nbsp;`5429` : tree growth on west side of PV system - `18613` : an interesting shade pattern - `34369` : another interesting shade pattern - &nbsp;&nbsp;`1533` : almost no winter power output - &nbsp;&nbsp;`2912` : very poor quality data - `27133` : large inverter clipping, fit by SCSF (`clear_day_start=20`) - `28532` : small inverter clipping, missed by SCSF (`clear_day_start=70`) - `29757` : star patterns ### Run SCSF Algorithm After a system's data is loaded, you may press the red "run SCSF" button to execute the SCSF algorithm. The algorithm typically completes in 2-5 minutes, but occaisionally can take upwards of 10. Updates from the algorithm will be printed in the third row. This is replaced with a heatmap view of the clear sky estimate after the algorithm completes. As soon as the algorithm is initialized (but before the minimization process) the daily energy and selected days are displayed in the bottom row. After the algorithm completes, this plot is updated with the estimated clear sky daily energy. ``` browser = PointBrowser(df) ``` NOTE: you can use the toolbar directly above to zoom and pan on the data picker plot (top-left). When you're done, click the tool a second time to turn it off, returning the "data picker' functionaliity to your cursor. ### Viewing results as time series Run the following cell after running the SCSF algorithm on a loaded system data set in the previous cell. This brings up an interactive plot, showing the raw data, the clear sky estimate, and the daily weight value. Setting the `clear_day_start` keyword argument centers the plot on a clear day selected by the initialization method. 
If you set the `day_start` instead, the plot will start on that exact day number. After runing the cell, try selecting the "pan axis" tool from the toolbar on the bottom and navigating around the data. ``` view_ts(browser, day_start=452) if browser.iterative_fitting is not None: _ = browser.iterative_fitting.plot_lr(figsize=(10, 4)) ```
github_jupyter
``` # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` <a target="_blank" href="https://colab.research.google.com/github/GoogleCloudPlatform/keras-idiomatic-programmer/blob/master/books/deep-learning-design-patterns/Workshops/Novice/Deep%20Learning%20Design%20Patterns%20-%20Workshop%20-%20Chapter%20III%20-%202.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # Deep Learning Design Patterns - Code Labs ## Lab Exercise #3 - Get Familiar with Hyperparameters - Answers ## Prerequistes: 1. Familiar with Python 2. Completed Chapter III: Training Foundation ## Objectives: 1. Hand setting Epochs and Mini-Batches 2. Use ImageDataGenerator for batch generation. 3. Finding good learning rate. ## Epochs and Mini-Batches In this section, we will hand-roll our own code (vs. builtin feeders) to feed the training data for training. We will need to handle the following: 1. Set a mini-batch size (128) and calculate how many batches will be in the training data. 1. Set the number of epochs (number of times we pass the full training data for training) 2. Randomly shuffle the training data on each epoch. 3. Iterate through the training data on batch at a time. You fill in the blanks (replace the ??), make sure it passes the Python interpreter. 
``` from tensorflow.keras.datasets import cifar10 import random # Let's use the CIFAR-10 dataset (x_train, y_train), (x_test, y_test) = cifar10.load_data() # We will use a mini-batch size of 128 batch_size = 128 # Calculate the total number of mini-batches in an epoch # HINT: It has something to do with the (mini) batch size batches = len(x_train) // 128 # Let's use a seed so we can randomly shuffle both the pixel data and labels the same shuffle. seed = 101 # Let's do 5 passes (epochs) over the dataset epochs = 5 for epoch in range(epochs): # Shuffle the dataset at the beginning of each epoch # HINT: We have to shuffle the image data and labels from the training data random.seed(seed) random.shuffle(x_train) random.seed(seed) random.shuffle(y_train) # Set a new seed for the next shuffle seed += random.randint(0, 100) # Iterate (sequential) through the shuffled training data, one batch at a time. for batch in range(batches): # Get the next batch of data # HINT: if the begin of the batch is at location X, then the end is X + batc x_batch = x_train[batch * batch_size:(batch+1) * batch_size] y_batch = y_train[batch * batch_size:(batch+1) * batch_size] print("Epoch", epoch+1, "Batch", batch+1) print("Done - the last line above this should be: Epoch 5, Batch 390") ``` ## ImageDataGenerator and Batch Generation In this section, we will use the **Keras** ImageDataGenerator to automatically generate out mini-batches (vs. hand generating them), and shuffling the training data on each epoch. 
``` from tensorflow.keras.preprocessing.image import ImageDataGenerator # Let's use the CIFAR-10 dataset (x_train, y_train), (x_test, y_test) = cifar10.load_data() # We will use a mini-batch size of 128 batch_size = 128 # Calculate the total number of mini-batches in an epoch batches = len(x_train) // batch_size # instantiate an Image Data generator object # HINT: Image Data generator is a big giveaway datagen = ImageDataGenerator() # Let's do 5 passes (epochs) over the dataset epochs = 5 for epoch in range(epochs): # Use generator to create batches # HINT: The method is about flowing data from in-memory (vs. on-disk) batch = 0 for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=batch_size, shuffle=True): batch += 1 # Keep track of the number of batches so far. print("Epoch", epoch+1, "Batch", batch) # At the end of the training data, let's loop around for the next epoch. if batch == batches: break print("Done - the last line above this should be: Epoch 5, Batch 390") ``` ## Learning Rate Let's show how to do short epochs to get a feel on what might be the right learning rate for your training. 
``` from tensorflow.keras import Sequential, optimizers from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense from tensorflow.keras.utils import to_categorical import numpy as np # Let's use the CIFAR-10 dataset (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Normalize the pixel data x_train = (x_train / 255.0).astype(np.float32) x_test = (x_test / 255.0).astype(np.float32) # One-hot encode the labels y_train = to_categorical(y_train) y_test = to_categorical(y_test) def convNet(input_shape, nclasses): model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dense(nclasses, activation='softmax')) return model # Create a simple CNN and set learning rate very high (0.1)) # HINT: how would you abbreviate learning rate? model = convNet((32, 32, 3), 10) model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=0.1), metrics=['accuracy']) # Let's take a fraction of the training data to test the learning rate (2%) x_tmp = x_train[0:1000] y_tmp = y_train[0:1000] # Let's run 3 epochs at learning rate = 0.1 model.fit(x_tmp, y_tmp, epochs=3, batch_size=32, verbose=1) ``` Argh, it's horrible. The loss on the first epoch is high (14.0+) and then never goes down - like it's stuck. Hum, okay now you experiment with different learning rates to find one where the loss goes down rapidly and a steady increase in accuracy. ``` model = convNet((32, 32, 3), 10) # Pick your own learning rate until the results are good. # HINT: It's going to be a lot smaller than 0.1 model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=0.001), metrics=['accuracy']) # Let's run 3 epochs at your learning rate model.fit(x_tmp, y_tmp, epochs=3, batch_size=32, verbose=1) ``` ## End of Lab Exercise
github_jupyter
# Chapter 6 - Model Deployment for Time Series Forecasting - Serving ## Deployment This script allows you to use the model in a webservice and get the desired results. Once the model is trained, it's possible to deploy it in a service. #### For this you need the following steps: * Retrieve the workspace * Get or register the model * Create a docker image * Create the ACI service * Deploy the service * Test the service Import Azure Machine Learning Python SDK and other modules. ``` import ast import json import os import azureml.core import matplotlib.pyplot as plt import numpy as np import pandas as pd import sklearn from azureml.core import Workspace from azureml.core.environment import Environment from azureml.core.model import InferenceConfig, Model from azureml.core.webservice import AciWebservice from sklearn.preprocessing import MinMaxScaler from energydemandforecasting.utils import load_data ``` ### Retrieve AML workspace The workspace that was used for training must be retrieved. ``` ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep="\n") ``` ### Get or register the model (optional) We already registered the model in the training script. But if the model you want to use is only saved locally, you can uncomment and run the following cell, that will register your model in the workspace. Parameters may need adjustment. ``` # model = Model.register(model_path = "path_of_your_model", # model_name = "name_of_your_model", # tags = {'type': "Time series ARIMA model"}, # description = "Time series ARIMA model", # workspace = ws) # get the already registered model model = Model.list(ws, name="arimamodel")[0] print(model) ``` ## Get or Register an Environment We already registered the environment in the training script. 
``` # my_azureml_env = Environment.from_conda_specification(name = "my_azureml_env", # file_path = "./energydemandforecasting/azureml-env.yml") # my_azureml_env.register(workspace=ws) my_azureml_env = Environment.get(workspace=ws, name="my_azureml_env") inference_config = InferenceConfig( entry_script="energydemandforecasting/score.py", environment=my_azureml_env ) # Set deployment configuration deployment_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1) aci_service_name = "aci-service-arima" # Define the model, inference, & deployment configuration and web service name and location to deploy service = Model.deploy( workspace=ws, name=aci_service_name, models=[model], inference_config=inference_config, deployment_config=deployment_config, ) service.wait_for_deployment(True) ``` ### Call the service and test it The service is tested on the `energy.csv` data. ``` # load the data to use for testing and encode it in json energy_pd = load_data("./data/energy.csv") energy = pd.DataFrame.to_json(energy_pd, date_format="iso") energy = json.loads(energy) energy = json.dumps({"energy": energy}) # Call the service to get the prediction for this time series prediction = service.run(energy) ``` ### Plot the result * Convert the prediction to a data frame containing correct indices and columns. * Scale the original data as in the training. * Plot the original data and the prediction. 
``` # prediction is a string, convert it to a dictionary prediction = ast.literal_eval(prediction) # convert the dictionary to pandas dataframe prediction_df = pd.DataFrame.from_dict(prediction) prediction_df.columns = ["load"] prediction_df.index = energy_pd.iloc[2500:2510].index # Scale the original data scaler = MinMaxScaler() energy_pd["load"] = scaler.fit_transform( np.array(energy_pd.loc[:, "load"].values).reshape(-1, 1) ) # Visualize a part of the data before the forecasting original_data = energy_pd.iloc[1500:2501] # Plot the forecasted data points fig = plt.figure(figsize=(15, 8)) plt.plot_date( x=original_data.index, y=original_data, fmt="-", xdate=True, label="original load", color="red", ) plt.plot_date( x=prediction_df.index, y=prediction_df, fmt="-", xdate=True, label="predicted load", color="yellow", ) ``` ### Cleanup The service costs money during deployment. We should clean this up ``` service.delete() ```
github_jupyter
# Forest Inference Library (FIL) The forest inference library is used to load saved forest models of xgboost, lightgbm or protobuf and perform inference on them. It can be used to perform both classification and regression. In this notebook, we'll begin by fitting a model with XGBoost and saving it. We'll then load the saved model into FIL and use it to infer on new data. FIL works in the same way with lightgbm and protobuf model as well. The model accepts both numpy arrays and cuDF dataframes. In order to convert your dataset to cudf format please read the cudf documentation on https://rapidsai.github.io/projects/cudf/en/latest/. For additional information on the forest inference library please refer to the documentation on https://rapidsai.github.io/projects/cuml/en/latest/index.html ``` !conda install -c rapidsai -c nvidia -c conda-forge \ -c defaults rapids=0.13 python=3.6 conda install -c conda-forge xgboost import numpy as np import os from cuml.test.utils import array_equal from cuml.utils.import_utils import has_xgboost from sklearn.datasets import make_classification from sklearn.metrics import accuracy_score from sklearn.model_selection import train_test_split from cuml import ForestInference ``` ### Check for xgboost Checks if xgboost is present, if not then it throws an error. ``` if has_xgboost(): import xgboost as xgb else: raise ImportError("Please install xgboost using the conda package," " Use conda install -c conda-forge xgboost " "command to install xgboost") ``` ## Train helper function Defines a simple function that trains the XGBoost model and returns the trained model. 
For additional information on the xgboost library please refer to the documentation on : https://xgboost.readthedocs.io/en/latest/parameter.html ``` def train_xgboost_model(X_train, y_train, num_rounds, model_path): # set the xgboost model parameters params = {'silent': 1, 'eval_metric':'error', 'objective':'binary:logistic', 'max_depth': 25} dtrain = xgb.DMatrix(X_train, label=y_train) # train the xgboost model bst = xgb.train(params, dtrain, num_rounds) # save the trained xgboost model bst.save_model(model_path) return bst ``` ## Predict helper function Uses the trained xgboost model to perform prediction and return the labels. ``` def predict_xgboost_model(X_validation, y_validation, xgb_model): # predict using the xgboost model dvalidation = xgb.DMatrix(X_validation, label=y_validation) xgb_preds = xgb_model.predict(dvalidation) # convert the predicted values from xgboost into class labels xgb_preds = np.around(xgb_preds) return xgb_preds ``` ## Define parameters ``` n_rows = 10000 n_columns = 100 n_categories = 2 random_state = np.random.RandomState(43210) # enter path to the directory where the trained model will be saved model_path = 'xgb.model' # num of iterations for which the model is trained num_rounds = 15 ``` ## Generate data ``` # create the dataset X, y = make_classification(n_samples=n_rows, n_features=n_columns, n_informative=int(n_columns/5), n_classes=n_categories, random_state=random_state) train_size = 0.8 # convert the dataset to np.float32 X = X.astype(np.float32) y = y.astype(np.float32) # split the dataset into training and validation splits X_train, X_validation, y_train, y_validation = train_test_split( X, y, train_size=train_size) ``` ## Train and Predict the model Invoke the function to train the model and get predictions so that we can validate them. 
``` # train the xgboost model xgboost_model = train_xgboost_model(X_train, y_train, num_rounds, model_path) %%time # test the xgboost model trained_model_preds = predict_xgboost_model(X_validation, y_validation, xgboost_model) ``` ## Load Forest Inference Library (FIL) The load function of the ForestInference class accepts the following parameters: filename : str Path to saved model file in a treelite-compatible format (See https://treelite.readthedocs.io/en/latest/treelite-api.html) output_class : bool If true, return a 1 or 0 depending on whether the raw prediction exceeds the threshold. If False, just return the raw prediction. threshold : float Cutoff value above which a prediction is set to 1.0 Only used if the model is classification and output_class is True algo : string name of the algo (from algo_t enum) 'NAIVE' - simple inference using shared memory 'TREE_REORG' - similar to naive but trees rearranged to be more coalescing-friendly 'BATCH_TREE_REORG' - similar to TREE_REORG but predicting multiple rows per thread block model_type : str Format of saved treelite model to load. Can be 'xgboost', 'lightgbm', or 'protobuf' ## Load the saved model Use FIL to load the saved xgboost model ``` fm = ForestInference.load(filename=model_path, algo='BATCH_TREE_REORG', output_class=True, threshold=0.50, model_type='xgboost') ``` ## Predict using FIL ``` %%time # perform prediction on the model loaded from path fil_preds = fm.predict(X_validation) ``` ## Evaluate results Verify the predictions for the original and FIL model match. ``` print("The shape of predictions obtained from xgboost : ",(trained_model_preds).shape) print("The shape of predictions obtained from FIL : ",(fil_preds).shape) print("Are the predictions for xgboost and FIL the same : " , array_equal(trained_model_preds, fil_preds)) ```
github_jupyter
# Using Mitiq with Qiskit quantum programs This notebook shows how to use Mitiq to mitigate errors in Qiskit quantum programs. **This has been adapted to mitigate error from a 16-bit floating point build of Qrack, as a "noisy simulator".** Truncation error (to improve execution time) of simulation motivates a use case for error mitigation. The main sections are: * **Section 1: Computing quantum expectation values without error mitigation** * **Section 2: Applying zero-noise extrapolation (ZNE)** * ~~**Section 3: Applying probabilistic error cancellation (PEC)**~~ Useful links: * Qiskit repository: https://github.com/Qiskit/qiskit * Qiskit documentation: https://qiskit.org/documentation/ * IBMQ portal: https://www.ibm.com/quantum-computing/ * Mitiq repository: https://github.com/unitaryfund/mitiq * Mitiq documentation: https://mitiq.readthedocs.io/en/stable/ * Mitiq white paper: https://mitiq.readthedocs.io/en/stable/ * Unitary Fund: https://unitary.fund This notebook requires the Python packages: `qiskit-qrack-provider`, `qiskit`, `cirq` and `mitiq`. If necessary, uncomment the next code cell to install them. ``` !pip install qiskit-qrack-provider --quiet !pip install qiskit --quiet !pip install cirq --quiet !pip install mitiq --quiet import warnings warnings.filterwarnings(action='ignore') # Optional warning filter ``` ## Section 1: Computing quantum expectation values without error mitigation ### Define the circuit of interest We first define an arbitrary circuit of interest. We randomly initialize the qubit width with single qubit gates and then run a QFT algorithm. ``` import math import random import cirq from mitiq.interface import convert_to_mitiq, convert_from_mitiq n_qubits = 10 qubits = cirq.LineQubit.range(n_qubits) circuit = cirq.Circuit() for i in range(n_qubits): # Initialize with uniformly random single qubit gates, across full width. 
circuit.append(cirq.rx(random.uniform(0, 2 * math.pi)).on(qubits[i])) circuit.append(cirq.ry(random.uniform(0, 2 * math.pi)).on(qubits[i])) circuit.append(cirq.rz(random.uniform(0, 2 * math.pi)).on(qubits[i])) circuit.append(cirq.qft(*qubits, without_reverse=True)) circuit = convert_from_mitiq(convert_to_mitiq(circuit)[0], "qiskit") print(f"Randomized benchmarking circuit with {len(circuit)} gates generated.") # print(circuit) ``` ### Execute the circuit with a noiseless backend ``` import qiskit # Set the number of shots shots = 10 ** 6 # Initialize ideal backend (classical noiseless simulator) ideal_backend = qiskit.Aer.get_backend('aer_simulator') # Append measurements circuit_to_run = circuit.copy() circuit_to_run.measure_all() # Run and get count job = ideal_backend.run(circuit_to_run, shots=shots) counts = job.result().get_counts() key = max(counts) # Expectation value ideal_value = (counts[key] if key in counts else 0) / shots ideal_value ``` ### Execute the circuit with a noisy backend (without error mitigation) ``` from qiskit.providers.qrack import Qrack # Select a noisy backend noisy_backend = Qrack.get_backend('qasm_simulator') # Append measurements circuit_to_run = circuit.copy() circuit_to_run.measure_all() # Run and get counts print(f"Executing circuit with {len(circuit)} gates using {shots} shots.") job = noisy_backend.run(circuit_to_run, shots=shots) counts = job.result().get_counts() # Compute expectation value of the observable noisy_value = (counts[key] if key in counts else 0) / shots noisy_value ``` ## Section 2: Applying zero-noise extrapolation (ZNE) ### Define an executor function We rewrite the last code cell of the previous section as a function that we call `executor`. This function takes as input a Qiskit circuit and returns the final noisy expectation value. Hint: It is usually better, but not necessary, to set `optimization_level=0` to avoid automatic circuit optimizations.
``` def executor(circuit, shots=shots): """Executes the input circuit and returns the noisy expectation value <A>, where A=|00><00|. """ # Select a noisy backend # noisy_backend = qiskit.IBMQ.load_account().get_backend("ibmq_lima") # noisy_backend = FakeLima() # Simulator with noise model similar to "ibmq_lima" noisy_backend = Qrack.get_backend('qasm_simulator') # Append measurements circuit_to_run = circuit.copy() circuit_to_run.measure_all() # Run and get counts print(f"Executing circuit with {len(circuit)} gates using {shots} shots.") job = noisy_backend.run(circuit_to_run, shots=shots) counts = job.result().get_counts() # Compute expectation value of the observable noisy_value = (counts[key] if key in counts else 0) / shots return noisy_value ``` ### Run zero-noise extrapolation with Mitiq ``` from mitiq import zne zne_value = zne.execute_with_zne(circuit, executor) unmitigated_error = abs(ideal_value - noisy_value) zne_error = abs(ideal_value - zne_value) print(f"Estimation error without Mitiq : {unmitigated_error}") print(f"Estimation error with Mitiq (ZNE): {zne_error}") print(f"ZNE reduced the estimation error by {(unmitigated_error - zne_error) / unmitigated_error :.1%}.") ``` Below we show how to select different options for zero-noise extrapolation.
### Select a noise scaling method ``` # Choose a unitary folding function noise_scaling_function = zne.scaling.fold_global ``` Let's check the effect of the noise scaling function on the input circuit: ``` noise_scaled_circuit = noise_scaling_function(circuit, scale_factor=2) print(f"The input circuit has {len(circuit)} gates") print(f"The scaled circuit has {len(noise_scaled_circuit)} gates") ``` ### Select an extrapolation model ``` factory = zne.inference.RichardsonFactory(scale_factors = [1, 2, 3]) # Examples: # Richardson extrapolation # factory = zne.inference.RichardsonFactory(scale_factors = [1, 2, 3]) # Exponential extrapolation # factory = zne.inference.ExpFactory(scale_factors = [1, 2, 3], asymptote=0.25) # Exponential adaptive extrapolation # factory = zne.inference.AdaExpFactory(steps=10, asymptote=0.25) ``` ### Apply ZNE with non-default options ``` zne_value = zne.execute_with_zne(circuit, executor, scale_noise=noise_scaling_function, factory=factory) unmitigated_error = abs(ideal_value - noisy_value) zne_error = abs(ideal_value - zne_value) print(f"Estimation error without Mitiq : {unmitigated_error}") print(f"Estimation error with Mitiq (ZNE): {zne_error}") print(f"ZNE reduced the estimation error by {(unmitigated_error - zne_error) / unmitigated_error :.1%}.") ``` ### Analyze and visualize ZNE data ``` _ = factory.plot_fit() factory.get_expectation_values() factory.get_scale_factors() factory.get_zero_noise_limit() ```
github_jupyter
# Import Modules ``` import sys import numpy as np import pandas as pd sys.path.insert(0, "..") from local_methods import compare_rdf_ij # ######################################################### import pickle; import os path_i = os.path.join( os.environ["HOME"], "__temp__", "temp_2.pickle") with open(path_i, "rb") as fle: df_rdf_i, df_rdf_j = pickle.load(fle) # ######################################################### def create_interp_df(df_i, x_combined): """ """ r_combined = x_combined # df_i = df_rdf_j tmp_list = [] data_dict_list = [] for r_i in r_combined: # print("r_i:", r_i) data_dict_i = dict() # ################################################# min_r = df_i.r.min() max_r = df_i.r.max() # ################################################# if r_i in df_i.r.tolist(): row_i = df_i[df_i.r == r_i].iloc[0] g_new = row_i.g else: # print(r_i) # tmp_list.append(r_i) if (r_i < min_r) or (r_i > max_r): g_new = 0. else: # break from scipy.interpolate import interp1d inter_fun = interp1d( df_i.r, df_i.g, kind='linear', axis=-1, copy=True, bounds_error=None, # fill_value=None, assume_sorted=False, ) g_new = inter_fun(r_i) data_dict_i["r"] = r_i data_dict_i["g"] = g_new data_dict_list.append(data_dict_i) df_tmp = pd.DataFrame(data_dict_list) return(df_tmp) r_combined = np.sort((df_rdf_j.r.tolist() + df_rdf_i.r.tolist())) r_combined = np.sort(list(set(r_combined))) df_interp_i = create_interp_df(df_rdf_i, r_combined) df_interp_j = create_interp_df(df_rdf_j, r_combined) compare_rdf_ij( df_rdf_i=df_interp_i, df_rdf_j=df_interp_j) ``` ``` # # df_rdf_i.head() # # df_rdf_j.head() # print(df_rdf_j.shape[0]) # print(df_rdf_i.shape[0]) # len(tmp_list) # len(r_combined) # for i in r_combined: # print(i) # for i_cnt, row_i in df_rdf_i.iterrows(): # tmp = 42 # row_i.r # import plotly.graph_objs as go # trace = go.Scatter( # x=df_tmp.r, # y=df_tmp.g, # ) # data = [trace] # fig = go.Figure(data=data) # fig.show() # # r_i # # min_r # id_min = df_i[df_i.r > r_i].r.idxmin() # row_i = 
df_i.loc[id_min] # r_1 = row_i.r # g_1 = row_i.g # id_max = df_i[df_i.r < r_i].r.idxmax() # row_i = df_i.loc[id_max] # r_0 = row_i.r # g_0 = row_i.g # r_i = 0.7222222222222222 # dr = 0.2 # # ######################################################### ```
github_jupyter
# Chapter 1 Tutorial You can use NetworkX to construct and draw graphs that are undirected or directed, with weighted or unweighted edges. An array of functions to analyze graphs is available. This tutorial takes you through a few basic examples and exercises. Note that many exercises are followed by a block with some `assert` statements. These assertions may be preceded by some setup code. They are provided to give you feedback that you are on the right path -- receiving an `AssertionError` probably means you've done something wrong. ## Official documentation for version used in this tutorial https://networkx.github.io/documentation/networkx-2.2/ ## Official tutorial for version used in this tutorial https://networkx.github.io/documentation/networkx-2.2/tutorial.html # The `import` statement Recall that `import` statements go at the top of your code, telling Python to load an external module. In this case we want to load NetworkX, but give it a short alias `nx` since we'll have to type it repeatedly, hence the `as` statement. Lines starting with the `%` character are not Python code, they are "magic" directives for Jupyter notebook. The `%matplotlib inline` magic tells Jupyter Notebook to draw graphics inline i.e. in the notebook. This magic should be used right after the import statement. ``` import networkx as nx %matplotlib inline ``` Let's check the installed version of NetworkX. Version 2 is incompatible with v1, so we want to make sure we're not using an out of date package. ``` nx.__version__ ``` # Creating and drawing undirected graphs ``` # a "plain" graph is undirected G = nx.Graph() # give each a node a 'name', which is a letter in this case. 
G.add_node('a') # the add_nodes_from method allows adding nodes from a sequence, in this case a list nodes_to_add = ['b', 'c', 'd'] G.add_nodes_from(nodes_to_add) # add edge from 'a' to 'b' # since this graph is undirected, the order doesn't matter here G.add_edge('a', 'b') # just like add_nodes_from, we can add edges from a sequence # edges should be specified as 2-tuples edges_to_add = [('a', 'c'), ('b', 'c'), ('c', 'd')] G.add_edges_from(edges_to_add) # draw the graph nx.draw(G, with_labels=True) ``` There are many optional arguments to the draw function to customize the appearance. ``` nx.draw(G, with_labels=True, node_color='blue', node_size=1600, font_color='white', font_size=16, ) ``` # A note on naming conventions Usually in Python, variables are named in `snake_case`, i.e. lowercase with underscores separating words. Classes are conventionally named in `CamelCase`, i.e. with the first letter of each word capitalized. Obviously NetworkX doesn't use this convention, often using single capital letters for the names of graphs. This is an example of convention leaking from the world of discrete mathematics. Since most of the documentation you will find online uses this convention, we will follow it as well. # Graph methods The graph object has some properties and methods giving data about the whole graph. ``` # List all of the nodes G.nodes() # List all of the edges G.edges() ``` NodeView and EdgeView objects have iterators, so we can use them in `for` loops: ``` for node in G.nodes: print(node) for edge in G.edges: print(edge) ``` Note that the edges are given as 2-tuples, the same way we entered them. We can get the number of nodes and edges in a graph using the `number_of_` methods. ``` G.number_of_nodes() G.number_of_edges() ``` Some graph methods take an edge or node as argument. These provide the graph properties of the given edge or node. 
For example, the `.neighbors()` method gives the nodes linked to the given node: ``` # list of neighbors of node 'b' G.neighbors('b') ``` For performance reasons, many graph methods return iterators instead of lists. They are convenient to loop over: ``` for neighbor in G.neighbors('b'): print(neighbor) ``` and you can always use the `list` constructor to make a list from an iterator: ``` list(G.neighbors('b')) ``` # NetworkX functions vs. Graph methods The previous data are available via graph *methods*, *i.e.* they are called from the graph object: G.<method_name>(<arguments>) While several of the most-used NetworkX functions are provided as methods, many more of them are module functions and are called like this: nx.<function_name>(G, <arguments>) that is, with the graph provided as the first, and maybe only, argument. Here are a couple of examples of NetworkX module functions that provide information about a graph: ``` nx.is_tree(G) nx.is_connected(G) ``` # Node and edge existence To check if a node is present in a graph, you can use the `has_node()` method: ``` G.has_node('a') G.has_node('x') ``` Additionally, the loop syntax used above: `for n in G.nodes` suggests another way we can check if a node is in a graph: ``` 'd' in G.nodes ``` Likewise we can check if two nodes are connected by an edge: ``` G.has_edge('a', 'b') G.has_edge('a', 'd') ('c', 'd') in G.edges ``` # Node degree One of the most important questions we can ask about a node in a graph is how many other nodes it connects to. Using the `.neighbors()` method from above, we could formulate this question as so: ``` len(list(G.neighbors('a'))) ``` but this is such a common task that NetworkX provides us a graph method to do this in a much clearer way: ``` G.degree('a') ``` # EXERCISE 1 Often in the context of trees, a node with degree 1 is called a *leaf*. Write a function named `get_leaves` that takes a graph as an argument, loops through the nodes, and returns a list of nodes with degree 1. 
``` def get_leaves(G): G = nx.Graph() G.add_edges_from([ ('a', 'b'), ('a', 'd'), ('c', 'd'), ]) assert set(get_leaves(G)) == {'c', 'b'} ``` # Aside: comprehensions Often we have one sequence of values and we want to generate a new sequence by applying an operation to each item in the first. List comprehensions and generator expressions are compact ways to do this. List comprehensions are specified inside square brackets, and immediately produce a list of the result. ``` items = ['spider', 'y', 'banana'] [item.upper() for item in items] ``` In the context of NetworkX, this is often used to do something with the node or edge lists: ``` print(G.nodes()) print([G.degree(n) for n in G.nodes()]) ``` Generator expressions are slightly different as they are evaluated [lazily](https://en.wikipedia.org/wiki/Lazy_evaluation). These are specified using round braces, and if they are being expressed as a function argument, they can be specified without any braces. These are most often used in the context of aggregations like the `max` function: ``` g = (len(item) for item in items) list(g) max(len(item) for item in items) sorted(item.upper() for item in items) ``` # Node names The node names don't have to be single characters -- they can be strings or integers or any immutable object, and the types can be mixed. The example below uses strings and integers for names. ``` G = nx.Graph() G.add_nodes_from(['cat','dog','virus',13]) G.add_edge('cat','dog') nx.draw(G, with_labels=True, font_color='white', node_size=1000) ``` # Adjacency lists One compact way to represent a graph is an adjacency list. This is most useful for unweighted graphs, directed or undirected. In an adjacency list, each line contains some number of node names. The first node name is the "source" and each other node name on the line is a "target". 
For instance, given the following adjacency list: ``` a d e b c c d e ``` the edges are as follows: ``` (a, d) (a, e) (b, c) ``` The nodes on their own line exist so that we are sure to include any singleton nodes. Note that if our graph is undirected, we only need to specify one direction for each edge. Importantly, whether the graph is directed or undirected is often not contained in the file itself -- you have to infer it. This is one limitation of the format. In the `datasets` directory, there is a file called `friends.adjlist`. It's a plain text file, so you can open it on your computer or in GitHub, but here are its contents: ``` print(open('datasets/friends.adjlist').read()) ``` NetworkX provides a way to read a graph from an adjacency list: `nx.read_adjlist()`. We will name this graph SG, for social graph. ``` SG = nx.read_adjlist('datasets/friends.adjlist') ``` We know how to draw this graph: ``` nx.draw(SG, node_size=2000, node_color='lightblue', with_labels=True) ``` And we know how to get information such as the number of friends linked from a node: ``` SG.degree('Alice') ``` # EXERCISE 2 Write a function max_degree that takes a graph as its argument, and returns a 2-tuple with the name and degree of the node with highest degree. ``` def max_degree(G): SG = nx.read_adjlist('datasets/friends.adjlist') assert max_degree(SG) == ('Claire', 4) ``` # EXERCISE 3 Write a function `mutual_friends` that takes a graph and two nodes as arguments, and returns a list (or set) of nodes that are linked to both given nodes. For example, in the graph `SG` drawn above, mutual_friends(SG, 'Alice', 'Claire') == ['Frank'] an empty list or set should be returned in the case where two nodes have no mutual friends, e.g. George and Bob in `SG` drawn above. 
``` def mutual_friends(G, node_1, node_2): SG = nx.read_adjlist('datasets/friends.adjlist') assert mutual_friends(SG, 'Alice', 'Claire') == ['Frank'] assert mutual_friends(SG, 'George', 'Bob') == [] assert sorted(mutual_friends(SG, 'Claire', 'George')) == ['Dennis', 'Frank'] ``` # Directed graphs Unless otherwise specified, we assume graph edges are undirected -- they are symmetric and go both ways. But some relationships, e.g. predator-prey relationships, are asymmetric and best represented as directed graphs. NetworkX provides the `DiGraph` class for directed graphs. ``` D = nx.DiGraph() D.add_edges_from([(1,2),(2,3),(3,2),(3,4),(3,5),(4,5),(4,6),(5,6),(6,4),(4,2)]) nx.draw(D, with_labels=True) ``` Note the asymmetry in graph methods dealing with edges such as `has_edge()`: ``` D.has_edge(1,2) D.has_edge(2,1) ``` Instead of the symmetric relationship "neighbors", nodes in directed graphs have predecessors ("in-neighbors") and successors ("out-neighbors"): ``` print('Successors of 2:', list(D.successors(2))) print('Predecessors of 2:', list(D.predecessors(2))) ``` Directed graphs have in-degree and out-degree, giving the number of edges pointing to and from the given node, respectively: ``` D.in_degree(2) D.out_degree(2) ``` ### Caveat Since NetworkX 2, the `.degree()` method on a directed graph gives the total degree: in-degree plus out-degree. However, in a bit of confusing nomenclature, the `neighbors` method is a synonym for `successors`, giving only the edges originating from the given node. This makes sense if you consider `neighbors` to be all the nodes reachable from the given node by following links, but it's easy to make the mistake of writing `.neighbors()` in your code when you really want both predecessors and successors. ``` D.degree(2) print('Successors of 2:', list(D.successors(2))) print('"Neighbors" of 2:', list(D.neighbors(2))) ```
github_jupyter
``` %matplotlib inline from pyvista import set_plot_theme set_plot_theme('document') ``` # Export a GemPy Model to MOOSE This section briefly describes how to export a GemPy model to get a working input file for MOOSE. This example is mainly taken from the tutorial `gempy export MOOSE <https://github.com/cgre-aachen/gempy/blob/master/examples/integrations/gempy_export_MOOSE.py>`_ from the official GemPy repository. It will guide you through the process of exporting a geological model generated in `GemPy <https://www.gempy.org/>`_ (Tutorial Chapter 1-1 therein) so it is usable as a Mesh # in the `MOOSE <https://mooseframework.org/>`_ framework. ## Creating a geological model The procedure of generating a geological model is presented in detail in `Chapter 1-1 <https://nbviewer.jupyter.org/github/cgre-aachen/gempy/blob/master/notebooks/tutorials/ch1-1_Basics.ipynb>`_ of the GemPy tutorials, so it will only be briefly presented here: ``` import gempy as gp import matplotlib.pyplot as plt geo_model = gp.create_model('tutorial_moose_exp') gp.init_data(geo_model, [0,2000., 0,2000., 0,2000.], [50, 50, 80], path_o = "../../data/GemPy/simple_fault_model_orientations.csv", path_i = "../../data/GemPy/simple_fault_model_points.csv", default_values = True) ``` present the units and series ``` geo_model.surfaces ``` combine units in series and make two series, as the fault needs its own ``` gp.map_series_to_surfaces(geo_model, {"Fault_Series" : 'Main_Fault', "Strat_Series" : ('Sandstone_2', 'Siltstone', 'Shale', 'Sandstone_1', 'basement')}, remove_unused_series=True) # set the fault series to be fault object geo_model.set_is_fault(['Fault_Series'], change_color=False) ``` check whether series were assigned correctly ``` geo_model.surfaces ``` ## Model generation After loading in the data, we set it up for interpolation and compute the model. 
``` gp.set_interpolator(geo_model, compile_theano=True, theano_optimizer='fast_compile', verbose=[]) gp.compute_model(geo_model, compute_mesh=False) # sphinx_gallery_thumbnail_number = 1 gp.plot_2d(geo_model, direction='y', cell_number=45,show_data=False, show_boundaries=False, show_topography=False) ``` ## Exporting the Model to MOOSE The voxel-model above already is the same as a model discretized in a hexahedral grid, so my immediately be used as input in a simulation tool, e.g. `MOOSE <https://mooseframework.org/>`_. For this, we need to access to the unit IDs assigned to each voxel in GemPy. The array containing these IDs is called `lith_block`. ``` ids = geo_model.solutions.lith_block print(ids) ``` This array has the shape of `(x,)` and would be immediately useful, if GemPy and the chosen simulation code would _populate_ a grid in the same way. Of course, however, that is not the case. This is why we have to restructure the `lith_block` array, so it can be read correctly by MOOSE. The model resolution is extracted, so is the model extent: ``` nx, ny, nz = geo_model.grid.regular_grid.resolution # model extent xmin, xmax, ymin, ymax, zmin, zmax = geo_model.grid.regular_grid.extent ``` These two parameters are important to, a) restructure `lith_block`, and b) write the input file for MOOSE correctly. For a), we need to reshape `lith_block` again to its three dimensions and _re-flatten_ it in a _MOOSE-conform_ way, i.e. reshape to 3D array and then flattened: ``` units = ids.reshape((nx, ny, nz)) # flatten MOOSE conform units = units.flatten('F') ``` The importance of `nx, ny, nz` is apparent from the cell above. But what about `xmin`, ..., `zmax`? A MOOSE input-file for mesh generation has the following syntax: .. 
code-block:: python [MeshGenerators] [./gmg] type = GeneratedMeshGenerator dim = 3 nx = 50 ny = 50 nz = 80 xmin = 0.0 xmax = 2000.0 ymin = 0.0 ymax = 2000.0 zmin = 0.0 zmax = 2000.0 block_id = '1 2 3 4 5 6' block_name = 'Main_Fault Sandstone_2 Siltstone Shale Sandstone_1 basement' [../] [./subdomains] type = ElementSubdomainIDGenerator input = gmg subdomain_ids = ' ' # here you paste the transformed lith_block vector [../] [] [Mesh] type = MeshGeneratorMesh [] So these parameters are required inputs in the `[MeshGenerators]` object in the MOOSE input file. `GemPy` has a method to directly create such an input file, stored in `gempy.utils.export.py`. The following cell shows how to call the method: ``` import gempy.utils.export as export export.export_moose_input(geo_model, path='') ``` This method automatically stores a file `geo_model_units_moose_input.i` at the specified path. Either this input file could be extended with parameters to directly run a simulation, or it is used just for creating a mesh. In the latter case, the next step would be to run the compiled MOOSE executable with the optional flag `--mesh-only`. E.g. with using the `PorousFlow module <https://mooseframework.inl.gov/modules/porous_flow/>`_: ```bash $path_to_moose/moose/modules/porous_flow/porous_flow-opt -i pct_voxel_mesh.i --mesh-only ``` How to compile MOOSE is described in their `documentation <https://mooseframework.inl.gov/getting_started/index.html>`_. The now generated mesh with the name `geo_model_units_moose_input_in.e` can be used as input for another MOOSE input file, which contains the main simulation parameters. To call the file with the grid, the following part has to be added in the MOOSE simulation input file: .. code-block:: python [Mesh] file = geo_model_units_moose_input_in.e [] The final output of the simulation may also be such an `.e`, which can, for instance, be opened with `paraview <https://www.paraview.org/>`_.
A simulated temperature field (purely conductive) of the created model would look like this: <img src="file://../../_static/GemPy_model_combined.png" width="800" alt="Side by side example of gempy model and MOOSE HT-simulation">
github_jupyter
# Videos and Exercises for Session 2: Data Structuring in Pandas I In this combined teaching module and exercise set, you will be working with structuring data. We will start out with a recap of some basic function and methods that become available in pandas. Then there will be a short intermezzo, where you will be required to think a bit about how to write readible code and make use of the method chaining opportunities that come with python. Then finally, you will get to work with some more advanced data types that are handled natively with pandas. The structure is as follows: 1. Welcome (Back to) Pandas - DataFrames and Series - Operations with Elementary Data Types in the Context of Pandas - Boolean Series - String Operations - Numeric Operations and Methods 2. Readible Code and Method Chaining 3. More Advanced Data Types - Categorical Data - Time Series Data **NOTE:** I might be speaking quite slowly in some of the videos. A good advice is to turn up the speed to x1.25 or x1.5 if you want to get through without spending too much time:) ## Packages Load in the required modules and set up the plotting library: ``` %matplotlib inline import numpy as np import pandas as pd import matplotlib.pyplot as plt from IPython.display import YouTubeVideo ``` Wonder what `%matplotlib inline` does? It's not too important for now. However, if you are interested, you can read more about it [here](https://stackoverflow.com/questions/43027980/purpose-of-matplotlib-inline). # Part 1: Welcome (Back to) Pandas As mentioned during the lecture, data structuring skills are necessary to become a great data scientist. There is no way around it. Let's start with the basics. In the first video, we start out by talking about the fundamentals of Pandas: - The Pandas DataFrame and the Pandas Series. - We then proceed and discuss how indices and colums can be named and selected. - Finally, we make some simple operations. 
**NOTE:** This is mainly repitition of what you already worked with in assignment 0. If you feel comfortable with this, you should proceed to the associated exercises that will get you warmed up for the rest of this notebook. ``` YouTubeVideo('uSiqryfDYo0', width=640, height=360) ``` In this set of introductory exercises, we are going to briefly touch upon some of the tools from the video. Again, be aware that there is going to be some repition of the content from assignment 0 here - but these things are all very useful to get under your skin! > **Ex. 2.1.1**: Run the following two lines below. Explain to yourself what the second line did, and how each entry affected it. In relation to this, what did the first line ensure? When can this be useful? Try and play around with the seed. ``` np.random.seed(seed=161193) np_arr=np.round(np.random.exponential(10, size=(10**5,3)),0) ``` Now, we want you to investigate how easy it is to go from a numpy `array` to a pandas `DataFrame`. You are going to do something similar to this repeatedly in this course! > **Ex. 2.1.2**: Create a Pandas DataFrame called `my_df` from `np_arr`: ``` # YOUR CODE HERE raise NotImplementedError() my_df ``` Before we continue working with this DataFrame, we want to make sure that you are capable of transforming it back to different types of containers. This is another very fundamental tool in your data science toolbox. > **Ex. 2.1.3**: In the following cell, generate an `array`, a `list` and a `dict` from `my_df` (you should be able to do each of these things in one line). Verify that you did this properly by printing your output. Inspect the dictionary: How do you access the value associated with the column index 2 and row index 8543? ``` # YOUR CODE HERE raise NotImplementedError() ``` A noticeable distiction between pandas dataframes and, say, numpy arrays is the option to label indices and (in particular) columns. This is helpful when working with large sets of data. > **Ex. 
2.1.4**: Going back to the DataFrame `my_df`, we now want you to try the following: > 1. Label the columns 'apples', 'bananas' and 'oranges' > 2. Change the indices to 'cust**x**' where **x** is a sequential numbering of the lines starting from 1 (*hint:* There are many ways to do this, e.g. with a list comprehension!) > 3. Print the first 10 observations using the `head()` method. ``` # YOUR CODE HERE raise NotImplementedError() ``` As we saw in the video, we can basically extract a series from a pandas dataframe by simply selecting one column from the dataframe and storing it as a new object. > **Ex. 2.1.5**: Now, we want you to select only a single column and inspect the output a bit: > 1. Select 'apples' and store it as 'apples_ser'. Try using the key-based method and the object-based method, respectively. > 2. Check the data type. Thinking about our first lines of code, why is it not an `integer`? Convert the data in both 'apples_ser' and 'my_df' to intergers. ``` # YOUR CODE HERE raise NotImplementedError() ``` ## Boolean Series ``` YouTubeVideo('CfiV8QmfGVk', width=640, height=360) ``` Boolean 'data' is the simplest data type that we work with. These variables are binary, and basically represent a True or False value. As seen in the video, they become extraordinarily useful when working with pandas series. In particular, boolean series can be applied when we only want to work with certain rows in a dataframe. > **Ex. 2.1.6**: Drop all rows for which 'Apples' is strictly smaller than 5. ``` # YOUR CODE HERE raise NotImplementedError() ``` ## Numeric Operications and Methods ``` YouTubeVideo('ClB-n1f9sBM', width=640, height=360) ``` As we saw in the video, we can easily make vectorized operations with multiple numpy arrays as well as pandas series and dataframes. This feature can become useful in many cases - in particular, when you have a large set of data and you do not want to make a (computationally exhaustive) loop through all you observation. > **Ex. 
2.1.7**: In this exercise, we want you to make operations on multiple vectors and matrices using a combination of numpy arrays and pandas dataframes. > 1. Create a numpy array (with 3 elements) of costs associated with different types of fruits. Call this 'price'. Entries should be 1, 4 and 3, respectively. > 2. Create a new dataframe with information on each customer's expenses associated with each type of fruit. This should involve multiplying your cost array with the 'my_df'. > 3. Create a new column called 'Total expenditure' by summing over fruit-specific prices for each customer (*hint:* Learn more about such sum [here](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.sum.html)). > 4. Sort the dataframe by total expenditure, starting with the customer with highest expenditure. ``` # YOUR CODE HERE raise NotImplementedError() ``` When we have a large data set, it is difficult to get an idea of its characteristics by simply sorting and printing. In such cases, we would much rather want to look at some kinds of descriptive statistics for our data. > **Ex. 2.1.8**: In this exercise, we will ask you to use two methods from the video in order to shed some light on the distribution of total expenditure: > 1. Use `describe()` to get an idea about the distribution of total expenditure in the data set. What does the distance between `mean` and the `50%` quantile tell you about the skewness? > 2. Use the `cut` method to split the sample into 6 equal sized bins of expenditures from 0 to 600. Find the *share* of observations within each of these bins (we will work more with *categorical data* later in this session). ``` # YOUR CODE HERE raise NotImplementedError() ``` ## String Operations ``` YouTubeVideo('Yjdd8rKj_WI', width=640, height=360) ``` Finally, we saw that pandas handles string variables natively. In particular, there is a set of string-specific operations (`.str`) that can be applied to a whole pandas series without having to loop through each row. 
> **Ex. 2.1.9**: In this final introductory exercise, we want you to > 1. Create a new variable (column) based on the index called 'Customer'. This should capitalize the customer id and write out 'Cust' to 'Customer' (i.e. you should get something similar to 'Customer **x**'). > 2. Reset the index. ``` # YOUR CODE HERE raise NotImplementedError() ``` # Part 2: Readible Code Before proceeding to part 3 on advanced data types that are handled natively in pandas, we want you to think a bit about writing readable code. In particular, if we have lots of code, it may be very difficult for others or ourselves to read. Therefore, providing some structure and meta text can help reading the code. In the following, you will learn about readible code. Then we ask you to use what you learned in order to process the NOAA data from assignment 0. Don't spend too long on this exercise - we are going to use the data for the following exercises. ## Commenting When making code it's good practice to document different parts of the code. In particular describing functions and complex code. The example below shows how to make multi-line comments (as a string, which is not assigned) and in-line comments using the `#` character. ```python def my_fct(x,y): ''' Computes the sum of input values (multi-line comment as string) ''' z = x+y # Here we perform the summation (in-line comment) return z ``` ## Method chaining We can write multiple operations together by putting them one after the other, which is known as `method chaining`. Using this, we only need to assign them once to a new object and therefore we save a lot of code. 
We change the example below into one using a method chain: Example without method chain ```python df_temp1 = df.loc[selection] df_temp2 = df_temp1.sort_values(by=X) df_out = df_temp2.reset_index() ``` Example with method chain - one line ```python df_out = df.loc[selection].sort_values(by=X).reset_index() ``` As seen in the example, although using less code, our method chain will get more and more difficult to read if we include two or more operations. We can overcome this problem of long chains by splitting into multiple lines with line breaks: Example with method chain - line break ```python df_out = df\ .loc[selection]\ .sort_values(by=X)\ .reset_index() ``` Note that the backslash allows us to make a visual line break, but the code is read as one line. ## Exercises for Part 2 We continue with the exercise of analyzing NOAA data. We start out reviewing what we did in Assignment 0. > **Ex. 2.2.1:** The code below runs through all the steps we completed in Assignment 0.4 step by step. Your task is to document this code in your own words. You should also make your own annotation of parts. In addition, you should make the code more readable by applying method chaining. > >*Hint:* After loading the data and selecting the first four columns, everything can be stated in one method chain! It's OK if you use more, though. 
```python import pandas as pd def load_weather(year): url = f"ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/by_year/{year}.csv.gz" df_weather = pd.read_csv(url, header=None) df_weather = df_weather.iloc[:,:4] column_names = ['station', 'datetime', 'obs_type', 'obs_value'] df_weather.columns = column_names df_weather['obs_value'] = df_weather['obs_value'] / 10 selection_tmax = df_weather.obs_type == 'TMAX' df_select = df_weather.loc[selection_tmax] df_sorted = df_select.sort_values(by=['station', 'datetime']) df_reset = df_sorted.reset_index(drop=True) df_out = df_reset.copy() return df_out ``` ``` # YOUR CODE HERE raise NotImplementedError() load_weather(1863) ``` # Part 3: Advanced Data Types When you start working with data, you quickly realize that data comes in many different types. Some distinctions between data types are obvious to make. For instance, numeric variables should consists of meaningful values that represent something that can be measured (a length, weight, economic value, etc.) with a number. String variables, on the other hand, are variables that *may* also consist of letters and other characters in addition to numbers. And boolean variables are simply binary with only two possible values. However, we also encounter lots of other data types in applied work within social sciences. Categorical variables can only take a (often quite strictly) limited number of values. And time series data are characterized by being indexed in some time order. In this part, we will explore different these two data types and how you work with them in Pandas. > **Ex. 2.3.1:** Load the data for year 1863. Extract the area code (country and state) from the station name into a separate column. > > _Hint:_ The station column contains a GHCND ID, given to each weather station by NOAA. The format of these ID's is a 2 letter country/state code, followed by possible additional information on the third character and then an integer identifying the specific station. 
A simple approach is to assume a fixed length of the country ID. A more complex way would be to use the [`re`](https://docs.python.org/3.8/library/re.html) module. ``` # YOUR CODE HERE raise NotImplementedError() ``` ## Categorical Data Pandas has built-in features for working specifically with categorical data. In the video below, we will discuss how you can think about categorical data, and how you can efficiently work with it in pandas. ``` YouTubeVideo('x2TCnT46cl4', width=640, height=360) ``` Having watched the video introducing categorical data, we now want you to try and work with it. > **Ex. 2.3.2:** Do the following: > 1. Convert the `area` column to a categorical variable. > 2. Transform the `obs_value` column from a continuous to a categorical variable by partitioning it into `3` intervals. The first interval should contain observations with values of `obs_value` up to the 10% quantile. The second interval should contain observations with values of `obs_value` up to the 90% quantile. The third interval should contain the rest of the observations. Call this new column `obs_value_cat`. This can be done using the `pd.qcut()` method. > 3. Make another column with `obs_value` as a categorical variable but this time label the 3 intervals as `["cold", "medium", "hot"]`. This can be done by specifying the `labels` parameter in the `pd.qcut()` method of pandas. Call this new column `obs_value_cat_labeled`. ``` # INCLUDED IN ASSIGNMENT 1 # INCLUDED IN ASSIGNMENT 1 ``` ## Time Series Data Our coverage of basic Python did not include time. This is another elementary data type that has its own native structure in pandas; it may also be converted to an integer using a smart method. See more below. ``` YouTubeVideo('DmkT7PdqgAA', width=640, height=360) ``` > **Ex. 2.3.3:** Convert the date formatted as a string to datetime. Call this column `datetime_dt`. Make a new column named `month` with the month for each observation. Set the datetime variable as temporal index. 
> > *Hint:* Look up `.set_index()` setting the index. ``` # YOUR CODE HERE raise NotImplementedError() ``` > **Ex. 2.3.4:** Update your annotated function above with processing of area and temporal data. ``` # INCLUDED IN ASSIGNMENT 1 # INCLUDED IN ASSIGNMENT 1# YOUR CODE HERE raise NotImplementedError() ``` > **Ex. 2.3.5:** Make a timeseries plot for the station called `AGE00135039`. > > _Hint:_ for this you need to know a few methods of the pandas Series objects, for instance `.plot()`. ``` # YOUR CODE HERE raise NotImplementedError() ```
github_jupyter
# Image Segmentation U-Net + [https://ithelp.ithome.com.tw/articles/10240314](https://ithelp.ithome.com.tw/articles/10240314) + [https://www.kaggle.com/tikutiku/hubmap-tilespadded-inference-v2](https://www.kaggle.com/tikutiku/hubmap-tilespadded-inference-v2) 這次改用 kaggle HuBMAP 腎絲球辨識競賽第一名所使用的模型 主要也是 U-Net 的架構,在 Encoding 的部分改用 ResNet 實作 而 Decoding 的部分則用到了 CBAM, Hypercolumns 的技術 # Load data ``` from google.colab import drive drive.mount('/content/drive') !tar -xf "/content/drive/MyDrive/Colab Notebooks/annotations.tar.gz" -C /content !tar -xf "/content/drive/MyDrive/Colab Notebooks/images.tar.gz" -C /content import os # https://stackoverflow.com/questions/60058588/tesnorflow-2-0-tf-random-set-seed-not-working-since-i-am-getting-different-resul SEED = 2021 os.environ['PYTHONHASHSEED'] = str(SEED) import numpy import math import random VERSION = 'ResNet34-UNet' DATA_ROOT_DIR = '/content/' SEED = 2021 IMG_SIZE = (160, 160) NUM_CLASSES = 4 BATCH_SIZE = 25 EPOCHES = 50 import tensorflow.keras as keras import keras.layers as layers import tensorflow as tf def reset_random_seed(): os.environ['PYTHONHASHSEED'] = str(SEED) numpy.random.seed(SEED) random.seed(SEED) tf.random.set_seed(SEED) reset_random_seed() files = [] for file in os.listdir(DATA_ROOT_DIR + 'images'): # file = Abyssinian_1.jpg if file.endswith('jpg'): fn = file.split('.')[0] if os.path.isfile(DATA_ROOT_DIR + 'annotations/trimaps/' + fn + '.png'): files.append(fn) files = sorted(files) from IPython.display import display from tensorflow.keras.preprocessing.image import load_img from PIL import ImageOps import PIL # ImageOps.autocontrast() method maximizes (normalize) image contrast. # This function calculates a histogram of the input image, # removes cutoff percent of the lightest and darkest pixels from the histogram, # and remaps the image so that the darkest pixel becomes black (0), # and the lightest becomes white (255). 
img = PIL.ImageOps.autocontrast(load_img(DATA_ROOT_DIR + 'annotations/trimaps/' + files[0] + '.png'))
display(img)
```

## Data Generator
```
# reference: https://www.tensorflow.org/api_docs/python/tf/keras/utils/Sequence
class OxfordPets(keras.utils.Sequence):
    """Batched loader for the Oxford-IIIT Pet images and trimap masks.

    Each item is an (x, y) pair: x is a float32 batch of RGB images resized
    to IMG_SIZE, y is the matching uint8 batch of grayscale trimap masks.
    """

    def __init__(self, files):
        # files: basenames (no extension) for which both the .jpg image and
        # the .png trimap exist on disk.
        self.files = files

    def __len__(self):
        # Number of batches per epoch; the final batch may be smaller.
        return math.ceil(len(self.files) / BATCH_SIZE)

    def __getitem__(self, index):
        # Build the `index`-th batch; min() guards the short final batch.
        x, y = [], []
        for i in range(index * BATCH_SIZE, min((index+1) * BATCH_SIZE, len(self.files))):
            # target size in load_img
            # (img_height, img_width)
            x.append(numpy.array(load_img(DATA_ROOT_DIR + 'images/' + self.files[i] + '.jpg', target_size = IMG_SIZE), dtype='float32'))
            y.append(numpy.array(load_img(DATA_ROOT_DIR + 'annotations/trimaps/' + self.files[i] + '.png', target_size = IMG_SIZE, color_mode="grayscale"), dtype='uint8'))
        return numpy.array(x), numpy.array(y)
```

## Build model
```
class ResBlock(keras.Model):
    """Basic ResNet residual block: two 3x3 convs with BatchNorm plus a shortcut.

    When strides != 1 the shortcut is a 1x1 strided conv so its output shape
    matches the main branch; otherwise it is the identity (empty Sequential).
    """

    def __init__(self, channels, strides=1):
        super().__init__()
        self.conv1 = layers.Conv2D(
            channels, 3, strides=strides, padding='same', use_bias=False)
        self.bn1 = layers.BatchNormalization()
        self.conv2 = layers.Conv2D(
            channels, 3, strides=1, padding='same', use_bias=False)
        self.bn2 = layers.BatchNormalization()
        if strides != 1:
            # Projection shortcut to match spatial size / channel count.
            self.shortcut = keras.Sequential([
                layers.Conv2D(channels, 1, strides, padding='same', use_bias=False)
            ])
        else:
            # Identity shortcut.
            self.shortcut = keras.Sequential()

    def call(self, inputs):
        x = self.conv1(inputs)
        x = self.bn1(x)
        x = tf.nn.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        shortcut = self.shortcut(inputs)
        # Residual addition followed by the final activation.
        return tf.nn.relu(tf.add(x, shortcut))

    def get_config(self):
        # Minimal config so Keras serialization utilities don't fail;
        # constructor arguments are not round-tripped.
        return {}


class Encoder(keras.Model):
    """A ResNet stage: `repeat` ResBlocks, only the first one strided."""

    def __init__(self, channels, repeat, strides):
        super().__init__()
        self.resBlocks = keras.Sequential()
        # First block may downsample; the rest keep the resolution.
        self.resBlocks.add(ResBlock(channels, strides))
        for _ in range(1, repeat):
            self.resBlocks.add(ResBlock(channels, strides=1))

    def call(self, inputs):
        return self.resBlocks(inputs)

    def get_config(self):
        return {}


class ChannelAttention(keras.Model):
    """CBAM channel-attention: sigmoid gate of shape (B, 1, 1, C)."""

    def __init__(self, reduction):
        super().__init__()
        # keepdims=True keeps the pooled maps 4-D so the Conv2D "MLP" applies.
        self.globalMaxPool = layers.GlobalMaxPooling2D(keepdims=True)
        self.globalAvgPool = layers.GlobalAveragePooling2D(keepdims=True)
        self.reduction = reduction

    def build(self, input_shape):
        # Shared bottleneck MLP (implemented with convs), built lazily once
        # the input channel count is known.
        self.fc = keras.Sequential([
            layers.Conv2D(input_shape[-1]//self.reduction, 3, padding='same'),
            layers.ReLU(),
            layers.Conv2D(input_shape[-1], 1, padding='valid')
        ])

    def call(self, inputs):
        x1 = self.globalMaxPool(inputs)
        x2 = self.globalAvgPool(inputs)
        # The same MLP scores both pooled descriptors; their sum is gated.
        x1 = self.fc(x1)
        x2 = self.fc(x2)
        x = tf.nn.sigmoid(layers.add([x1, x2]))
        return x


class SpatialAttention(keras.Model):
    """CBAM spatial-attention: sigmoid gate of shape (B, H, W, 1)."""

    def __init__(self):
        super().__init__()
        self.conv3x3 = layers.Conv2D(1, 3, padding='same')

    def call(self, inputs):
        # https://github.com/kobiso/CBAM-tensorflow/blob/master/attention_module.py#L95
        # Channel-wise max and mean maps, concatenated and convolved to a
        # single-channel attention map.
        x1 = tf.math.reduce_max(inputs, axis=3, keepdims=True)
        x2 = tf.math.reduce_mean(inputs, axis=3, keepdims=True)
        x = tf.concat([x1, x2], 3)
        x = self.conv3x3(x)
        x = tf.nn.sigmoid(x)
        return x


class CBAM(keras.Model):
    """Convolutional Block Attention Module: channel gate then spatial gate."""

    def __init__(self, reduction):
        super().__init__()
        self.channelAttention = ChannelAttention(reduction)
        # NOTE(review): attribute name `spaialAttention` is a typo for
        # "spatialAttention"; it is used consistently, so behavior is fine.
        self.spaialAttention = SpatialAttention()

    def call(self, inputs):
        x = inputs * self.channelAttention(inputs)
        x = x * self.spaialAttention(x)
        return x


class Decoder(keras.Model):
    """U-Net decoder block: upsample, two 3x3 convs, CBAM, plus a 1x1 shortcut."""

    def __init__(self, channels, upsample=True):
        super().__init__()
        self.bn1 = layers.BatchNormalization()
        self.bn2 = layers.BatchNormalization()
        if upsample:
            self.upsample = keras.Sequential([
                layers.UpSampling2D(2, interpolation='nearest')
            ])
        else:
            # Identity: keep resolution unchanged.
            self.upsample = keras.Sequential()
        self.conv3x3_2 = layers.Conv2D(
            channels, 3, padding='same', use_bias=False)
        self.conv1x1 = layers.Conv2D(channels, 1, use_bias=False)
        self.cbam = CBAM(reduction=16)

    def build(self, input_shape):
        # First conv keeps the (unknown until build) input channel count.
        self.conv3x3_1 = layers.Conv2D(
            input_shape[-1], 3, padding='same', use_bias=False)

    def call(self, inputs):
        x = self.bn1(inputs)
        x = tf.nn.relu(x)
        x = self.upsample(x)
        x = self.conv3x3_1(x)
        x = self.bn2(x)
        x = tf.nn.relu(x)
        x = self.conv3x3_2(x)
        x = self.cbam(x)
        # Shortcut path: upsample the raw input and project to `channels`.
        shortcut = self.conv1x1(self.upsample(inputs))
        x += shortcut
        return x

    def get_config(self):
        return {}


def ResNet34UNet(input_shape, num_classes):
    """Build a ResNet34-encoder U-Net with CBAM decoders and a hypercolumn head.

    Args:
        input_shape: (height, width, channels) of the input images.
        num_classes: number of per-pixel classes in the softmax output.

    Returns:
        A keras.Model mapping images to per-pixel class probabilities.
    """
    inputs = keras.Input(shape=input_shape)
    # Encode by ResNet34
    # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool (overall 4x downsample).
    x = layers.Conv2D(64, 7, strides=2, padding='same', use_bias=False)(inputs)
    x = layers.BatchNormalization()(x)
    x = tf.nn.relu(x)
    x0 = layers.MaxPooling2D(3, strides=2, padding='same')(x)
    # ResNet34
    x1 = Encoder(64, 3, strides=1)(x0)
    x2 = Encoder(128, 4, strides=2)(x1)
    x3 = Encoder(256, 6, strides=2)(x2)
    x4 = Encoder(512, 3, strides=2)(x3)
    # Center Block
    y5 = layers.Conv2D(512, 3, padding='same', use_bias=False)(x4)
    # Decode
    # Each decoder consumes the skip connection concatenated with the
    # previous decoder output (first one pairs x4 with the center block).
    y4 = Decoder(64)(layers.Concatenate(axis=3)([x4, y5]))
    y3 = Decoder(64)(layers.Concatenate(axis=3)([x3, y4]))
    y2 = Decoder(64)(layers.Concatenate(axis=3)([x2, y3]))
    y1 = Decoder(64)(layers.Concatenate(axis=3)([x1, y2]))
    y0 = Decoder(64)(y1)
    # Hypercolumn
    # Upsample every decoder stage to full resolution and stack them so the
    # head sees features from all scales.
    y4 = layers.UpSampling2D(16, interpolation='bilinear')(y4)
    y3 = layers.UpSampling2D(8, interpolation='bilinear')(y3)
    y2 = layers.UpSampling2D(4, interpolation='bilinear')(y2)
    y1 = layers.UpSampling2D(2, interpolation='bilinear')(y1)
    hypercolumn = layers.Concatenate(axis=3)([y0, y1, y2, y3, y4])
    # Final conv
    outputs = keras.Sequential([
        layers.Conv2D(64, 3, padding='same', use_bias=False),
        layers.ELU(),
        layers.Conv2D(num_classes, 1, use_bias=False)
    ])(hypercolumn)
    # Per-pixel class probabilities.
    outputs = tf.nn.softmax(outputs)
    return keras.Model(inputs, outputs)
```

## Start Training!
```
keras.backend.clear_session()
reset_random_seed()

# Input is IMG_SIZE RGB, hence the +(3,) channel dimension.
m = ResNet34UNet(IMG_SIZE+(3,), NUM_CLASSES)
m.summary()
keras.utils.plot_model(m, show_shapes=True)

# Deterministic 80/20 train/test split: sort first, then shuffle with a
# seeded generator so the split is identical across runs.
files = sorted(files)
rng = numpy.random.default_rng(SEED)
rng.shuffle(files)
middle = math.ceil(len(files) * 0.8)
train = OxfordPets(files[:middle])
test = OxfordPets(files[middle:])

# Make sure the order of `files` is the same on every training run.
print(files[:10])

keras.backend.clear_session()
reset_random_seed()

# Keep only the weights of the epoch with the lowest validation loss.
callbacks = [
    keras.callbacks.ModelCheckpoint(DATA_ROOT_DIR + VERSION + ".h5",
                                    monitor='val_loss',
                                    save_best_only=True,
                                    save_weights_only=True)
]
# Masks are integer class ids, so sparse categorical cross-entropy is used.
m.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=['accuracy'])
history = m.fit(train, validation_data=test, epochs=EPOCHES, callbacks=callbacks)
```

# Evaluate
```
import matplotlib.pyplot as plt

# Training vs. validation loss curves.
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label = 'val_loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.ylim([0, 2])
plt.legend(loc='upper right')
print(min(history.history['val_loss']))

# Training vs. validation accuracy curves.
# NOTE(review): no new plt.figure() is created here, so in a plain script
# these would draw onto the loss axes; in a notebook each cell gets its own
# figure — confirm if this code is moved out of a notebook.
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0, 1])
plt.legend(loc='lower right')
print(max(history.history['val_accuracy']))

# Persist the loss history alongside the checkpoint for later comparison.
import json
with open(DATA_ROOT_DIR + VERSION + '.json', 'w') as f:
    data = {
        'loss': history.history['loss'],
        'val_loss': history.history['val_loss']
    }
    json.dump(data, f)
```

## Show result
```
def mask_to_img(predict):
    """Convert one per-pixel softmax prediction into a displayable PIL image.

    Takes the argmax over the class axis and stretches the class-id image to
    full contrast so the few distinct ids are visible.
    """
    mask = numpy.argmax(predict, axis=-1)
    # numpy.expand_dims() expand the shape of an array.
    # Insert a new axis that will appear at the axis position in the expanded
    # array shape.
    mask = numpy.expand_dims(mask, axis = -1)
    return PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))

# Overlay predicted masks on the first 20 images (a single batch, since
# BATCH_SIZE >= 20 here).
demo_data = OxfordPets(files[:20])
demo_res = m.predict(demo_data)
plt.figure(figsize=(8, 10))
for i in range(20):
    plt.subplot(5, 4, i+1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(keras.preprocessing.image.array_to_img(demo_data.__getitem__(0)[0][i]))
    plt.imshow(mask_to_img(demo_res[i]), cmap='jet', alpha=0.3)
plt.show()
plt.close()

# Archive the history and best weights to Drive, tagged with the best
# validation loss for easy comparison between experiments.
import shutil
val_loss = min(history.history['val_loss'])
shutil.copy(DATA_ROOT_DIR + VERSION + '.json', '/content/drive/MyDrive/Models/%s-%.4f.json' % (VERSION, val_loss))
shutil.copy(DATA_ROOT_DIR + VERSION + '.h5', '/content/drive/MyDrive/Models/%s-%.4f.h5' % (VERSION, val_loss))
```
github_jupyter
<p><font size="6"><b>Numpy</b></font></p> > *DS Python for GIS and Geoscience* > *October, 2020* > > *© 2020, Joris Van den Bossche and Stijn Van Hoey. Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)* --- ``` import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from matplotlib.lines import Line2D import rasterio from rasterio.plot import plotting_extent, show ``` ## Introduction On of the most fundamental parts of the scientific python 'ecosystem' is [numpy](https://numpy.org/). A lot of other packages - you already used Pandas and GeoPandas in this course - are built on top of Numpy and the `ndarray` (n-dimensional array) data type it provides. ``` import numpy as np ``` Let's start again from reading in a GeoTiff data set from file, thiss time a Sentinal Band 4 of the City of Ghent: ``` with rasterio.open("./data/gent/raster/2020-09-17_Sentinel_2_L1C_B04.tiff") as src: b4_data = src.read() b4_data_meta = src.meta show(src) ``` As we learnt in the previous lesson, Rasterio returns a Numpy `ndarray`: ``` type(b4_data) b4_data ``` Numpy supports different `dtype`s (`float`, `int`,...), but all elements of an array do have the same dtype. Note that NumPy auto-detects the data-type from the input. ``` b4_data.dtype ``` The data type of this specific array `b4_data` is 16bit unsigned integer. More information on the data types Numpy supports is available in the [documentation](https://numpy.org/devdocs/user/basics.types.html#array-types-and-conversions-between-types). Detailed info on data types is out of scope of this course, but remember that using 16bit unsigned integer, it can contain `2**16` different (all positive) integer values: ``` 2**16 ``` Let's check this by calculating the minimum and maximum value in the array: ``` b4_data.min(), b4_data.max() ``` Converting to another data type is supported by `astype` method. 
When floats are preferred during calculation: ``` b4_data.astype(float) b4_data.max() ``` Just as any other object in Python, the `ndarray` has a number of attributes. We already checkes the `dtype` attribute. The `shape` and `ndim` of the array are other relevant attribute: ``` b4_data.shape, b4_data.ndim ``` Hence, we have a single band with dimensions (317, 625) and data type `uint16`. Compare this to the metadata stored in the geotiff file: ``` #!gdalinfo ./data/gent/raster/2020-09-17_Sentinel_2_L1C_B04.tiff ``` The metadata on the dimensions and the datatype correspond, but the spatial information is lost when we only store the Numpy array. Numpy works very well together with the other fundamental scientific Python package [Matplotlib](https://matplotlib.org/). An useful plot function to know when working with raster data is `imshow`: ``` fig, ax = plt.subplots() ax.imshow(b4_data.squeeze()); ``` __Note:__ Numpy function `squeeze` used to get rid of the single-value dimension of the numpy array. As the Numpy array does not contain any spatial information, the x and y axis labels are defined by the indices of the array. Remark that the Rasterio plot returned this plot with the coordinate information in the axis labels. With a small trick, the same result can be achieved with Matplotlib: 1. When reading in a data set using Rasterio, use the `plotting_extent` function from rasterio to get the spatial extent: ``` from rasterio.plot import plotting_extent with rasterio.open("./data/gent/raster/2020-09-17_Sentinel_2_L1C_B04.tiff") as src: b4_data = src.read() b4_data_meta = src.meta b4_data_extent = plotting_extent(src) # NEW b4_data_extent ``` 2. Add the `extent` argument to the `imshow` plot ``` fig, ax = plt.subplots() ax.imshow(b4_data.squeeze(), extent=b4_data_extent) ``` <div class="alert alert-info" style="font-size:120%"> **REMEMBER**: <br> The [`numpy` package](https://numpy.org/) is the backbone of the scientific Python ecosystem. 
The `ndarray` provides an efficient data type to store and manipulate raster data, but it does NOT contain any spatial information. Use the spatial `extent` trick to add coordinate information to imshow plot axis. Convert to the preferred datatype using `astype()` method. </div> ## Reshape, slice and index ``` b4_data.shape ``` We already used `squeeze` to remove the single-value dimension. We could also select the data we needed, similar to slicing in lists or Pandas DataFrames: ``` b4_data[0] b4 = b4_data[0] b4.shape ``` If you do not like the order of dimensions of the data, you can switch these using `transpose`: ``` b4.transpose(1, 0).shape ``` Getting rid of the dimensions and flattening all values into a single 1-D array can be done using `flatten` method: ``` b4.flatten().shape ``` Flattening an arrya is useful to create a histogram with Matplotlib: ``` plt.hist(b4.flatten(), bins=100); # slice, subsample, reverse # slice + assign # fancy indexing # fancy indexing + assign b4 = b4_data[0] ``` Select a specific row/column: ``` b4[10].shape b4[:, -2:].shape ``` Select every nth element in a given dimension: ``` b4[100:200:10, :].shape ``` Reversing an array: ``` b4[:, ::-1].shape # Note you can also np.flip an array b4[0, :4] b4_rev = b4[:, ::-1] b4_rev[0, -4:] ``` You can also combine assignment and slicing: ``` b4[0, :3] = 10 b4 ``` Use a __condition__ to select data, also called fancy indexing or boolean indexing: ``` b4 < 1000 ``` Onle keep the data which are True for the given condition ``` b4[b4 < 1000] ``` Or combine assignment and fancy indexing, e.g. 
a reclassification of the raster data: ``` b4[b4 < 5000] = 0 # assign the value 0 to all elements with a value lower than 5000 b4 ``` A powerfull shortcut to handle this kind or reclassifications is the `np.where` function: ``` np.where(b4 < 5000, 10, b4) ``` <div class="alert alert-success"> **EXERCISE**: * Read in the file `./data/gent/raster/2020-09-17_Sentinel_2_L1C_True_color.tiff` with rasterio and assign the data to a new variable `tc_data`. * Select only the *second* layer of `tc_data` and assign the output to a new variable `tc_g`. * Assign to each of the elements in the `tc_g` array with a value above 15000 the new value 65535. <details><summary>Hints</summary> * You can combine the assignment of new values together with fancy indexing of a numpy array. * Python (and also Numpy) uses 0 as the first-element index </details> </div> ``` # %load _solutions/11-numpy1.py # %load _solutions/11-numpy2.py # %load _solutions/11-numpy3.py ``` <div class="alert alert-success"> **EXERCISE**: Subsample the ndarray `tc_data` by taking only the one out of each 5 data points for all layers at the same time (Be aware that this is a naive resampling implementation for educational purposes only). <details><summary>Hints</summary> * The result should still be a 3-D array with 3 elements in the first dimension. </details> </div> ``` # %load _solutions/11-numpy4.py # %load _solutions/11-numpy5.py ``` <div class="alert alert-success"> **EXERCISE**: Elements with the value `65535` do represent 'Not a Number' (NaN) values. However, Numpy does not support NaN values for integer data, so we'll convert to float first as data type. 
After reading in the data set `./data/gent/raster/2020-09-17_Sentinel_2_L1C_B04_(Raw).tiff` (assign data to variable `b4_data`): * Count the number of elements that are equal to `65535` * Convert the data type to `float`, assign the result to a new variable `b4_data_f` * Assign Nan (`np.nan`) value to each of the elements of `b4_data_f` equal to `65535` * Count the number of Nan values in the `b4_data_f` data * Make a histogram of both the `b4_data` and `b4_data_f` data. Can you spot the difference? <details><summary>Hints</summary> * `np.nan` represents _Not a Number (NaN)_ in Numpy. You can assign an element to it, e.g. `dummy[2] = np.nan` * `np.sum` will by default sum all of the elements of the input array and can also count boolean values (True = 1 and False = 0), resulting from a conditional expression. * To test if a value is a nan, Numpy provides `np.isnan(...)` which results in an element-wise check returning boolean values. * Check the help of the `plt.hist` command to find out more about the `bins` and the `log` arguments. </details> </div> ``` # %load _solutions/11-numpy6.py # %load _solutions/11-numpy7.py # %load _solutions/11-numpy8.py # %load _solutions/11-numpy9.py # %load _solutions/11-numpy10.py ``` ## Reductions, element-wise calculations and broadcasting Up until now, we worked with the 16bit integer values. For specific applications we might want to rescale this data. A (fake) example is the linear transformation to the range 0-1 after log conversion of the data. To do so, we need to calculate _for each element_ in the original $b$ array the following: $$x_i= \log(b_i)$$ $$z_i=\frac{x_i-\min(x)}{\max(x)-\min(x)}$$ __1. reductions__ As part of it, we need the minimum `min(x)` and the maximum `max(x)` of the array. 
These __reductions__ (aggregations) are provided by Numpy and can be applied along one or more of the data dimensions, called the __axis__: ``` dummy = np.arange(1, 10).reshape(3, 3) dummy np.min(dummy), np.min(dummy, axis=0), np.min(dummy, axis=1) dummy = np.arange(1, 25).reshape(2, 3, 4) dummy.shape, dummy np.min(dummy), np.min(dummy, axis=0), np.min(dummy, axis=(0, 1)), np.min(dummy, axis=(0, 2)) ``` In some applications, the usage of the `keepdims=True` is useful to keep the number of dimensions after reduction: ``` np.min(dummy, axis=(0, 2), keepdims=True) ``` When working with NaN values, the result will be NaN as well: ``` np.min(np.array([1., 2., np.nan])) ``` Use the `nanmin`, `nan...` version of the function instead, if available: ``` np.nanmin(np.array([1., 2., np.nan])) ``` __2. Element-wise__ The __for each element__ is crucial for Numpy. The typical answer in programming would be a `for`-loop, but Numpy is optimized to do these calculations __element-wise__ (i.e. for all elements together): ``` dummy = np.arange(1, 10) dummy dummy*10 ``` Instead of: ``` [el*20 for el in dummy] ``` Numpy provides most of the familiar arithmetic operators to apply on an element-by-element basis: ``` np.exp(dummy), np.sin(dummy), dummy**2, np.log(dummy) ``` For some functions, you can either use the `np.min(my_array)` or the `my_array.min()` approach: ``` dummy.min() == np.min(dummy) ``` __3. Broadcasting__ When we combine arrays with different shapes during arithmetic operations, Numpy applies a set of __broadcasting__ rules and the smaller array is _broadcast_ across the larger array so that they have compatible shapes. An important consequence for our application is: ``` np.array([1, 2, 3]) + 4. , np.array([1, 2, 3]) + np.array([4.]), np.array([1, 2, 3]) + np.array([4., 4., 4.]) ``` The smallest array is broadcast to make both compatible. It starts with the trailing (i.e. rightmost) dimensions. 
Exploring all the rules are out of scope in this lesson and are well explained in the [broadcasting Numpy documentation](https://numpy.org/devdocs/user/basics.broadcasting.html#general-broadcasting-rules). __Back to our function__ By combining these three elements, we know enough to translate our conversion into Numpy code on the example data set: ``` with rasterio.open("./data/gent/raster/2020-09-17_Sentinel_2_L1C_B04.tiff") as src: b4_data = src.read() b4_data = b4_data.squeeze().astype(float) # squeeze and convert to float b4_data[b4_data == 0.0] = 0.00001 # to overcome zero-division error ``` Take the log of al the values __element-wise__: ``` b4_data_log = np.log(b4_data) ``` Get the min and max __reductions__: ``` b4_min, b4_max = b4_data_log.min(), b4_data_log.max() ``` __Broadcast__ our single value `b4_min` and `b4_max` to all elements of `b4_data_log`: ``` b4_rescaled = ((b4_data_log - b4_min)/(b4_max - b4_min)) plt.hist(b4_rescaled.flatten(), bins=100); ``` __Remark 1:__ One-dimensional linear interpolation towards a new value range can be calculated using the `np.interp` function as well. For the range 0 -> 1: ``` np.interp(b4_data, (b4_data.min(), b4_data.max()), (0, 1)) ``` __Remark 2: Why not iterate over the values of a list?__ Let's use the rescaling example to compare the calculation with Numpy versus a list comprehension (for-loop in Python): ``` b4_min, b4_max = b4_data.min(), b4_data.max() ``` With Numpy: ``` %%time rescaled_values_1 = ((b4_data - b4_min)/(b4_max - b4_min)) ``` Using a list with a for loop: ``` b4_as_list = b4_data.flatten().tolist() %%time rescaled_values_2 = [((data_point - b4_min)/(b4_max - b4_min)) for data_point in b4_as_list] np.allclose(rescaled_values_1.flatten(), rescaled_values_2) # np.allclose also works element wise ``` <div class="alert alert-info" style="font-size:120%"> **REMEMBER**: <br> The combination of element-wise calculations, efficient reductions and broadcasting provides Numpy a lot of power. 
In general, it is good advice to __avoid for loops__ when working with Numpy arrays. </div> ### Let's practice! <div class="alert alert-success"> **EXERCISE**: The data set `./data/herstappe/raster/2020-09-17_Sentinel_2_L1C_True_color.tiff` (assign to variable `herstappe_data`) contains 3 bands. The `imshow` function of Matplotlib can plot 3-D (RGB) data sets, but when running `plt.imshow(herstappe_data)`, we got the following error: ``` ... TypeError: Invalid shape (3, 227, 447) for image data ``` - Check in the help of `plt.imshow` why the `herstappe_data` cannot be plotted as such - Adjust the data to fix the behavior of `plt.imshow(herstappe_data)` Next, plot a greyscale version of the data as well. Instead of using a custom function just rely on the sum of the 3 bands as a proxy. <details><summary>Hints</summary> * In a Jupyter Notebook, use the SHIFT-TAB combination when the cursor is on the `imshow` function or type in a new cell `?plt.imshow` to see the documentation of a function. * The `imshow` function requires the different color bands as last dimension, so we will need to transpose the image array. * Add the extent to see the coordinates in the axis labels. * A greyscale image requires a greyscale `cmap`, check the available names in [the documentation online](https://matplotlib.org/tutorials/colors/colormaps.html) </details> </div> ``` # %load _solutions/11-numpy11.py # %load _solutions/11-numpy12.py # %load _solutions/11-numpy13.py ``` <div class="alert alert-success"> **EXERCISE**: The data set `./data/herstappe/raster/2020-09-17_Sentinel_2_L1C_True_color.tiff` (assign to variable `herstappe_data`) has values ranging in between 0.11325, 0.8575. To improve the quality of the visualization, stretch __each of the layers individually__ to the range 0. to 1. with a linear transformation: $$z_i=\frac{x_i-\min(x)}{\max(x)-\min(x)}$$ Make a plot of the end result and compare with the plots of the previous exercise. 
<details><summary>Hints</summary> * Keep into account that the data set is 3-dimensional. Have a look at the optional arguments for the reduction/aggregation functions in terms of `axis` and `keepdims`. * You need the minimal/maximal value over 2 axis to end up with a min/max for each of the layers. * Broadcasting starts comparison of the alignment on the last dimension. </details> </div> ``` with rasterio.open("./data/herstappe/raster/2020-09-17_Sentinel_2_L1C_True_color.tiff") as src: herstappe_data = src.read() herstappe_extent = plotting_extent(src) # %load _solutions/11-numpy14.py # %load _solutions/11-numpy15.py # %load _solutions/11-numpy16.py ``` <div class="alert alert-success"> **EXERCISE**: You want to reclassify the values of the 4th band data to a fixed set of classes: * x < 0.05 need to be 10 * 0.05 < x < 0.1 need to be 20 * x > 0.1 need to be 30 Use the data set `./data/gent/raster/2020-09-17_Sentinel_2_L1C_B04_(Raw).tiff` (assign data to variable `b4_data`): * Read the data set and exclude the single-value dimension to end up with a 2D array. * Convert to float data type. and normalize the values to the range [0., 1.]. * Create a new variable `b4_data_classified` with the same shape as `b4_data` but datatype int. * Assign the new values (10, 20, 30) to the elements for which each of the conditions apply. * Make a image plot of the reclassified variable `b4_data_classified`. </div> ``` with rasterio.open("./data/gent/raster/2020-09-17_Sentinel_2_L1C_B04.tiff") as src: b4_data = src.read() b4_data_extent = plotting_extent(src) # %load _solutions/11-numpy17.py # %load _solutions/11-numpy18.py # %load _solutions/11-numpy19.py # %load _solutions/11-numpy20.py # %load _solutions/11-numpy21.py ``` <div class="alert alert-success"> **EXERCISE**: The data sets `./data/gent/raster/2020-09-17_Sentinel_2_L1C_B04.tiff` and `./data/gent/raster/2020-09-17_Sentinel_2_L1C_B08.tiff` contain respectively the 4th and the 8th band of a sentinel satellite image. 
To derive the [Normalized Difference Vegetation Index (NDVI)](https://nl.wikipedia.org/wiki/Normalized_Difference_Vegetation_Index), the two bands need to be combined as follows: $$\frac{band_8 - band_4}{band_8 + band_4} $$ Process the images and create a plot of the NDVI: - Read both data sets using Rasterio and store them in resp. `b4_data` and `b8_data`. - Combine both data sets using the `np.vstack` function and assign it to the variable `b48_bands` - Transform the data range of each of the layers to the range .0 - 1. - For the values equal to zero in the `b48_bands` data set, assign a new (very small) value 1e-6 - Calculate the NDVI - Plot the NDVI and select an appropriate colormap. <details><summary>Hints</summary> * For more specific adjustments to the colormap, have a check on the [Matplotlib documentation on colormap normalization](https://matplotlib.org/3.3.2/tutorials/colors/colormapnorms.html) </details> </div> ``` # %load _solutions/11-numpy22.py # %load _solutions/11-numpy23.py b48_bands.shape # %load _solutions/11-numpy24.py # %load _solutions/11-numpy25.py # %load _solutions/11-numpy26.py ``` Using a Matplotlib norm to adjust colormap influence on image https://matplotlib.org/api/_as_gen/matplotlib.colors.TwoSlopeNorm.html ``` # %load _solutions/11-numpy27.py # %load _solutions/11-numpy28.py ``` --- ## For the curious: Some more building blocks Numpy provides lower-level building blocks used by other packages and you will once in a while also need to rely on these functions to do some custom implementation. Some other useful building blocks with respect to reclassification could potentially help you: - Remember the `np.where` function? 
``` dummy = np.arange(1, 10).reshape(3, 3) dummy np.where(dummy > 4, 0, dummy) ``` - Clipping the values in your array to defined limits can be done using `np.clip` ``` dummy = np.arange(1, 10).reshape(3, 3) dummy np.clip(dummy, 2, 6) ``` - Numpy provides also a `np.histogram` function, which is really useful to get the bincounts over a custom bin-set: ``` np.histogram(b4_data_classified, bins=[5, 15, 25, 35]) np.histogram(b4_data, bins=[0.001, 0.1, 0.2, 0.5]) ``` - The `np.digitize` function returns the indices of the bins to which each value in the input array belongs. As such, it can be used to select and manipulate values belonging to a specific bin: ``` dummy = np.arange(9).reshape(3, 3) np.random.shuffle(dummy) dummy ``` Define the bin to which each of the values belongs, using the bins x<2, 2<=x<4, x>=4: ``` id_mask = np.digitize(dummy, bins=[2, 4]) id_mask dummy[id_mask == 1] = 20 dummy ``` Besides, it is also a practical method to create discrete classified maps: 1. Apply digitize to create classes: ``` ndvi_class_bins = [-np.inf, 0, 0.3, np.inf] # These limits are for demo purposes only ndvi_landsat_class = np.digitize(ndvi, ndvi_class_bins) ``` 2. Define custom colors and names: ``` nbr_colors = ["gray", "yellowgreen", "g"] ndvi_names = ["No Vegetation", "Bare Area", "Vegetation"] ``` 3. Prepare Matplotlib elements: ``` nbr_cmap = ListedColormap(nbr_colors) # fake entries required for each class to create the legend dummy_data = [Line2D([0], [0], color=color, lw=4) for color in nbr_colors] ``` 4. 
Make the plot and add a legend: ``` fig, ax = plt.subplots(figsize=(12, 12)) im = ax.imshow(ndvi_landsat_class, cmap=nbr_cmap, extent=b4_data_extent) ax.legend(dummy_data, ndvi_names, loc='upper left', framealpha=1) ``` - Find the modal (most common) value in an array is not provided by Numpy itself, but is available in the Scipy package: ``` from scipy.stats import mode mode(b4_data.flatten()), mode(b4_data_classified.flatten()) ``` ### Side-note on convolution In case you need custom convolutions for your 2D array, check the `scipy.signal.convolve` function as the Numpy function only works for 1-D arrays. ``` from scipy import signal with rasterio.open("./data/gent/raster/2020-09-17_Sentinel_2_L1C_B04.tiff") as src: b4_data = src.read() b4_data_extent b4_data = b4_data.squeeze().astype(float) ``` As an example, apply a low pass filter example as window, smoothing the image: ``` window = np.ones((5, 5), dtype=int) window[1:-1, 1:-1] = 4 window[2, 2] = 12 window grad = signal.convolve(b4_data, window, mode='same') fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(16, 6)) ax0.imshow(b4_data, extent=b4_data_extent) ax1.imshow(grad, extent=b4_data_extent) ```
github_jupyter
``` from google.colab import drive drive.mount('/content/drive') import os os.chdir('drive/My Drive/Colab Notebooks/ML_and_NN_course/module 1') cwd=os.getcwd() print(cwd) import numpy as np import pandas as pd import matplotlib.pyplot as plt from utils import * data = pd.read_csv('train_data.csv', sep = ',') data.head() data.describe() data.plot(kind='scatter', x='size', y='price', figsize=(10,5)) ``` # Feature scaling We can speed up gradient descent by having each of our input values in roughly the same range. This is because θ will descend quickly on small ranges and slowly on large ranges, and so will oscillate inefficiently down to the optimum when the variables are very uneven. > The way to prevent this is to modify the ranges of our input variables so that they are all roughly the same. Ideally: > −1 ≤ x≤ 1 > These aren’t exact requirements; we are only trying to speed things up. The goal is to get all input variables into roughly one of these ranges, give or take a few. > Two techniques to help with this are **feature scaling and mean normalization**. Feature scaling involves dividing the input values by the range (i.e. the maximum value minus the minimum value) of the input variable, resulting in a new range of just 1. Mean normalization involves subtracting the average value for an input variable from the values for that input variable resulting in a new average value for the input variable of just zero. 
To implement both of these techniques, adjust your input values as shown in this formula: ![alt text](https://miro.medium.com/max/143/1*WHNo6emX78_Ff9s7ahh2Eg.png) ``` data = (data - np.mean(data))/np.std(data) data.describe() data.plot(kind='scatter', x='size', y='price', figsize=(10,5)) #theta = np.matrix(np.array([0,0])) theta=np.random.randn(1,2) ``` ### Inserting a Column of ones since the theta(parameters) has 2 elements we can obtain hypothesis function simply by dot product between input and theta if we insert a column of ones so that theta[0] * 1 = theta[0] only [we use it inside computecost funtion only] .Hence the dot product will compute (theta[0] * 1 + theta[1]*data['x']) which is a required hypothesis function ``` data.insert(0, 'Ones', 1) data.head() X=data.iloc[:,0:2] X.head() y=data['price'] y.head(),y.shape x = np.matrix(X) y = np.matrix(y) y=y.T x.shape, theta.shape, y.shape theta def computeCost(x, y, theta): """ Compute cost for linear regression. Computes the cost of using theta as the parameter for linear regression to fit the data points in X and y. Parameters ---------- X : array_like The input dataset of shape (m , n+1) <Here n is 1 and we added one more column of ones>, where m is the number of examples, and n is the number of features. <Hence the dimension is (46,2) y : array_like The values of the function at each data point. This is a vector of shape (m, 1). theta : array_like The parameters for the regression function. This is a vector of shape (1,n+1 ). Returns ------- J : float The value of the regression cost function. Instructions ------------ Compute the cost of a particular choice of theta. You should set J to the cost. 
""" # initialize some useful values m =46 # number of training examples # You need to return the following variables correctly J = 0 h = np.matmul(x, theta.T) J = (1/(2 * m)) * np.sum(np.square(h - y)) return J computeCost(x,y,theta) num_iters=250 new_theta, cost = gradientDescent(x, y, theta,num_iters, lr=0.1) print(new_theta, cost) Model_price = new_theta[0, 0] + (new_theta[0, 1] * x) fig, ax = plt.subplots(figsize=(10,5)) ax.plot(x, Model_price, 'r', label='Prediction') ax.scatter(data['size'],data.price, label='Training Data') ax.legend(loc=2) ax.set_xlabel('Size') ax.set_ylabel('Price') ax.set_title('Predicted Price vs. Size') fig, ax = plt.subplots(figsize=(12,8)) ax.plot(np.arange(num_iters), cost, 'r') ax.set_xlabel('Iterations') ax.set_ylabel('Cost') ax.set_title('MSE vs. Iterations') ``` ## Problem of Overshooting ``` num_iters=250 new_theta, cost = gradientDescent(x, y, theta,num_iters, lr=2.1) print(new_theta, cost) fig, ax = plt.subplots(figsize=(12,8)) ax.plot(np.arange(num_iters), cost, 'r') ax.set_xlabel('Iterations') ax.set_ylabel('Cost') ax.set_title('MSE vs. Iterations') theta,new_theta ```
github_jupyter
``` import numpy as np import pandas as pd from matplotlib import pyplot as plt from tqdm import tqdm %matplotlib inline from torch.utils.data import Dataset, DataLoader import torch import torchvision import torch.nn as nn import torch.optim as optim from torch.nn import functional as F device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(device) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark= False m = 2000 # 5, 50, 100, 500, 2000 train_size = 100 # 100, 500, 2000, 10000 desired_num = train_size + 1000 tr_i = 0 tr_j = train_size tr_k = desired_num tr_i, tr_j, tr_k ``` # Generate dataset ``` np.random.seed(12) y = np.random.randint(0,3,500) idx= [] for i in range(3): print(i,sum(y==i)) idx.append(y==i) x = np.zeros((500,)) np.random.seed(12) x[idx[0]] = np.random.uniform(low =-1,high =0,size= sum(idx[0])) x[idx[1]] = np.random.uniform(low =0,high =1,size= sum(idx[1])) x[idx[2]] = np.random.uniform(low =2,high =3,size= sum(idx[2])) x[idx[0]][0], x[idx[2]][5] print(x.shape,y.shape) idx= [] for i in range(3): idx.append(y==i) for i in range(3): y= np.zeros(x[idx[i]].shape[0]) plt.scatter(x[idx[i]],y,label="class_"+str(i)) plt.legend() bg_idx = [ np.where(idx[2] == True)[0]] bg_idx = np.concatenate(bg_idx, axis = 0) bg_idx.shape np.unique(bg_idx).shape x = x - np.mean(x[bg_idx], axis = 0, keepdims = True) np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True) x = x/np.std(x[bg_idx], axis = 0, keepdims = True) np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True) for i in range(3): y= np.zeros(x[idx[i]].shape[0]) plt.scatter(x[idx[i]],y,label="class_"+str(i)) plt.legend() foreground_classes = {'class_0','class_1' } background_classes = {'class_2'} fg_class = np.random.randint(0,2) fg_idx = np.random.randint(0,m) a = [] for i in range(m): if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) print("foreground 
"+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(2,3) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) print(a.shape) print(fg_class , fg_idx) a.shape np.reshape(a,(m,1)) mosaic_list_of_images =[] mosaic_label = [] fore_idx=[] for j in range(desired_num): np.random.seed(j) fg_class = np.random.randint(0,2) fg_idx = np.random.randint(0,m) a = [] for i in range(m): if i == fg_idx: b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1) a.append(x[b]) # print("foreground "+str(fg_class)+" present at " + str(fg_idx)) else: bg_class = np.random.randint(2,3) b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1) a.append(x[b]) # print("background "+str(bg_class)+" present at " + str(i)) a = np.concatenate(a,axis=0) mosaic_list_of_images.append(np.reshape(a,(m,1))) mosaic_label.append(fg_class) fore_idx.append(fg_idx) mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T mosaic_list_of_images.shape mosaic_list_of_images.shape, mosaic_list_of_images[0] for j in range(m): print(mosaic_list_of_images[0][j]) def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number, m): """ mosaic_dataset : mosaic_dataset contains 9 images 32 x 32 each as 1 data point labels : mosaic_dataset labels foreground_index : contains list of indexes where foreground image is present so that using this we can take weighted average dataset_number : will help us to tell what ratio of foreground image to be taken. 
for eg: if it is "j" then fg_image_ratio = j/9 , bg_image_ratio = (9-j)/8*9 """ avg_image_dataset = [] cnt = 0 counter = np.zeros(m) for i in range(len(mosaic_dataset)): img = torch.zeros([1], dtype=torch.float64) np.random.seed(int(dataset_number*10000 + i)) give_pref = foreground_index[i] #np.random.randint(0,9) # print("outside", give_pref,foreground_index[i]) for j in range(m): if j == give_pref: img = img + mosaic_dataset[i][j]*dataset_number/m #2 is data dim else : img = img + mosaic_dataset[i][j]*(m-dataset_number)/((m-1)*m) if give_pref == foreground_index[i] : # print("equal are", give_pref,foreground_index[i]) cnt += 1 counter[give_pref] += 1 else : counter[give_pref] += 1 avg_image_dataset.append(img) print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt)) print("the averaging are done as ", counter) return avg_image_dataset , labels , foreground_index avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:tr_j], mosaic_label[0:tr_j], fore_idx[0:tr_j] , 1, m) test_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[tr_j : tr_k], mosaic_label[tr_j : tr_k], fore_idx[tr_j : tr_k] , m, m) avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0) # mean = torch.mean(avg_image_dataset_1, keepdims= True, axis = 0) # std = torch.std(avg_image_dataset_1, keepdims= True, axis = 0) # avg_image_dataset_1 = (avg_image_dataset_1 - mean) / std # print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0)) # print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0)) # print("=="*40) test_dataset = torch.stack(test_dataset, axis = 0) # mean = torch.mean(test_dataset, keepdims= True, axis = 0) # std = torch.std(test_dataset, keepdims= True, axis = 0) # test_dataset = (test_dataset - mean) / std # print(torch.mean(test_dataset, keepdims= True, axis = 0)) # print(torch.std(test_dataset, keepdims= True, axis = 0)) # print("=="*40) x1 
= (avg_image_dataset_1).numpy() y1 = np.array(labels_1) # idx1 = [] # for i in range(3): # idx1.append(y1 == i) # for i in range(3): # z = np.zeros(x1[idx1[i]].shape[0]) # plt.scatter(x1[idx1[i]],z,label="class_"+str(i)) # plt.legend() plt.scatter(x1[y1==0], y1[y1==0]*0, label='class 0') plt.scatter(x1[y1==1], y1[y1==1]*0, label='class 1') # plt.scatter(x1[y1==2], y1[y1==2]*0, label='class 2') plt.legend() plt.title("dataset1 CIN with alpha = 1/"+str(m)) x1 = (avg_image_dataset_1).numpy() y1 = np.array(labels_1) idx_1 = y1==0 idx_2 = np.where(idx_1==True)[0] idx_3 = np.where(idx_1==False)[0] color = ['#1F77B4','orange', 'brown'] true_point = len(idx_2) plt.scatter(x1[idx_2[:25]], y1[idx_2[:25]]*0, label='class 0', c= color[0], marker='o') plt.scatter(x1[idx_3[:25]], y1[idx_3[:25]]*0, label='class 1', c= color[1], marker='o') plt.scatter(x1[idx_3[50:75]], y1[idx_3[50:75]]*0, c= color[1], marker='o') plt.scatter(x1[idx_2[50:75]], y1[idx_2[50:75]]*0, c= color[0], marker='o') plt.legend() plt.xticks( fontsize=14, fontweight = 'bold') plt.yticks( fontsize=14, fontweight = 'bold') plt.xlabel("X", fontsize=14, fontweight = 'bold') # plt.savefig(fp_cin+"ds1_alpha_04.png", bbox_inches="tight") # plt.savefig(fp_cin+"ds1_alpha_04.pdf", bbox_inches="tight") avg_image_dataset_1[0:10] x1 = (test_dataset).numpy()/m y1 = np.array(labels) # idx1 = [] # for i in range(3): # idx1.append(y1 == i) # for i in range(3): # z = np.zeros(x1[idx1[i]].shape[0]) # plt.scatter(x1[idx1[i]],z,label="class_"+str(i)) # plt.legend() plt.scatter(x1[y1==0], y1[y1==0]*0, label='class 0') plt.scatter(x1[y1==1], y1[y1==1]*0, label='class 1') # plt.scatter(x1[y1==2], y1[y1==2]*0, label='class 2') plt.legend() plt.title("test dataset1 ") test_dataset.numpy()[0:10]/m test_dataset = test_dataset/m test_dataset.numpy()[0:10] class MosaicDataset(Dataset): """MosaicDataset dataset.""" def __init__(self, mosaic_list_of_images, mosaic_label): """ Args: csv_file (string): Path to the csv file with annotations. 
root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. """ self.mosaic = mosaic_list_of_images self.label = mosaic_label #self.fore_idx = fore_idx def __len__(self): return len(self.label) def __getitem__(self, idx): return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx] avg_image_dataset_1[0].shape, avg_image_dataset_1[0] batch = 200 traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 ) trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True) testdata_1 = MosaicDataset(test_dataset, labels ) testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False) class Whatnet(nn.Module): def __init__(self): super(Whatnet,self).__init__() self.linear1 = nn.Linear(1,50) self.linear2 = nn.Linear(50,2) torch.nn.init.xavier_normal_(self.linear1.weight) torch.nn.init.zeros_(self.linear1.bias) torch.nn.init.xavier_normal_(self.linear2.weight) torch.nn.init.zeros_(self.linear2.bias) def forward(self,x): x = F.relu(self.linear1(x)) x = (self.linear2(x)) return x def calculate_loss(dataloader,model,criter): model.eval() r_loss = 0 with torch.no_grad(): for i, data in enumerate(dataloader, 0): inputs, labels = data inputs, labels = inputs.to("cuda"),labels.to("cuda") outputs = model(inputs) loss = criter(outputs, labels) r_loss += loss.item() return r_loss/(i+1) def test_all(number, testloader,net): correct = 0 total = 0 out = [] pred = [] with torch.no_grad(): for data in testloader: images, labels = data images, labels = images.to("cuda"),labels.to("cuda") out.append(labels.cpu().numpy()) outputs= net(images) _, predicted = torch.max(outputs.data, 1) pred.append(predicted.cpu().numpy()) total += labels.size(0) correct += (predicted == labels).sum().item() pred = np.concatenate(pred, axis = 0) out = np.concatenate(out, axis = 0) print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) ) print("correct: ", correct, "total ", total) print('Accuracy of the 
network on the %d test dataset %d: %.2f %%' % (total, number , 100 * correct / total)) def train_all(trainloader, ds_number, testloader_list, lr_list): final_loss = [] for LR in lr_list: print("--"*20, "Learning Rate used is", LR) torch.manual_seed(12) net = Whatnet().double() net = net.to("cuda") criterion_net = nn.CrossEntropyLoss() optimizer_net = optim.Adam(net.parameters(), lr = LR ) #, momentum=0.9) acti = [] loss_curi = [] epochs = 1500 running_loss = calculate_loss(trainloader,net,criterion_net) loss_curi.append(running_loss) print('epoch: [%d ] loss: %.3f' %(0,running_loss)) for epoch in range(epochs): # loop over the dataset multiple times ep_lossi = [] running_loss = 0.0 net.train() for i, data in enumerate(trainloader, 0): # get the inputs inputs, labels = data inputs, labels = inputs.to("cuda"),labels.to("cuda") # zero the parameter gradients optimizer_net.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion_net(outputs, labels) # print statistics running_loss += loss.item() loss.backward() optimizer_net.step() running_loss = calculate_loss(trainloader,net,criterion_net) if(epoch%200 == 0): print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss)) loss_curi.append(running_loss) #loss per epoch if running_loss<=0.05: print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss)) break print('Finished Training') correct = 0 total = 0 with torch.no_grad(): for data in trainloader: images, labels = data images, labels = images.to("cuda"), labels.to("cuda") outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total)) for i, j in enumerate(testloader_list): test_all(i+1, j,net) print("--"*40) final_loss.append(loss_curi) return final_loss train_loss_all=[] testloader_list= [ testloader_1 ] lr_list = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5 ] fin_loss = 
train_all(trainloader_1, 1, testloader_list, lr_list) train_loss_all.append(fin_loss) %matplotlib inline len(fin_loss) for i,j in enumerate(fin_loss): plt.plot(j,label ="LR = "+str(lr_list[i])) plt.xlabel("Epochs") plt.ylabel("Training_loss") plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) ```
github_jupyter
``` %matplotlib inline import numpy as np from matplotlib import pyplot as plt from astropy import time as astropytime from ctapipe.io import EventSource, EventSeeker from ctapipe.visualization import CameraDisplay from ctapipe.instrument import CameraGeometry from ctapipe.image import tailcuts_clean, dilate, hillas_parameters, HillasParameterizationError path = '../obs/NectarCAM.Run1388.0001.fits.fz' cmap = 'gnuplot2' reader=EventSource(input_url=path) seeker = EventSeeker(reader) ``` ## Look at waveform image for a particular event ``` evt = seeker.get_event_index(25) import time from IPython import display adcsum = evt.r0.tel[0].waveform[0].sum(axis=1) camera = CameraGeometry.from_name("NectarCam-003") for i in range(len(evt.r0.tel[0].waveform[0].T)): image = evt.r0.tel[0].waveform[0].T[i] plt.clf() fig = plt.figure(figsize=(13,9)) ax1 = fig.add_subplot(121) ax2 = fig.add_subplot(122) disp2= CameraDisplay(geometry=camera, image=adcsum, ax=ax1, title='Sum ADC', cmap=cmap) #disp2.cmap = cmap #disp2.add_colorbar() disp = CameraDisplay(geometry=camera, image=image, ax=ax2, title='Waveform (ADC), T={} ns'.format(i), cmap=cmap) #disp.cmap = cmap disp.add_colorbar() display.display(plt.gcf()) display.clear_output(wait=True) time.sleep(0.2) ``` ## Or look at an integrated charge image for a particular event ``` evt=seeker.get_event_index(0) image = evt.r0.tel[0].waveform.sum(axis=2) disp = CameraDisplay(geometry=camera, image=image[0], cmap=cmap) run_start = astropytime.Time(evt.nectarcam.tel[0].svc.date, format='unix').iso print('Run started at {}'.format(run_start)) evt.index.event_id ``` ## Extract interleaved pedestals ``` # Evaluate pedestal from interleaved pedestals from same input run max_events = 500 read_ped = EventSource(input_url=path) peds = [] for i, ev in enumerate(read_ped): if len(peds) > max_events: break if ev.trigger.event_type == 32: # print('Event {}, trigger type {}'.format(i,ev.r0.tel[0].trigger_type)) wfs = ev.r0.tel[0].waveform wfs_hi = 
wfs[0].sum(axis=1) peds.append(wfs_hi) peds = np.array(peds) peds = peds.mean(axis=0) disp = CameraDisplay(geometry=camera, image=peds, cmap=cmap) disp.cmap = cmap disp.add_colorbar() plt.plot(peds) ``` ## Calibration ``` adc_to_pe = 58. evt = next(iter(seeker)) print('Event {}, trigger type {}'.format(evt.index.event_id, evt.trigger.event_type)) if evt.trigger.event_type ==1: raw = evt.r0.tel[0].waveform[0].sum(axis=1) charges = ((raw - peds)/adc_to_pe) disp = CameraDisplay(geometry=camera, image=charges, cmap='gnuplot2') disp.cmap = cmap disp.add_colorbar() #Comment: if this cell says that "charges" is not defined it's because the event type is not 1. Re-run it. ``` ## Hillas cleaning ``` cleanmask = tailcuts_clean(camera, charges, picture_thresh=10, boundary_thresh=5, min_number_picture_neighbors=3) charges[~cleanmask] = 0 try: hillas_param = hillas_parameters(camera, charges) disp = CameraDisplay(geometry=camera, image=charges, cmap='gnuplot2') disp.cmap = cmap disp.add_colorbar() disp.overlay_moments(hillas_param, with_label=False, color='red', alpha=0.7, linewidth=2, linestyle='dashed') disp.highlight_pixels(cleanmask, color='white', alpha=0.3, linewidth=2) print(hillas_param) except HillasParameterizationError: pass print('Cleaned image: charge = {} pe'.format(charges.sum())) ``` ## Loop over events ``` import time from IPython import display for i, evt in enumerate(reader): if evt.trigger.event_type == 1: raw = evt.r0.tel[0].waveform[0].sum(axis=1) charges = ((raw - peds)/adc_to_pe) cleanmask = tailcuts_clean(camera, charges, picture_thresh=10, boundary_thresh=5, min_number_picture_neighbors=3) charges[~cleanmask] = 0 plt.clf() disp = CameraDisplay(geometry=camera, image=charges, cmap='gnuplot2') disp.cmap = cmap disp.add_colorbar() try: hillas_param = hillas_parameters(camera, charges) disp.overlay_moments(hillas_param, with_label=False, color='red', alpha=0.7, linewidth=2, linestyle='dashed') disp.highlight_pixels(cleanmask, color='white', alpha=0.3, 
linewidth=2) except HillasParameterizationError: pass display.display(plt.gcf()) display.clear_output(wait=True) time.sleep(0.2) ```
github_jupyter
[![AnalyticsDojo](https://github.com/rpi-techfundamentals/spring2019-materials/blob/master/fig/final-logo.png?raw=1)](http://rpi.analyticsdojo.com) <center><h1>Linear Regression</h1></center> <center><h3><a href = 'http://introml.analyticsdojo.com'>introml.analyticsdojo.com</a></h3></center> # Linear Regression Adopted from Hands-On Machine Learning with Scikit-Learn and TensorFlow **Chapter 4 – Training Linear Models**. [You can access the book here.](http://proquestcombo.safaribooksonline.com.libproxy.rpi.edu/book/programming/9781491962282.) Origional Material has been released under this license. Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ in [this repository](https://github.com/ageron/handson-ml). ## Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures: ``` # To support both python 2 and python 3 #from __future__ import division, print_function, unicode_literals # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) # To plot pretty figures %matplotlib inline import matplotlib import matplotlib.pyplot as plt plt.rcParams['axes.labelsize'] = 14 plt.rcParams['xtick.labelsize'] = 12 plt.rcParams['ytick.labelsize'] = 12 # Let's generate some random data. import numpy as np X = 2 * np.random.rand(100, 1) y = 4 + 3 * X + np.random.randn(100, 1) plt.plot(X, y, "b.") plt.xlabel("$x_1$", fontsize=18) plt.ylabel("$y$", rotation=0, fontsize=18) plt.axis([0, 2, 0, 15]) ``` ## Linear Regression - Linear regression involves fitting the optimal values for \theta that minimize the error. $$h_0(x) = \theta_0 + \theta_1x$$ Below, we are just adding a constant using the [numpy concatenate function](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.c_.html). 
``` #This will add a 1 to the X matrix X_b = np.c_[np.ones((100, 1)), X] # add x0 = 1 to each instance X_b ``` ## Linear regression using the Normal Equation Using matrix calculus, we can actually solve for the optimal value for theta. The regression question below calculates the optimal theta. These are the coefficients relevant to understand. ## $$ \theta = (X^T X)^{-1}X^T \vec{y} $$ In order to calculate this, we are using the `dot` product function for Numpy and `T` to transpose matrix. `linalg.inv(a)` takes the inverse. `np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)` ``` theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y) #This is the intercept and the coefficient. theta_best #This just Calcultes the line. X_new = np.array([[0], [2]]) X_new_b = np.c_[np.ones((2, 1)), X_new] # add x0 = 1 to each instance y_predict = X_new_b.dot(theta_best) y_predict ``` The figure in the book actually corresponds to the following code, with a legend and axis labels: ``` plt.plot(X_new, y_predict, "r-", linewidth=2, label="Predictions") plt.plot(X, y, "b.") plt.xlabel("$x_1$", fontsize=18) plt.ylabel("$y$", rotation=0, fontsize=18) plt.legend(loc="upper left", fontsize=14) plt.axis([0, 2, 0, 15]) # We can also do this much easier with the linear regression model. from sklearn.linear_model import LinearRegression lin_reg = LinearRegression() lin_reg.fit(X, y) lin_reg.intercept_, lin_reg.coef_ lin_reg.predict(X_new) ``` ## Linear regression using batch gradient descent Where `m` is the number of iteratations (1) $$Gradient = \frac{2}{m}X^T(X\theta - y)$$ (2) $$\theta = \theta - \eta Gradient$$ (3) $$\theta := \theta - \eta\frac{2}{m}X^T(X\theta - y)$$ ``` eta = 0.1#learning rate n_iterations = 1000 m = 100 #size of training set theta = np.random.randn(2,1) #Starting point. 
for iteration in range(n_iterations): gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y) theta = theta - eta * gradients print("Ending:", theta) # theta X_new_b.dot(theta) ``` #### Adopted from: https://github.com/ageron/handson-ml Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/sample-weight/auto-ml-sample-weight.png) # Automated Machine Learning _**Sample Weight**_ ## Contents 1. [Introduction](#Introduction) 1. [Setup](#Setup) 1. [Train](#Train) 1. [Test](#Test) ## Introduction In this example we use the scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset) to showcase how you can use sample weight with AutoML. Sample weight is used where some sample values are more important than others. Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook. In this notebook you will learn how to configure AutoML to use `sample_weight` and you will see the difference sample weight makes to the test results. ## Setup As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments. ``` import logging from matplotlib import pyplot as plt import numpy as np import pandas as pd from sklearn import datasets import azureml.core from azureml.core.experiment import Experiment from azureml.core.workspace import Workspace from azureml.train.automl import AutoMLConfig ws = Workspace.from_config() # Choose names for the regular and the sample weight experiments. 
experiment_name = 'non_sample_weight_experiment' sample_weight_experiment_name = 'sample_weight_experiment' experiment = Experiment(ws, experiment_name) sample_weight_experiment=Experiment(ws, sample_weight_experiment_name) output = {} output['SDK version'] = azureml.core.VERSION output['Subscription ID'] = ws.subscription_id output['Workspace Name'] = ws.name output['Resource Group'] = ws.resource_group output['Location'] = ws.location output['Experiment Name'] = experiment.name pd.set_option('display.max_colwidth', -1) outputDf = pd.DataFrame(data = output, index = ['']) outputDf.T ``` ## Train Instantiate two `AutoMLConfig` objects. One will be used with `sample_weight` and one without. ``` digits = datasets.load_digits() X_train = digits.data[100:,:] y_train = digits.target[100:] # The example makes the sample weight 0.9 for the digit 4 and 0.1 for all other digits. # This makes the model more likely to classify as 4 if the image it not clear. sample_weight = np.array([(0.9 if x == 4 else 0.01) for x in y_train]) automl_classifier = AutoMLConfig(task = 'classification', debug_log = 'automl_errors.log', primary_metric = 'AUC_weighted', iteration_timeout_minutes = 60, iterations = 10, n_cross_validations = 2, verbosity = logging.INFO, X = X_train, y = y_train) automl_sample_weight = AutoMLConfig(task = 'classification', debug_log = 'automl_errors.log', primary_metric = 'AUC_weighted', iteration_timeout_minutes = 60, iterations = 10, n_cross_validations = 2, verbosity = logging.INFO, X = X_train, y = y_train, sample_weight = sample_weight) ``` Call the `submit` method on the experiment objects and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while. In this example, we specify `show_output = True` to print currently running iterations to the console. 
``` local_run = experiment.submit(automl_classifier, show_output = True) sample_weight_run = sample_weight_experiment.submit(automl_sample_weight, show_output = True) best_run, fitted_model = local_run.get_output() best_run_sample_weight, fitted_model_sample_weight = sample_weight_run.get_output() ``` ## Test #### Load Test Data ``` digits = datasets.load_digits() X_test = digits.data[:100, :] y_test = digits.target[:100] images = digits.images[:100] ``` #### Compare the Models The prediction from the sample weight model is more likely to correctly predict 4's. However, it is also more likely to predict 4 for some images that are not labelled as 4. ``` # Randomly select digits and test. for index in range(0,len(y_test)): predicted = fitted_model.predict(X_test[index:index + 1])[0] predicted_sample_weight = fitted_model_sample_weight.predict(X_test[index:index + 1])[0] label = y_test[index] if predicted == 4 or predicted_sample_weight == 4 or label == 4: title = "Label value = %d Predicted value = %d Prediced with sample weight = %d" % (label, predicted, predicted_sample_weight) fig = plt.figure(1, figsize=(3,3)) ax1 = fig.add_axes((0,0,.8,.8)) ax1.set_title(title) plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest') plt.show() ```
github_jupyter
``` import os import numpy as np import pandas as pd import pystan from astropy.table import Table import matplotlib.pyplot as plt %matplotlib inline import corner import random ``` load csv files on ESA vo space ``` lensedQSO = pd.read_csv("http://vospace.esac.esa.int/vospace/sh/baf64b11fe35d35f18879b1d292b0c4b02286a?dl=1") allwiseQSO = pd.read_csv("http://vospace.esac.esa.int/vospace/sh/d18d69255b40f4178ec5155a679a33e1dbddd37?dl=1") lensedQSO.head() allwiseQSO.head() ``` Here we restrict to a random sample of the allwise QSOs to speed up the computation. The results might change slightly according to the selected sample. ``` lqsoNew = lensedQSO[np.isfinite(lensedQSO['pmra'])].copy() qsoNew = allwiseQSO[np.isfinite(allwiseQSO['pmra'])].sample(n=len(lqsoNew)).copy() def sigma2(ea,ed,c) : """ the largest eigen value of the covariance matrix defined by ea : right ascention error ed : declination error c : correlation """ res = np.power(ea,2) + np.power(ed,2) res = res + np.sqrt(np.power(ea-ed,2) + np.power(2*ea*ed*c,2)) return res/2 def setMu(d): """ set mu, mu_error and mu_norm taking in account the correlation """ d['mu'] = np.sqrt(np.power(d.pmra,2)+np.power(d.pmdec,2)) d['mu_error'] = np.sqrt(sigma2(d.pmra_error,d.pmdec_error,d.pmra_pmdec_corr)) d['mu_over_error'] = d.mu/d.mu_error setMu(lqsoNew) setMu(qsoNew) ``` # Model 1 A model to compare the distribution of the proper motion assuming a log normal distribution. 
``` Nl = len(lqsoNew) Nq = len(qsoNew) bayesmod1 = """ data{ int<lower=0> Nq; //number of quasars int<lower=0> Nl; //number of lens vector[Nq] muqhat; //propermotion of qso vector[Nl] mulhat; //propermotion of lens vector<lower=0>[Nq] sigq; //error on pm of qso vector<lower=0>[Nl] sigl; //error on pm of lens } parameters{ //population parameters real mu1; real mu2; real<lower=0> sigma1; real<lower=0> sigma2; vector<lower=0>[Nq] muq; //propermotion of qso vector<lower=0>[Nl] mul; //propermotion of lens } model{ // prior mu1 ~ normal(0,1); mu2 ~ normal(0,1); sigma1 ~ normal(0,1); sigma2 ~ normal(0,1); //likelihood muqhat ~ normal(muq, sigq); mulhat ~ normal(mul, sigl); muq ~ lognormal(mu1, sigma1); mul ~ lognormal(mu2, sigma2); } """ mixedData = { 'Nq': Nq, 'Nl': Nl, 'muqhat': qsoNew.mu, 'mulhat': lqsoNew.mu, 'sigq': qsoNew.mu_error, 'sigl': lqsoNew.mu_error } sm1 = pystan.StanModel(model_code=bayesmod1) fit1 = sm1.sampling(data=mixedData, iter=1000, chains=1) # Could be run for longer, but 1 chain at 1000 samples takes ~4hrs params=fit1.extract() mu1=params['mu1'] mu2=params['mu2'] sigma1=params['sigma1'] sigma2=params['sigma2'] pop_params = np.vstack([mu1,mu2,sigma1,sigma2]) fig = corner.corner(pop_params.T, labels=[r"$\mu_1$", r"$\mu_2$", r"$\sigma_1$", r"$\sigma_2$"], quantiles=[0.16, 0.5, 0.84], plot_contours=False, smooth=True) print('mu lqso: ', round(np.mean(mu1),2),'+/-', round(np.std(mu1),2)) print('mu qso: ', round(np.mean(mu2),2), '+/-', round(np.std(mu2),2)) print('sigma lqso: ', round(np.mean(sigma1),2),'+/-', round(np.std(sigma1),2)) print('sigma qso: ', round(np.mean(sigma2),2), '+/-', round(np.std(sigma2),2)) ``` # Model 2 A model to compare the distribution of the proper motion vectors using normal prior and multi normal distribution to fully use Gaia likelihood. 
``` bayesmod2 = """ data{ int<lower=0> N; //number of objects row_vector[2] pmhat[N]; //propermotion observed cov_matrix[2] Sig[N]; //error on propermotion } parameters{ //population parameters row_vector[2] mu; row_vector<lower=0>[2] sigma; row_vector[2] pm[N]; //true propermotion } model{ //priors on hyper params mu ~ normal(0,1); sigma ~ normal(0,1); //observed proper motions for(n in 1:N){ pm[n] ~ normal(mu, sigma); pmhat[n] ~ multi_normal(pm[n], Sig[n]); } } """ sm2 = pystan.StanModel(model_code=bayesmod2) ``` ## Test with toy data ``` #make some toy data Nq = 100 #number of quasar Nl = 100 #number of lens muq = [-0.1,-0.1]#population parameters mul = [0.3,0.4] sigq = np.reshape([0.02**2,0,0,0.01**2],[2,2]) sigl = np.reshape([0.06**2,0,0,0.05**2],[2,2]) Sigmaq = np.reshape([0.01**2,0,0,0.01**2],[2,2])#observational uncertainty covariance matrix Sigmal = np.reshape([0.03**2,0,0,0.04**2],[2,2]) #observed proper mootions pmq = np.empty([Nq, 2]) pmqhat = np.empty([Nq, 2]) for iq in np.arange(Nq): pmq[iq, :] = np.random.multivariate_normal(muq, sigq) pmqhat[iq,:] = np.random.multivariate_normal(pmq[iq], Sigmaq) pml = np.empty([Nl, 2]) pmlhat = np.empty([Nl, 2]) for il in np.arange(Nl): pml[il, :] = np.random.multivariate_normal(mul, sigl) pmlhat[il,:] = np.random.multivariate_normal(pml[il], Sigmal) qsodata={ 'N': Nq, 'pmhat': pmqhat, 'Sig': np.dstack([[Sigmaq]*Nq]), } fitqso = sm2.sampling(data=qsodata, init='random', iter=2000, chains=1) lqsodata={ 'N': Nl, 'pmhat': pmlhat, 'Sig': np.dstack([[Sigmal]*Nl]), } fitlqso = sm2.sampling(data=lqsodata, init='0', iter=2000, chains=1) paramsqso=fitqso.extract() paramslqso=fitlqso.extract() muq=paramsqso['mu'] sigmaq=paramsqso['sigma'] mul=paramslqso['mu'] sigmal=paramslqso['sigma'] pop_params_q = np.vstack([muq[:,0],muq[:,1],sigmaq[:,0],sigmaq[:,1]]) pop_params_l = np.vstack([mul[:,0],mul[:,1],sigmal[:,0],sigmal[:,1]]) fig = corner.corner(pop_params_q.T, labels=[r"$\mu_q^a$",r"$\mu_q^d$", r"$\sigma_q^r$",r"$\sigma_q^d$", 
], quantiles=[0.16, 0.5, 0.84], plot_contours=False, smooth=True) fig = corner.corner(pop_params_l.T, labels=[r"$\mu_l^a$",r"$\mu_l^d$", r"$\sigma_l^r$",r"$\sigma_l^d$", ], quantiles=[0.16, 0.5, 0.84], plot_contours=False, smooth=True) print('muq ra: ', round(np.mean(muq[:,0]),2),'+/-', round(np.std(muq[:,0]),2)) print('muq dec: ', round(np.mean(muq[:,1]),2), '+/-', round(np.std(muq[:,1]),2)) print('mul ra: ', round(np.mean(mul[:,0]),2),'+/-', round(np.std(mul[:,0]),2)) print('mul dec: ', round(np.mean(mul[:,1]),2), '+/-', round(np.std(mul[:,1]),2)) print('sigmaq ra: ', round(np.mean(sigmaq[:,0]),2),'+/-', round(np.std(sigmaq[:,0]),2)) print('sigmaq dec: ', round(np.mean(sigmaq[:,1]),2), '+/-', round(np.std(sigmaq[:,1]),2)) print('sigmal ra: ', round(np.mean(sigmal[:,0]),2),'+/-', round(np.std(sigmal[:,0]),2)) print('sigmal dec: ', round(np.mean(sigmal[:,1]),2), '+/-', round(np.std(sigmal[:,1]),2)) ``` # Now try on real data ``` def is_pos_def(x): #check covariance matrices are positive definite return np.all(np.linalg.eigvals(x)>0) ``` ### Lensed QSOs ``` Nl = len(lqsoNew) dpmra2 = lqsoNew.pmra_error**2 dpmdec2 = lqsoNew.pmdec_error**2 dpmrapmdec = lqsoNew.pmra_pmdec_corr*lqsoNew.pmra_error*lqsoNew.pmdec_error lqsodata={ 'N': Nl, 'pmhat': np.dstack([lqsoNew.pmra, lqsoNew.pmdec])[0], 'Sig': np.reshape(np.dstack([dpmra2,dpmrapmdec, dpmrapmdec, dpmdec2]), [Nl,2,2]) } fitlqso = sm2.sampling(data=lqsodata, iter=2000, chains=4) paramslqso = fitlqso.extract() mul=paramslqso['mu'] sigmal=paramslqso['sigma'] pop_params_l = np.vstack([mul[:,0],mul[:,1],sigmal[:,0],sigmal[:,1]]) fig = corner.corner(pop_params_l.T, labels=[r"$\mu_l^a$",r"$\mu_l^d$", r"$\sigma_l^r$",r"$\sigma_l^d$", ], quantiles=[0.16, 0.5, 0.84], plot_contours=False, smooth=True) print('mul ra: ', round(np.mean(mul[:,0]),2),'+/-', round(np.std(mul[:,0]),2)) print('mul dec: ', round(np.mean(mul[:,1]),2), '+/-', round(np.std(mul[:,1]),2)) print('sigmal ra: ', round(np.mean(sigmal[:,0]),2),'+/-', 
round(np.std(sigmal[:,0]),2)) print('sigmal dec: ', round(np.mean(sigmal[:,1]),2), '+/-', round(np.std(sigmal[:,1]),2)) ``` ### QSO's sample ``` allwisenew2 = allwiseQSO.sample(n=Nl) Nq = len(allwisenew2) dpmra2 = allwisenew2.pmra_error**2 dpmdec2 = allwisenew2.pmdec_error**2 dpmrapmdec = allwisenew2.pmra_pmdec_corr*allwisenew2.pmra_error*allwisenew2.pmdec_error qsodata={ 'N': Nq, 'pmhat': np.dstack([allwisenew2.pmra, allwisenew2.pmdec])[0], 'Sig': np.reshape(np.dstack([dpmra2,dpmrapmdec, dpmrapmdec, dpmdec2]), [Nq,2,2]) } fitqso = sm2.sampling(data=qsodata, iter=2000, chains=4) paramsqso = fitqso.extract() muq=paramsqso['mu'] sigmaq=paramsqso['sigma'] pop_params_q = np.vstack([muq[:,0],muq[:,1],sigmaq[:,0],sigmaq[:,1]]) fig = corner.corner(pop_params_q.T, labels=[r"$\mu_q^a$",r"$\mu_q^d$", r"$\sigma_q^r$",r"$\sigma_q^d$", ], quantiles=[0.16, 0.5, 0.84], plot_contours=False, smooth=True) print('muq ra: ', round(np.mean(muq[:,0]),2),'+/-', round(np.std(muq[:,0]),2)) print('muq dec: ', round(np.mean(muq[:,1]),2), '+/-', round(np.std(muq[:,1]),2)) print('sigmaq ra: ', round(np.mean(sigmaq[:,0]),2),'+/-', round(np.std(sigmaq[:,0]),2)) print('sigmaq dec: ', round(np.mean(sigmaq[:,1]),2), '+/-', round(np.std(sigmaq[:,1]),2)) ```
github_jupyter
# Week 4 Assignment: Custom training with tf.distribute.Strategy Welcome to the final assignment of this course! For this week, you will implement a distribution strategy to train on the [Oxford Flowers 102](https://www.tensorflow.org/datasets/catalog/oxford_flowers102) dataset. As the name suggests, distribution strategies allow you to setup training across multiple devices. We are just using a single device in this lab but the syntax you'll apply should also work when you have a multi-device setup. Let's begin! ## Imports ``` from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf import tensorflow_hub as hub # Helper libraries import numpy as np import os from tqdm import tqdm ``` ## Download the dataset ``` import tensorflow_datasets as tfds tfds.disable_progress_bar() splits = ['train[:80%]', 'train[80%:90%]', 'train[90%:]'] (train_examples, validation_examples, test_examples), info = tfds.load('oxford_flowers102', with_info=True, as_supervised=True, split = splits, data_dir='data/') num_examples = info.splits['train'].num_examples num_classes = info.features['label'].num_classes ``` ## Create a strategy to distribute the variables and the graph How does `tf.distribute.MirroredStrategy` strategy work? * All the variables and the model graph are replicated on the replicas. * Input is evenly distributed across the replicas. * Each replica calculates the loss and gradients for the input it received. * The gradients are synced across all the replicas by summing them. * After the sync, the same update is made to the copies of the variables on each replica. ``` # If the list of devices is not specified in the # `tf.distribute.MirroredStrategy` constructor, it will be auto-detected. strategy = tf.distribute.MirroredStrategy() print('Number of devices: {}'.format(strategy.num_replicas_in_sync)) ``` ## Setup input pipeline Set some constants, including the buffer size, number of epochs, and the image size. 
``` BUFFER_SIZE = num_examples EPOCHS = 10 pixels = 224 MODULE_HANDLE = 'data/resnet_50_feature_vector' IMAGE_SIZE = (pixels, pixels) print("Using {} with input size {}".format(MODULE_HANDLE, IMAGE_SIZE)) ``` Define a function to format the image (resizes the image and scales the pixel values to range from [0,1]. ``` def format_image(image, label): image = tf.image.resize(image, IMAGE_SIZE) / 255.0 return image, label ``` ## Set the global batch size (please complete this section) Given the batch size per replica and the strategy, set the global batch size. - The global batch size is the batch size per replica times the number of replicas in the strategy. Hint: You'll want to use the `num_replicas_in_sync` stored in the [strategy](https://www.tensorflow.org/api_docs/python/tf/distribute/Strategy). ``` # GRADED FUNCTION def set_global_batch_size(batch_size_per_replica, strategy): ''' Args: batch_size_per_replica (int) - batch size per replica strategy (tf.distribute.Strategy) - distribution strategy ''' # set the global batch size ### START CODE HERE ### global_batch_size = batch_size_per_replica * strategy.num_replicas_in_sync ### END CODD HERE ### return global_batch_size ``` Set the GLOBAL_BATCH_SIZE with the function that you just defined ``` BATCH_SIZE_PER_REPLICA = 64 GLOBAL_BATCH_SIZE = set_global_batch_size(BATCH_SIZE_PER_REPLICA, strategy) print(GLOBAL_BATCH_SIZE) ``` **Expected Output:** ``` 64 ``` Create the datasets using the global batch size and distribute the batches for training, validation and test batches ``` train_batches = train_examples.shuffle(num_examples // 4).map(format_image).batch(BATCH_SIZE_PER_REPLICA).prefetch(1) validation_batches = validation_examples.map(format_image).batch(BATCH_SIZE_PER_REPLICA).prefetch(1) test_batches = test_examples.map(format_image).batch(1) ``` ## Define the distributed datasets (please complete this section) Create the distributed datasets using `experimental_distribute_dataset()` of the 
[Strategy](https://www.tensorflow.org/api_docs/python/tf/distribute/Strategy) class and pass in the training batches. - Do the same for the validation batches and test batches. ``` # GRADED FUNCTION def distribute_datasets(strategy, train_batches, validation_batches, test_batches): ### START CODE HERE ### train_dist_dataset = strategy.experimental_distribute_dataset(train_batches) val_dist_dataset = strategy.experimental_distribute_dataset(validation_batches) test_dist_dataset = strategy.experimental_distribute_dataset(test_batches) ### END CODE HERE ### return train_dist_dataset, val_dist_dataset, test_dist_dataset ``` Call the function that you just defined to get the distributed datasets. ``` train_dist_dataset, val_dist_dataset, test_dist_dataset = distribute_datasets(strategy, train_batches, validation_batches, test_batches) ``` Take a look at the type of the train_dist_dataset ``` print(type(train_dist_dataset)) print(type(val_dist_dataset)) print(type(test_dist_dataset)) ``` **Expected Output:** ``` <class 'tensorflow.python.distribute.input_lib.DistributedDataset'> <class 'tensorflow.python.distribute.input_lib.DistributedDataset'> <class 'tensorflow.python.distribute.input_lib.DistributedDataset'> ``` Also get familiar with a single batch from the train_dist_dataset: - Each batch has 64 features and labels ``` # Take a look at a single batch from the train_dist_dataset x = iter(train_dist_dataset).get_next() print(f"x is a tuple that contains {len(x)} values ") print(f"x[0] contains the features, and has shape {x[0].shape}") print(f" so it has {x[0].shape[0]} examples in the batch, each is an image that is {x[0].shape[1:]}") print(f"x[1] contains the labels, and has shape {x[1].shape}") ``` ## Create the model Use the Model Subclassing API to create model `ResNetModel` as a subclass of `tf.keras.Model`. 
``` class ResNetModel(tf.keras.Model): def __init__(self, classes): super(ResNetModel, self).__init__() self._feature_extractor = hub.KerasLayer(MODULE_HANDLE, trainable=False) self._classifier = tf.keras.layers.Dense(classes, activation='softmax') def call(self, inputs): x = self._feature_extractor(inputs) x = self._classifier(x) return x ``` Create a checkpoint directory to store the checkpoints (the model's weights during training). ``` # Create a checkpoint directory to store the checkpoints. checkpoint_dir = './training_checkpoints' checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") ``` ## Define the loss function You'll define the `loss_object` and `compute_loss` within the `strategy.scope()`. - `loss_object` will be used later to calculate the loss on the test set. - `compute_loss` will be used later to calculate the average loss on the training data. You will be using these two loss calculations later. ``` with strategy.scope(): # Set reduction to `NONE` so we can do the reduction afterwards and divide by # global batch size. loss_object = tf.keras.losses.SparseCategoricalCrossentropy( reduction=tf.keras.losses.Reduction.NONE) # or loss_fn = tf.keras.losses.sparse_categorical_crossentropy def compute_loss(labels, predictions): per_example_loss = loss_object(labels, predictions) return tf.nn.compute_average_loss(per_example_loss, global_batch_size=GLOBAL_BATCH_SIZE) test_loss = tf.keras.metrics.Mean(name='test_loss') ``` ## Define the metrics to track loss and accuracy These metrics track the test loss and training and test accuracy. - You can use `.result()` to get the accumulated statistics at any time, for example, `train_accuracy.result()`. ``` with strategy.scope(): train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='test_accuracy') ``` ## Instantiate the model, optimizer, and checkpoints This code is given to you. 
Just remember that they are created within the `strategy.scope()`. - Instantiate the ResNetModel, passing in the number of classes - Create an instance of the Adam optimizer. - Create a checkpoint for this model and its optimizer. ``` # model and optimizer must be created under `strategy.scope`. with strategy.scope(): model = ResNetModel(classes=num_classes) optimizer = tf.keras.optimizers.Adam() checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model) ``` ## Training loop (please complete this section) You will define a regular training step and test step, which could work without a distributed strategy. You can then use `strategy.run` to apply these functions in a distributed manner. - Notice that you'll define `train_step` and `test_step` inside another function `train_test_step_fns`, which will then return these two functions. ### Define train_step Within the strategy's scope, define `train_step(inputs)` - `inputs` will be a tuple containing `(images, labels)`. - Create a gradient tape block. - Within the gradient tape block: - Call the model, passing in the images and setting training to be `True` (complete this part). - Call the `compute_loss` function (defined earlier) to compute the training loss (complete this part). - Use the gradient tape to calculate the gradients. - Use the optimizer to update the weights using the gradients. ### Define test_step Also within the strategy's scope, define `test_step(inputs)` - `inputs` is a tuple containing `(images, labels)`. - Call the model, passing in the images and set training to `False`, because the model is not going to train on the test data. (complete this part). - Use the `loss_object`, which will compute the test loss. Check `compute_loss`, defined earlier, to see what parameters to pass into `loss_object`. (complete this part). - Next, update `test_loss` (the running test loss) with the `t_loss` (the loss for the current batch). - Also update the `test_accuracy`. 
``` # GRADED FUNCTION def train_test_step_fns(strategy, model, compute_loss, optimizer, train_accuracy, loss_object, test_loss, test_accuracy): with strategy.scope(): def train_step(inputs): images, labels = inputs with tf.GradientTape() as tape: ### START CODE HERE ### predictions = model(images, training=True) loss = compute_loss(labels, predictions) ### END CODE HERE ### gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) train_accuracy.update_state(labels, predictions) return loss def test_step(inputs): images, labels = inputs ### START CODE HERE ### predictions = model(images, training=False) t_loss = loss_object(labels, predictions) ### END CODE HERE ### test_loss.update_state(t_loss) test_accuracy.update_state(labels, predictions) return train_step, test_step ``` Use the `train_test_step_fns` function to produce the `train_step` and `test_step` functions. ``` train_step, test_step = train_test_step_fns(strategy, model, compute_loss, optimizer, train_accuracy, loss_object, test_loss, test_accuracy) ``` ## Distributed training and testing (please complete this section) The `train_step` and `test_step` could be used in a non-distributed, regular model training. To apply them in a distributed way, you'll use [strategy.run](https://www.tensorflow.org/api_docs/python/tf/distribute/Strategy#run). `distributed_train_step` - Call the `run` function of the `strategy`, passing in the train step function (which you defined earlier), as well as the arguments that go in the train step function. - The run function is defined like this `run(fn, args=() )`. - `args` will take in the dataset inputs `distributed_test_step` - Similar to training, the distributed test step will use the `run` function of your strategy, taking in the test step function as well as the dataset inputs that go into the test step function. 
#### Hint: - You saw earlier that each batch in `train_dist_dataset` is tuple with two values: - a batch of features - a batch of labels. Let's think about how you'll want to pass in the dataset inputs into `args` by running this next cell of code: ``` #See various ways of passing in the inputs def fun1(args=()): print(f"number of arguments passed is {len(args)}") list_of_inputs = [1,2] print("When passing in args=list_of_inputs:") fun1(args=list_of_inputs) print() print("When passing in args=(list_of_inputs)") fun1(args=(list_of_inputs)) print() print("When passing in args=(list_of_inputs,)") fun1(args=(list_of_inputs,)) ``` Notice that depending on how `list_of_inputs` is passed to `args` affects whether `fun1` sees one or two positional arguments. - If you see an error message about positional arguments when running the training code later, please come back to check how you're passing in the inputs to `run`. Please complete the following function. ``` def distributed_train_test_step_fns(strategy, train_step, test_step, model, compute_loss, optimizer, train_accuracy, loss_object, test_loss, test_accuracy): with strategy.scope(): @tf.function def distributed_train_step(dataset_inputs): ### START CODE HERE ### per_replica_losses = strategy.run(train_step, args=(dataset_inputs,)) ### END CODE HERE ### return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None) @tf.function def distributed_test_step(dataset_inputs): ### START CODE HERE ### return strategy.run(test_step, args = (dataset_inputs,)) ### END CODE HERE ### return distributed_train_step, distributed_test_step ``` Call the function that you just defined to get the distributed train step function and distributed test step function. 
``` distributed_train_step, distributed_test_step = distributed_train_test_step_fns(strategy, train_step, test_step, model, compute_loss, optimizer, train_accuracy, loss_object, test_loss, test_accuracy) ``` **An important note before you continue:** The following sections will guide you through how to train your model and save it to a .zip file. These sections are **not** required for you to pass this assignment but you are encouraged to continue anyway. If you consider no more work is needed in previous sections, please submit now and carry on. After training your model, you can download it as a .zip file and upload it back to the platform to know how well it performed. However, training your model takes around 20 minutes within the Coursera environment. Because of this, there are two methods to train your model: **Method 1** If 20 mins is too long for you, we recommend to download this notebook (after submitting it for grading) and upload it to [Colab](https://colab.research.google.com/) to finish the training in a GPU-enabled runtime. If you decide to do this, these are the steps to follow: - Save this notebok. - Click the `jupyter` logo on the upper left corner of the window. This will take you to the Jupyter workspace. - Select this notebook (C2W4_Assignment.ipynb) and click `Shutdown`. - Once the notebook is shutdown, you can go ahead and download it. - Head over to [Colab](https://colab.research.google.com/) and select the `upload` tab and upload your notebook. - Before running any cell go into `Runtime` --> `Change Runtime Type` and make sure that `GPU` is enabled. - Run all of the cells in the notebook. After training, follow the rest of the instructions of the notebook to download your model. **Method 2** If you prefer to wait the 20 minutes and not leave Coursera, keep going through this notebook. Once you are done, follow these steps: - Click the `jupyter` logo on the upper left corner of the window. This will take you to the jupyter filesystem. 
- In the filesystem you should see a file named `mymodel.zip`. Go ahead and download it. Independent of the method you choose, you should end up with a `mymodel.zip` file which can be uploaded for evaluation after this assignment. Once again, this is optional but we strongly encourage you to do it as it is a lot of fun. With this out of the way, let's continue. ## Run the distributed training in a loop You'll now use a for-loop to go through the desired number of epochs and train the model in a distributed manner. In each epoch: - Loop through each distributed training set - For each training batch, call `distributed_train_step` and get the loss. - After going through all training batches, calculate the training loss as the average of the batch losses. - Loop through each batch of the distributed test set. - For each test batch, run the distributed test step. The test loss and test accuracy are updated within the test step function. - Print the epoch number, training loss, training accuracy, test loss and test accuracy. - Reset the losses and accuracies before continuing to another epoch. ``` # Running this cell in Coursera takes around 20 mins with strategy.scope(): for epoch in range(EPOCHS): # TRAIN LOOP total_loss = 0.0 num_batches = 0 for x in tqdm(train_dist_dataset): total_loss += distributed_train_step(x) num_batches += 1 train_loss = total_loss / num_batches # TEST LOOP for x in test_dist_dataset: distributed_test_step(x) template = ("Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, " "Test Accuracy: {}") print (template.format(epoch+1, train_loss, train_accuracy.result()*100, test_loss.result(), test_accuracy.result()*100)) test_loss.reset_states() train_accuracy.reset_states() test_accuracy.reset_states() ``` Things to note in the example above: * We are iterating over the `train_dist_dataset` and `test_dist_dataset` using a `for x in ...` construct. * The scaled loss is the return value of the `distributed_train_step`. 
This value is aggregated across replicas using the `tf.distribute.Strategy.reduce` call and then across batches by summing the return value of the `tf.distribute.Strategy.reduce` calls. * `tf.keras.Metrics` should be updated inside `train_step` and `test_step` that gets executed by `tf.distribute.Strategy.experimental_run_v2`. *`tf.distribute.Strategy.experimental_run_v2` returns results from each local replica in the strategy, and there are multiple ways to consume this result. You can do `tf.distribute.Strategy.reduce` to get an aggregated value. You can also do `tf.distribute.Strategy.experimental_local_results` to get the list of values contained in the result, one per local replica. # Save the Model for submission (Optional) You'll get a saved model of this trained model. You'll then need to zip that to upload it to the testing infrastructure. We provide the code to help you with that here: ## Step 1: Save the model as a SavedModel This code will save your model as a SavedModel ``` model_save_path = "./tmp/mymodel/1/" tf.saved_model.save(model, model_save_path) ``` ## Step 2: Zip the SavedModel Directory into /mymodel.zip This code will zip your saved model directory contents into a single file. If you are on colab, you can use the file browser pane to the left of colab to find `mymodel.zip`. Right click on it and select 'Download'. If the download fails because you aren't allowed to download multiple files from colab, check out the guidance here: https://ccm.net/faq/32938-google-chrome-allow-websites-to-perform-simultaneous-downloads If you are in Coursera, follow the instructions previously provided. It's a large file, so it might take some time to download. ``` import os import zipfile def zipdir(path, ziph): # ziph is zipfile handle for root, dirs, files in os.walk(path): for file in files: ziph.write(os.path.join(root, file)) zipf = zipfile.ZipFile('./mymodel.zip', 'w', zipfile.ZIP_DEFLATED) zipdir('./tmp/mymodel/1/', zipf) zipf.close() ```
github_jupyter
# Hate speech classification by k-fold cross validation on movies dataset The class labels depict the following: 0: Normal speech, 1: Offensive speech 2: Hate speech #### To work with this, the following folder paths needs to be created in the directory of this notebook: classification_reports/ : This will contain all the classification reports generated by the model movies/ : contains all_movies.csv file movies/for_training/: contains 6 movies used for cross validation training and testing ``` ! pip install transformers==2.6.0 import pandas as pd from matplotlib import pyplot as plt import numpy as np import re import tensorflow as tf from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report import os import glob from transformers import BertTokenizer, TFBertForSequenceClassification from transformers import InputExample, InputFeatures tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") ``` --- ### Cross validation #### 6-fold cross validation on movies Methods to convert the data into the data required by the model for training and testing ``` def convert_data_to_examples_cv(train, DATA_COLUMN, LABEL_COLUMN): train_InputExamples = train.apply( lambda x: InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this case text_a=x[DATA_COLUMN], text_b=None, label=x[LABEL_COLUMN]), axis=1) return train_InputExamples def convert_examples_to_tf_dataset_cv(examples, tokenizer, max_length=128): features = [] # -> will hold InputFeatures to be converted later for e in examples: # Documentation is really strong for this method, so please take a look at it input_dict = tokenizer.encode_plus( e.text_a, add_special_tokens=True, max_length=max_length, # truncates if len(s) > max_length return_token_type_ids=True, return_attention_mask=True, pad_to_max_length=True, # pads to the right by default # CHECK THIS for pad_to_max_length truncation=True ) input_ids, token_type_ids, attention_mask = 
(input_dict["input_ids"], input_dict["token_type_ids"], input_dict['attention_mask']) features.append( InputFeatures( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=e.label ) ) def gen(): for f in features: yield ( { "input_ids": f.input_ids, "attention_mask": f.attention_mask, "token_type_ids": f.token_type_ids, }, f.label, ) return tf.data.Dataset.from_generator( gen, ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64), ( { "input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None]), "token_type_ids": tf.TensorShape([None]), }, tf.TensorShape([]), ), ) def train_bert(df_train, df_test): # initialize model with 3 labels, for hate, offensive and normal class classification model = TFBertForSequenceClassification.from_pretrained("bert-base-uncased", trainable=True, num_labels=3) tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") train = df_train[['text', 'majority_answer']] train.columns = ['DATA_COLUMN', 'LABEL_COLUMN'] test = df_test[['text', 'majority_answer']] test.columns = ['DATA_COLUMN', 'LABEL_COLUMN'] DATA_COLUMN = 'DATA_COLUMN' LABEL_COLUMN = 'LABEL_COLUMN' train_InputExamples = convert_data_to_examples_cv(train, DATA_COLUMN, LABEL_COLUMN) test_InputExamples = convert_data_to_examples_cv(test, DATA_COLUMN, LABEL_COLUMN) train_data = convert_examples_to_tf_dataset_cv(list(train_InputExamples), tokenizer) train_data = train_data.batch(32) valid_data = convert_examples_to_tf_dataset_cv(list(test_InputExamples), tokenizer) valid_data = valid_data.batch(32) # compile and fit model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-6, epsilon=1e-08, clipnorm=1.0), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[tf.keras.metrics.SparseCategoricalAccuracy('accuracy')]) print('train data type',type(train_data)) model.fit(train_data, epochs=6, validation_data=valid_data) test_data = 
convert_examples_to_tf_dataset_cv(list(test_InputExamples), tokenizer) test_data = test_data.batch(32) print('predicting') preds = model.predict(test_data) # classification return classification_report(pd.DataFrame(test['LABEL_COLUMN']), np.argmax(preds[0], axis=1), output_dict=True) def load_movies_to_df(path): df_movies = [] for filename in glob.glob(path + '*.csv'): df_movies.append(pd.read_csv(filename)) return df_movies df_movies = load_movies_to_df('movies/for_training/') classification_reports = [] df_main = pd.DataFrame() # perform cross folding for i in range(len(df_movies)): df_train = pd.concat(df_movies[0:i] + df_movies[i + 1:]) df_test = df_movies[i] train_movies = df_train['movie_name'].unique() test_movie = df_test['movie_name'].unique() print(','.join(train_movies)) print(test_movie[0]) report = train_bert(df_train, df_test) classification_reports.append(report) print('Train movies: ', str(','.join(train_movies))) print('Test movie: ', str(test_movie[0])) print('Classification report: \n', classification_reports[i]) print('------------------------------------------------') df_cr = pd.DataFrame(classification_reports[i]).transpose() df_cr['movie_train'] = str(','.join(train_movies)) df_cr['movie_test'] = str(test_movie[0]) df_cr.to_csv('classification_reports/'+'bert_cv_testmovie_'+str(test_movie[0])+'.csv') df_main = df_main.append(df_cr) df_main.to_csv('classification_reports/bert_crossvalid_movies.csv') print(df_main) len(classification_reports[0]) df_main.head() def get_precision_recall_f1(category, result_df): precision = result_df[result_df.label==category].precision.mean() recall = result_df[result_df.label==category].recall.mean() f1 = result_df[result_df.label==category]['f1-score'].mean() return {'label': category, 'precision': precision, 'recall': recall, 'f1': f1} df_cv= pd.read_csv('classification_reports/bert_crossvalid_movies.csv') len(classification_reports[0]) df_main.head() def get_precision_recall_f1(category, result_df): precision 
= result_df[result_df.label==category].precision.mean() recall = result_df[result_df.label==category].recall.mean() f1 = result_df[result_df.label==category]['f1-score'].mean() return {'label': category, 'precision': precision, 'recall': recall, 'f1': f1} df_cv= pd.read_csv('classification_reports/bert_crossvalid_movies.csv') df_cv = df_cv.rename(columns={'Unnamed: 0': 'label', 'b': 'Y'}) df_cv.head() normal_dict = get_precision_recall_f1('0', df_cv) offensive_dict = get_precision_recall_f1('1',df_cv) hate_dict = get_precision_recall_f1('2',df_cv) ``` #### Aggregated classification results for all 6 folds ``` df_result = pd.DataFrame([normal_dict, offensive_dict, hate_dict]) df_result for cr in classification_reports: print(cr) ```
github_jupyter
# Feature Exploration for Proxy Model - have many different feature models (by prefix) - do boxplot and PCA for features ``` !pip install git+https://github.com/IBM/ibm-security-notebooks.git # Default settings, constants import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt pd.set_option('display.max_columns', None) pd.set_option('display.max_colwidth', -1) pd.set_option('mode.chained_assignment', None) FIGSIZE=(15,8) matplotlib.rcParams['figure.figsize'] = FIGSIZE # Data is from AQL.proxy_model query from pyclient.qradar import QRadar, AQL qi = QRadar(console='YOUR-CONSOLE-IP-ADDRESS', username='admin', token='YOUR-SERVICE-TOKEN') _df = pd.DataFrame.from_records(qi.search(AQL.proxy_model)) _df.fillna(0, inplace=True) print(_df.shape) _df.head(10) _df.describe() # Different Feature groups ALL = 'All Columns' PREFIX = [ 'General', 'Network', 'Time', 'Proxy', ALL ] from sklearn import preprocessing import matplotlib.pyplot as plt def boxplot(df, prefix): # drop text columns df = df.drop('user',axis=1).drop('timeslice',axis=1) min_max_scaler = preprocessing.MinMaxScaler() # StandardScaler, MinMaxScaler, RobustScaler scaled = pd.DataFrame(min_max_scaler.fit_transform(df.values), columns=df.columns) scaled.boxplot(figsize=FIGSIZE, rot=90) plt.title(f'Boxplot for {prefix}') plt.show() for prefix in PREFIX: df = _df if prefix != ALL: cols = ['user', 'timeslice'] cols.extend([col for col in _df if col.startswith(prefix.lower()+'_')]) df = _df[cols] boxplot(df, prefix) from sklearn.decomposition import PCA from sklearn import preprocessing X = 'PC 1' Y = 'PC 2' def pca(df, prefix): # drop text columns df = df.drop('user',axis=1).drop('timeslice',axis=1) # scale data or else some columns dominate min_max_scaler = preprocessing.StandardScaler() # StandardScaler, MinMaxScaler, RobustScaler df = pd.DataFrame(min_max_scaler.fit_transform(df.values), columns=df.columns) pca = PCA(n_components=2) components = pca.fit_transform(df) components_df 
= pd.DataFrame(components, columns = [X, Y]) df[X] = components_df[X] df[Y] = components_df[Y] ax1 = df.plot(kind='scatter', x=X, y=Y, color='grey', s=1, title=f'PCA for {prefix}') plt.show() for prefix in PREFIX: df = _df if prefix != ALL: cols = ['user', 'timeslice'] cols.extend([col for col in _df if col.startswith(prefix.lower()+'_')]) df = _df[cols] pca(df, prefix) # users vs population, look for all outlier points and graph on PCA # specific user vs self, plot own PCA ```
github_jupyter
``` import keras keras.__version__ ``` # 5.1 - Introduction to convnets This notebook contains the code sample found in Chapter 5, Section 1 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments. ---- First, let's take a practical look at a very simple convnet example. We will use our convnet to classify MNIST digits, a task that you've already been through in Chapter 2, using a densely-connected network (our test accuracy then was 97.8%). Even though our convnet will be very basic, its accuracy will still blow out of the water that of the densely-connected model from Chapter 2. The 6 lines of code below show you what a basic convnet looks like. It's a stack of `Conv2D` and `MaxPooling2D` layers. We'll see in a minute what they do concretely. Importantly, a convnet takes as input tensors of shape `(image_height, image_width, image_channels)` (not including the batch dimension). In our case, we will configure our convnet to process inputs of size `(28, 28, 1)`, which is the format of MNIST images. We do this via passing the argument `input_shape=(28, 28, 1)` to our first layer. ``` from keras import layers from keras import models model = models.Sequential() model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) ``` Let's display the architecture of our convnet so far: ``` model.summary() ``` You can see above that the output of every `Conv2D` and `MaxPooling2D` layer is a 3D tensor of shape `(height, width, channels)`. The width and height dimensions tend to shrink as we go deeper in the network. 
The number of channels is controlled by the first argument passed to the `Conv2D` layers (e.g. 32 or 64). The next step would be to feed our last output tensor (of shape `(3, 3, 64)`) into a densely-connected classifier network like those you are already familiar with: a stack of `Dense` layers. These classifiers process vectors, which are 1D, whereas our current output is a 3D tensor. So first, we will have to flatten our 3D outputs to 1D, and then add a few `Dense` layers on top: ``` model.add(layers.Flatten()) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(10, activation='softmax')) ``` We are going to do 10-way classification, so we use a final layer with 10 outputs and a softmax activation. Now here's what our network looks like: ``` model.summary() ``` As you can see, our `(3, 3, 64)` outputs were flattened into vectors of shape `(576,)`, before going through two `Dense` layers. Now, let's train our convnet on the MNIST digits. We will reuse a lot of the code we have already covered in the MNIST example from Chapter 2. ``` from keras.datasets import mnist from keras.utils import to_categorical (train_images, train_labels), (test_images, test_labels) = mnist.load_data() train_images = train_images.reshape((60000, 28, 28, 1)) train_images = train_images.astype('float32') / 255 test_images = test_images.reshape((10000, 28, 28, 1)) test_images = test_images.astype('float32') / 255 train_labels = to_categorical(train_labels) test_labels = to_categorical(test_labels) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(train_images, train_labels, epochs=5, batch_size=64) ``` Let's evaluate the model on the test data: ``` test_loss, test_acc = model.evaluate(test_images, test_labels) test_acc ``` While our densely-connected network from Chapter 2 had a test accuracy of 97.8%, our basic convnet has a test accuracy of 99.3%: we decreased our error rate by 68% (relative). Not bad!
github_jupyter
``` from PIL import Image from IPython.display import display import random import json import os import glob PROJECT_NAME = "PFP Test" TOTAL_IMAGES = 5 # Number of random unique images we want to generate IMAGES_BASE_URI = "https://gateway.pinata.cloud/ipfs/" METADATA_PATH = './output/'; IMAGES_PATH = './output/'; METADATA_FILE_NAME = 'all-traits.json'; # Clean up generated files for dir in [IMAGES_PATH, METADATA_PATH]: files = glob.glob(dir + '/**/*', recursive=True) for f in files: try: os.remove(f) except OSError as e: print("Error: %s : %s" % (f, e.strerror)) if not os.path.exists(dir): os.mkdir(dir) # Each image is made up a series of traits # The weightings for each trait drive the rarity and add up to 100% background = ["Blue", "Orange", "Purple", "Red", "Yellow"] background_weights = [30, 40, 15, 5, 10] circle = ["Blue", "Green", "Orange", "Red", "Yellow"] circle_weights = [30, 40, 15, 5, 10] square = ["Blue", "Green", "Orange", "Red", "Yellow"] square_weights = [30, 40, 15, 5, 10] # Dictionary variable for each trait. 
# Eech trait corresponds to its file name background_files = { "Blue": "blue", "Orange": "orange", "Purple": "purple", "Red": "red", "Yellow": "yellow", } circle_files = { "Blue": "blue-circle", "Green": "green-circle", "Orange": "orange-circle", "Red": "red-circle", "Yellow": "yellow-circle" } square_files = { "Blue": "blue-square", "Green": "green-square", "Orange": "orange-square", "Red": "red-square", "Yellow": "yellow-square" } ## Generate Traits all_images = [] # A recursive function to generate unique image combinations def create_new_image(): new_image = {} # # For each trait category, select a random trait based on the weightings new_image ["Background"] = random.choices(background, background_weights)[0] new_image ["Circle"] = random.choices(circle, circle_weights)[0] new_image ["Square"] = random.choices(square, square_weights)[0] if new_image in all_images: return create_new_image() else: return new_image # Generate the unique combinations based on trait weightings for i in range(TOTAL_IMAGES): new_trait_image = create_new_image() all_images.append(new_trait_image) # Returns true if all images are unique def all_images_unique(all_images): seen = list() return not any(i in seen or seen.append(i) for i in all_images) print("Are all images unique?", all_images_unique(all_images)) # Add token Id to each image i = 0 for item in all_images: item["tokenId"] = i i = i + 1 print(all_images) # Get Trait Counts background_count = {} for item in background: background_count[item] = 0 circle_count = {} for item in circle: circle_count[item] = 0 square_count = {} for item in square: square_count[item] = 0 for image in all_images: background_count[image["Background"]] += 1 circle_count[image["Circle"]] += 1 square_count[image["Square"]] += 1 print(background_count) print(circle_count) print(square_count) #### Generate Metadata for all Traits with open(METADATA_PATH + METADATA_FILE_NAME, 'w') as outfile: json.dump(all_images, outfile, indent=4) #### Generate Images for 
item in all_images: im1 = Image.open(f'./trait-layers/backgrounds/{background_files[item["Background"]]}.jpg').convert('RGBA') im2 = Image.open(f'./trait-layers/circles/{circle_files[item["Circle"]]}.png').convert('RGBA') im3 = Image.open(f'./trait-layers/squares/{square_files[item["Square"]]}.png').convert('RGBA') #Create each composite com1 = Image.alpha_composite(im1, im2) com2 = Image.alpha_composite(com1, im3) #Convert to RGB rgb_im = com2.convert('RGB') file_name = str(item["tokenId"]) + ".png" rgb_im.save(IMAGES_PATH + file_name) #### Generate Metadata for each Image f = open(METADATA_PATH + METADATA_FILE_NAME,) data = json.load(f) def getAttribute(key, value): return { "trait_type": key, "value": value } for i in data: token_id = i['tokenId'] token = { "image": IMAGES_BASE_URI + str(token_id) + '.png', "tokenId": token_id, "name": PROJECT_NAME + ' ' + str(token_id), "attributes": [] } token["attributes"].append(getAttribute("Background", i["Background"])) token["attributes"].append(getAttribute("Circle", i["Circle"])) token["attributes"].append(getAttribute("Square", i["Square"])) with open(METADATA_PATH + str(token_id) + '.json', 'w') as outfile: json.dump(token, outfile, indent=4) f.close() ```
github_jupyter
<style> pre { white-space: pre-wrap !important; } .table-striped > tbody > tr:nth-of-type(odd) { background-color: #f9f9f9; } .table-striped > tbody > tr:nth-of-type(even) { background-color: white; } .table-striped td, .table-striped th, .table-striped tr { border: 1px solid black; border-collapse: collapse; margin: 1em 2em; } .rendered_html td, .rendered_html th { text-align: left; vertical-align: middle; padding: 4px; } </style> # Machine Learning (basic): the Iris dataset If you want to try out this notebook with a live Python kernel, use mybinder: <a class="reference external image-reference" href="https://mybinder.org/v2/gh/vaexio/vaex/latest?filepath=docs%2Fsource%2Fexample_ml_iris.ipynb"><img alt="https://mybinder.org/badge_logo.svg" src="https://mybinder.org/badge_logo.svg" width="150px"></a> While `vaex.ml` does not yet implement predictive models, we provide wrappers to powerful libraries (e.g. [Scikit-learn](https://scikit-learn.org/), [xgboost](https://xgboost.readthedocs.io/)) and make them work efficiently with `vaex`. `vaex.ml` does implement a variety of standard data transformers (e.g. PCA, numerical scalers, categorical encoders) and a very efficient KMeans algorithm that take full advantage of `vaex`. The following is a simple example on use of `vaex.ml`. We will be using the well known Iris dataset, and we will use it to build a model which distinguishes between the three Irish species ([Iris setosa](https://en.wikipedia.org/wiki/Iris_setosa), [Iris virginica](https://en.wikipedia.org/wiki/Iris_virginica) and [Iris versicolor](https://en.wikipedia.org/wiki/Iris_versicolor)). Lets start by importing the common libraries, load and inspect the data. ``` import vaex import vaex.ml import pylab as plt df = vaex.ml.datasets.load_iris() df ``` Splitting the data into _train_ and _test_ steps should be done immediately, before any manipulation is done on the data. 
`vaex.ml` contains a `train_test_split` method which creates shallow copies of the main DataFrame, meaning that no extra memory is used when defining train and test sets. Note that the `train_test_split` method does an ordered split of the main DataFrame to create the two sets. In some cases, one may need to shuffle the data. If shuffling is required, we recommend the following: ``` df.export("shuffled", shuffle=True) df = vaex.open("shuffled.hdf5") df_train, df_test = df.ml.train_test_split(test_size=0.2) ``` In the present scenario, the dataset is already shuffled, so we can simply do the split right away. ``` # Ordered split in train and test df_train, df_test = df.ml.train_test_split(test_size=0.2) ``` As this is a very simple tutorial, we will just use the columns already provided as features for training the model. ``` features = df_train.column_names[:4] features ``` ## PCA The `vaex.ml` module contains several classes for dataset transformations that are commonly used to pre-process data prior to building a model. These include numerical feature scalers, category encoders, and [PCA](https://en.wikipedia.org/wiki/Principal_component_analysis) transformations. We have adopted the [scikit-learn](https://scikit-learn.org/stable/) API, meaning that all transformers have the `.fit` and `.transform` methods. Let's apply a PCA transformation on the training set. There is no need to scale the data beforehand, since the PCA also normalizes the data. ``` pca = vaex.ml.PCA(features=features, n_components=4) df_train = pca.fit_transform(df_train) df_train ``` The result of the pca `.fit_transform` method is a shallow copy of the DataFrame which contains the resulting columns of the transformation, in this case the PCA components, as virtual columns. This means that the transformed DataFrame takes no memory at all! So while this example is made with only 120 samples, this would work in the same way even for millions or billions of samples. 
## Gradient boosting trees Now let's train a gradient boosting model. While `vaex.ml` does not currently include this type of models, we support the popular boosted trees libraries [xgboost](https://xgboost.readthedocs.io/en/latest/), [lightgbm](https://lightgbm.readthedocs.io/en/latest/), and [catboost](https://catboost.ai/). In this tutorial we will use the `lightgbm` classifier. ``` import lightgbm import vaex.ml.sklearn # Features on which to train the model train_features = df_train.get_column_names(regex='PCA_.*') # The target column target = 'class_' # Instantiate the LightGBM Classifier booster = lightgbm.sklearn.LGBMClassifier(num_leaves=5, max_depth=5, n_estimators=100, random_state=42) # Make it a vaex transformer (for the automagic pipeline and lazy predictions) model = vaex.ml.sklearn.SKLearnPredictor(features=train_features, target=target, model=booster, prediction_name='prediction') # Train and predict model.fit(df=df_train) df_train = model.transform(df=df_train) df_train ``` Notice that after training the model, we use the `.transform` method to obtain a shallow copy of the DataFrame which contains the prediction of the model, in a form of a virtual column. This makes it easy to evaluate the model, and easily create various diagnostic plots. If required, one can call the `.predict` method, which will result in an in-memory `numpy.array` housing the predictions. ## Automatic pipelines Assuming we are happy with the performance of the model, we can continue and apply our transformations and model to the test set. Unlike other libraries, we do not need to explicitly create a pipeline here in order to propagate the transformations. In fact, with `vaex` and `vaex.ml`, a pipeline is automatically being created as one is doing the exploration of the data. 
Each `vaex` DataFrame contains a _state,_ which is a (serializable) object containing information of all transformations applied to the DataFrame (filtering, creation of new virtual columns, transformations). Recall that the outputs of both the PCA transformation and the boosted model were in fact virtual columns, and thus are stored in the state of `df_train`. All we need to do, is to apply this state to another similar DataFrame (e.g. the test set), and all the changes will be propagated. ``` state = df_train.state_get() df_test.state_set(state) df_test ``` ## Production Now `df_test` contains all the transformations we applied on the training set (`df_train`), including the model prediction. The transfer of state from one DataFrame to another can be extremely valuable for putting models in production. ## Performance Finally, let's check the model performance. ``` from sklearn.metrics import accuracy_score acc = accuracy_score(y_true=df_test.class_.values, y_pred=df_test.prediction.values) acc *= 100. print(f'Test set accuracy: {acc}%') ``` The model get perfect accuracy of 100%. This is not surprising as this problem is rather easy: doing a PCA transformation on the features nicely separates the 3 flower species. Plotting the first two PCA axes, and colouring the samples according to their class already shows an almost perfect separation. ``` plt.figure(figsize=(8, 4)) df_test.scatter(df_test.PCA_0, df_test.PCA_1, c_expr=df_test.class_, s=50) plt.show() ```
github_jupyter
# Introduction to Programming - Lecture 3 ### Material covered : 1. Conditional statement "if" and Logical Operators 2. For loops ## If-else and Logical Operators "If" is the simplest conditional statement. It simply checks if an evaluation condition is True and if it is then it executes a certain block of code. The checking is usually done by a logical operator such as --> more than( > ), less than( < ), equal to( == ), not equal to ( != or not), etc if <condition>: execute statement(s) ### Binary Logical Operators : 1. Greater than ( > ) 2. Less than ( < ) 3. Equal to ( == ) ### Unary Logical Operators : 1. ! or not Let us look at some examples : ``` # Greater than ( > ) if 1 > 0: print("One is more than zero") else: print("BITS Pilani Goa Campus is better than IIT Kanpur") # Less than ( < ) if 12 < 42: print("Yes, 12 is less than 42") else: print("Everyone registered in CTE Python will pass with distinction (90%+ marks)") # Equal to ( == ) if 2 + 2 == 4: print("Two plus Two equals Four") else: print("Lite......") # Not equal to ( != ) if 1 != 0: print("Sachin") else: print("Kohli") # Inversion operator if not 2 + 2 == 4: print("Lite....") else: print("CTE Python....") ``` ## Chained Conditional if <condition 1> is True: execute <statement(s) > 1 elif <condition 2> is True: execute <statement(s) > 2 elif <condition 3> is True: execute <statement(s) > 3 elif <condition 4> is True: execute <statement(s) > 4 . . . else: execute <statement(s)> n #### Let us see an example : ``` if 1 < 0: print(1, end='') elif 2 == 3: print(2, end='') elif not 3 == 4: print(3, end='') print(".....Lite") ``` #### The condition need not directly involve a logical operator. For example : ``` a = [1, 2, 3] if isinstance(a, list): print(a, "is a list !") if 2 in a: print("Yes, 2 is in ", a) if 5 not in a: print("No, 5 is not in ", a) ``` ## Iterations Iterative constructs are a key feature of any well developed programming language. 
Looping can be done over an iterable which can be a list, set, dictionary, etc. Let us concern ourselves with lists for now. ## Common library functions to use in loops : ### range Usage : range(start, stop, step) This library function returns a list which is an arithmetic progression. The default step value is 1. #### Examples ``` list(range(0, 10)) list(range(-4, 10)) list(range(0, 10, 3)) list(range(0, 5, -6)) list(range(-10, 10, -5)) list(range(10, 2, 3)) list(range(10, 1, -2)) ``` ### enumerate Usage : enumerate(<list>, starting_index=1) This library function returns an iterable object contains tuples of structure (index, list value at index). Extra optional argument provides a different starting index. Default starting index is 0. In that case the tuples are of the structure (index, list value at <index - starting_index>) #### Examples ``` list(enumerate(range(0,5))) list(enumerate(range(0,5), 10)) ``` ## For Loop ### Structure for value in iterable: execute statement(s) Only by looking at many examples will this become clear ``` a = [1, 2, 3, 4, 5] for value in a: print(value, end=' ') b = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] for sublist in b: for value in sublist: print(value, end=' ') ``` ## Tuples Tuples are the immutable equivalent of lists. Declared by : (value_1, value_2, .....) ``` a = (2, 3, 4) print(a) ``` #### You cannot change any element of a tuple to a new value since tuples are immutable types. For example : ``` a[1] = 10 print(a) a = ["One", "Two", "Three"] for i, value in enumerate(a): print("Value at index <", i, "> of list<a> is : ", value) for i in range(0, 10): for j in range(0, i): print("*", end='') print("") for i in range(0, 10): for j in range(10, i, -1): print(" ", end='') for k in range(0, i): print("*", end='') print("") ``` ## Short in-class assignment Write a program to print the following pattern. 
 ********** ****--**** ***----*** **------** *--------* **------** ***----*** ****--**** ********** First line --> 10 stars Second line --> 4 stars, 2 dashes, 4 stars Third line --> 3 stars, 4 dashes, 3 stars . . . Eighth line --> 4 stars, 2 dashes, 4 stars Ninth line --> 10 stars again. ``` for i in range(0, 5): for j in range(0, 5 - i): print("*", end='') for k in range(0, 2*i): print("-", end='') for j in range(0, 5 - i): print("*", end='') print("") for i in range(3, -1, -1): for j in range(0, 5 - i): print("*", end='') for k in range(0, 2*i): print("-", end='') for j in range(0, 5 - i): print("*", end='') print("") ```
github_jupyter
## Code Setup ``` %load_ext autoreload %autoreload 2 import math from functools import partial import numpy as np from xgboost import XGBClassifier, XGBRegressor from sklearn.pipeline import make_pipeline from sklearn.base import clone, BaseEstimator, TransformerMixin, ClassifierMixin from sklearn.preprocessing import StandardScaler, OneHotEncoder from sklearn.compose import ColumnTransformer from sklearn.model_selection import GridSearchCV import pandas as pd import seaborn as sns import statsmodels.api as sm import scipy import tensorflow.keras.backend as K from tensorflow.keras import callbacks, layers, wrappers, models, constraints, optimizers import tensorflow as tf from category_encoders import OrdinalEncoder from augury import model_tracking from augury.ml_data import MLData from augury.ml_estimators import StackingEstimator, ConfidenceEstimator from augury.ml_estimators.stacking_estimator import ML_PIPELINE from augury.sklearn import ( TeammatchToMatchConverter, EloRegressor, bits_scorer, _calculate_bits, TimeSeriesRegressor, ColumnDropper, CorrelationSelector, ) from augury.settings import SEED, CV_YEAR_RANGE, TRAIN_YEAR_RANGE, CATEGORY_COLS, TEAM_NAMES, ROUND_TYPES, VENUES np.random.seed(SEED) data = MLData() data.data se = StackingEstimator() se.fit(*data.train_data) ``` ## ELO ``` elo = ConfidenceEstimator() data.train_year_range = TRAIN_YEAR_RANGE elo.fit(*data.train_data) bits = bits_scorer(elo, *data.test_data) bits ``` ## Time-series model ``` # Based on StackingEstimator predictions tsr = TimeSeriesRegressor(sm.tsa.ARIMA, order=(6, 0, 1), confidence=True) data.train_year_range = TRAIN_YEAR_RANGE X_train, y_train = data.train_data X_train_filt = X_train.query('year >= @se.min_year') X_train_ts = pd.DataFrame( { 'pred_margin': se.predict(X_train), 'date': X_train_filt['date'], 'team': X_train_filt['team'], }, index=X_train_filt.index, ) tsr.fit(X_train_ts, y_train) bits = bits_scorer(tsr, *data.test_data) bits # Based on raw time-series data tsr = 
TimeSeriesRegressor( sm.tsa.ARIMA, order=(6, 0, 1), exog_cols=["at_home", "oppo_cum_percent"], confidence=True ) tsr.fit(*data.train_data) bits = bits_scorer(tsr, *data.test_data) bits ``` ## ML algorithms ### Code setup ``` # train/validation/test data for models that use full data set data.train_year_range = TRAIN_YEAR_RANGE X_train, _y_train = data.train_data # Arbitrarily making draws losses, because why not y_train = (_y_train > 0).astype(int) _X_test, _y_test = data.test_data test_years = _X_test['year'].drop_duplicates().sort_values() # Separating the two default test years into validation & test sets, # so we can use the validation set for early stopping during XGB training X_val = _X_test.query('year < @test_years.iloc[-1]') X_test = _X_test.query('year == @test_years.iloc[-1]') y_win = (_y_test > 0).astype(int) y_val = y_win.loc[(slice(None), test_years.iloc[:-1], slice(None))] y_test = y_win.loc[(slice(None), test_years.iloc[-1:], slice(None))] # train/validation/test data for models based on main model's predictions X_train_se = se.predict(data.train_data[0]).reshape(-1, 1) y_train_se = y_train.loc[(slice(None), slice(1965, None), slice(None))] X_pred = pd.DataFrame(se.predict(data.test_data[0]), columns=['pred_margin'], index=data.test_data[0].index) X_val_se = X_pred.loc[(slice(None), test_years.iloc[:-1], slice(None)), :] X_test_se = X_pred.loc[(slice(None), test_years.iloc[-1:], slice(None)), :] ``` #### Base bits calculation ``` LOG_BASE = 2 # For regressors that might try to predict negative values or 0, # we need a slightly positive minimum to not get errors when calculating # logarithms MIN_VAL = 1 * 10 ** -10 def _positive_pred(y_pred): min_array = np.repeat(MIN_VAL, len(y_pred)) return np.where(y_pred <= 0, min_array, y_pred) def _draw_bits(y_pred, y_true): return 1 + (0.5 * np.log2(_positive_pred(y_pred * (1 - y_pred)))) def _win_bits(y_pred, y_true): return 1 + np.log2(_positive_pred(y_pred)) def _loss_bits(y_pred, y_true): return 1 + 
np.log2(_positive_pred(1 - y_pred)) # Raw bits calculations per http://probabilistic-footy.monash.edu/~footy/about.shtml def calculate_bits(y_pred, y_true): return np.where( y_true == 0.5, _draw_bits(y_pred, y_true), np.where( y_true == 1.0, _win_bits(y_pred, y_true), _loss_bits(y_pred, y_true) ) ) def bits_error(y_pred, y_true): # We adjust bits calculation to make a valid ML error formula such that 0 represents a correct prediction, # and the further off the prediction the higher the error value. return np.where( y_true == 0.5, -1 * _draw_bits(y_pred, y_true), np.where( y_true == 1.0, 1 - _win_bits(y_pred, y_true), 1 + (-1 * _loss_bits(y_pred, y_true)), ), ) ``` ### XGBoost #### Code setup ``` def _draw_bits_gradient(y_pred, y_true): return (y_pred - 0.5) / (math.log(2) * (y_pred - y_pred**2)) def _win_bits_gradient(y_pred, y_true): return -1 / (math.log(2) * y_pred) def _loss_bits_gradient(y_pred, y_true): return 1 / (math.log(2) * (1 - y_pred)) def _bits_gradient(y_pred, y_true): return np.where( y_true == 0.5, _draw_bits_gradient(y_pred, y_true), np.where( y_true == 1.0, _win_bits_gradient(y_pred, y_true), _loss_bits_gradient(y_pred, y_true), ), ) def _draw_bits_hessian(y_pred, y_true): return (y_pred**2 - y_pred + 0.5) / (math.log(2) * y_pred**2 * (y_pred - 1)**2) def _win_bits_hessian(y_pred, y_true): return 1 / (math.log(2) * y_pred**2) def _loss_bits_hessian(y_pred, y_true): return 1 / (math.log(2) * (1 - y_pred)**2) def _bits_hessian(y_pred, y_true): return np.where( y_true == 0.5, _draw_bits_hessian(y_pred, y_true), np.where( y_true == 1.0, _win_bits_hessian(y_pred, y_true), _loss_bits_hessian(y_pred, y_true), ), ) # Per documentation, follows typical Scikit-learn order (true, pred), but rest of XGB callbacks # keep XGB default order (pred, true), so all other functions follow the latter convention # to minimise inconsistency def bits_objective(y_true, y_pred): return _bits_gradient(y_pred, y_true), _bits_hessian(y_pred, y_true) def bits_metric(y_pred, 
y_true_matrix): y_true = y_true_matrix.get_label() return 'mean_bits_error', bits_error(y_pred, y_true).mean() bits_df = ( pd .DataFrame( np.vstack((np.repeat([0, 0.5, 1], 100), np.random.uniform(low=0.0, high=1.0, size=300))).T, columns=['result', 'conf_win'] ) .astype({'result': 'category'}) .assign( bits=lambda df: calculate_bits(df['conf_win'], df['result']), bits_error=lambda df: bits_error(df['conf_win'], df['result']), bits_grad=lambda df: _bits_gradient(df['conf_win'], df['result']), bits_hess=lambda df: _bits_hessian(df['conf_win'], df['result']), result_label=lambda df: df['result'].astype(str).map({'1.0': 'win', '0.0': 'loss', '0.5': 'draw'}), ).melt( id_vars=['result_label', 'conf_win'], value_vars=['bits', 'bits_error', 'bits_grad', 'bits_hess'], var_name='bits_metric', ) ) bits_df sns.relplot( data=bits_df, x='conf_win', y='value', col="bits_metric", col_wrap=2, hue='result_label', kind='line', height=3, aspect=2, facet_kws={'sharey': False, 'sharex': False}, ) ``` #### XGBClassifier ``` # Baseline without custom objective or metric xgbc = clone(ML_PIPELINE) # We don't need the DataFrameConverter xgbc.steps = xgbc.steps[1:] xgbc.steps[-1] = ('xgbclassifier', XGBClassifier(random_state=SEED),) xgbc.fit(X_train, y_train) bits = bits_scorer(xgbc, X_test, y_test, proba=True) bits # XGB with custom objective and metric X_val_trans = xgbc[:-1].transform(X_val) xgbc.set_params(xgbclassifier__objective=bits_objective) xgbc.fit( X_train, y_train, xgbclassifier__eval_metric=bits_metric, xgbclassifier__eval_set=[(X_val_trans, y_val)], xgbclassifier__early_stopping_rounds=5, ) bits = bits_scorer(xgbc, X_test, y_test, proba=True) bits # XGB with custom objective and metric, based on StackingEstimator predictions xgb = make_pipeline(StandardScaler(), XGBClassifier(random_state=SEED, objective=bits_objective)) X_val_trans = xgb[:-1].fit_transform(X_val_se) xgb.fit( X_train_se, y_train_se, xgbclassifier__eval_metric=bits_metric, xgbclassifier__eval_set=[(X_val_trans, 
y_val)], xgbclassifier__early_stopping_rounds=5, ) bits = bits_scorer( xgb, X_test_se, y_test, proba=True, # Temporarily adding info_df to allow reshaping of X even when it doesn't have # the usual categorical columns info_df=data.test_data[0].loc[ (slice(None), test_years.iloc[-1], slice(None)), ['team', 'oppo_team', 'year', 'round_number', 'at_home'] ], ) bits ``` #### XGBRegressor ``` # Baseline without custom objective or metric xgbr = clone(ML_PIPELINE) # We don't need the DataFrameConverter for this xgbr.steps = xgbr.steps[1:] xgbr.steps[-1] = ('xgbregressor', XGBRegressor(random_state=SEED, objective='reg:squarederror')) xgbr.fit(X_train, y_train) bits = bits_scorer(xgbr, X_test, y_test) bits # XGB with custom objective & metric X_val_trans = xgbr[:-1].transform(X_val) xgbr.set_params(xgbregressor__objective=bits_objective) xgbr.fit( X_train, y_train, xgbregressor__eval_metric=bits_metric, xgbregressor__eval_set=[(X_val_trans, y_val)], xgbregressor__early_stopping_rounds=5, ) bits = bits_scorer(xgbr, X_test, y_test) bits ``` #### Conclusion `XGBClassifier` without custom loss/metric for bits performs slightly better than the classifier that uses the bits loss/metric, but I imaginge some tuning would make the custom-loss model better since the defaults are set to work well together out-of-the-box. Regardless, the classifier performs noticeably better than `XGBRegressor`. 
### Keras #### Code setup ``` # Basic NN architecture copied from notebook 6.2-all-data-nn # For regressors that might try to predict negative values or 0, # we need a slightly positive minimum to not get errors when calculating # logarithms MIN_VAL = 1 * 10 ** -10 N_TEAMS = data.data['team'].drop_duplicates().count() N_ROUND_TYPES = data.data['round_type'].drop_duplicates().count() N_VENUES = data.data['venue'].drop_duplicates().count() ELO_MODEL_COLS = [ "prev_match_oppo_team", "oppo_prev_match_oppo_team", "prev_match_at_home", "oppo_prev_match_at_home", "date", ] N_FEATURES = len(data.train_data[0].columns) - len(ELO_MODEL_COLS) class InputLister(BaseEstimator, TransformerMixin): def __init__(self, n_inputs=1): self.n_inputs = n_inputs def fit(self, X, y=None): return self def transform(self, X, y=None): return [X[:, n] if n < self.n_inputs - 1 else X[:, n:] for n in range(self.n_inputs)] class NanFiller(BaseEstimator, TransformerMixin): def fit(self, X, y=None): return self def transform(self, X, y=None): return np.nan_to_num(X) def _positive_pred_tensor(y_pred): return tf.where(tf.math.less_equal(y_pred, tf.constant(0.0)), tf.constant(MIN_VAL), y_pred) def _log2(x): return tf.math.divide( tf.math.log(_positive_pred_tensor(x)), tf.math.log(tf.constant(2.0)) ) def _draw_bits_tensor(y_true, y_pred): return tf.math.add( tf.constant(1.0), tf.math.scalar_mul( tf.constant(0.5), _log2(tf.math.multiply(y_pred, tf.math.subtract(tf.constant(1.0), y_pred))), ), ) def _win_bits_tensor(y_true, y_pred): return tf.math.add(tf.constant(1.0), _log2(y_pred)) def _loss_bits_tensor(y_true, y_pred): return tf.math.add(tf.constant(1.0), _log2(tf.math.subtract(tf.constant(1.0), y_pred))) def bits_loss(y_true, y_pred): y_true_f = tf.cast(y_true, tf.float32) y_pred_win = y_pred[:, -1:] # We adjust bits calculation to make a valid ML error formula such that 0 represents a correct prediction, # and the further off the prediction the higher the error value. 
return K.mean( tf.where( tf.math.equal(y_true_f, tf.constant(0.5)), tf.math.scalar_mul(tf.constant(-1.0), _draw_bits_tensor(y_true_f, y_pred_win)), tf.where( tf.math.equal(y_true_f, tf.constant(1.0)), tf.math.subtract(tf.constant(1.0), _win_bits_tensor(y_true_f, y_pred_win)), tf.math.add( tf.constant(1.0), tf.math.scalar_mul(tf.constant(-1.0), _loss_bits_tensor(y_true_f, y_pred_win)), ), ), ), ) def create_nn_model( n_hidden_layers=0, n_cells=50, dropout_rate=0.1, n_labels=1, label_activation='sigmoid', loss=bits_loss, **kwargs, ): team_input = layers.Input(shape=(1,), dtype='int32', name='team') oppo_team_input = layers.Input(shape=(1,), dtype='int32', name='oppo_team') round_type_input = layers.Input(shape=(1,), dtype='int32', name='round_type') venue_input = layers.Input(shape=(1,), dtype='int32', name='venue') stats_input = layers.Input(shape=(N_FEATURES - 4,), dtype='float32', name='stats') team_layer = layers.Embedding(N_TEAMS * 2, 4, input_length=1)(team_input) flatten_team_layer = layers.Flatten()(team_layer) oppo_team_layer = layers.Embedding(N_TEAMS * 2, 4, input_length=1)(oppo_team_input) flatten_oppo_team_layer = layers.Flatten()(oppo_team_layer) round_type_layer = layers.Embedding(N_ROUND_TYPES * 2, 4, input_length=1)(round_type_input) flatten_round_layer = layers.Flatten()(round_type_layer) venue_layer = layers.Embedding(N_VENUES * 2, 4, input_length=1)(venue_input) flatten_venue_layer = layers.Flatten()(venue_layer) concated_layers = layers.concatenate( [flatten_team_layer, flatten_oppo_team_layer, flatten_round_layer, flatten_venue_layer, stats_input] ) layer_n = layers.Dense(n_cells, input_shape=(N_FEATURES,), activation='relu')(concated_layers) dropout_n = layers.Dropout(dropout_rate)(layer_n) for _ in range(n_hidden_layers - 1): layer_n = layers.Dense(n_cells, input_shape=(N_FEATURES,), activation='relu')(dropout_n) dropout_n = layers.Dropout(dropout_rate)(layer_n) output = layers.Dense(n_labels, activation=label_activation)(dropout_n) model = 
models.Model( inputs=[ team_input, oppo_team_input, round_type_input, venue_input, stats_input ], outputs=output ) model.compile(loss=loss, optimizer='adam') return lambda: model class KerasWrapper(BaseEstimator, ClassifierMixin): def __init__( self, model_func=create_nn_model, n_hidden_layers=2, n_cells=25, dropout_rate=0.1, label_activation='sigmoid', n_labels=1, loss=bits_loss, embed_dim=4, **kwargs, ): self.model_func = model_func self.n_hidden_layers = n_hidden_layers self.n_cells = n_cells self.dropout_rate = dropout_rate self.label_activation = label_activation self.n_labels = n_labels self.loss = loss self.embed_dim = embed_dim self.kwargs = kwargs self._create_model() def fit(self, X, y, validation_data=None): self._model.fit(X, y, validation_data=validation_data) return self def predict_proba(self, X): return self._model.predict(X) def predict(self, X): return self.predict_proba(X) if self.n_labels == 1 else np.argmax(self.predict_proba(X), axis=1) def set_params(self, **params): super().set_params(**params) self._create_model() return self @property def history(self): return self._model.model.history def _create_model(self): K.clear_session() self._model = wrappers.scikit_learn.KerasRegressor( build_fn=self.model_func( n_hidden_layers=self.n_hidden_layers, n_cells=self.n_cells, dropout_rate=self.dropout_rate, label_activation=self.label_activation, n_labels=self.n_labels, loss=self.loss, embed_dim=self.embed_dim, **self.kwargs, ), epochs=20, callbacks=[callbacks.EarlyStopping(monitor='val_loss', patience=5)] ) # Adapted this code from: http://zachmoshe.com/2017/04/03/pickling-keras-models.html # Keras has since been updated to be picklable, but my custom tensorflow loss function is not # (at least I can figure out how to pickle it). So, this is necessary # for basic Scikit-learn functionality like get/set params, grid search, and multiprocessing. 
def __getstate__(self): model_str = "" with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as f: models.save_model(self.model, f.name, overwrite=True) model_str = f.read() d = {key: value for key, value in self.__dict__.items() if key != 'model'} d.update({'model_str': model_str}) return d def __setstate__(self, state): with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as f: f.write(state['model_str']) f.flush() model = models.load_model(f.name) d = {value: key for value, key in state.items() if key != 'model_str'} d.update({'model': model}) self.__dict__ = d ``` #### Keras classifier ``` # Keras classifier with full data set nn = make_pipeline( ColumnDropper(cols_to_drop=ELO_MODEL_COLS), ColumnTransformer( [ ( "ordinalencoder", OrdinalEncoder(handle_unknown='return_nan', handle_missing='return_nan'), slice(4), ) ], remainder=StandardScaler(), ), NanFiller(), InputLister(n_inputs=5), KerasWrapper( n_hidden_layers=2, n_cells=25, dropout_rate=0.1, n_labels=2, label_activation='softmax', loss=bits_loss, ), ) X_val_trans = nn[:-1].fit_transform(X_val) nn.fit(X_train, y_train, keraswrapper__validation_data=(X_val_trans, y_val)) model_tracking.graph_tf_model_history(nn[-1].history) bits = bits_scorer(nn, X_test, y_test, proba=True) bits ``` #### Keras classifier based on predictions ``` N_ROWS = len(data.data) def create_embed_model( n_hidden_layers=0, n_cells=50, dropout_rate=0.1, n_labels=1, label_activation='sigmoid', loss=bits_loss, embed_dim=4, **kwargs, ): pred_input = layers.Input(shape=(1,), dtype='int32', name='preds') layer_n = layers.Dense(n_cells, input_shape=(1,), activation='relu')(pred_input) dropout_n = layers.Dropout(dropout_rate)(layer_n) for _ in range(n_hidden_layers - 1): layer_n = layers.Dense(n_cells, input_shape=(1,), activation='relu')(dropout_n) dropout_n = layers.Dropout(dropout_rate)(layer_n) output = layers.Dense(n_labels, activation=label_activation)(dropout_n) model = models.Model( inputs=pred_input, outputs=output, ) 
model.compile(loss=loss, optimizer='adam') return lambda: model # Keras classifier with prediction input K.clear_session() class PredictionEncoder(BaseEstimator, TransformerMixin): def fit(self, X, y): self._min_pred = round(X.min()) return self def transform(self, X, y=None): # Assumes that X is a single prediction column X_np = np.array(X).reshape(-1) # In case there are negative predicted margins smaller than our training set, # we set them to zero return np.maximum(X_np.round() - min(self._min_pred, 0), np.zeros(len(X_np))).astype(int).reshape(-1, 1) nnse = make_pipeline( KerasWrapper( model_func=create_embed_model, n_hidden_layers=1, n_cells=25, dropout_rate=0.1, n_labels=2, label_activation='softmax', loss=bits_loss, ), ) # nnse[:-1].fit(X_train_se) # X_val_trans = nnse[:-1].transform(X_val_se) nnse.fit( X_train_se, y_train_se, keraswrapper__validation_data=(X_val_se, y_val) ) model_tracking.graph_tf_model_history(nnse[-1].history) bits = bits_scorer( nnse, X_test_se, y_test, proba=True, ) bits ``` #### Conclusion A basic neural net with the full data set performs about as well as XGBoost. The model that takes `StackingEstimator` predictions as input performs horribly, whether the model uses an embedding layer or not. Given that the results of `predict_proba` tend to be very close to 0 or 1, it probably just way over-fits, resulting in excessive confidence in its predictions. ### Conclusion Although the default `XGBClassifier` performed best, I believe some tuning would improve the version of `XGBClassifier` with the custom loss function such that it performed better. Same goes for the neural net, but `xgboost` models are simpler and train faster, so I'll stick with them for now, because I don't feel like dealing with optimising NN architectures.
github_jupyter
__Author: Manu Jayadharan, University of Pittsburgh, 2020__ # Solving the diffusion equation using its mixed form. We have a system of equations to solve: $p_t + \nabla\cdot u -f = 0$ and $-\nabla p = u$, over domain $\Omega$ from time T_initial to T_final. Variables $p$ and $u$ have the physical meaning of pressure and velocity respectively. For demonstration purposes we take $f=\sin(x_1 + x_2) + 2t\sin(x_1 + x_2)$ and $\Omega = [-2,2]\times [0,1]$ and the time interval to be $[0,1]$, so we can compare the results with the actual solution $u=t\sin(x_1 + x_2)$. ``` #Import fluidlearn package and classes import fluidlearn from fluidlearn import dataprocess ``` ### Defining the domain and time interval for which the PDE needs to be solved. This matters only for generating collocation points and if the user is feeding their own collocation points, they can skip this step. ``` #domain range X_1_domain = [-2, 2] X_2_domain = [0, 1] #time range T_initial = 0 T_final = 1 T_domain = [T_initial, T_final] #domain of the problem domain_bounds = [X_1_domain, X_2_domain, T_domain] ``` ### Loading data from a csv file - We use the manufactured data with $u=t\sin(x_1 + x_2)$ saved in a csv file. - Data is saved in the format: ($x_1 , x_2, t, u(x_1, x_2, t)$) as four columns. - You could either preprocess your data to be in this format or load your data from a csv file with a similar format. ``` path_to_data = "data_manufactured/t_sin_x_plus_y.csv" X_data, Y_data = dataprocess.imp_from_csv(path_to_csv_file=path_to_data, x_y_combined=True, y_dim=1) ``` ### Defining the rhs function $f=\sin(x_1 + x_2) + 2t\sin(x_1 + x_2)$ of the PDE. We use the tensorflow.sin function instead of python functions; we could use numpy.sin as well. 
``` def rhs_function (args, time_dep=True): import tensorflow as tf if time_dep: space_inputs = args[:-1] time_inputs = args[-1] else: space_inputs = args return tf.sin(space_inputs[0]+space_inputs[1]) + 2*time_inputs*tf.sin(space_inputs[0]+space_inputs[1]) ``` ### Defining the model architecture ``` model_type = 'forward' space_dim = 2 #dimension of Omega time_depedent_problem = True n_hid_lay=3 #numberof hidden layers in the neural network n_hid_nrn=20 #number of neurons in each hidden layer act_func='tanh' #activation function used for hidden layers: could be elu, relu, sigmoid loss_list='mse' #type of error function used for cost functin, we use mean squared error. optimizer='adam' #type of optimizer for cost function minimization dom_bounds=domain_bounds #domain bounds where collocation points has to be generated distribution = 'uniform' #type of distribution used for generating the pde collocation points. number_of_collocation_points = 5000 batch_size = 32 #batch size for stochastic batch gradient type optimization num_epochs = 10 #number of epochs used for trainng ``` ### Defining the fluidlearn solver ``` diffusion_model = fluidlearn.Solver() diffusion_model(model_type=model_type, space_dim=space_dim, time_dep=time_depedent_problem, output_dim=1, n_hid_lay=n_hid_lay, n_hid_nrn=n_hid_lay, act_func=act_func, rhs_func=rhs_function, loss_list=loss_list, optimizer=optimizer, dom_bounds=dom_bounds, load_model=False, model_path=None,) ``` ### Fitting the model ``` diffusion_model.fit( x=X_data, y=Y_data, colloc_points=number_of_collocation_points, dist=distribution, batch_size=batch_size, epochs=num_epochs, ) ``` ### Resuming Training the model again for 50 more epochs ``` diffusion_model.fit( x=X_data, y=Y_data, colloc_points=number_of_collocation_points, dist=distribution, batch_size=batch_size, epochs=50, ) ``` ### Demo Using the trained model for predicton ``` #taking two points from the domain for time t=0.3 and t=0.76 respectively x_test_points = 
[[-0.5,0.1,0.3], [0.66,0.6,0.76]] #Predicting the value y_predicted = diffusion_model.predict(x_test_points) #finding the true y value for comparing import numpy as np x_test_points = np.array(x_test_points) y_true = np.sin(x_test_points[:,0:1] + x_test_points[:,1:2]) * x_test_points[:,2:3] #looking at predicted and true solution side by side. np.concatenate([y_predicted, y_true], axis=1) ``` Note that we need more training for further improving the accuracy. ### Saving the model to a specified location. ``` path_to_save_model = "saved_model/model_name" diffusion_model.save_model(path_to_save_model) ``` ### Loading the saved model ``` path_to_load_model = "saved_model/model_name" loaded_diffusion_model = fluidlearn.Solver() loaded_diffusion_model(space_dim=2, time_dep=True, load_model=True, model_path=path_to_load_model) ``` ### Predicting using loaded model ``` y_predicted = loaded_diffusion_model.predict(X_data) y_predicted ```
github_jupyter
# MO434 - Final S2 ## Physionet Challenge 2020 ## Code loading Firstly, the training, inference and evaluation scripts are loaded from the remote repository. ``` !git clone -b v1.0-rc2 https://github.com/Kotzly/physionet-challenge-2020.git ``` ## Data Loading Then the datasets are downloaded, they are 6 in total. All recordings are then copied to the same folder. ``` !gdown --id "1-5x4KIbnr7Lj1tVmMkuTnqznk7x5Vf4R" !gdown --id "1-33KhbVm-BSfBHt2LYgZ6Q5zdPu8AlQ8" !gdown --id "1-HuKofw5BFjBL69X5W2BiRnRpqy3V__i" !gdown --id "1-KjbowxvHaCUPz23A_ujBnlujyFswaw6" !gdown --id "1-9f0JFg_QoPfHAZ_zj4dkLfT3X9B39x9" !gdown --id "1-AQuoTSL1KD2J0N0y-U886n_Mdx_oIAf" import os tar_files = os.listdir() tar_files = [x for x in tar_files if x.endswith("gz")] !mkdir dataset for filename in tar_files: !tar -xf "{filename}" -C dataset !mv dataset/Training_2/* dataset !mv dataset/Training_WFDB/* dataset %%bash for filename in dataset/WFDB/*; do mv $filename dataset done !rm -rf dataset/Training_2/ dataset/Training_WFDB/ dataset/WFDB !wget -O mini.sh https://repo.anaconda.com/miniconda/Miniconda3-py38_4.8.2-Linux-x86_64.sh !chmod +x mini.sh !bash ./mini.sh -b -f -p /usr/local ``` ## Model training Here the MLP is trained using 60% of the files, and using 20% of the files as the validation set, to perform the early stopping. After training, the remaining files (20% of the dataset) are used as the testing set.
``` !python --version %cd physionet-challenge-2020 !python setup.py install from PIL import Image %matplotlib inline !pip install ipykernel !pip install tabnet==0.1.6 !python physionet_challenge/command/train_model.py ../dataset model_multibranch --split split.json --checkpoint checkpoint_other --model multi --monitor val_loss --seed 10 --processing other Image.open("plot_model_multibranch.png") !python physionet_challenge/command/train_model.py ../dataset model_multibranch_focal --split split.json --checkpoint checkpoint_other --model multi_focal --monitor val_loss --seed 10 --processing other Image.open("plot_model_multibranch_focal.png") !python physionet_challenge/command/train_model.py ../dataset model_multi_big --split split.json --checkpoint checkpoint_other --model multi_big --monitor val_loss --seed 10 --processing other Image.open("plot_model_multi_big.png") !python physionet_challenge/command/train_model.py ../dataset model_tabnet --split split.json --checkpoint checkpoint_other --model tabnet --monitor val_loss --seed 10 --processing other Image.open("plot_model_tabnet.png") ``` ## Model inference The model saved in the training step is loaded and used to infer the diagnostics for every recording in the test set. Each inference is saved in a specific inference folder. 
``` %%bash datasets="CPSC2018 INCART PTB G12EC" rm -rf results.txt rm -rf inference* for model_folder in model_*; do for dataset in $datasets; do inference_folder=inference_"$model_folder"_"$dataset" echo "===================================================================================================================================" >> results.txt echo "Results for $model_folder and $dataset" >> results.txt python physionet_challenge/command/inference.py $model_folder ../dataset $inference_folder --split_filepath splits/dataset_split_test.json --split $dataset --processing other python physionet_challenge/evaluation/evaluation.py ../dataset $inference_folder splits/dataset_split_test.json $dataset > temp.txt tail temp.txt -n 7 >> results.txt done done cat results.txt !cat results.txt !python physionet_challenge/command/inference.py model_multibranch ../dataset inference_multibranch --split_filepath split.json --split test --processing other !python physionet_challenge/command/inference.py model_multibranch_focal ../dataset inference_multibranch_focal --split_filepath split.json --split test --processing other !python physionet_challenge/command/inference.py model_multi_big ../dataset inference_multi_big --split_filepath split.json --split test --processing other !python physionet_challenge/command/inference.py model_tabnet ../dataset inference_tabnet --split_filepath split.json --split test --processing other ``` ## Model evaluation The inference files are used to compute the metrics. 
``` import pandas as pd # Multi-branch big,0.799,0.197,0.299,0.194,0.194,0.091,0.070 txt = """,AUROC,AUPRC,Accuracy,F-measure,Fbeta-measure,Gbeta-measure,Challenge metric Multi-branch,0.793,0.184,0.275,0.202,0.218,0.098,0.226 Baseline (MLP),0.799,0.197,0.323,0.183,0.176,0.086,0.079 Multi-branch focal loss,0.773,0.194,0.254,0.041,0.034,0.018,-0.523 TabNet,0.528,0.066,0.125,0.000,0.000,0.000,-0.823 """ with open("txt.csv", "w") as file: file.write(txt) pd.read_csv("txt.csv",index_col=0) !python physionet_challenge/evaluation/evaluation.py ../dataset inference_multibranch split.json test !python physionet_challenge/evaluation/evaluation.py ../dataset inference_multibranch_focal split.json test !python physionet_challenge/evaluation/evaluation.py ../dataset inference_tabnet split.json test !python physionet_challenge/evaluation/evaluation.py ../dataset inference_multi_big split.json test ```
github_jupyter
# k-NN: finding optimal weight function ('distance' or 'uniform') ``` """k-NN: finding optimal weight function ('distance' or 'uniform') """ # import libraries import pandas as pd import numpy as np from sklearn.neighbors import KNeighborsClassifier from sklearn import preprocessing from sklearn.model_selection import TimeSeriesSplit # import data df = pd.read_csv('data/SCADA_downtime_merged.csv', skip_blank_lines=True) # list of turbines to plot list1 = list(df['turbine_id'].unique()) # sort turbines in ascending order list1 = sorted(list1, key=int) # list of categories list2 = list(df['TurbineCategory_id'].unique()) # remove NaN from list list2 = [g for g in list2 if g >= 0] # sort categories in ascending order list2 = sorted(list2, key=int) # categories to remove list2 = [m for m in list2 if m not in (1, 12, 13, 14, 15, 17, 21, 22)] # empty list to hold optimal n values for all turbines num = [] # empty list to hold minimum error readings for all turbines err = [] # filter only data for turbine x for x in list1: dfx = df[(df['turbine_id'] == x)].copy() # copying fault to new column (mins) (fault when turbine category id is y) for y in list2: def f(c): if c['TurbineCategory_id'] == y: return 0 else: return 1 dfx['mins'] = dfx.apply(f, axis=1) # sort values by timestamp in descending order dfx = dfx.sort_values(by='timestamp', ascending=False) # reset index dfx.reset_index(drop=True, inplace=True) # assigning value to first cell if it's not 0 if dfx.loc[0, 'mins'] == 0: dfx.set_value(0, 'mins', 0) else: dfx.set_value(0, 'mins', 999999999) # using previous value's row to evaluate time for i, e in enumerate(dfx['mins']): if e == 1: dfx.at[i, 'mins'] = dfx.at[i - 1, 'mins'] + 10 # sort in ascending order dfx = dfx.sort_values(by='timestamp') # reset index dfx.reset_index(drop=True, inplace=True) # convert to hours, then round to nearest hour dfx['hours'] = dfx['mins'].astype(np.int64) dfx['hours'] = dfx['hours']/60 dfx['hours'] = round(dfx['hours']).astype(np.int64) 
# > 48 hours - label as normal (999) def f1(c): if c['hours'] > 48: return 999 else: return c['hours'] dfx['hours'] = dfx.apply(f1, axis=1) # filter out curtailment - curtailed when turbine is pitching outside # 0deg <= normal <= 3.5deg def f2(c): if 0 <= c['pitch'] <= 3.5 or c['hours'] != 999 or ( (c['pitch'] > 3.5 or c['pitch'] < 0) and ( c['ap_av'] <= (.1 * dfx['ap_av'].max()) or c['ap_av'] >= (.9 * dfx['ap_av'].max()))): return 'normal' else: return 'curtailed' dfx['curtailment'] = dfx.apply(f2, axis=1) # filter unusual readings, i.e., for normal operation, power <= 0 in # operating wind speeds, power > 100 before cut-in, runtime < 600 and # other downtime categories def f3(c): if c['hours'] == 999 and (( 3 < c['ws_av'] < 25 and ( c['ap_av'] <= 0 or c['runtime'] < 600 or c['EnvironmentalCategory_id'] > 1 or c['GridCategory_id'] > 1 or c['InfrastructureCategory_id'] > 1 or c['AvailabilityCategory_id'] == 2 or 12 <= c['TurbineCategory_id'] <= 15 or 21 <= c['TurbineCategory_id'] <= 22)) or (c['ws_av'] < 3 and c['ap_av'] > 100)): return 'unusual' else: return 'normal' dfx['unusual'] = dfx.apply(f3, axis=1) # round to 6 hour intervals def f4(c): if 1 <= c['hours'] <= 6: return 6 elif 7 <= c['hours'] <= 12: return 12 elif 13 <= c['hours'] <= 18: return 18 elif 19 <= c['hours'] <= 24: return 24 elif 25 <= c['hours'] <= 30: return 30 elif 31 <= c['hours'] <= 36: return 36 elif 37 <= c['hours'] <= 42: return 42 elif 43 <= c['hours'] <= 48: return 48 else: return c['hours'] dfx['hours6'] = dfx.apply(f4, axis=1) # change label for unusual and curtailed data (9999) def f5(c): if c['unusual'] == 'unusual' or c['curtailment'] == 'curtailed': return 9999 else: return c['hours6'] dfx['hours_%s' % y] = dfx.apply(f5, axis=1) # drop unnecessary columns dfx = dfx.drop('hours6', axis=1) dfx = dfx.drop('hours', axis=1) dfx = dfx.drop('mins', axis=1) dfx = dfx.drop('curtailment', axis=1) dfx = dfx.drop('unusual', axis=1) # separate features from classes for classification features = 
[ 'ap_av', 'ws_av', 'wd_av', 'pitch', 'ap_max', 'ap_dev', 'reactive_power', 'rs_av', 'gen_sp', 'nac_pos'] classes = [col for col in dfx.columns if 'hours' in col] # list of columns to copy into new df list3 = features + classes + ['timestamp'] df2 = dfx[list3].copy() # drop NaNs df2 = df2.dropna() X = df2[features] # normalise features to values b/w 0 and 1 X = preprocessing.normalize(X) Y = df2[classes] # convert from pd dataframe to np array Y = Y.as_matrix() # subsetting just the odd ones weights = ['uniform', 'distance'] # empty list that will hold average cross validation scores for each n scores = [] # cross validation using time series split tscv = TimeSeriesSplit(n_splits=5) # looping for each value of w and defining classifier for w in weights: knn = KNeighborsClassifier(weights=w, n_jobs=-1) # empty list to hold score for each cross validation fold p1 = [] # looping for each cross validation fold for train_index, test_index in tscv.split(X): # split train and test sets X_train, X_test = X[train_index], X[test_index] Y_train, Y_test = Y[train_index], Y[test_index] # fit to classifier and predict knn1 = knn.fit(X_train, Y_train) pred = knn1.predict(X_test) # accuracy score p2 = np.sum(np.equal(Y_test, pred))/Y_test.size # add to list p1.append(p2) # average score across all cross validation folds p = sum(p1)/len(p1) scores.append(p) # changing to misclassification error MSE = [1 - x for x in scores] # determining best n optimal = weights[MSE.index(min(MSE))] num.append(optimal) err.append(min(MSE)) d = pd.DataFrame(num, columns=['weights']) d['error'] = err d['turbine'] = list1 d ```
github_jupyter
# MiRAC-P The following example presents the nadir passive microwave radiometer MiRAC-P. The Microwave Radar/Radiometer for Arctic Clouds - passive (MiRAC-P) was installed during ACLOUD and AFLUX. During MOSAiC-ACA, passive microwave observations are conducted by the HATPRO instrument. More information on MiRAC can be found in [Mech et al. (2019)](https://amt.copernicus.org/articles/12/5019/2019/). If you have questions or if you would like to use the data for a publication, please don't hesitate to get in contact with the dataset authors as stated in the dataset attributes `contact` or `author`. ## Data access Some of the data, like the preliminary data of the HALO-(AC)3 campaign, is stored on the (AC)3 nextcloud server. This requires username and password as credentials (registration) that need to be loaded from environment variables ``` import os ac3cloud_username = os.environ['AC3_USER'] ac3cloud_password = os.environ['AC3_PASSWORD'] import ac3airborne cat = ac3airborne.get_intake_catalog() datasets = [] for campaign in ['ACLOUD', 'AFLUX']: datasets.extend(list(cat[campaign]['P5']['MiRAC-P'])) datasets ``` ```{note} Have a look at the attributes of the xarray dataset `ds_mirac_p` for all relevant information on the dataset, such as author, contact, or citation information. ``` ``` ds_mirac_p = ds_cth = cat['ACLOUD']['P5']['MiRAC-P']['ACLOUD_P5_RF05'](user=ac3cloud_username,password=ac3cloud_password).to_dask() ds_mirac_p ``` The dataset includes nadir brightness temperatures (`tb`) observed by MiRAC-P at the 183.31 GHz water vapor absorption line (183±0.6, ±1.5, ±2.5, ±3.5, ±5.0, ±7.5 GHz) and two window channels at 243 and 340 GHz. ## Load Polar 5 flight phase information Polar 5 flights are divided into segments to easily access start and end times of flight patterns. For more information have a look at the respective [github](https://github.com/igmk/flight-phase-separation) repository. 
At first we want to load the flight segments of (AC)³airborne ``` meta = ac3airborne.get_flight_segments() ``` The following command lists all flight segments into the dictionary `segments` ``` segments = {s.get("segment_id"): {**s, "flight_id": flight["flight_id"]} for campaign in meta.values() for platform in campaign.values() for flight in platform.values() for s in flight["segments"] } ``` In this example we want to look at a high-level segment during ACLOUD RF05. ``` seg = segments["ACLOUD_P5_RF05_hl07"] ``` Using the start and end times of the segment `ACLOUD_P5_RF05_hl07` stored in `seg`, we slice the MiRAC-P data to the selected flight sections. ``` ds_mirac_p_sel = ds_mirac_p.sel(time=slice(seg["start"], seg["end"])) ``` In polar regions, the surface type is helpful for the interpretation of airborne passive microwave observations, especially near the marginal sea ice zone, as generally a higher emissivity is expected over sea ice compared to open ocean. Therefore, we also load AMSR2 sea ice concentration data along the Polar 5 flight track, which is operationally derived by the University of Bremen. ``` ds_sea_ice = cat['ACLOUD']['P5']['AMSR2_SIC']['ACLOUD_P5_RF05'].to_dask().sel( time=slice(seg["start"], seg["end"])) ``` ## Plots The flight section during ACLOUD RF05 is flown at about 3 km altitude in west-east direction during a cold-air outbreak event perpendicular to the wind field. Clearly one can identify the roll-cloud structure in the radar reflectivity and the 89 GHz brightness temperature. 
``` import warnings warnings.filterwarnings("ignore") %matplotlib inline import matplotlib.pyplot as plt import matplotlib.dates as mdates from matplotlib import cm import numpy as np plt.style.use("../../mplstyle/book") fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, gridspec_kw=dict(height_ratios=(1, 1, 0.1))) kwargs = dict(s=10, linewidths=0) colors = cm.get_cmap('viridis', 6).colors for i in range(0, 6): ax1.scatter(ds_mirac_p_sel.time, ds_mirac_p_sel.tb.sel(channel=i), label='%g GHz'%ds_mirac_p_sel.frequency.sel(channel=i).item(), color=colors[i], **kwargs) for i in range(6, 8): ax2.scatter(ds_mirac_p_sel.time, ds_mirac_p_sel.tb.sel(channel=i), label='%g GHz'%ds_mirac_p_sel.frequency.sel(channel=i).item(), **kwargs) ax1.legend(frameon=False, bbox_to_anchor=(1.05, 0.5), loc='center left') ax2.legend(frameon=False, bbox_to_anchor=(1.05, 0.5), loc='center left') ax1.set_ylabel('$T_b$ [K]') ax2.set_ylabel('$T_b$ [K]') # plot AMSR2 sea ice concentration im = ax3.pcolormesh(np.array([ds_sea_ice.time, ds_sea_ice.time]), np.array([0, 1]), np.array([ds_sea_ice.sic]), cmap='Blues_r', vmin=0, vmax=100, shading='auto') cax = fig.add_axes([0.87, 0.085, 0.1, ax3.get_position().height]) fig.colorbar(im, cax=cax, orientation='horizontal', label='Sea ice [%]') ax3.tick_params(axis='y', labelleft=False, left=False) ax3.set_xlabel('Time (hh:mm) [UTC]') ax3.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M')) plt.show() ```
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` # ATTENTION: Please do not alter any of the provided code in the exercise. Only add your own code where indicated # ATTENTION: Please do not add or remove any cells in the exercise. The grader will check specific cells based on the cell position. # ATTENTION: Please use the provided epoch values when training. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Train Your Own Model and Convert It to TFLite This notebook uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels), as seen here: <table> <tr><td> <img src="https://tensorflow.org/images/fashion-mnist-sprite.png" alt="Fashion MNIST sprite" width="600"> </td></tr> <tr><td align="center"> <b>Figure 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">Fashion-MNIST samples</a> (by Zalando, MIT License).<br/>&nbsp; </td></tr> </table> Fashion MNIST is intended as a drop-in replacement for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset—often used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc.) in a format identical to that of the articles of clothing we'll use here. This uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. 
Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code. We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow. Import and load the Fashion MNIST data directly from TensorFlow: # Setup ``` # TensorFlow import tensorflow as tf # TensorFlow Datsets import tensorflow_datasets as tfds tfds.disable_progress_bar() # Helper Libraries import numpy as np import matplotlib.pyplot as plt import pathlib from os import getcwd print('\u2022 Using TensorFlow Version:', tf.__version__) print('\u2022 GPU Device Found.' if tf.test.is_gpu_available() else '\u2022 GPU Device Not Found. Running on CPU') ``` # Download Fashion MNIST Dataset We will use TensorFlow Datasets to load the Fashion MNIST dataset. ``` splits = tfds.Split.ALL.subsplit(weighted=(80, 10, 10)) filePath = f"{getcwd()}/../tmp2/" splits, info = tfds.load('fashion_mnist', with_info=True, as_supervised=True, split=splits, data_dir=filePath) (train_examples, validation_examples, test_examples) = splits num_examples = info.splits['train'].num_examples num_classes = info.features['label'].num_classes ``` The class names are not included with the dataset, so we will specify them here. ``` class_names = ['T-shirt_top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] # Create a labels.txt file with the class names with open('labels.txt', 'w') as f: f.write('\n'.join(class_names)) # The images in the dataset are 28 by 28 pixels. IMG_SIZE = 28 ``` # Preprocessing Data ## Preprocess ``` # EXERCISE: Write a function to normalize the images. 
def format_example(image, label): # Cast image to float32 image = tf.cast(image, tf.float32)# YOUR CODE HERE # Normalize the image in the range [0, 1] image = image/255 # YOUR CODE HERE return image, label # Specify the batch size BATCH_SIZE = 256 ``` ## Create Datasets From Images and Labels ``` # Create Datasets train_batches = train_examples.cache().shuffle(num_examples//4).batch(BATCH_SIZE).map(format_example).prefetch(1) validation_batches = validation_examples.cache().batch(BATCH_SIZE).map(format_example) test_batches = test_examples.map(format_example).batch(1) ``` # Building the Model ``` Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 26, 26, 16) 160 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 13, 13, 16) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 11, 11, 32) 4640 _________________________________________________________________ flatten (Flatten) (None, 3872) 0 _________________________________________________________________ dense (Dense) (None, 64) 247872 _________________________________________________________________ dense_1 (Dense) (None, 10) 650 ================================================================= Total params: 253,322 Trainable params: 253,322 Non-trainable params: 0 ``` ``` # EXERCISE: Build and compile the model shown in the previous cell. 
model = tf.keras.Sequential([ # Set the input shape to (28, 28, 1), kernel size=3, filters=16 and use ReLU activation, tf.keras.layers.Conv2D(16, (3,3), activation = 'relu', input_shape = (28,28,1)),# YOUR CODE HERE tf.keras.layers.MaxPooling2D(), # Set the number of filters to 32, kernel size to 3 and use ReLU activation tf.keras.layers.Conv2D(32, (3,3), activation = 'relu'),# YOUR CODE HERE # Flatten the output layer to 1 dimension tf.keras.layers.Flatten(), # Add a fully connected layer with 64 hidden units and ReLU activation tf.keras.layers.Dense(64, activation = "relu"),# YOUR CODE HERE # Attach a final softmax classification head tf.keras.layers.Dense(10, activation = "softmax")])# YOUR CODE HERE # Set the appropriate loss function and use accuracy as your metric model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=["accuracy"]) ``` ## Train ``` history = model.fit(train_batches, epochs=10, validation_data=validation_batches) ``` # Exporting to TFLite You will now save the model to TFLite. We should note, that you will probably see some warning messages when running the code below. These warnings have to do with software updates and should not cause any errors or prevent your code from running. ``` # EXERCISE: Use the tf.saved_model API to save your model in the SavedModel format. 
export_dir = 'saved_model/1' tf.saved_model.save(model, export_dir) # YOUR CODE HERE # Select mode of optimization mode = "Speed" if mode == 'Storage': optimization = tf.lite.Optimize.OPTIMIZE_FOR_SIZE elif mode == 'Speed': optimization = tf.lite.Optimize.OPTIMIZE_FOR_LATENCY else: optimization = tf.lite.Optimize.DEFAULT # EXERCISE: Use the TFLiteConverter SavedModel API to initialize the converter converter = tf.lite.TFLiteConverter.from_saved_model(export_dir) # YOUR CODE HERE # Set the optimzations converter.optimizations = [optimization] # YOUR CODE HERE # Invoke the converter to finally generate the TFLite model tflite_model = converter.convert() # YOUR CODE HERE tflite_model_file = pathlib.Path('./model.tflite') tflite_model_file.write_bytes(tflite_model) ``` # Test the Model with TFLite Interpreter ``` # Load TFLite model and allocate tensors. interpreter = tf.lite.Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_index = interpreter.get_input_details()[0]["index"] output_index = interpreter.get_output_details()[0]["index"] # Gather results for the randomly sampled test images predictions = [] test_labels = [] test_images = [] for img, label in test_batches.take(50): interpreter.set_tensor(input_index, img) interpreter.invoke() predictions.append(interpreter.get_tensor(output_index)) test_labels.append(label[0]) test_images.append(np.array(img)) # Utilities functions for plotting def plot_image(i, predictions_array, true_label, img): predictions_array, true_label, img = predictions_array[i], true_label[i], img[i] plt.grid(False) plt.xticks([]) plt.yticks([]) img = np.squeeze(img) plt.imshow(img, cmap=plt.cm.binary) predicted_label = np.argmax(predictions_array) if predicted_label == true_label.numpy(): color = 'green' else: color = 'red' plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100*np.max(predictions_array), class_names[true_label]), color=color) def plot_value_array(i, predictions_array, true_label): 
predictions_array, true_label = predictions_array[i], true_label[i] plt.grid(False) plt.xticks(list(range(10))) plt.yticks([]) thisplot = plt.bar(range(10), predictions_array[0], color="#777777") plt.ylim([0, 1]) predicted_label = np.argmax(predictions_array[0]) thisplot[predicted_label].set_color('red') thisplot[true_label].set_color('blue') # Visualize the outputs # Select index of image to display. Minimum index value is 1 and max index value is 50. index = 49 plt.figure(figsize=(6,3)) plt.subplot(1,2,1) plot_image(index, predictions, test_labels, test_images) plt.subplot(1,2,2) plot_value_array(index, predictions, test_labels) plt.show() ``` # Click the Submit Assignment Button Above You should now click the Submit Assignment button above to submit your notebook for grading. Once you have submitted your assignment, you can continue with the optinal section below. ## If you are done, please **don't forget to run the last two cells of this notebook** to save your work and close the Notebook to free up resources for your fellow learners. # Prepare the Test Images for Download (Optional) ``` !mkdir -p test_images from PIL import Image for index, (image, label) in enumerate(test_batches.take(50)): image = tf.cast(image * 255.0, tf.uint8) image = tf.squeeze(image).numpy() pil_image = Image.fromarray(image) pil_image.save('test_images/{}_{}.jpg'.format(class_names[label[0]].lower(), index)) !ls test_images !tar --create --file=fmnist_test_images.tar test_images !ls ``` # When you're done/would like to take a break, please run the two cells below to save your work and close the Notebook. This frees up resources for your fellow learners. ``` %%javascript <!-- Save the notebook --> IPython.notebook.save_checkpoint(); %%javascript <!-- Shutdown and close the notebook --> window.onbeforeunload = null window.close(); IPython.notebook.session.delete(); ```
github_jupyter
# Changing R plot options in Jupyter To use R with Jupyter, under the hood the service runs the IRKernel. This kernel communicates between R and the Jupyter service. IRKernel allows you to specify different options for plotting. For example, you can change: * whether to display images as SVG or PNG * the plot size ## Changing the plot mimetype The mimetype specifies how the plot returns from R to your browser. For example, the format can be SVG (Scalable vector graphics) or PNG (a bitmap image). Not all browsers handle SVG display very well. For example, some versions of Internet Explorer render SVG very slowly, or not at all. Thus it's a good idea to set the mimetype to "image/png" if you plan to use Internet Explorer. ### Using "image/png" ``` # Create a plot with some normally distributed data library("ggplot2") set.seed(42) n <- 1000 p <- ggplot(data.frame(x = rnorm(n), y = rnorm(n)), aes(x=x, y=y)) + geom_point(alpha = 0.25, size = 1, colour = "blue") + geom_density2d(colour = "red") ``` Now change the plot type to `image/png` ``` # Change mimetype to PNG options(jupyter.plot_mimetypes = "image/png") p + ggtitle(sprintf("Mime type = '%s'", getOption("jupyter.plot_mimetypes"))) ``` ### Using image/svg+xml Some browswers are very good at handling SVG plots. If you are running this notebook in a browser like Firefox or Chrome, then you can safely remove the comment in the next block of code to view the image using SVG. If you are using Internet Explorer, your browser session might hang up. If you are using the Edge browser in Windows 10, then the plot should display, but it could take some time to render. Proceed with caution! 
``` # Change mimetype to PNG ### Uncomment the next line if you are using a browser with good support for SVG graphics # options(jupyter.plot_mimetypes = "image/svg+xml") p + ggtitle(sprintf("Mime type = '%s'", getOption("jupyter.plot_mimetypes"))) ``` ## Changing the plot size You can change the plot size by setting the option `repr.plot.width` and `repr.plot.height`. The `repr` package is part of the IRKernel (the machinery that connects Jupyter and R), and specifically handles representations of objects, including plots. Try setting the plot width to 4, for a plot that fills only about half the available screen width: ``` options(repr.plot.width = 4, repr.plot.height = 3) p + ggtitle(sprintf( "Plot width = %s, plot height = %s", getOption("repr.plot.width"), getOption("repr.plot.height") )) ``` Next, set the plot width to 8, i.e. double the previous width. ``` options(repr.plot.width = 8, repr.plot.height = 3) p + ggtitle(sprintf( "Plot width = %s, plot height = %s", getOption("repr.plot.width"), getOption("repr.plot.height") )) options(repr.plot.width = 6, repr.plot.height = 6) p + ggtitle(sprintf( "Plot width = %s, plot height = %s", getOption("repr.plot.width"), getOption("repr.plot.height") )) ``` ## Jupyter only respects the last value of repr options in each cell Note that you can only change the plot options once in each cell. The following example creates two identical plots, and attempts to set the width and height for each. However, this **does not work**, as you can see from the output, where the two plots are clearly the same size. ``` options(repr.plot.width = 8, repr.plot.height = 3) p + ggtitle(sprintf( "Plot width = %s, plot height = %s", getOption("repr.plot.width"), getOption("repr.plot.height") )) options(repr.plot.width = 6, repr.plot.height = 6) p + ggtitle(sprintf( "Plot width = %s, plot height = %s", getOption("repr.plot.width"), getOption("repr.plot.height") )) ``` As you can see, you can only change the plot size once per cell. 
To create plots of different size, put each plot in a different Jupyter cell. --- Created by a Microsoft Employee. Copyright (C) Microsoft. All Rights Reserved.
github_jupyter
``` from toolbox.processing import * #%ls /home/stewart/su/2d_land_data/2D_Land_data_2ms/ file = "/home/stewart/su/2d_land_data/2D_Land_data_2ms/su/Line_001.su" #file = "/home/sfletcher/Downloads/2d_land_data/2D_Land_data_2ms/Line_001.su" #initialise file data, params = initialise(file) #no coordinates in the headers, but we know energy point number and channel. #%cat /home/stewart/su/2d_land_data/2D_Land_data_2ms/Line_001.TXT #%cat /home/stewart/su/2d_land_data/2D_Land_data_2ms/Line_001.SPS #%cat /home/stewart/su/2d_land_data/2D_Land_data_2ms/Line_001.RPS #%cat /home/stewart/su/2d_land_data/2D_Land_data_2ms/header import numpy as np from matplotlib import collections dmap = np.memmap(file, dtype=toolbox.typeSU(1501), mode='r') eps = np.unique(dmap['ep']) for ep in eps[:1]: panel = dmap[dmap['ep'] == ep].copy() panel = toolbox.agc(panel, None, **params) trace_centers = np.linspace(1,284, panel.size).reshape(-1,1) trace_width = 284/(panel.size*0.5) x = panel['trace'].copy() x += trace_centers y = np.meshgrid(np.arange(1501), np.arange(284))[0] x = np.split(x.ravel(), 284) y = np.split(y.ravel(), 284) bits = [zip(x[a],y[a]) for a in range(len(x))] fig = pylab.figure() ax = fig.add_subplot(111) col1 = collections.LineCollection(bits) col1.set_color('k') ax.add_collection(col1, autolim=True) ax.autoscale_view() pylab.xlim([0,284]) pylab.ylim([0,1500]) ax.set_ylim(ax.get_ylim()[::-1]) pylab.tight_layout() pylab.show() import numpy as np from matplotlib import collections dmap = np.memmap(file, dtype=toolbox.typeSU(1501), mode='r') eps = np.unique(dmap['ep']) for ep in eps[:1]: panel = dmap[dmap['ep'] == ep].copy() panel = toolbox.agc(panel, None, **params) trace_centers = np.linspace(1,284, panel.size).reshape(-1,1) scalar = 284/(panel.size*0.5) panel['trace'][:,-1] = np.nan x = panel['trace'].ravel() x[x < 0] = 0 y = np.meshgrid(np.arange(1501), np.arange(284))[0].ravel() zero_crossings = np.where(x == 0)[0]+1 zero_crossings = zero_crossings[np.diff(zero_crossings) == 
1] #zero_crossings = np.where(np.diff(np.signbit(x)))[0]+1 x = ((panel['trace']*scalar)+trace_centers).ravel() xverts = np.split(x, zero_crossings) yverts = np.split(y, zero_crossings) polygons = [zip(xverts[i], yverts[i]) for i in range(0, len(xverts)) if len(xverts[i]) > 2] xlines = np.split(x, 284) ylines = np.split(y, 284) lines = [zip(xlines[a],ylines[a]) for a in range(len(xlines))] fig = pylab.figure() ax = fig.add_subplot(111) col = collections.PolyCollection(polygons) col.set_color('k') ax.add_collection(col, autolim=True) col1 = collections.LineCollection(lines) col1.set_color('k') ax.add_collection(col1, autolim=True) ax.autoscale_view() pylab.xlim([0,284]) pylab.ylim([0,1500]) ax.set_ylim(ax.get_ylim()[::-1]) pylab.tight_layout() pylab.show() import numpy as np from matplotlib import collections %pylab tk def polytrace(data, **kwargs): segs = [] segl = [] nt = data.shape[-2] for i in range(nt): trace = data[i] line = list(zip(trace, np.arange(1501))) segl.append(line) xx = trace #np.ma.array(trace, mask=(trace <= 0)) yy = np.arange(1501) #np.ma.array(np.arange(0,1501), mask=(trace <= 0)) curve = [(0, 0)] curve.extend(list(zip(xx, yy))) curve.extend([(0, 1501)]) segs.append(curve) #print segs[0] print '' return segs, segl #first lets do some checks. does of energy points should equal number of records? print np.unique(data['ep']).size, np.unique(data['fldr']).size #no duplicates - that makes it easier. print 251*284 #284 traces per shot, 2 aux traces . 
lets have a look dmap = np.memmap(file, dtype=toolbox.typeSU(1501), mode='r') eps = np.unique(dmap['ep']) for ep in eps[:1]: panel = dmap[dmap['ep'] == ep].copy() panel = toolbox.agc(panel, None, **params) trace_centers = np.linspace(1,284, panel.size).reshape(-1,1) trace_width = 284/(panel.size*0.5) buf = panel['trace'].copy() buf *= trace_width segs, segl = polytrace(buf) fig = pylab.figure() ax = fig.add_subplot(111) offs = (10.0, 0.0) offs = list(zip(np.arange(238), np.zeros(238))) col = collections.PolyCollection(segs, offsets=offs) col.set_color('k') ax.add_collection(col, autolim=True) #col1 = collections.LineCollection(segl, offsets=offs) #col1.set_color('k') #ax.add_collection(col1, autolim=True) ax.autoscale_view() pylab.xlim([0,284]) pylab.ylim([0,1500]) ax.set_ylim(ax.get_ylim()[::-1]) pylab.tight_layout() pylab.show() import numpy as np from matplotlib import collections import matplotlib.pyplot as pylab #make some oscillating data panel = np.meshgrid(np.arange(1501), np.arange(284))[0] panel = np.sin(panel) #generate coordinate vectors. 
panel[:,-1] = np.nan #prevent wrapping when flatten 2d array x = panel.flatten() y = np.meshgrid(np.arange(1501), np.arange(284))[0].ravel() #find indexes of each zero crossing zero_crossings = np.where(np.diff(np.signbit(x)))[0]+1 #calculate scalar used to shift "traces" to plot corrdinates trace_centers = np.linspace(1,284, panel.shape[-2]).reshape(-1,1) gain = 0.5 #scale traces #shift traces to plotting coordinate x = ((panel*gain)+trace_centers).ravel() #split each vector at each zero crossing xverts = np.split(x, zero_crossings) yverts = np.split(y, zero_crossings) #we only want the vertices which outline positive values if x[0] > 0: steps = range(0, len(xverts),2) else: steps = range(1, len(xverts),2) #turn vectors of coordinates into lists of coordinate pairs polygons = [zip(xverts[i], yverts[i]) for i in steps if len(xverts[i]) > 2] #this is so we can plot the lines as well xlines = np.split(x, 284) ylines = np.split(y, 284) lines = [zip(xlines[a],ylines[a]) for a in range(len(xlines))] #and plot fig = pylab.figure() ax = fig.add_subplot(111) col = collections.PolyCollection(polygons) col.set_color('k') ax.add_collection(col, autolim=True) col1 = collections.LineCollection(lines) col1.set_color('k') ax.add_collection(col1, autolim=True) ax.autoscale_view() pylab.xlim([0,284]) pylab.ylim([0,1500]) ax.set_ylim(ax.get_ylim()[::-1]) pylab.tight_layout() pylab.show() import numpy as np from matplotlib import collections dmap = np.memmap(file, dtype=toolbox.typeSU(1501), mode='r') eps = np.unique(dmap['ep']) for ep in eps[:1]: panel = dmap[dmap['ep'] == ep].copy() panel = toolbox.agc(panel, None, **params) panel['trace'][:,-1] = np.nan trace_centers = np.linspace(1,284, panel.size).reshape(-1,1) scalar = 284/(panel.size*0.5) y = np.meshgrid(np.arange(1501), np.arange(284))[0].ravel() offsets = (np.meshgrid(np.arange(1501), np.arange(284))[1]+1).ravel() x = ((panel['trace']*scalar)+trace_centers).ravel() fig,ax = plt.subplots() #or i in range(284): 
#ax.plot(x[i],y[i],'k-') ax.fill_betweenx(y,offsets,x,where=(x>offsets),color='k') pylab.xlim([0,284]) pylab.ylim([0,1500]) ax.set_ylim(ax.get_ylim()[::-1]) pylab.tight_layout() pylab.show() ```
github_jupyter
# Think Bayes: Chapter 11 This notebook presents code and exercises from Think Bayes, second edition. Copyright 2016 Allen B. Downey MIT License: https://opensource.org/licenses/MIT ``` from __future__ import print_function, division % matplotlib inline import warnings warnings.filterwarnings('ignore') import math import numpy as np from thinkbayes2 import Pmf, Cdf, Suite, Joint import thinkplot ``` ## The Euro problem Problem statement: a coin spun on edge came up heads 140 times and tails 110 times; is the coin biased? Here's a more efficient version of the Euro class that takes the dataset in a more compact form and uses the binomial distribution (ignoring the binomial coefficient because it does not depend on `x`). ``` class Euro(Suite): """Represents hypotheses about the probability of heads.""" def Likelihood(self, data, hypo): """Computes the likelihood of the data under the hypothesis. hypo: integer value of x, the probability of heads (0-100) data: tuple of (number of heads, number of tails) """ x = hypo / 100.0 heads, tails = data like = x**heads * (1-x)**tails return like ``` If we know the coin is fair, we can evaluate the likelihood of the data directly. ``` data = 140, 110 suite = Euro() like_f = suite.Likelihood(data, 50) print('p(D|F)', like_f) ``` If we cheat and pretend that the alternative hypothesis is exactly the observed proportion, we can compute the likelihood of the data and the likelihood ratio, relative to the fair coin. ``` actual_percent = 100.0 * 140 / 250 likelihood = suite.Likelihood(data, actual_percent) print('p(D|B_cheat)', likelihood) print('p(D|B_cheat) / p(D|F)', likelihood / like_f) ``` Under this interpretation, the data are in favor of "biased", with K=6. But that's a total cheat. Suppose we think "biased" means either 0.4 or 0.6, but we're not sure which. The total likelihood of the data is the weighted average of the two likelihoods.
``` like40 = suite.Likelihood(data, 40) like60 = suite.Likelihood(data, 60) likelihood = 0.5 * like40 + 0.5 * like60 print('p(D|B_two)', likelihood) print('p(D|B_two) / p(D|F)', likelihood / like_f) ``` Under this interpretation, the data are in favor of "biased", but very weak. More generally, if "biased" refers to a range of possibilities with different probabilities, the total likelihood of the data is the weighted sum: ``` def SuiteLikelihood(suite, data): """Computes the weighted average of likelihoods for sub-hypotheses. suite: Suite that maps sub-hypotheses to probability data: some representation of the data returns: float likelihood """ total = 0 for hypo, prob in suite.Items(): like = suite.Likelihood(data, hypo) total += prob * like return total ``` Here's what it looks like if "biased" means "equally likely to be any value between 0 and 1". ``` b_uniform = Euro(range(0, 101)) b_uniform.Remove(50) b_uniform.Normalize() likelihood = SuiteLikelihood(b_uniform, data) print('p(D|B_uniform)', likelihood) print('p(D|B_uniform) / p(D|F)', likelihood / like_f) ``` By that definition, the data are evidence against the biased hypothesis, with K=2. But maybe a triangle prior is a better model of what "biased" means. ``` def TrianglePrior(): """Makes a Suite with a triangular prior.""" suite = Euro() for x in range(0, 51): suite.Set(x, x) for x in range(51, 101): suite.Set(x, 100-x) suite.Normalize() return suite ``` Here's what it looks like: ``` b_tri = TrianglePrior() b_tri.Remove(50) b_tri.Normalize() likelihood = b_tri.Update(data) print('p(D|B_tri)', likelihood) print('p(D|B_tri) / p(D|F)', likelihood / like_f) ``` By the triangle definition of "biased", the data are very weakly in favor of "fair". ## Normalizing constant We don't really need the SuiteLikelihood function, because `Suite.Update` already computes the total probability of the data, which is the normalizing constant. 
``` likelihood = SuiteLikelihood(b_uniform, data) likelihood euro = Euro(b_uniform) euro.Update(data) likelihood = SuiteLikelihood(b_tri, data) likelihood euro = Euro(b_tri) euro.Update(data) ``` This observation is the basis of hierarchical Bayesian models, of which this solution to the Euro problem is a simple example.
github_jupyter
# Dictionaries {"Java" => "Maps", "Python" => "Dictionaries"} Python has a structure called a dictionary which is very similar to Java's `Map`. They are pretty similar to use, with only some syntactic differences. In Java ```java Map<String, Integer> students = new HashMap<String, Integer>(); students.put("Josh", 12345); students.put("Erika", 11111); if (students.containsKey("Erika")) { students.put("Erika", students.get("Erika") * 2); } for (String name : students.keySet()) { System.out.println(name + " - " + students.get(name)); } System.out.println(students.get("Nicole")); System.out.println(students); // Output: // Josh - 12345 // Erika - 22222 // null // {Josh=12345, Erika=22222} ``` In Python ``` students = {} students['Josh'] = 12345; students['Erika'] = 11111; # The in operator works for dictionaries too! if 'Erika' in students: students['Erika'] = students['Erika'] * 2 # Could have also written: students['Erika'] *= 2 # This is just a foreach loop over the keys in the dictionary for name in students.keys(): print(name + ' - ' + str(students[name])) print(students) # print(students['Nicole']) Causes an error because there is no entry for that! ``` Some of you have heard the analogy that in Java, `Map`s are like arrays where you can use another type as the indices; as you can see from the Python example, they really take this idea to heart and let you use the array syntax you are used to on dictionaries. You can also mix types for the keys and values in Python like you can for any structure; pretty much any type works for the keys (must be hashable) and the values can be any type. You generally don't use many of the methods for dictionaries so I will leave it as an exercise to the reader to look up the documentation for dictionaries to see which methods are available. As usual, Python also provides nice ways of initializing a dictionary and surprisingly enough you are able to use dictionary comprehensions just like lists!
``` students = {'Josh': 12345, 'Erika': 11111} print(students) names = ['Hunter', 'Josh', 'Erik', 'Dylan', 'Joely', 'Nicole', 'Erika'] indices = { # Step 4) Put all the values in a dictionary name: names.index(name) # Step 3) What key, value pair do you want? Syntax is key: value for name in names # Step 1) What are you looping over and what is the loop variable? if name in students # Step 2 [Optional]) Only use this element from Step 1 if it satisfies this condition } # Note: names.index(name) returns the index of name in names print(indices) ```
github_jupyter
# Predicting Time Series Data > If you want to predict patterns from data over time, there are special considerations to take in how you choose and construct your model. This chapter covers how to gain insights into the data before fitting your model, as well as best-practices in using predictive modeling for time series data. This is the Summary of lecture "Machine Learning for Time Series Data in Python", via datacamp. - toc: true - badges: true - comments: true - author: Chanseok Kang - categories: [Python, Datacamp, Time_Series_Analysis, Machine_Learning] - image: images/price_percentile.png ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns plt.rcParams['figure.figsize'] = (10, 5) plt.style.use('fivethirtyeight') ``` ## Predicting data over time - Correlation and regression - Regression is similar to calculating correlation, with some key differences - Regression: A process that results in a formal model of the data - Correlation: A statistic that describes the data. Less information than regression model - Correlation between variables often changes over time - Time series often have patterns that change over time - Two timeseries that seem correlated at one moment may not remain so over time. 
- Scoring regression models - Two most common methods: - Correlation ($r$) - Coefficient of Determination ($R^2$) - The value of $R^2$ is bounded on the top by 1, and can be infinitely low - Values closer to 1 mean the model does a better job of predicting outputs \ $R^2 = 1 - \frac{\text{error}(\text{model})}{\text{variance}(\text{testdata})}$ ``` prices = pd.read_csv('./dataset/tsa_prices.csv', index_col='date', parse_dates=True) prices.head() # Plot the raw values over time prices.plot(); # Scatterplot with one company per axis prices.plot.scatter('EBAY', 'YHOO'); # Scatterplot with color relating to time prices.plot.scatter('EBAY', 'YHOO', c=prices.index, cmap=plt.cm.viridis, colorbar=False); ``` ### Fitting a simple regression model Now we'll look at a larger number of companies. Recall that we have historical price values for many companies. Let's use data from several companies to predict the value of a test company. You'll attempt to predict the value of the Apple stock price using the values of NVidia, Ebay, and Yahoo. Each of these is stored as a column in the all_prices DataFrame. Below is a mapping from company name to column name: ``` ebay: "EBAY" nvidia: "NVDA" yahoo: "YHOO" apple: "AAPL" ``` We'll use these columns to define the input/output arrays in our model. ``` all_prices = pd.read_csv('./dataset/all_prices.csv', index_col=0, parse_dates=True) all_prices.head() from sklearn.linear_model import Ridge from sklearn.model_selection import cross_val_score # Use stock symbols to extract training data X = all_prices[['EBAY', 'NVDA', 'YHOO']] y = all_prices[['AAPL']] # Fit and score the model with cross-validation scores = cross_val_score(Ridge(), X, y, cv=3) print(scores) ``` ### Visualizing predicted values When dealing with time series data, it's useful to visualize model predictions on top of the "actual" values that are used to test the model.
In this exercise, after splitting the data (stored in the variables ```X``` and ```y```) into training and test sets, you'll build a model and then visualize the model's predictions on top of the testing data in order to estimate the model's performance. ``` from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score # Split our data into training and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, shuffle=False, random_state=1) # Fit our model and generate predictions model = Ridge() model.fit(X_train, y_train) predictions = model.predict(X_test) score = r2_score(y_test, predictions) print(score) # Visualize our predictions along with the "true" values, and print the score fig, ax = plt.subplots(figsize=(15, 5)) ax.plot(range(len(y_test)), y_test, color='k', lw=3); ax.plot(range(len(predictions)), predictions, color='r', lw=2); ``` ## Advanced time series prediction - Data is messy - Real-world data is often messy - The two most common problems are missing data and outliers - This often happens because of human error, machine error malfunction, database failure, etc.. - Visualizing your raw data makes it easier to spot these problems - Interpolation: using time to fill in missing data - A common way to deal with missing data is to interpolate missing values - With timeseries data, you can use time to assist in interpolation. - In this case, interpolation means using the known values on either side of a gap in the data to make assumptions about what's missing - Using a rolling window to transform data - Another common use of rolling windows is to transform the data - Finding outliers in your data - Outliers are datapoints that are significantly statistically different from the dataset. 
- They can have negative effects on the predictive power of your model, biasing it away from its "true" value - One solution is to remove or replace outliers with a more representative value > Note: Be very careful about doing this - often it is difficult to determine what is a legitimately extreme value vs an aberration. ### Visualizing messy data Let's take a look at a new dataset - this one is a bit less-clean than what you've seen before. As always, you'll first start by visualizing the raw data. Take a close look and try to find datapoints that could be problematic for fitting models. ``` prices = pd.read_csv('./dataset/prices_null.csv', index_col=0, parse_dates=True) # Visualize the dataset prices.plot(legend=False); plt.tight_layout(); # Count the missing values of each time series missing_values = prices.isnull().sum() print(missing_values) ``` ### Imputing missing values When you have missing data points, how can you fill them in? In this exercise, you'll practice using different interpolation methods to fill in some missing values, visualizing the result each time. But first, you will create the function (```interpolate_and_plot()```) you'll use to interpolate missing data points and plot them.
``` # Create a function we'll use to interpolate and plot def interpolate_and_plot(prices, interpolation): # Create a boolean mask for missing values missing_values = prices.isna() # Interpolate the missing values prices_interp = prices.interpolate(interpolation) # Plot the results: the full series in black, interpolated values highlighted in red fig, ax = plt.subplots(figsize=(10, 5)) prices_interp.plot(color='k', alpha=0.6, ax=ax, legend=False); # Now plot the interpolated values on top in red prices_interp[missing_values].plot(ax=ax, color='r', lw=3, legend=False); # Interpolate using the latest non-missing value interpolation_type = 'zero' interpolate_and_plot(prices, interpolation_type) # Interpolate linearly between non-missing values interpolation_type = 'linear' interpolate_and_plot(prices, interpolation_type) # Interpolate with a quadratic function interpolation_type = 'quadratic' interpolate_and_plot(prices, interpolation_type) ``` ### Transforming raw data In the last chapter, you calculated the rolling mean. In this exercise, you will define a function that calculates the percent change of the latest data point from the mean of a window of previous data points. This function will help you calculate the percent change over a rolling window. This is a more stable kind of time series that is often useful in machine learning.
``` # Your custom function def percent_change(series): # Collect all *but* the last value of this window, then the final value previous_values = series[:-1] last_value = series[-1] # Calculate the % difference between the last value and the mean of earlier values percent_change = (last_value - np.mean(previous_values)) / np.mean(previous_values) return percent_change # Apply your custom function and plot prices_perc = prices.rolling(20).apply(percent_change) prices_perc.loc["2014":"2015"].plot(); ``` ### Handling outliers In this exercise, you'll handle outliers - data points that are so different from the rest of your data, that you treat them differently from other "normal-looking" data points. You'll use the output from the previous exercise (percent change over time) to detect the outliers. First you will write a function that replaces outlier data points with the median value from the entire time series. ``` def replace_outliers(series): # Calculate the absolute difference of each timepoint from the series mean absolute_differences_from_mean = np.abs(series - np.mean(series)) # Calculate a mask for the differences that are > 3 standard deviations from zero this_mask = absolute_differences_from_mean > (np.std(series) * 3) # Replace these values with the median across the data series[this_mask] = np.nanmedian(series) return series # Apply your preprocessing function to the timeseries and plot the results prices_perc = prices_perc.apply(replace_outliers) prices_perc.loc["2014":"2015"].plot(); ``` ## Creating features over time - Calculating "date-based" features - Thus far we've focused on calculating "statistical" features - these are features that correspond to statistical properties of the data, like "mean", "standard deviation", etc - However, don't forget the timeseries data often has more "human" features associated with it, like days of the week, holidays, etc.
- These features are often useful when dealing with timeseries data that spans multiple years (such as stock value over time) ### Engineering multiple rolling features at once Now that you've practiced some simple feature engineering, let's move on to something more complex. You'll calculate a collection of features for your time series data and visualize what they look like over time. This process resembles how many other time series models operate. ``` # Define a rolling window with Pandas, excluding the right-most datapoint of the window prices_perc_rolling = prices_perc['EBAY'].rolling(20, min_periods=5, closed='right') # Define the features you'll calculate for each window features_to_calculate = [np.min, np.max, np.mean, np.std] # Calculate these features for your rolling window object features = prices_perc_rolling.aggregate(features_to_calculate) # Plot the results ax = features.loc[:"2011-01"].plot(); prices_perc['EBAY'].loc[:"2011-01"].plot(ax=ax, color='k', alpha=0.2, lw=3); ax.legend(loc=(1.01, 0.6)); ``` ### Percentiles and partial functions In this exercise, you'll practice how to pre-choose arguments of a function so that you can pre-configure how it runs. You'll use this to calculate several percentiles of your data using the same ```percentile()``` function in numpy. 
``` from functools import partial percentiles = [1, 10, 25, 50, 75, 90, 99] # Use a list comprehension to create a partial function for each quantile percentile_functions = [partial(np.percentile, q=percentile) for percentile in percentiles] # Calculate each of these quantiles on the data using a rolling window prices_perc_rolling = prices_perc['EBAY'].rolling(20, min_periods=5, closed='right') features_percentiles = prices_perc_rolling.aggregate(percentile_functions) # Plot a subset of the result ax = features_percentiles.loc[:"2011-01"].plot(cmap=plt.cm.viridis); ax.legend(percentiles, loc=(1.01, 0.5)); plt.savefig('../images/price_percentile.png') ``` ### Using "date" information It's easy to think of timestamps as pure numbers, but don't forget they generally correspond to things that happen in the real world. That means there's often extra information encoded in the data such as "is it a weekday?" or "is it a holiday?". This information is often useful in predicting timeseries data. ``` # Extract date features from the data, add them as columns prices_perc['day_of_week'] = prices_perc.index.dayofweek prices_perc['week_of_year'] = prices_perc.index.weekofyear prices_perc['month_of_year'] = prices_perc.index.month # Print prices_perc print(prices_perc) ```
github_jupyter
``` %matplotlib inline ``` # Main file subgraph This is the main file for the subgraph classification task ``` import tensorflow as tf import numpy as np import gnn_utils import GNN as GNN import Net_Subgraph as n from scipy.sparse import coo_matrix ##### GPU & stuff config import os os.environ['CUDA_VISIBLE_DEVICES'] = "0" config = tf.ConfigProto() config.gpu_options.allow_growth = True data_path = "./data" #data_path = "./Clique" set_name = "sub_15_7_200" ############# training set ################ #inp, arcnode, nodegraph, nodein, labels = Library.set_load_subgraph(data_path, "train") inp, arcnode, nodegraph, nodein, labels, _ = gnn_utils.set_load_general(data_path, "train", set_name=set_name) ############ test set #################### #inp_test, arcnode_test, nodegraph_test, nodein_test, labels_test = Library.set_load_subgraph(data_path, "test") inp_test, arcnode_test, nodegraph_test, nodein_test, labels_test, _ = gnn_utils.set_load_general(data_path, "test", set_name=set_name) ############ validation set ############# #inp_val, arcnode_val, nodegraph_val, nodein_val, labels_val = Library.set_load_subgraph(data_path, "valid") inp_val, arcnode_val, nodegraph_val, nodein_val, labels_val, _ = gnn_utils.set_load_general(data_path, "validation", set_name=set_name) # set input and output dim, the maximum number of iterations, the number of epochs and the optimizer threshold = 0.01 learning_rate = 0.01 state_dim = 5 tf.reset_default_graph() input_dim = len(inp[0][0]) output_dim = 2 max_it = 50 num_epoch = 10000 optimizer = tf.train.AdamOptimizer # initialize state and output network net = n.Net(input_dim, state_dim, output_dim) # initialize GNN param = "st_d" + str(state_dim) + "_th" + str(threshold) + "_lr" + str(learning_rate) print(param) tensorboard = False g = GNN.GNN(net, input_dim, output_dim, state_dim, max_it, optimizer, learning_rate, threshold, graph_based=False, param=param, config=config, tensorboard=tensorboard) # train the model count = 0 ###### for j 
in range(0, num_epoch): _, it = g.Train(inputs=inp[0], ArcNode=arcnode[0], target=labels, step=count) if count % 30 == 0: print("Epoch ", count) print("Validation: ", g.Validate(inp_val[0], arcnode_val[0], labels_val, count)) # end = time.time() # print("Epoch {} at time {}".format(j, end-start)) # start = time.time() count = count + 1 # evaluate on the test set print("\nEvaluate: \n") print(g.Evaluate(inp_test[0], arcnode_test[0], labels_test, nodegraph_test[0])[0]) ```
github_jupyter
# Imports ``` # Import pandas import pandas as pd # Import matplotlib import matplotlib.pyplot as plt # Import numpy import numpy as np # Import Network X import networkx as nx ``` # Getting the data ## Paths for in.out files ``` # Path of IN-labels mesh_path = '../../data/final/mesh.pkl' # Path for IN-tags geo_path = '../../data/final/geo.pkl' # Path for IN-tags-restful rest_path = '../../data/final/geo_restful_chem.pkl' ``` ## Read geo_df and mesh_df ``` # Read MeSH mesh_df = pd.read_pickle(mesh_path) # Read GEO geo_df = pd.read_pickle(geo_path) # Read Restful API rest_df = pd.read_pickle(rest_path) # Separate Diseases geo_C = geo_df[geo_df['category']=='C'] geo_D = geo_df[geo_df['category']=='D'] # Find new tags for drugs geo_D_rest = pd.merge( geo_D, rest_df['mesh_id disease_tag_from_tagger'.split()].drop_duplicates(), how='inner', on='mesh_id') geo_D_rest.drop(columns='mesh_heading', inplace=True) geo_D_rest = geo_D_rest['geo_id nsamples date mesh_id disease_tag_from_tagger category method'.split()] geo_D_rest.rename(columns={'disease_tag_from_tagger':'mesh_heading'}, inplace=True) # Concatenate them into new geo_df geo_df = pd.concat([geo_C, geo_D_rest]) # Echo geo_df.head() ``` ## Compute category-depth ``` # Construct grand AstraZeneca dataframe az_df = pd.merge(geo_df, mesh_df, on='mesh_id') # Drop extra columns from merge az_df.drop(columns='mesh_heading_y category_y method'.split(), inplace=True) # Rename merge column az_df.rename(columns={'mesh_heading_x':'mesh_heading'}, inplace=True) # Calculate category - Again az_df['category']=az_df['mesh_treenumbers'].str.split('.').str[0].str[0] # Report on propperly classified MeSH-ids category-wise Propper_Tags = list(az_df['category_x']==az_df['category']).count(True) Total_Tags = az_df['category_x'].shape[0] print('Correctly categorized MeSH ids: {:4.1f}%'.format(100*Propper_Tags/Total_Tags)) # Calculate category depth az_df['depth']=az_df['mesh_treenumbers'].str.split('.').str.len() # Drop old-category 
column az_df.drop(columns='category_x'.split(), inplace=True) # Echo az_df.head() ``` ## Filter and Clean geo DataFrame ``` # Construct date filter mask_date = az_df['date']==az_df['date'] # Take all studies # Construct category filter mask_category = ((az_df['category']=='C') | (az_df['category']=='D')) # Drugs and Diseases # Construct mask to filter high-general categories mask_depth = True #((az_df['depth']>=2) & (az_df['depth']>=2)) # Construct mask to avoid specific categories mask_c23 = ~az_df['mesh_treenumbers'].str.startswith('C23', na=False) mask_avoid_cats = mask_c23 # Apply filters filtered_geo_df = pd.DataFrame(az_df[mask_date & mask_category & mask_depth & mask_avoid_cats]) # Eliminate filterning columns filtered_geo_df.drop(columns='date mesh_treenumbers depth'.split(), inplace=True) # Drop NaNs filtered_geo_df.dropna(axis=0, inplace=True) # Drop duplicates filtered_geo_df.drop_duplicates(inplace=True) # Only select summaries with +1 tag tags_by_summary = filtered_geo_df['geo_id mesh_id'.split()].groupby('geo_id').count().reset_index() # Count tags per summary good_summaries = tags_by_summary[tags_by_summary['mesh_id']>1] # Select abstracts with more than one tag clean_geo = pd.merge(filtered_geo_df, good_summaries, on='geo_id') # Inner Join clean_geo = clean_geo.drop(columns='mesh_id_y') # Drop column from inner join clean_geo = clean_geo.rename(columns={'mesh_id_x':'mesh_id'}) # Rename key column # Write info print('Number of Records: ',clean_geo.shape[0]) # Echo clean_geo.head() ``` # Constructing the Disease-Drug Graph ## Construct Nodes ``` # Select only relevant columns nodes = pd.DataFrame(clean_geo['mesh_id category mesh_heading'.split()]) # Drop duplicates nodes.drop_duplicates(inplace=True, keep='first') # Echo nodes.head() ``` ## Construct Edges ``` # Construct all-with-all links inside same geoid-nsample-date record links = pd.merge(clean_geo, clean_geo, on='geo_id nsamples'.split()) # Rename to Source-Target 
links.rename(columns={'mesh_id_x':'source', 'mesh_id_y':'target'}, inplace=True) # Delete self-linkage links.drop(links[links['source']==links['target']].index, inplace=True) # Collapse repetitions while calculating weights edges = links.groupby('source target'.split()).sum().reset_index() # Rename sum(nsamples) to 'weight' edges.rename(columns={'nsamples':'weight'}, inplace=True) # Account for mirror-duplicates edges['weight']/=2 # Normalize weights edges['weight']/=edges['weight'].max() # Head edges.head() ``` ## Construct Graph ``` # Construct Directed Graph dd = nx.from_pandas_edgelist(edges, source='source', target='target', edge_attr='weight', create_using=nx.DiGraph() ) # Transform to undirected graph dd = nx.to_undirected(dd) # Add nodes attributes - Category nx.set_node_attributes(dd, nodes['mesh_id category'.split()].set_index('mesh_id').to_dict()['category'], 'category') # Add nodes attributes - Mesh Heading nx.set_node_attributes(dd, nodes['mesh_id mesh_heading'.split()].set_index('mesh_id').to_dict()['mesh_heading'], 'mesh_heading') # Save as pickle nx.write_gpickle(dd,'Gephi_DD.pkl') # Save to gephi nx.write_gexf(dd,'Gephi_DD.gexf') # Echo info print(' Size (Nodes): ', dd.size()) print(' Order (Edges): ', dd.order()) print(' Graph Density: ', nx.density(dd)) ``` ## Define some useful functions over the tree ``` def get_categories(graph): """ Get a dictionary with the categories of all the nodes """ return nx.get_node_attributes(graph, 'category') def get_mesh_headings(graph): """ Get a dictionary with the mesh-headings of all the nodes """ return nx.get_node_attributes(graph, 'mesh_heading') def get_neighbors(graph, node, cats): """ Get the neighbors of the node such that they have the same/opposite category """ # Define empty lists same = list() oppo = list() # Select only those with same category for neigh in nx.neighbors(dd, node): # Check for same neighbors if cats[neigh]==cats[node]: same.append(neigh) else: oppo.append(neigh) # Return the tuples 
same and oppo return same, oppo def get_top(dictionary_metric, top): """ Find the top-n nodes according to some metric """ # Get the items in the metric dictionary items = list(dictionary_metric.items()) # Sort them out items.sort(reverse=True, key=lambda x: x[1]) # Return the keys return list(map(lambda x:x[0], items[:top])) def get_only(graph, cats, specific_category): """ Select the nodes of the graph where category==category and returns a subgraph """ # Define empty list only_nodes = list() # Cycle through the nodes for node in graph.nodes(): if cats[node]==specific_category: only_nodes.append(node) # Return the subgraph return nx.subgraph(graph, only_nodes) ``` # Recomend drugs for top diseases ['C'] ## Select diseases ``` # Read full graph ee = nx.read_gpickle('Gephi_DD.pkl') # Read categories and labels cats = get_categories(graph=ee) labs = get_mesh_headings(graph=ee) # Choose only disease-nodes diseases = get_only(graph=ee, cats=cats, specific_category='C') ``` ## Runs stats on diseases ``` # Disease eigenvector centrality diseases_eig = nx.eigenvector_centrality(diseases, max_iter=500, weight='weight') # Disease PageRank diseases_pgn = nx.pagerank(diseases, alpha=0.9, weight='weight') # Disease Degree diseases_deg = nx.degree_centrality(diseases) ``` ## Choose n-top disease nodes ``` # Find top-diseases top = 100 top_eig = get_top(dictionary_metric=diseases_eig, top=top) top_pgn = get_top(dictionary_metric=diseases_pgn, top=top) top_deg = get_top(dictionary_metric=diseases_deg, top=top) top_diseases = top_eig ``` ## Measure recommendation-strenght (rs) ``` # Define containers of important recommendations rs = list() # Choose a node for disease in top_diseases: # Get neighbors diseases and neighboring drugs nei_dis, nei_dru = get_neighbors(graph=dd, node=disease, cats=cats) # Get max possible weight ww_max = sum([dd.get_edge_data(disease, nei, 'weight')['weight'] for nei in nei_dis]) # For every neighboring disease for n_disease in nei_dis: # Find all the 
neighboring drugs _ , nei_nei_dru = get_neighbors(graph=dd, node=n_disease, cats=cats) # Chose drugs not in nei_dru not_in_nei_dru = list(set(nei_nei_dru) - set(nei_dru)) # Add them to rs with weight c1 = [disease]*len(not_in_nei_dru) c2 = not_in_nei_dru ww = dd.get_edge_data(disease, n_disease, 'weight')['weight'] c3 = [ww/ww_max]*len(not_in_nei_dru) rs.extend(zip(c1, c2, c3)) # Get into a DF rs = pd.DataFrame(data=rs, columns='Disease Drug Recommendation_Strenght'.split()) # Group by disease-drug pairs and add the weights rs = pd.DataFrame(rs.groupby('Disease Drug'.split()).sum().reset_index()) # Clean duplicates rs = rs.drop_duplicates().reset_index(drop=True) # Add names to mesh_ids rs['Disease_Name'] = [labs[node] for node in rs.Disease] rs['Drug_Name'] = [labs[node] for node in rs.Drug] # Rearrange rs = rs['Disease Disease_Name Drug Drug_Name Recommendation_Strenght'.split()] # Sort by r-strenght rs.sort_values(by='Recommendation_Strenght Disease Drug'.split(), inplace=True, ascending=False) # Reset index rs.reset_index(inplace=True, drop=True) # Echo print('Size of rs: ', rs.shape) rs.head(25) ``` ## Visualization of rs ``` # Choose input cardinality = 1 # Get nodes dis_node = rs['Disease'].iloc[cardinality] dru_node = rs['Drug'].iloc[cardinality] dis_neighs, _ = get_neighbors(graph=ee, node=dis_node, cats=cats) # Gather nodes my_nodes = [dis_node, dru_node] my_nodes.extend(dis_neighs) # Gather categories my_cats={node:cats[node] for node in my_nodes} # Gather labels my_labs={node:labs[node] for node in my_nodes} # Gather positions eps = 3 angle = np.linspace(0, 2*np.pi, len(my_nodes)-2) radius = np.ones(len(my_nodes)-2) x_pos, y_pos = radius*np.cos(angle), radius*np.sin(angle) my_poss=dict() my_poss[dis_node]=(0, +eps) my_poss[dru_node]=(0, -eps) for i in range(len(my_nodes)-2): my_poss[dis_neighs[i]]=(x_pos[i], y_pos[i]) # Construct subgraph ee_sub = ee.subgraph(my_nodes) # Modify original node ee_sub.nodes[dis_node]['category']='X' # Export subgraph to 
gephi nx.write_gexf(ee_sub, 'drug_recommendation_{:07d}.gexf'.format(cardinality)) # Plot fig = plt.figure() axes = fig.add_axes([0.1,0.1,0.8,0.8]) nx.draw_networkx_labels(ee_sub, pos=my_poss, labels=my_labs, font_size=10) nx.draw_networkx(ee_sub, pos=my_poss, node_size=200, node_shape='^', with_labels=False) titulo='Drug recommendation (rank=#{:}, rs={:3.3f})'.format( cardinality, rs['Recommendation_Strenght'].iloc[cardinality]) axes.set_title(titulo) axes.set_xlim(-1.5,1.5) axes.set_ylim(-3.5,3.5) plt.axis('off') plt.savefig('drug_recommendation_{:07d}.png'.format(cardinality), dpi=500) plt.show() ``` # Recomend disease for top drug ['D'] ## Select drugs ``` # Read full graph ee = nx.read_gpickle('Gephi_DD.pkl') # Read categories and labels cats = get_categories(graph=ee) labs = get_mesh_headings(graph=ee) # Choose only drug-nodes drugs = get_only(graph=ee, cats=cats, specific_category='D') ``` ## Runs stats on drugs ``` # Drugs eigenvector centrality drugs_eig = nx.eigenvector_centrality(drugs, max_iter=500, weight='weight') # Drugs PageRank drugs_pgn = nx.pagerank(drugs, alpha=0.9, weight='weight') # Drugs Degree drugs_deg = nx.degree_centrality(drugs) ``` ## Select n-top drugs ``` # Find top-drugs top = 100 top_eig = get_top(dictionary_metric=drugs_eig, top=top) top_pgn = get_top(dictionary_metric=drugs_pgn, top=top) top_deg = get_top(dictionary_metric=drugs_deg, top=top) top_drugs = top_eig ``` ## Compute recommendation-strenght (rs) ``` # Define containers of important recommendations rs = list() # Choose a node for drug in top_drugs: # Get neighbors diseases and neighboring drugs nei_dru, nei_dis = get_neighbors(graph=dd, node=drug, cats=cats) # Get max possible weight ww_max = sum([dd.get_edge_data(drug, nei, 'weight')['weight'] for nei in nei_dru]) # For every neighboring drug for n_drug in nei_dru: # Find all the neighboring diseases _, nei_nei_dis = get_neighbors(graph=dd, node=n_drug, cats=cats) # Chose disease not in nei_dis not_in_nei_dis = 
list(set(nei_nei_dis) - set(nei_dis)) # Add them to rs with weight c1 = [drug]*len(not_in_nei_dis) c2 = not_in_nei_dis ww = dd.get_edge_data(drug, n_drug, 'weight')['weight'] c3 = [ww/ww_max]*len(not_in_nei_dis) rs.extend(zip(c1, c2, c3)) # Get into a DF rs = pd.DataFrame(data=rs, columns='Drug Disease Recommendation_Strenght'.split()) # Group by disease-drug pairs and add the weights rs = pd.DataFrame(rs.groupby('Drug Disease'.split()).sum().reset_index()) # Clean duplicates rs = rs.drop_duplicates().reset_index(drop=True) # Add names to mesh_ids rs['Drug_Name'] = [labs[node] for node in rs.Drug] rs['Disease_Name'] = [labs[node] for node in rs.Disease] # Rearrange rs = rs['Drug Drug_Name Disease Disease_Name Recommendation_Strenght'.split()] # Sort by r-strenght rs.sort_values(by='Recommendation_Strenght Drug Disease'.split(), inplace=True, ascending=False) # Reset index rs.reset_index(inplace=True, drop=True) # Echo print('Size of rs: ', rs.shape) rs.head(25) ``` ## Visualization of rs ``` # Choose input cardinality = 250 # Get nodes dru_node = rs['Drug'].iloc[cardinality] dis_node = rs['Disease'].iloc[cardinality] dru_neighs, _ = get_neighbors(graph=ee, node=dru_node, cats=cats) # Gather nodes my_nodes = [dru_node, dis_node] my_nodes.extend(dru_neighs) # Gather categories my_cats={node:cats[node] for node in my_nodes} # Gather labels my_labs={node:labs[node] for node in my_nodes} # Gather positions eps = 3 angle = np.linspace(0, 2*np.pi, len(my_nodes)-2) radius = np.ones(len(my_nodes)-2) x_pos, y_pos = radius*np.cos(angle), radius*np.sin(angle) my_poss=dict() my_poss[dru_node]=(0, +eps) my_poss[dis_node]=(0, -eps) for i in range(len(my_nodes)-2): my_poss[dru_neighs[i]]=(x_pos[i], y_pos[i]) # Construct subgraph ee_sub = ee.subgraph(my_nodes) # Modify original node ee_sub.nodes[dru_node]['category']='X' # Export subgraph to gephi nx.write_gexf(ee_sub, 'second_use_recommendation_{:07d}.gexf'.format(cardinality)) # Plot fig = plt.figure() axes = 
fig.add_axes([0.1,0.1,0.8,0.8]) nx.draw_networkx_labels(ee_sub, pos=my_poss, labels=my_labs, font_size=10) nx.draw_networkx(ee_sub, pos=my_poss, node_size=200, node_shape='^', with_labels=False) titulo='Drug recommendation (rank=#{:}, rs={:3.3f})'.format( cardinality, rs['Recommendation_Strenght'].iloc[cardinality]) axes.set_title(titulo) axes.set_xlim(-1.5,1.5) axes.set_ylim(-3.5,3.5) plt.axis('off') plt.savefig('second_use_recommendation_{:07d}.png'.format(cardinality)) plt.show() ``` # End
github_jupyter
``` from sqlalchemy import create_engine, select, and_, or_ from sqlalchemy import MetaData, create_engine from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import sessionmaker from sqlalchemy import select, and_, literal, bindparam, exists from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.inspection import inspect import sqlalchemy as sa import pandas as pd import numpy as np from alphamind.api import * from alphamind.data.processing import factor_processing from alphamind.data.standardize import standardize from alphamind.data.winsorize import winsorize_normal from alphamind.data.neutralize import neutralize from alphamind.portfolio.riskmodel import FactorRiskModel from alphamind.portfolio.constraints import LinearConstraints from alphamind.data.processing import factor_processing from alphamind.analysis.factoranalysis import er_portfolio_analysis from ultron.factor.genetic.geneticist.operators import calc_factor import pdb import uqer from uqer import DataAPI #收益率计算 def returns_processing(mkt_df, key='bar30_vwap',horizon=1): price_tb = mkt_df[key].unstack() return_tb = (price_tb.shift(-horizon) / price_tb - 1.0) return_tb[return_tb>10.0] = np.NaN return_tb = return_tb.shift(-1) return_se = return_tb.stack().reindex(mkt_df.index) mkt_df['nxt1_ret'] = return_se #索引为trade_date,code,复制时会根据索引复制 return mkt_df #数据处理 def data_processing(risk_se, mkt_se, factors_se, factor_sets, is_process=False): factors_data = factors_se.reset_index() if is_process: ndiff_field = [column for column in factors_data.columns if column not in factor_sets] alpha_res = [] grouped = factors_data.groupby(['trade_date']) for k, g in grouped: ret_preprocess = factor_processing(g[factor_sets].fillna(0).values, pre_process=[winsorize_normal, standardize]) f = pd.DataFrame(ret_preprocess, columns=factor_sets) for k in ndiff_field: f[k] = g[k].values alpha_res.append(f) factors_data = pd.concat(alpha_res) return 
factors_data.merge(risk_se.reset_index(),on=['trade_date','code']).merge( mkt_se.reset_index(),on=['trade_date','code']) def index_processing(univ_df, risk_df, mkt_df, factors_df, universe_code): univ_se = univ_df[universe_code] univ_se = univ_se[univ_se>0].dropna() risk_se = risk_df.set_index(['trade_date','code']).reindex(univ_se.index) risk_se.dropna(inplace=True) mkt_se = mkt_df.set_index(['trade_date','code']).reindex(univ_se.index) mkt_se.dropna(inplace=True) factors_se = factors_df.set_index(['trade_date','code']).reindex(univ_se.index) return risk_se, mkt_se, factors_se def market_processing(mktbar_raw_df, mkt_df): mktbar_raw_df['vwap'] = mktbar_raw_df['total_value'] / mktbar_raw_df['total_volume'] mktbar_raw_df = mktbar_raw_df.sort_values(['trade_date','code']).reset_index(drop=True) mktbar_df = mktbar_raw_df.set_index(['trade_date', 'code', 'unit'])['vwap'].unstack() mktbar_df.columns = ['bar'+str(x)+'_vwap' for x in mktbar_df.columns] mktbar_df = mktbar_df.reset_index() mkt_df = mkt_df.merge(mktbar_df, on=['trade_date', 'code'], how='left') for price in ['closePrice', 'openPrice', 'lowestPrice', 'highestPrice', 'bar30_vwap', 'bar60_vwap']: mkt_df[price] = mkt_df[price] * mkt_df['accumAdjFactor'] return mkt_df class FactorsDB(object): def __init__(self): DB_URL = 'postgresql+psycopg2://alpha:alpha@180.166.26.82:8889/alpha' self._engine = sa.create_engine(DB_URL) self._session = sessionmaker(bind=self._engine, autocommit=False, autoflush=True) self._base = automap_base() self._base.prepare(self._engine,reflect=True) def _query_statements(self, start_date: str = None, end_date: str = None, universe: str = None, dates=None): Universe = self._base.classes['universe'] return and_( getattr(Universe, universe) == 1, Universe.trade_date.in_(dates) if dates else Universe.trade_date.between(start_date, end_date) ) def universe(self, universe, start_date, end_date): Universe = self._base.classes['universe'] pdb.set_trace() query = select([Universe.trade_date, 
Universe.code]).where( self._query_statements(start_date, end_date, universe) ).order_by(Universe.trade_date, Universe.code) return pd.read_sql(query, self._engine) def fetch_universes(self, universe_codes, start_date, end_date): Universe = self._base.classes['universe'] col_list = [Universe.trade_date, Universe.code] for universe in universe_codes: col_list.append(Universe.__dict__[universe]) query = select(col_list).where( and_( Universe.trade_date >= start_date, Universe.trade_date <= end_date, )) univ_df = pd.read_sql(query, self._engine) univ_df = univ_df.set_index(['trade_date', 'code']).sort_index() return univ_df def fetch_risk_exposure(self, start_date, end_date): RiskExposure = self._base.classes['risk_exposure'] query = select([RiskExposure]).where( and_( RiskExposure.trade_date >= start_date, RiskExposure.trade_date <= end_date, )) return pd.read_sql(query, self._engine) def fetch_risk_cov(self, start_date, end_date, risk_model='short'): RiskCov = self._base.classes['risk_cov_' + risk_model] query = select([RiskCov]).where( and_( RiskCov.trade_date >= start_date, RiskCov.trade_date <= end_date, )) return pd.read_sql(query, self._engine).sort_values('FactorID') def fetch_special_risk(self, start_date, end_date, risk_model='short'): SpecificRisk = self._base.classes['specific_risk_' + risk_model] query = select([SpecificRisk]).where( and_( SpecificRisk.trade_date >= start_date, SpecificRisk.trade_date <= end_date )) return pd.read_sql(query, self._engine) def fetch_market(self, start_date, end_date): Market = self._base.classes['market'] query = select([Market.trade_date, Market.code, Market.accumAdjFactor, Market.closePrice, Market.openPrice, Market.lowestPrice, Market.openPrice, Market.highestPrice, Market.turnoverVol,Market.turnoverValue,Market.chgPct, Market.marketValue]).where( and_( Market.trade_date >= start_date, Market.trade_date <= end_date )) return pd.read_sql(query, self._engine) def fetch_marketraw(self, start_date, end_date): MarketBar = 
self._base.classes['market_bar'] query = select([MarketBar.trade_date, MarketBar.code, MarketBar.bar_time, MarketBar.unit, MarketBar.vwap, MarketBar.close_price, MarketBar.total_volume, MarketBar.total_value]).where( and_( MarketBar.trade_date >= start_date, MarketBar.trade_date <= end_date, or_(and_(MarketBar.unit == 60, MarketBar.bar_time == '10:30'), and_(MarketBar.unit == 30, MarketBar.bar_time == '10:00')) )) return pd.read_sql(query, self._engine) def fetch_index_component(self,index_code_sets, start_date, end_date): IndexComponent = self._base.classes['index_components'] query = select([IndexComponent.trade_date, IndexComponent.indexCode, IndexComponent.code, IndexComponent.secShortName, (IndexComponent.weight / 100.).label('weight')] ).where( and_(IndexComponent.trade_date >= start_date, IndexComponent.trade_date <= end_date, IndexComponent.indexCode.in_(index_code_sets) ) ) return pd.read_sql(query, self._engine) def fetch_technical(self, start_date, end_date): Technical = self._base.classes['technical'] query = select([Technical]).where( and_( Technical.trade_date >= start_date, Technical.trade_date <= end_date, )) return pd.read_sql(query, self._engine) def fetch_institution(self, start_date, end_date): InstitutionHaitong = self._base.classes['institution_haitong'] query = select([InstitutionHaitong]).where( and_( InstitutionHaitong.trade_date >= start_date, InstitutionHaitong.trade_date <= end_date, )) return pd.read_sql(query, self._engine) def fetch_uqer(self, start_date, end_date, keys=[], columns=[]): Uqer = self._base.classes['uqer'] if len(keys) > 0 and len(columns) > 0: cols = [] for key in keys: cols.append(Uqer.__dict__[key]) for col in columns: cols.append(Uqer.__dict__[col]) query = select(cols).where( and_( Uqer.trade_date >= start_date, Uqer.trade_date <= end_date )) else: query = select([Uqer]).where( and_( Uqer.trade_date >= start_date, Uqer.trade_date <= end_date, )) return pd.read_sql(query, self._engine) def fetch_experimental(self, 
start_date, end_date): Experimental = self._base.classes['experimental'] query = select([Experimental]).where( and_( Experimental.trade_date >= start_date, Experimental.trade_date <= end_date, )) return pd.read_sql(query, self._engine) def fetch_industry(self, start_date, end_date, category='sw_adj'): code_name = 'industryID' + str(1) category_name = 'industryName' + str(1) Industry = self._base.classes['industry'] query = select([Industry.trade_date, Industry.code, getattr(Industry, code_name).label('industry_code'), getattr(Industry, category_name).label('industry')]).where( and_( Industry.trade_date >= start_date, Industry.trade_date <= end_date, Industry.industry == '申万行业分类修订' ) ) return pd.read_sql(query, self._engine) new_time = '2020-02-14' #模型开始时候 start_time = '2020-01-12' end_time = '2020-02-14' # 因子数据时间 trade_time = '2020-02-17' # 交易时间 weights_bandwidth=0.01 method='risk_neutral' turn_over_target = 0.6 indexCode = '905' benchmark = 'zz500' uqer_columns = ["DAVOL10","DAVOL20","DAVOL5","DDNBT","DDNCR","DDNSR","DHILO","DVRAT","EMA10","EMA120","EMA20", "EMA5","EMA60","HBETA","HSIGMA","MA10","MA120","MA20","MA5","MA60","MAWVAD","PSY","RSTR12", "RSTR24","VOL10","VOL120","VOL20","VOL240","VOL5","VOL60","WVAD","Skewness","ILLIQUIDITY", "BackwardADJ","MACD","ADTM","ATR14","BIAS10","BIAS20","BIAS5","BIAS60","BollDown","BollUp", "CCI10","CCI20","CCI5","CCI88","KDJ_K","KDJ_D","KDJ_J","ROC6","ROC20","SBM","STM","UpRVI", "DownRVI","RVI","SRMI","ChandeSD","ChandeSU","CMO","DBCD","ARC","OBV","OBV6","OBV20", "TVMA20","TVSTD20","TVSTD6","VDEA","VDIFF","VEMA10","VEMA12","VEMA26","VEMA5", "VMACD","VOSC","VR","VROC12","VROC6","VSTD10","VSTD20","KlingerOscillator","MoneyFlow20", "AD","AD20","AD6","CoppockCurve","ASI","ChaikinOscillator","ChaikinVolatility","EMV14", "EMV6","plusDI","minusDI","ADX","ADXR","Aroon","AroonDown","AroonUp","DEA","DIFF","DDI", "DIZ","MTM","MTMMA","PVT","PVT6","PVT12","TRIX5","TRIX10","UOS","MA10RegressCoeff12", 
"MA10RegressCoeff6","PLRC6","PLRC12","SwingIndex","Ulcer10","Ulcer5","Hurst","ACD6","ACD20", "EMA12","EMA26","APBMA","BBI","BBIC","TEMA10","TEMA5","MA10Close","AR","BR","ARBR","CR20", "MassIndex","BearPower","BullPower","Elder","NVI","PVI","RC12","RC24","JDQS20"] experimental_columns= ["IVR","CHV","vretd_bar15","retd_bar15","vretd_bar5","retd_bar5","abs_vretd","vretd","abs_retd", "retd","ivr_bar30","ivr_bar60","ivr_day","vhhi_std","vskew_std","vvol_std","rhhi_std","rskew_std", "rvol_std","vhhi","vkurt","vskew","vvol","rkurt","rskew","rvol","idl_mtm_20","cvvwap","clv","ccv", "chlv","chlvwap","chlc","ideal_mtm_20","low_mtm_20","high_mtm_20","mix_cap_liq","mix_liq","amh_20", "amh_10","apm_20","apm_10","pure_cap_liq_4","pure_cap_liq_3","pure_cap_liq_2","pure_cap_liq_1","pure_cap_liq_0", "cap_liq","pe_hist60","pure_liq_4","pure_liq_3","pure_liq_2","pure_liq_1","pure_liq_0","liq"] factors_db = FactorsDB() client = uqer.Client(token='07b082b1f42b91987660f0c2c19097bc3b10fa4b12f6af3274f82df930185f04') univ_df = factors_db.fetch_universes(['zz800','zz500'], start_time, end_time) risk_exposure_df = factors_db.fetch_risk_exposure(start_time, end_time) market_df = factors_db.fetch_market(start_time, end_time) mktbar_raw_df = factors_db.fetch_marketraw(start_time, end_time) index_component_df = factors_db.fetch_index_component(['906','905'],start_time, end_time) uqer_df = factors_db.fetch_uqer(start_time, end_time, ['trade_date','code'],uqer_columns) industry_df = factors_db.fetch_industry(start_time, end_time) risk_cov_df = factors_db.fetch_risk_cov(start_time, end_time) specical_risk_df = factors_db.fetch_special_risk(start_time, end_time) #修改名称 rename_dict = {} for col in uqer_columns: rename_dict[col] = 'uqer_' + str(col) uqer_df.rename(columns=rename_dict, inplace=True) experimental_df = factors_db.fetch_experimental(start_time, end_time) #修改名称 rename_dict = {} for col in experimental_columns: rename_dict[col] = 'exper_' + str(col) 
experimental_df.rename(columns=rename_dict, inplace=True)

mkt_df = market_processing(mktbar_raw_df, market_df)
factors_df = uqer_df.merge(experimental_df, on=['trade_date', 'code'])

# Industry data: keep the raw name and one-hot encode the category
industry_df['industry_name'] = industry_df['industry']
industry_se = pd.get_dummies(industry_df, columns=['industry'],
                             prefix="", prefix_sep="").drop('industry_code', axis=1)

# Risk-model data: factor covariance, loadings and idiosyncratic variance
risk_exp_df = risk_exposure_df.merge(specical_risk_df, on=['trade_date', 'code']).dropna()
factor_names = risk_cov_df.Factor.tolist()
new_risk_cov = risk_cov_df.set_index('Factor')
factor_cov = new_risk_cov.loc[factor_names, factor_names] / 10000.
new_risk_exp = risk_exp_df.set_index('code')
factor_loading = new_risk_exp.loc[:, factor_names]
idsync = new_risk_exp['SRISK'] * new_risk_exp['SRISK'] / 10000
# FactorRiskModel(factor_cov, factor_loading, idsync)

risk_exposure_se, mkt_se, factors_se = index_processing(
    univ_df, risk_exposure_df, mkt_df, factors_df, [benchmark])
index_component_se = index_component_df.set_index('indexCode').loc[indexCode].reset_index()
mkt_se = returns_processing(mkt_se, horizon=2)
factor_sets = factors_se.columns
standard_data = data_processing(risk_se=risk_exposure_se,
                                mkt_se=mkt_se,
                                factors_se=factors_se,
                                factor_sets=factor_sets,
                                is_process=True)

expression = """
SecurityMaximumValueHolder(SecurityMaximumValueHolder('exper_pure_cap_liq_4','exper_retd'),'exper_vhhi_std')
"""
expression_name = 'ultron_1581149987701014'
factors_data = calc_factor(expression=expression,
                           total_data=standard_data.copy(),
                           indexs='trade_date',
                           key='code',
                           name=expression_name)

'''
历史回测数据判断方向
'''
# Determine the factor's sign from the mean historical IC
total_data = factors_data.reset_index().merge(
    mkt_se.reset_index(), on=['trade_date', 'code']
).set_index(['trade_date', 'code'])[[expression_name, 'nxt1_ret']].dropna(subset=['nxt1_ret'])
# Information coefficient for each period
ic_serialize = total_data.groupby('trade_date').apply(
    lambda x: np.corrcoef(x[expression_name].values, x.nxt1_ret.values)[0, 1])
direction = np.sign(ic_serialize.mean())
factors_data[expression_name] = direction * factors_data[expression_name] factors_data = factors_data.rename(columns={expression_name:'factor'}) ##数据合并 total_data = pd.merge(factors_data, industry_se, on=['trade_date','code']) total_data = pd.merge(total_data,index_component_se, on=['trade_date','code'], how='left') total_data.fillna({'weight': 0.}, inplace=True) total_data = pd.merge(total_data, mkt_se['nxt1_ret'].reset_index().dropna(subset=['nxt1_ret']), on=['trade_date','code'], how='left') #trade_list = total_data.trade_date.unique() #total_data = total_data.set_index('trade_date').loc[:trade_list[-4]].reset_index() total_data = pd.merge(total_data, risk_exp_df, on=['trade_date','code']) is_in_benchmark = (total_data.weight > 0.).astype(float).values.reshape((-1, 1)) total_data.loc[:, 'benchmark'] = is_in_benchmark total_data.loc[:, 'total'] = np.ones_like(is_in_benchmark) total_data_groups = total_data.set_index('trade_date').loc[end_time].reset_index().groupby('trade_date') ## 构建模型 models = {} alpha_model = ConstLinearModel(features=['factor'], weights={'factor': 1.0}) for ref_date, _ in total_data_groups: models[ref_date] = alpha_model alpha_models = models class RunningSetting(object): def __init__(self, lbound=None, ubound=None, weights_bandwidth=None, rebalance_method='risk_neutral', bounds=None, **kwargs): self.lbound = lbound self.ubound = ubound self.weights_bandwidth = weights_bandwidth self.executor = NaiveExecutor() self.rebalance_method = rebalance_method self.bounds = bounds self.more_opts = kwargs industry_name = 'sw_adj' industry_level = 1 industry_names = industry_list(industry_name, industry_level) constraint_risk = industry_names constraint_risk = risk_styles + industry_names total_risk_names = constraint_risk + ['benchmark', 'total'] effective_industry_names = ['建筑材料','机械设备','家用电器','交通工具', '化工','电器设备','信息服务','建筑装饰','计算机','轻工制造', '交运设备','建筑建材','商业贸易','房地产','汽车','公用事业', '保险','休闲服务','证券','多元金融'] best_industry_names = 
['电子','家用电器','食品饮料','医药生物','通信'] invalid_industry_names = ['农林牧渔','采掘','钢铁','有色金属','纺织服装','商业贸易', '综合','国防军工','传媒','银行'] b_type = [] l_val = [] u_val = [] for name in total_risk_names: if name == 'benchmark': b_type.append(BoundaryType.RELATIVE) l_val.append(0.0) u_val.append(1.0) elif name == 'total': b_type.append(BoundaryType.ABSOLUTE) l_val.append(-0.0) u_val.append(0.0) elif name in effective_industry_names: b_type.append(BoundaryType.ABSOLUTE) l_val.append(-0.025) u_val.append(0.025) elif name in best_industry_names: b_type.append(BoundaryType.ABSOLUTE) l_val.append(-0.065) u_val.append(0.065) elif name in invalid_industry_names: b_type.append(BoundaryType.RELATIVE) l_val.append(-0.005) u_val.append(0.005) else: b_type.append(BoundaryType.ABSOLUTE) l_val.append(-0.3) u_val.append(0.3) bounds = create_box_bounds(total_risk_names, b_type, l_val, u_val) running_setting = RunningSetting(lbound=0., ubound=0.02, weights_bandwidth=weights_bandwidth, rebalance_method=method, bounds=bounds, turn_over_target=turn_over_target) def _create_lu_bounds(running_setting, codes, benchmark_w): codes = np.array(codes) if running_setting.weights_bandwidth: lbound = np.maximum(0., benchmark_w - running_setting.weights_bandwidth) ubound = running_setting.weights_bandwidth + benchmark_w lb = running_setting.lbound ub = running_setting.ubound if lb or ub: if not isinstance(lb, dict): lbound = np.ones_like(benchmark_w) * lb else: lbound = np.zeros_like(benchmark_w) for c in lb: lbound[codes == c] = lb[c] if 'other' in lb: for i, c in enumerate(codes): if c not in lb: lbound[i] = lb['other'] if not isinstance(ub, dict): ubound = np.ones_like(benchmark_w) * ub else: ubound = np.ones_like(benchmark_w) for c in ub: ubound[codes == c] = ub[c] if 'other' in ub: for i, c in enumerate(codes): if c not in ub: ubound[i] = ub['other'] return lbound, ubound previous_pos = pd.DataFrame() positions = pd.DataFrame() target_position = [] for ref_date, this_data in total_data_groups: more_opts = 
running_setting.more_opts new_model = alpha_models[ref_date] codes = this_data.code.values.tolist() if previous_pos.empty: current_position = None else: previous_pos.set_index('code', inplace=True) remained_pos = previous_pos.reindex(codes) remained_pos.fillna(0., inplace=True) current_position = remained_pos.weight.values benchmark_w = this_data.weight.values constraints = LinearConstraints(running_setting.bounds, this_data, benchmark_w) lbound, ubound = _create_lu_bounds(running_setting, codes, benchmark_w) this_data.fillna(0, inplace=True) new_factors = factor_processing(this_data[new_model.features].values, pre_process=[winsorize_normal, standardize]) new_factors = pd.DataFrame(new_factors, columns=['factor'], index=codes) er = new_model.predict(new_factors).astype(float) target_pos, _ = er_portfolio_analysis(er=er,industry=this_data.industry_name.values, dx_return=None,constraints=constraints, detail_analysis=False,benchmark=benchmark_w, method=running_setting.rebalance_method, lbound=lbound,ubound=ubound,current_position=current_position, target_vol=more_opts.get('target_vol'), risk_model=None, turn_over_target=more_opts.get('turn_over_target')) target_pos['code'] = codes target_pos['trade_date'] = ref_date target_position.append(target_pos) previous_pos = target_pos target_position = pd.concat(target_position) target = target_position[target_position.weight.abs() > 0.0015].set_index('trade_date').loc[end_time] target['exchange'] = target['code'].apply(lambda x: 'XSHG' if \ len(str(x))==6 and str(x)[0] in '6' else 'XSHE') target['code'] = target['code'].apply(lambda x: "{:06d}".format(x) + '.XSHG' if \ len(str(x))==6 and str(x)[0] in '6' else "{:06d}".format(x)\ + '.XSHE') def uqer_market(univ, exchange, ref_date): return DataAPI.SHSZBarHistOneDayGet(tradeDate=ref_date,exchangeCD=exchange, ticker=univ,unit="30", startTime=u"10:00",endTime=u"10:00", field=u"",pandas="1") sz_codes = target[target['exchange'] == 'XSHE'].code.values.tolist() sh_codes = 
target[target['exchange'] == 'XSHG'].code.values.tolist() mkt_data = pd.concat([uqer_market(sz_codes, 'XSHE', '20200217'),uqer_market(sh_codes, 'XSHG', '20200217')]) mkt_data['ticker'] = mkt_data['ticker'].apply(lambda x: str(x) + '.XSHG' if \ len(str(x))==6 and str(x)[0] in '6' else str(x)\ + '.XSHE') mkt_data = mkt_data.rename(columns={'ticker':'code'}) industry_data = industry_df.set_index('trade_date').loc[end_time] industry_data['code'] = industry_data['code'].apply( lambda x: "{:06d}".format(x) + '.XSHG' if len(str(x))==6 and str(x)[0] in '6' else "{:06d}".format(x)\ + '.XSHE') trader = mkt_data.merge(target,on=['code'])[['vwap','weight','code','barTime','closePrice','shortNM']].merge( industry_data.reset_index()[['code','industry_name']], on=['code']) trader['cost'] = 10000000 * trader['weight'] trader['count'] = trader['cost'] / trader['vwap'] trader['count'] = (trader['count']/ 100).astype('int') * 100 trader['fee'] = trader['count'] * trader['vwap'] * 0.001 trader['operation'] = '买入' trader['trade_date'] = '2020-02-17' trader['profit'] = (trader['closePrice'] - trader['vwap']) * trader['count'] - trader['fee'] trader = trader[['trade_date','code','shortNM','industry_name','operation','count','vwap','closePrice','fee','barTime','profit']].rename(columns={ 'trade_date':'成交日期','barTime':'成交时间','code':'证券代码','operation':'操作类型', 'count':'成交数量','vwap':'成交价格','fee':'佣金','shortNM':'名称','profit':'收益', 'closePrice':'收盘价','industry_name':'行业' }).to_csv(trade_time + '_' + expression_name + '.csv', encoding='UTF-8') ```
github_jupyter