diff --git "a/3836.jsonl" "b/3836.jsonl" new file mode 100644--- /dev/null +++ "b/3836.jsonl" @@ -0,0 +1,835 @@ +{"seq_id":"25563648708","text":"from PyQt5.QtWidgets import QGraphicsItem, QGraphicsPathItem, QStyleOptionGraphicsItem, QGraphicsSceneHoverEvent\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QColor, QPen, QLinearGradient, QBrush, QPainterPath\n\nfrom lib.nodeeditor.GraficosRutaConexion import EnrutadorRecto, EnrutadorBezier\n\n\nclass GraficosdeConexion(QGraphicsPathItem):\n\tdef __init__(self, linea, parent=None):\n\t\tsuper().__init__(parent)\n\t\t\n\t\tself.linea = linea\n\t\t\n\t\t# Crear instancia de nuestra clase de lineas.\n\t\tself.enrutador = self.definir_clase_de_enrutador()(self)\n\t\t\n\t\t# init de señales.\n\t\tself._ultimo_estado_de_seleccion = False\n\t\tself.hovered = False\n\t\t\n\t\t# init de variables.\n\t\tself.posicion_origen = [0, 0]\n\t\tself.posicion_destino = [200, 100]\n\t\t\n\t\tself.init_assets()\n\t\tself.init_ui()\n\n\tdef init_ui(self):\n\t\tself.setFlag(QGraphicsItem.ItemIsSelectable)\n\t\tself.setAcceptHoverEvents(True)\n\t\tself.setZValue(-1)\n\t\t\n\tdef init_assets(self):\n\t\tself._color = self._color_por_defecto = QColor(\"#001000\")\n\t\tself._color_seleccionado = QColor(\"#00ff00\")\n\t\tself._color_del_efecto_hover = QColor(\"#FF37A6FF\")\n\t\tself._lápiz = QPen(self._color)\n\t\tself._lápiz_para_seleccionado = QPen(self._color_seleccionado)\n\t\tself._lápiz_dibujo = QPen(self._color)\n\t\tself._efecto_hover = QPen(self._color_del_efecto_hover)\n\t\tself._lápiz_dibujo.setStyle(Qt.DashLine)\n\t\tself._lápiz.setWidthF(3.0)\n\t\tself._lápiz_para_seleccionado.setWidthF(3.0)\n\t\tself._lápiz_dibujo.setWidthF(3.0)\n\t\tself._efecto_hover.setWidthF(5.0)\n\t\t\n\tdef crear_calculador_de_la_ruta(self):\n\t\tself.enrutador = self.definir_clase_de_enrutador()(self)\n\t\treturn self.enrutador\n\t\t\n\tdef definir_clase_de_enrutador(self):\n\t\tfrom lib.nodeeditor.Conexiones import bezier, recta\n\t\tif self.linea.tipo_de_conexion == bezier:\n\t\t\treturn EnrutadorBezier\n\t\tif self.linea.tipo_de_conexion == recta:\n\t\t\treturn EnrutadorRecto\n\t\telse:\n\t\t\treturn EnrutadorBezier\n\t\t\n\tdef hacer_no_seleccionable(self):\n\t\tself.setFlag(QGraphicsItem.ItemIsSelectable, False)\n\t\tself.setAcceptHoverEvents(False)\n\t\n\tdef cambiar_color(self, color):\n\t\tself._color = QColor(color) if type(color) == str else color\n\t\tself._lápiz = QPen(self._color)\n\t\tself._lápiz.setWidthF(3.0)\n\n\n\t# Esta método es adición mía.\n\tdef cambiar_color_gradiente(self, color_1, color_2):\n\t\torigen_x = self.linea.zocalo_origen.nodo.obtener_posición_de_zocalo_en_la_escena(self.linea.zocalo_origen)[0]\n\t\torigen_y = self.linea.zocalo_origen.nodo.obtener_posición_de_zocalo_en_la_escena(self.linea.zocalo_origen)[1]\n\t\tfinal_x = self.linea.zocalo_final.nodo.obtener_posición_de_zocalo_en_la_escena(self.linea.zocalo_final)[0]\n\t\tfinal_y = self.linea.zocalo_final.nodo.obtener_posición_de_zocalo_en_la_escena(self.linea.zocalo_final)[1]\n\n\t\tgradiente = QLinearGradient(origen_x, origen_y, final_x, final_y)\n\n\t\tnuevo_color_1 = QColor(color_1) if type(color_1) == str else color_1\n\t\tnuevo_color_2 = QColor(color_2) if type(color_2) == str else color_2\n\n\t\tgradiente.setColorAt(0.49, nuevo_color_1)\n\t\tgradiente.setColorAt(0.51, nuevo_color_2)\n\n\t\tself._color = gradiente\n\t\tself._brush = QBrush(self._color)\n\t\tself._lápiz = QPen(self._brush, 3.0)\n\t\t#self._lápiz.setWidthF(3.0)\n\n\t\n\tdef 
definir_color_desde_el_zocalo(self):\n\t\ttry:\n\t\t\ttipo_zocalo_origen = self.linea.zocalo_origen.tipo_zocalo\n\t\t\t# Esta es la línea original: tipo_zocalo_origen = self.linea.zocalo_inicial_de_dibujado.tipo_zocalo\n\t\t\ttipo_zocalo_final = self.linea.zocalo_final.tipo_zocalo\n\t\t\tif tipo_zocalo_origen != tipo_zocalo_final:\n\t\t\t\treturn self.cambiar_color_gradiente(\n\t\t\t\t\t\tself.linea.zocalo_origen.GraficosZocalos.obtener_color_para_el_zocalo(tipo_zocalo_origen),\n\t\t\t\t\t\tself.linea.zocalo_origen.GraficosZocalos.obtener_color_para_el_zocalo(tipo_zocalo_final)\n\t\t\t\t\t\t)\n\t\t\tself.cambiar_color(self.linea.zocalo_origen.GraficosZocalos.obtener_color_para_el_zocalo(tipo_zocalo_origen))\n\t\texcept AttributeError:\n\t\t\t# ¿Por qué sale un Nonetype como error aquí? Ni idea...\n\t\t\tpass\n\t\t\n\tdef seleccionado(self):\n\t\tself.linea.escena.graficador_de_la_escena.objeto_seleccionado.emit()\n\t\t\n\tdef hacer_selección(self, nuevo_estado=True):\n\t\tself.setSelected(nuevo_estado)\n\t\tself._ultimo_estado_de_seleccion = nuevo_estado\n\t\tif nuevo_estado: self.seleccionado()\n\t\t\n\tdef mouseReleaseEvent(self, event):\n\t\tsuper().mouseReleaseEvent(event)\n\t\tif self._ultimo_estado_de_seleccion != self.isSelected():\n\t\t\tself.linea.escena.restaurar_último_estado_de_selección()\n\t\t\tself._ultimo_estado_de_seleccion = self.isSelected()\n\t\t\tself.seleccionado()\n\t\t\t\n\tdef hoverEnterEvent(self, event: 'QGraphicsSceneHoverEvent') -> None:\n\t\tself.hovered = True\n\t\tself.update()\n\t\t\n\tdef hoverLeaveEvent(self, event: 'QGraphicsSceneHoverEvent') -> None:\n\t\tself.hovered = False\n\t\tself.update()\n\t\t\n\tdef punto_origen(self, x, y):\n\t\tself.posicion_origen = [x, y]\n\t\n\tdef punto_destino(self, x, y):\n\t\tself.posicion_destino = [x, y]\n\t\t\n\tdef paint(self, painter, estilo: QStyleOptionGraphicsItem, widget=None):\n\t\tself.setPath(self.calculo_de_ruta())\n\t\t\n\t\tpainter.setBrush(Qt.NoBrush)\n\t\t\n\t\tif self.hovered and self.linea.zocalo_final is not None:\n\t\t\tpainter.setPen(self._efecto_hover)\n\t\t\tpainter.drawPath(self.path())\n\t\t\n\t\tif self.linea.zocalo_final is None:\n\t\t\tpainter.setPen(self._lápiz_dibujo)\n\t\telse:\n\t\t\tpainter.setPen(self._lápiz if not self.isSelected() else self._lápiz_para_seleccionado)\n\t\tpainter.drawPath(self.path())\n\t\t\n\tdef cruzado_con(self, p1, p2):\n\t\truta_de_recorte = QPainterPath(p1)\n\t\truta_de_recorte.lineTo(p2)\n\t\truta = self.calculo_de_ruta()\n\t\treturn ruta_de_recorte.intersects(ruta)\n\t\t\n\tdef calculo_de_ruta(self):\n\t\t# Para controlar el dibujo de las conexiones entre nodos.\n\t\treturn self.enrutador.calculo_de_ruta()\n\t\n\tdef retangulo_delimitador(self):\n\t\treturn self.shape().boundingRect()\n\t\n\tdef shape(self):\n\t\treturn self.calculo_de_ruta()\n","repo_name":"JhonnyJimenez/NodePlanner","sub_path":"lib/nodeeditor/GraficosdeConexion.py","file_name":"GraficosdeConexion.py","file_ext":"py","file_size_in_byte":5743,"program_lang":"python","lang":"es","doc_type":"code","stars":6,"dataset":"github-code","pt":"19"} +{"seq_id":"70301982763","text":"#!//python\n\n# --------------------------------------------------------------------------------\n# ****IMPORTANT!****\n# Before starting the bot, it is REQUIRED to change the first line of this\n# script (line starting with '#!') to point to the installation of python that\n# contains the required dependencies.\n# --------------------------------------------------------------------------------\n\n# trader.py\n# 
--------------------------------------------------------------------------------\n# This script is triggered by run.py to execute trades.\n# --------------------------------------------------------------------------------\n\n\n# run rebalance.py\n\nimport rebalance\n\n\n# define values used to make trades\n\nexchange = rebalance.exchange\nsell_mkts = rebalance.sell_mkts\nbuy_mkts = rebalance.buy_mkts\n \n \n# execute sell orders\n\nfor symbol in list(sell_mkts.keys()):\n orderbook = exchange.fetch_order_book(symbol)\n exchange.create_market_sell_order(symbol, sell_mkts[symbol])\n \n \n# execute buy orders\n\nfor symbol in list(buy_mkts.keys()):\n orderbook = exchange.fetch_order_book(symbol)\n exchange.create_market_buy_order(symbol, buy_mkts[symbol])","repo_name":"tonyrishwain/HODL-bot","sub_path":"trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"42956984357","text":"#==============================================================================\n## Plot and trim population count \n#==============================================================================\n# Uses modules:\nfrom netCDF4 import Dataset\nfrom matplotlib import pyplot as plt\n##import cartopy\n##import cartopy.crs as ccrs\n##import cartopy.feature as cfeature\nimport numpy as np\n##import Country_mask\n##import pandas as pd\n##from geopy.distance import distance\nimport numpy.ma as ma\nfrom global_land_mask import globe\n#==============================================================================\n\ndef get_background_stats():\n # Get an area in the middle of the Pacific Ocean to use as very clean background\n fname = '/Users/dfinch/Documents/S5P_OFFL_L2__NO2____20191222T223925_20191223T002055_11358_01_010302_20191224T152624.nc'\n dataset = Dataset(fname)\n products = dataset.groups['PRODUCT']\n no2 = products.variables['nitrogendioxide_tropospheric_column']\n clean_no2_subset = no2[0,2400:2500,:] \n mean = np.mean(clean_no2_subset)\n std = np.std(clean_no2_subset)\n dataset.close()\n return mean,std\n\nclean_mean, clean_std = get_background_stats()\n\n\nfname = '/Users/dfinch/Documents/S5P_OFFL_L2__NO2____20181101T155632_20181101T173802_05452_01_010200_20181107T173227.nc'\ndataset = Dataset(fname)\nproducts = dataset.groups['PRODUCT']\nno2 = products.variables['nitrogendioxide_tropospheric_column'][0]\nlat = products.variables['latitude'][0]\nlon = products.variables['longitude'][0]\nqa = products.variables['qa_value'][0]\n\nqa_limit = 0.5 # 0.5 for cloudy scenes. 
0.75 for cloud free\n\nno2_data = no2.filled(np.nan)\nno2_data[qa < qa_limit] = np.nan\n\n##land_mask = globe.is_land(lat,lon)\n##no2_data[~land_mask] = np.nan\n\nhigh_no2 = no2_data - (clean_mean + clean_std)\nhigh_no2[high_no2 <= 0] = np.nan\n\nx_res = 75 # pixels - will make 6 boxes across swath\ny_res = 59 # - will make 55 boxes down swath\n\nfor x in range(0,450,x_res):\n for y in range(118,3127,y_res): # Trim off the ends of the swatch since these will be messed up by the poles anyway\n data_subset = no2_data[y:y+y_res,x:x_res]\n land_mask = globe.is_land(lat[y:y+y_res,x:x_res],lon[y:y+y_res,x:x_res])\n if land_mask.sum() < 10:\n print(land_mask.sum())\n continue\n if np.count_nonzero(~np.isnan(data_subset)) < 200:\n continue\n plt.pcolormesh(lon[y:y+y_res,x:x+x_res],lat[y:y+y_res,x:x+x_res],data_subset)\n plt.savefig('/Users/dfinch/Desktop/temp.png')\n plt.close()\n while True:\n answer = input('Y or N?')\n if answer.lower() in ['n','no','']:\n action = 'disguarding'\n break\n elif answer.lower() in ['y','yes']:\n action = 'keeping'\n break\n else:\n answer = True\n print(action)\n \n## ============================================================================\n## END OF PROGAM\n## ============================================================================\n","repo_name":"dpfinch/PlumeSpotter","sub_path":"NO2_plume_extraction.py","file_name":"NO2_plume_extraction.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"} +{"seq_id":"9899636228","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow.compat.v1 as tf # works with TF-GPU 1.15.0\nfrom tensorflow.python.keras import backend as K\n\n# SILENCE!\nimport logging\nimport warnings\ntf.get_logger().setLevel(logging.ERROR)\nwarnings.filterwarnings(\"ignore\")\n\nimport numpy as np\n\nimport pandas as pd\nfrom collections import defaultdict\nfrom attacks import get_threshold, train_best_attack_model\nfrom utils import get_data, softmax\nimport attack_features, attack_features_tf1\nfrom defenses import memguard\n\n\nimport argparse\n\nparser = argparse.ArgumentParser('Take a trained model and attack it')\nparser.add_argument('--ndata', default=2500, type=int, help='total amount of data to use for training/test')\nparser.add_argument('--dataset', default='cifar10', type=str, help='dataset name to attack')\nparser.add_argument('--target_model_path', default='target_model.tf', type=str, help='path to load final model.')\nparser.add_argument('--source_model_path', default='source_model.tf', type=str, help='path to load fianl model')\nparser.add_argument('--attack_batch_size', default=10, type=int, help='batch size for predictions in attack')\nparser.add_argument('--defense', default='', type=str, help='name of post processing defense to use.')\nparser.add_argument('--attacks', default='bndac', type=str, help='each char is an attack to perform. 
n is confidence-vector.'\n 'd is translation, r is rotation, a is adv-e (boundary distance, label-only),'\n 'b is for baseline gap attack'\n 'g is gaussian noise, w is white-box CW, '\n 'c is combined (translation + distance)')\nparser.add_argument('--r', default=9, type=int, help='r param in rotation attack if used')\nparser.add_argument('--d', default=1, type=int, help='d param in translation attack if used')\nparser.add_argument('--noise_samples', default=5000, type=int, help='number of times to duplicate and noise each sample.')\nparser.add_argument('--max_samples', default=-1, type=int, help='-1 to use ndata, else the max number of samples to process for longer attacks (dist, combined)')\nargs = parser.parse_args()\n\n\nvalid_defenses = ['memguard']\nif args.defense not in valid_defenses:\n raise ValueError(f\"Defense: {args.defense} is not a valid defense. Valid defenses are: {valid_defenses}\")\n\n\ndef attack(args):\n batch = args.attack_batch_size\n target_train_set, target_test_set, source_train_set, source_test_set, input_dim, n_classes = get_data(args.dataset, args.ndata)\n source_labels = np.concatenate([source_train_set[1].flatten(), source_test_set[1].flatten()],\n axis=0)\n target_labels = np.concatenate([target_train_set[1].flatten(), target_test_set[1].flatten()], axis=0)\n print(f\"source_labels: {source_labels.shape}, target_labels: {target_labels.shape}\")\n target_model = tf.keras.models.load_model(args.target_model_path)\n source_model = tf.keras.models.load_model(args.source_model_path)\n\n target_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(),\n metrics=['accuracy'])\n source_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(),\n metrics=['accuracy'])\n _, bstrainacc = source_model.evaluate(*source_train_set, verbose=2)\n _, bstestacc = source_model.evaluate(*source_test_set, verbose=2)\n\n _, bttrainacc = target_model.evaluate(*target_train_set, verbose=2)\n _, bttestacc = target_model.evaluate(*target_test_set, verbose=2)\n # get softmax features for boundary distance and prediction vector attacsk (n, a)\n target_train_ds = tf.data.Dataset.from_tensor_slices(target_train_set).batch(batch)\n target_test_ds = tf.data.Dataset.from_tensor_slices(target_test_set).batch(batch)\n source_train_ds = tf.data.Dataset.from_tensor_slices(source_train_set).batch(batch)\n source_test_ds = tf.data.Dataset.from_tensor_slices(source_test_set).batch(batch)\n\n source_in = source_model.predict(source_train_ds)\n source_out = source_model.predict(source_test_ds)\n\n target_in = target_model.predict(target_train_ds)\n target_out = target_model.predict(target_test_ds)\n\n source_in = softmax(source_in)\n source_out = softmax(source_out)\n target_in = softmax(target_in)\n target_out = softmax(target_out)\n\n if args.defense == 'memguard':\n target_in = memguard(target_in)\n target_out = memguard(target_out)\n\n for attack in list(args.attacks):\n\n target_features = np.concatenate([target_in, target_out], axis=0)\n source_features = np.concatenate([source_in, source_out], axis=0)\n print(f\"source_features: {source_features.shape}, target_features: {target_features.shape}\")\n source_m = np.concatenate([np.ones(len(source_in)),\n np.zeros(len(source_out))], axis=0)\n target_m = np.concatenate([np.ones(len(target_in)),\n np.zeros(len(target_out))], axis=0)\n\n # downsample the dataset since the attack is so slow.\n # make sure the dataset is 
shuffled so that we don't attack the same class all the time!\n if attack == 'b':\n print(f\"Gap attack| source: {50 + (bstrainacc - bstestacc) * 50}, target: {50 + (bttrainacc - bttestacc) * 50}\")\n elif attack == 'a':\n max_samples = args.ndata if args.max_samples == -1 else args.max_samples\n source_m = np.concatenate([np.ones(max_samples),\n np.zeros(max_samples)], axis=0)\n target_m = np.concatenate([np.ones(max_samples),\n np.zeros(max_samples)], axis=0)\n # attack with HipSkipJump (very slow)\n dists_source_in = attack_features_tf1.dists(source_model, source_train_ds, attack=\"HSJ\", max_samples=max_samples,\n input_dim=[None, input_dim[0], input_dim[1], input_dim[2]],\n n_classes=n_classes)\n dists_source_out = attack_features_tf1.dists(source_model, source_test_ds, attack=\"HSJ\", max_samples=max_samples,\n input_dim=[None, input_dim[0], input_dim[1], input_dim[2]],\n n_classes=n_classes)\n dists_source = np.concatenate([dists_source_in, dists_source_out], axis=0)\n dists_target_in = attack_features_tf1.dists(target_model, target_train_ds, attack=\"HSJ\", max_samples=max_samples,\n input_dim=[None, input_dim[0], input_dim[1], input_dim[2]],\n n_classes=n_classes)\n dists_target_out = attack_features_tf1.dists(target_model, target_test_ds, attack=\"HSJ\", max_samples=max_samples,\n input_dim=[None, input_dim[0], input_dim[1], input_dim[2]],\n n_classes=n_classes)\n dists_target = np.concatenate([dists_target_in, dists_target_out], axis=0)\n print(\"threshold on HSJ:\")\n acc2, prec2, _, _ = get_threshold(source_m, dists_source, target_m, dists_target)\n elif attack == 'w':\n max_samples = args.ndata if args.max_samples == -1 else args.max_samples\n source_m = np.concatenate([np.ones(max_samples),\n np.zeros(max_samples)], axis=0)\n target_m = np.concatenate([np.ones(max_samples),\n np.zeros(max_samples)], axis=0)\n # attack with C&W\n dists_source_in = attack_features_tf1.dists(source_model, source_train_ds, attack=\"CW\", max_samples=max_samples,\n input_dim=[None, input_dim[0], input_dim[1], input_dim[2]],\n n_classes=n_classes)\n dists_source_out = attack_features_tf1.dists(source_model, source_test_ds, attack=\"CW\", max_samples=max_samples,\n input_dim=[None, input_dim[0], input_dim[1], input_dim[2]],\n n_classes=n_classes)\n dists_source = np.concatenate([dists_source_in, dists_source_out], axis=0)\n dists_target_in = attack_features_tf1.dists(target_model, target_train_ds, attack=\"CW\", max_samples=max_samples,\n input_dim=[None, input_dim[0], input_dim[1], input_dim[2]],\n n_classes=n_classes)\n dists_target_out = attack_features_tf1.dists(target_model, target_test_ds, attack=\"CW\", max_samples=max_samples,\n input_dim=[None, input_dim[0], input_dim[1], input_dim[2]],\n n_classes=n_classes)\n dists_target = np.concatenate([dists_target_in, dists_target_out], axis=0)\n print(\"threshold on C&W:\")\n acc1, prec1, _, _ = get_threshold(source_m, dists_source, target_m, dists_target)\n\n elif attack == 'n':\n # just look at confidence in predicted label\n conf_source = np.max(source_features, axis=-1)\n conf_target = np.max(target_features, axis=-1)\n print(\"threshold on predicted label:\")\n acc1, prec1, _, _ = get_threshold(source_m, conf_source, target_m, conf_target)\n\n # look at confidence in true label\n conf_source = source_features[range(len(source_features)), source_labels]\n conf_target = target_features[range(len(target_features)), target_labels]\n print(\"threshold on true label:\")\n acc2, prec2, _, _ = get_threshold(source_m, conf_source, target_m, 
conf_target)\n elif attack == 'c':\n max_samples = args.ndata if args.max_samples == -1 else args.max_samples\n aug_kwarg = args.d if attack == 'd' else args.r\n attack_test_set = attack_features_tf1.distance_augmentation_attack(target_model, target_train_set, target_test_set,\n max_samples,\n attack,\n aug_kwarg, args.attack_batch_size,\n input_dim=[None, input_dim[0], input_dim[1], input_dim[2]],\n n_classes=n_classes)\n attack_train_set = attack_features_tf1.distance_augmentation_attack(source_model, source_train_set, source_test_set,\n max_samples,\n attack,\n aug_kwarg, args.attack_batch_size,\n input_dim=[None, input_dim[0], input_dim[1], input_dim[2]],\n n_classes=n_classes)\n vals = train_best_attack_model(attack_train_set, attack_test_set, attack, n_classes=n_classes)\n elif attack == 'd' or attack == 'r':\n max_samples = args.ndata\n aug_kwarg = args.d if attack == 'd' else args.r\n attack_test_set = attack_features.augmentation_attack(target_model, target_train_set, target_test_set, max_samples,\n attack,\n aug_kwarg, args.attack_batch_size)\n attack_train_set = attack_features.augmentation_attack(source_model, source_train_set, source_test_set, max_samples,\n attack,\n aug_kwarg, args.attack_batch_size)\n vals = train_best_attack_model(attack_train_set, attack_test_set, attack, n_classes=n_classes)\n elif attack == 'g':\n max_samples = min(10000, args.ndata)\n if args.dataset in ['adult', 'purchase', 'texas', 'location']:\n sigmas = [1. / input_dim, 2. / input_dim, 3. / input_dim, 5. / input_dim, 10. / input_dim]\n if args.dataset == 'adult':\n sigmas = [20. / input_dim, 30. / input_dim, 50. / input_dim]\n for sigma in sigmas:\n print(f\"threshold on noise robustness, sigma: {sigma}\")\n noise_source_in = attack_features_tf1.binary_rand_robust(source_model, source_train_ds, stddev=sigma * input_dim,\n p=sigma,\n max_samples=max_samples,\n input_dim=[None, input_dim],\n noise_samples=args.noise_samples,\n dataset=args.dataset)\n noise_source_out = attack_features_tf1.binary_rand_robust(source_model, source_test_ds, stddev=sigma * input_dim,\n p=sigma,\n max_samples=max_samples,\n input_dim=[None, input_dim],\n noise_samples=args.noise_samples,\n dataset=args.dataset)\n noise_target_in = attack_features_tf1.binary_rand_robust(target_model, target_train_ds, stddev=sigma * input_dim,\n p=sigma,\n max_samples=max_samples,\n input_dim=[None, input_dim],\n noise_samples=args.noise_samples, dataset=args.dataset)\n noise_target_out = attack_features_tf1.binary_rand_robust(target_model, target_test_ds, stddev=sigma * input_dim,\n p=sigma,\n max_samples=max_samples,\n input_dim=[None, input_dim],\n noise_samples=args.noise_samples, dataset=args.dataset)\n\n for i in range(len(noise_source_in)):\n noise_source = np.concatenate([noise_source_in[i], noise_source_out[i]], axis=0)\n noise_target = np.concatenate([noise_target_in[i], noise_target_out[i]], axis=0)\n get_threshold(source_m, noise_source, target_m, noise_target)\n else:\n for sigma in [0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1]:\n print(f\"threshold on noise robustness, sigma: {sigma}\")\n noise_source_in = attack_features_tf1.continuous_rand_robust(source_model, source_train_ds, stddev=sigma, max_samples=max_samples, input_dim=[None, input_dim], noise_samples=args.noise_samples)\n noise_source_out = attack_features_tf1.continuous_rand_robust(source_model, source_test_ds, stddev=sigma, max_samples=max_samples, input_dim=[None, input_dim], noise_samples=args.noise_samples)\n noise_target_in = 
attack_features_tf1.continuous_rand_robust(target_model, target_train_ds, stddev=sigma, max_samples=max_samples, input_dim=[None, input_dim], noise_samples=args.noise_samples)\n noise_target_out = attack_features_tf1.continuous_rand_robust(target_model, target_test_ds, stddev=sigma, max_samples=max_samples, input_dim=[None, input_dim], noise_samples=args.noise_samples)\n\n for i in range(len(noise_source_in)):\n noise_source = np.concatenate([noise_source_in[i], noise_source_out[i]], axis=0)\n noise_target = np.concatenate([noise_target_in[i], noise_target_out[i]], axis=0)\n get_threshold(source_m, noise_source, target_m, noise_target)\n else:\n raise ValueError(f'attack: {attack} not supported.')\n\nattack(args)","repo_name":"cchoquette/membership-inference","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":16364,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"19"} +{"seq_id":"10208160305","text":"from itertools import chain\nfrom collections import namedtuple\nfrom operator import itemgetter\nfrom typeatlas.util import generic_type\nimport bisect\n\n\nUnion = generic_type('Union')\nOptional = generic_type('Optional')\nTupleOf = generic_type('Tuple')\nIterableOf = generic_type('Iterable')\nIteratorOf = generic_type('Iterator')\n\n\nclass _BlockLikeMeta(type):\n\n def __instancecheck__(cls, instance):\n return (isinstance(instance, tuple) and\n len(instance) >= 2 and (isinstance(x, int) for x in instance[:2]))\n\n def __subclasscheck__(cls, subclass):\n return issubclass(subclass, tuple)\n\n\nclass BlockLike(metaclass=_BlockLikeMeta):\n \"\"\"A tuple of at least two integer arguments, specifying start and\n end.\"\"\"\n\n def __init__(self):\n raise TypeError\n\nBlock = namedtuple('Block', 'start end')\n\n\n_UNSPECIFIED = object()\n\n\ndef iterblock(block: BlockLike) -> IteratorOf[int]:\n \"\"\"Iterate over the indexes (e.g. characters) inside any block\n described by a (start, end) inclusive tuple\"\"\"\n return range(block[0], block[1] + 1)\n\n\ndef iterblocks(blocks: IterableOf[BlockLike]) -> IteratorOf[int]:\n \"\"\"Iterate over the indexes (e.g. characters) inside an iterable\n of blocks described by a (start, end) inclusive tuples.\"\"\"\n return chain.from_iterable(range(block[0], block[1] + 1)\n for block in blocks)\n\n\ndef blocklen(block: BlockLike) -> int:\n \"\"\"Return the length of a block\"\"\"\n return block[1] - block[0] + 1\n\n\ndef blockslen(blocks: IterableOf[BlockLike]) -> int:\n \"\"\"Return the length of a block\"\"\"\n return sum(block[1] - block[0] + 1 for block in blocks)\n\n\ndef toblocks_inorder(values: IterableOf[int]) -> IteratorOf[BlockLike]:\n \"\"\"Like toblocks, but does not sort. This can be an optimisation if\n the values are already sorted, or if you're doing something unusual\n (e.g. 
finding the continuous blocks in an unordered sequence)\"\"\"\n\n start = None\n end = None\n\n for i in values:\n if end != i - 1:\n if start is not None:\n yield Block(start, end)\n start = i\n end = i\n\n if start is not None:\n yield Block(start, end)\n\n\ndef have_intersection(a: BlockLike, b: BlockLike) -> bool:\n \"\"\"Return True if the two blocks have an intersection.\"\"\"\n\n # Verify both implementations work\n\n astart = a[0]\n aend = a[1]\n\n bstart = b[0]\n bend = b[1]\n\n # No need to check if bstart <= aend <= bend, as then either\n # bstart <= astart <= bend or astart <= bstart <= aend.\n return (astart <= bstart <= aend or astart <= bend <= aend or \n bstart <= astart <= bend)\n\n\ndef intersection(a: BlockLike, b: BlockLike,\n default=_UNSPECIFIED) -> Optional[BlockLike]:\n \"\"\"Return the intersection of the two blocks.\"\"\"\n\n start = max(a[0], b[0])\n end = min(a[1], b[1])\n\n if start <= end:\n return Block(start, end)\n\n if default is _UNSPECIFIED:\n raise ValueError(\"blocks have no intersection\")\n return default\n\n\ndef toblocks(values: IterableOf[int]) -> IteratorOf[BlockLike]:\n \"\"\"Turn an unsorted sequence of integers into a sequence of \n Block named tuples describing the start and end (inclusive)\n of the continuous numbers.\"\"\"\n return toblocks_inorder(sorted(values))\n\n\nOverlapTuple = TupleOf[BlockLike, BlockLike, BlockLike]\n\n#def overlapping_blocks(aseq: IterableOf[int],\n# bseq: IterableOf[int]) -> IteratorOf[OverlapTuple]:\n# \"\"\"Return an iterable of overlapping blocks from the aseq and bseq,\n# as a tuple of (a, b, overlap), where a is a block from aseq, b is \n# a block from bseq, and overlap is a block with their overlap.\"\"\"\n#\n# aseq = sorted(aseq)\n# astarts = list(map(itemgetter(0), aseq))\n# aends = list(map(itemgetter(1), aseq))\n# bseq = sorted(bseq)\n#\n# for b in bseq:\n# bstart = b[0]\n# bend = b[1]\n#\n# #i = bisect.bisect_right(astarts, bstart) - 1\n# #j = bisect.bisect_left(aends, bend) + 1\n# \n# i = bisect.bisect_left(bends, astart)\n# j = bisect.bisect_right(bstarts, aend)\n#\n# for a in aseq[i:j]:\n# overlap = intersection(a, b)\n# if overlap is not None:\n# yield a, b, overlap\n\n\ndef overlapping_blocks(aseq: IterableOf[BlockLike],\n bseq: IterableOf[BlockLike],\n asymmetric: bool=False\n ) -> IteratorOf[Union[BlockLike, OverlapTuple]]:\n\n \"\"\"Return an iterable of overlapping blocks from the aseq and bseq,\n as a tuple of (a, b, overlap), where a is a block from aseq, b is \n a block from bseq, and overlap is a block with their overlap.\n \n If asymmetric is True, only return the blocks from aseq that \n overlap with some block from bseq. 
In other words, aseq is a haystack, \n and bseq is a sequence of needles to locate the blocks from the \n haystack.\"\"\"\n\n aseq = sorted(aseq)\n bseq = sorted(bseq)\n bstarts = list(map(itemgetter(0), bseq))\n bends = list(map(itemgetter(1), bseq))\n\n for a in aseq:\n astart = a[0]\n aend = a[1]\n\n ## These are very wrong, but I'm keeping to figure out why I thought\n ## this would ever make sense.\n #i = bisect.bisect_right(bstarts, astart) - 1\n #j = bisect.bisect_left(bends, aend) + 1\n\n i = bisect.bisect_left(bends, astart)\n j = bisect.bisect_right(bstarts, aend)\n\n for b in bseq[i:j]:\n overlap = intersection(a, b, None)\n if overlap is not None:\n if asymmetric:\n yield a\n break\n yield a, b, overlap\n\n\ndef intersect_many(aseq: IterableOf[BlockLike],\n bseq: IterableOf[BlockLike]) -> IteratorOf[BlockLike]:\n \"\"\"Return the intersection of the two sequences of blocks.\n\n This is used to unit test overlap() and overlapping_blocks() in\n one go. For this complex an operation, rangemath may be more useful.\n \"\"\"\n yield from map(itemgetter(2), overlapping_blocks(aseq, bseq))\n\n\ndef union(*block_sequences: IterableOf[BlockLike],\n merge_adjacent: bool=True) -> IteratorOf[BlockLike]:\n \"\"\"Given a few block sequences, yield a single block iterable\n with the union of all blocks. You can also pass a single\n sequence of overlapping blocks, and the function will still work.\n\n\n Adjacent blocks will be merged, unless you pass merge_adjacent=True.\n \"\"\"\n\n blocks = sorted(chain.from_iterable(block_sequences))\n\n adjoff = 1 if merge_adjacent else 0\n\n i = 0\n\n while i < len(blocks):\n start = blocks[i][0]\n end = blocks[i][1]\n\n # Until the next block overlaps, merge with the current one, and\n # skip it in the main loop.\n for j in range(i + 1, len(blocks)):\n\n # To return the promised result, we only need to break when the\n # start of the next is larger than the end, but if also keep\n # going when the start is right next to the end, we'd also merge\n # adjacent blocks.\n if blocks[j][0] > end + adjoff:\n break\n end = max(end, blocks[j][1])\n i += 1\n\n i += 1\n\n yield Block(start, end)\n","repo_name":"milkokr/typeatlas","sub_path":"typeatlas/blockmath.py","file_name":"blockmath.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"32073225","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 7 19:32:53 2019\n\n@author: eileenlu\n\"\"\"\n\nimport pymysql\nimport json\n\nhost='10.93.149.53'\nuser='kg_intern'\npsd='kg_intern'\ndb='kg_intern'\nport=3306\n \ncon=pymysql.connect(host=host,user=user,passwd=psd,db=db,port=port)\n\nprint('mysql connet success')\ni=0;\nf=open(r'E:\\drugs_json_zhongyao1.json','r',encoding='utf-8')\nall_rows_num=len(f.readlines())\nfor lines in f.readlines():\n print(lines)\n if(lines.strip()!=\"\"):\n con=pymysql.connect(host=host,user=user,passwd=psd,db=db,port=port)\n drug_dict=json.loads(lines)\n general_name=\"'\"+drug_dict['general_name'].strip().strip(',')+\"'\"\n brand_name=\"'\"+drug_dict['brand_name'].strip().strip(',')+\"'\"\n approval_number=\"'\"+drug_dict['approval_number'].strip().strip(',')+\"'\"\n standard_code=\"'\"+drug_dict['standard_code'].strip().strip(',')+\"'\"\n company=\"'\"+drug_dict['company'].strip().strip(',')+\"'\"\n brand_factory=\"'\"+drug_dict['brand_factory'].strip().strip(',')+\"'\"\n is_cm=\"'\"+drug_dict['is_cm'].strip().strip(',')+\"'\"\n 
ingredient=\"'\"+drug_dict['ingredient'].strip().strip(',')+\"'\"\n character=\"'\"+drug_dict['character'].strip().strip(',')+\"'\"\n function=\"'\"+drug_dict['function'].strip().strip(',')+\"'\"\n specification=\"'\"+drug_dict['specification'].strip().strip(',')+\"'\"\n dosage_form=\"'\"+drug_dict['dosage_form'].strip().strip(',')+\"'\"\n usage=\"'\"+drug_dict['usage'].strip().strip(',')+\"'\"\n sideaffect=\"'\"+drug_dict['sideaffect'].strip().strip(',')+\"'\"\n contraindication=\"'\"+drug_dict['contraindication'].strip().strip(',')+\"'\"\n announcement=\"'\"+drug_dict['announcement'].strip().strip(',')+\"'\"\n pregnant=\"'\"+drug_dict['pregnant'].strip().strip(',')+\"'\"\n child=\"'\"+drug_dict['child'].strip().strip(',')+\"'\"\n elder=\"'\"+drug_dict['elder'].strip().strip(',')+\"'\"\n interaction=\"'\"+drug_dict['interaction'].strip().strip(',')+\"'\"\n overdose=\"'\"+drug_dict['overdose'].strip().strip(',')+\"'\"\n pharmacological_action=\"'\"+drug_dict['pharmacological_action'].strip().strip(',')+\"'\"\n pharmacokinetics=\"'\"+drug_dict['pharmacokinetics'].strip().strip(',')+\"'\"\n storage=\"'\"+drug_dict['storage'].strip().strip(',')+\"'\"\n packaging=\"'\"+drug_dict['packaging'].strip().strip(',')+\"'\"\n valid_date=\"'\"+drug_dict['valid_date'].strip().strip(',')+\"'\"\n cue=con.cursor()\n sqlstr=\"insert into kg_intern.medicine_new (general_name,brand_name,isbn,standard_code,company,production_address,is_cm,ingredient,character1,function1,specification,dosage_form,usage1,sideaffect,contraindication,announcement,pregnant,child,elder,interaction,overdose,pharmacological_action,pharmacokinetics,storage,packaging,valid_date) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\" % (general_name,brand_name,approval_number,standard_code,company,brand_factory,is_cm,ingredient,character,function,specification,dosage_form,usage,sideaffect,contraindication,announcement,pregnant,child,elder,interaction,overdose,pharmacological_action,pharmacokinetics,storage,packaging,valid_date)\n print(sqlstr)\n #print(sqlstr)\n cue.execute(sqlstr)\n con.commit()\n i=i+1;\n if (i%1==0):\n print('insert success '+str(i)+' records')\n print('now percent '+str(i*1.0/all_rows_num))\n print('insert success: '+general_name)\n\n \ncon.close() \n \n \n ","repo_name":"MenglinLu/Web-crawler","sub_path":"yaopinnet/myspider/to_mysql.py","file_name":"to_mysql.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"19"} +{"seq_id":"41433276727","text":"import sys\nfrom datetime import datetime, timezone\nimport boto3\n\n\naws_region = sys.argv[1]\ninstance_max = int(sys.argv[2])\ncurrent_time = datetime.now().replace(tzinfo=timezone.utc)\ninstance_counter = 0\n\nec2 = boto3.resource('ec2', aws_region)\n\nall_instances = ec2.instances.all()\n\ninstance_list = []\n\nfor instance in all_instances:\n instance_id = instance.id\n instance_type = instance.instance_type\n instance_state = instance.state['Name']\n instance_age = round((current_time - instance.launch_time).total_seconds())\n for tag in instance.tags:\n if tag['Key'] == 'Name':\n instance_name = tag['Value']\n instance_list.append({'ID': instance_id, 'Type': instance_type, 'Age': instance_age, 'State': instance_state, 'Name': instance_name})\n\nsorted_list_of_instances = sorted(instance_list, key = lambda age: age['Age'], reverse=True)\n\nfor instance in sorted_list_of_instances:\n if (instance_counter < instance_max):\n if instance['State'] == 
'running':\n print(instance['ID'], instance['Type'], instance['Age'], instance['Name'])\n instance_counter = instance_counter + 1","repo_name":"manzoon/instancelister","sub_path":"oldest-instances.py","file_name":"oldest-instances.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"70512267565","text":"import re \n\ndef solution(str1, str2):\n\tp = re.compile('[a-zA-Z]{2}')\n\tarr1 =[]\n\tarr2 = []\n\twhile str1 != '':\n\t\tif p.match(str1[0:2]):\n\t\t\tarr1.append(str1[0:2].lower())\n\t\tstr1 = str1[1:]\n\twhile str2 != '':\n\t\tif p.match(str2[0:2]):\n\t\t\tarr2.append(str2[0:2].lower())\n\t\tstr2 = str2[1:]\n\tprint (arr1,arr2)\n\n\tif len(arr1) == 0 and len(arr2) ==0:\n\t\treturn 65536\n\t\n\tkyo = 0\n\thap = 0\n\tfor a in arr1:\n\t\tif a in arr2:\n\t\t\tkyo += 1\n\t\t\tarr2.remove(a)\n\t\thap += 1\n\thap += len(arr2)\n\tanswer = kyo / hap * 65536\n\treturn int(answer)\n\nprint(solution('FRANCE','french'))\nprint(solution('E=M*C^2','e=m*c^2'))","repo_name":"boomkim/algorithm_solutions","sub_path":"python/Kakao_blindtest/problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"74703797483","text":"import cv2\nimport argparse\n# constrói o analisador de argumentos e analisa os argumentos\nap = argparse.ArgumentParser()\nap.add_argument(\"-c\", \"--cascade\", type=str,\n\tdefault=\"haarcascade.xml\")\nargs = vars(ap.parse_args())\n# Carrega o detector de faces\nprint(\"[INFO] loading face detector...\")\ndetector = cv2.CascadeClassifier(args[\"cascade\"])\n# Abre o arquivo de video\ninput_video = cv2.VideoCapture('../assets/arsene.mp4')\n# Checa se foi possivel abrir o arquivo\nif not input_video.isOpened():\n print(\"Error opening video file\")\n exit(1)\n# Como foi possível abrir o video de entrada, vamos agora utilizar \n# essa captura para definir o tamanho do video de saida\nwidth = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH)) # float `width`\nheight = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n# Cria a estrutura do video de saida\n# Com formato e local do arquivo de saida\n# Codec utilizado\n# FPS do video e\n# Tamanho do video\noutput_video = cv2.VideoWriter( './output/out.avi',cv2.VideoWriter_fourcc(*'DIVX'), 24, (width, height))\n# Loop de leitura frame por frame\nwhile True:\n # Le um frame do video e, guarda o resultado da leitura\n # Se nao houver mais frames disponiveis, ret sera falso\n ret, frame = input_video.read()\n # Se nao conseguiu ler o frame, para o laco\n if not ret:\n break\n gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t# realiza a detecção de faces\n rects = detector.detectMultiScale(gray_image, scaleFactor=1.05, minNeighbors=40, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)\n # varre as faces encontradas\n for (x, y, w, h) in rects:\n\t\t# desenha os retângulos nas faces\n cv2.rectangle(frame, (x, y), (x + w, y + h), (200, 128, 0), 2)\n # Exibe o frame\n cv2.imshow('Rolandinho', frame)\n # Escreve o frame no output\n output_video.write(frame)\n # Se o usuario apertar q, encerra o playback\n # O valor utilizado no waiKey define o fps do playback\n if cv2.waitKey(30) & 0xFF == ord('q'):\n break\n# Fecha 
tudo\noutput_video.release()\ninput_video.release()\ncv2.destroyAllWindows()","repo_name":"Pablo-RLV/Inteli-M6-T2","sub_path":"Inteli-M6-T2-Prova2/source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"43142228425","text":"# -*- coding: utf-8 -*-\nimport numpy\nfrom theano import Op, Apply\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams\nfrom theano.tensor.basic import as_tensor_variable\n\nimport theano.tensor as T\n\n\nclass MRG_RandomStreams2(MRG_RandomStreams):\n \"\"\"Module component with similar interface to numpy.random\n (numpy.random.RandomState)\n \"\"\"\n\n def __init__(self, seed=12345, use_cuda=None):\n \"\"\"\n :type seed: int or list of 6 int.\n\n :param seed: a default seed to initialize the random state.\n If a single int is given, it will be replicated 6 times.\n The first 3 values of the seed must all be less than M1 = 2147483647,\n and not all 0; and the last 3 values must all be less than\n M2 = 2147462579, and not all 0.\n\n \"\"\"\n super(MRG_RandomStreams2, self).__init__(seed, use_cuda)\n\n def multinomial(self, size=None, n=1, pvals=None, ndim=None, dtype='int32',\n nstreams=None):\n \"\"\"\n Sample `n` (currently `n` needs to be 1) times from a multinomial\n distribution defined by probabilities pvals.\n\n Example : pvals = [[.98, .01, .01], [.01, .98, .01]] will\n probably result in [[1,0,0],[0,1,0]].\n\n .. note::\n -`size` and `ndim` are only there keep the same signature as other\n uniform, binomial, normal, etc.\n todo : adapt multinomial to take that into account\n\n -Does not do any value checking on pvals, i.e. there is no\n check that the elements are non-negative, less than 1, or\n sum to 1. passing pvals = [[-2., 2.]] will result in\n sampling [[0, 0]]\n \"\"\"\n if pvals is None:\n raise TypeError('You have to specify pvals')\n pvals = as_tensor_variable(pvals)\n if size is not None:\n if any([isinstance(i, int) and i <= 0 for i in size]):\n raise ValueError(\n 'The specified size contains a dimension with value <= 0',\n size)\n\n if n == 1 and pvals.ndim == 1:\n if ndim is not None:\n raise ValueError('Provided an ndim argument to ' +\n 'MRG_RandomStreams2.multinomial, which does not use ' +\n 'the ndim argument.')\n unis = self.uniform(size=size, ndim=2, nstreams=nstreams)\n op = MultinomialFromUniform2(dtype)\n return op(pvals, unis)\n else:\n raise NotImplementedError('MRG_RandomStreams2.multinomial only ' +\n ' implemented with n == 1 and pvals.ndim = 2')\n\n\nclass MultinomialFromUniform2(Op):\n '''Converts samples from a uniform into sample from a multinomial.\n\n This random number generator is faster than the standard one of Theano,\n because it stops earlier and doesn't return matrices of zeros and ones,\n indicating which index was drawn. 
Instead it returns the index of the drawn\n element.\n '''\n def __init__(self, odtype):\n self.odtype = odtype\n\n def __eq__(self, other):\n return type(self) == type(other) and self.odtype == other.odtype\n\n def __hash__(self):\n return hash((type(self), self.odtype))\n\n def __str__(self):\n return '%s{%s}' % (self.__class__.__name__, self.odtype)\n\n def __setstate__(self, dct):\n self.__dict__.update(dct)\n try:\n self.odtype\n except AttributeError:\n self.odtype = 'auto'\n\n def make_node(self, pvals, unis):\n pvals = T.as_tensor_variable(pvals)\n unis = T.as_tensor_variable(unis)\n if pvals.ndim != 1:\n raise NotImplementedError('pvals ndim should be 1', pvals.ndim)\n if unis.ndim != 2:\n raise NotImplementedError('unis ndim should be 2', unis.ndim)\n if self.odtype == 'auto':\n odtype = pvals.dtype\n else:\n odtype = self.odtype\n out = T.tensor(dtype=odtype, broadcastable=unis.type.broadcastable)\n return Apply(self, [pvals, unis], [out])\n\n def grad(self, ins, outgrads):\n pvals, unis = ins\n (gz,) = outgrads\n return [T.zeros_like(x) for x in ins]\n\n# def c_code_cache_version(self):\n# return (5,)\n \n def c_code(self, node, name, ins, outs, sub):\n (pvals, unis) = ins\n (z,) = outs\n\n fail = sub['fail']\n return \"\"\"\n if (PyArray_NDIM(%(pvals)s) != 1)\n {\n PyErr_Format(PyExc_TypeError, \"pvals wrong rank\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(unis)s) != 2)\n {\n PyErr_Format(PyExc_TypeError, \"unis wrong rank\");\n %(fail)s;\n }\n\n if ((NULL == %(z)s)\n || ((PyArray_DIMS(%(z)s))[0] != (PyArray_DIMS(%(unis)s))[0])\n || ((PyArray_DIMS(%(z)s))[1] != (PyArray_DIMS(%(unis)s))[1])\n )\n {\n Py_XDECREF(%(z)s);\n %(z)s = (PyArrayObject*) PyArray_ZEROS(2,\n PyArray_DIMS(%(unis)s),\n type_num_%(z)s,\n 0);\n if (!%(z)s)\n {\n PyErr_SetString(PyExc_MemoryError, \"failed to alloc z output\");\n %(fail)s;\n }\n }\n\n { // NESTED SCOPE\n\n const int nb_outcomes = PyArray_DIMS(%(pvals)s)[0];\n const int nb_rows = PyArray_DIMS(%(unis)s)[0];\n const int nb_cols = PyArray_DIMS(%(unis)s)[1];\n\n //\n // For each multinomial, loop over each possible outcome\n //\n for (int row = 0; row < nb_rows; ++row)\n {\n for (int col = 0; col < nb_cols; ++col) {\n// std::cout << row << 'x' << col << std::endl;\n\n dtype_%(pvals)s cummul = 0.;\n const dtype_%(unis)s* unis_n = (dtype_%(unis)s*)PyArray_GETPTR2(%(unis)s, row, col);\n dtype_%(z)s* z_nm = (dtype_%(z)s*)PyArray_GETPTR2(%(z)s, row, col);\n *z_nm = -1;\n\n// std::cout << \"unis \" << (int)(*unis_n * 100) << std::endl;\n// std::cout << \"z_nm \" << (int)(*z_nm * 100) << std::endl;\n\n for (int m = 0; m < nb_outcomes; ++m)\n {\n const dtype_%(pvals)s* pvals_m = (dtype_%(pvals)s*)PyArray_GETPTR1(%(pvals)s, m);\n cummul += *pvals_m;\n// std::cout << \"cummul \" << (int)(cummul * 100) << std::endl;\n\n if (cummul > *unis_n)\n {\n *z_nm = m;\n// *z_nm = 17;\n break;\n }\n\n }\n\n // If we reached the end, use the last value.\n // If we have a real distribution [0,1], than this should never\n // happen, right? I got a segmentation fault when removing it.\n // 2014-04-08\n // This might happen due to rounding errors. 
2014-05-01\n if (*z_nm == -1) {\n *z_nm = nb_outcomes - 1;\n }\n }\n }\n } // END NESTED SCOPE\n \"\"\" % locals()\n\n def perform(self, node, ins, outs):\n (pvals, unis) = ins\n (z,) = outs\n\n if z[0] is None or z[0].shape != numpy.sum(unis.shape):\n z[0] = numpy.zeros(unis.shape, dtype=node.outputs[0].dtype)\n\n z[0][:, :] = -1\n\n nb_outcomes = pvals.shape[0]\n\n for row in xrange(unis.shape[0]):\n for col in xrange(unis.shape[1]):\n cummul = 0\n unis_n = unis[row, col]\n\n for m in range(nb_outcomes):\n cummul += pvals[m]\n\n if cummul > unis_n:\n z[0][row, col] = m\n# z[0][row, col] = 13\n break\n\n # If we reached the end, use the last value.\n # If we have a real distribution [0,1], than this should never\n # happen, right? I got a segmentation fault when removing it.\n # 2014-04-08\n # This might happen due to rounding errors. 2014-05-01\n if z[0][row, col] == -1:\n z[0][row, col] = nb_outcomes - 1;\n","repo_name":"yinwenpeng/Attentive_Convolution","sub_path":"src/word2embeddings/tools/theano_extensions.py","file_name":"theano_extensions.py","file_ext":"py","file_size_in_byte":8207,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"19"} +{"seq_id":"70622401645","text":"import os\nimport sys\nimport lxml.etree\nfrom mock import Mock, MagicMock, patch\nfrom Bcfg2.Server.Plugins.Cfg.CfgAuthorizedKeysGenerator import *\nimport Bcfg2.Server.Plugins.Cfg.CfgAuthorizedKeysGenerator\n\n# add all parent testsuite directories to sys.path to allow (most)\n# relative imports in python 2.4\npath = os.path.dirname(__file__)\nwhile path != \"/\":\n if os.path.basename(path).lower().startswith(\"test\"):\n sys.path.append(path)\n if os.path.basename(path) == \"testsuite\":\n break\n path = os.path.dirname(path)\nfrom common import *\nfrom TestServer.TestPlugins.TestCfg.Test_init import TestCfgGenerator\nfrom TestServer.TestPlugin.Testhelpers import TestStructFile\n\n\nclass TestCfgAuthorizedKeysGenerator(TestCfgGenerator, TestStructFile):\n test_obj = CfgAuthorizedKeysGenerator\n should_monitor = False\n\n def setUp(self):\n TestCfgGenerator.setUp(self)\n TestStructFile.setUp(self)\n\n @patch(\"Bcfg2.Server.Plugins.Cfg.CfgAuthorizedKeysGenerator.get_cfg\")\n def get_obj(self, mock_get_cfg, name=None, core=None, fam=None):\n if name is None:\n name = self.path\n if core is not None:\n mock_get_cfg.return_value.core = core\n return self.test_obj(name)\n\n @patch(\"Bcfg2.Server.Plugins.Cfg.CfgGenerator.handle_event\")\n @patch(\"Bcfg2.Server.Plugin.helpers.StructFile.HandleEvent\")\n def test_handle_event(self, mock_HandleEvent, mock_handle_event):\n akg = self.get_obj()\n evt = Mock()\n akg.handle_event(evt)\n mock_HandleEvent.assert_called_with(akg, evt)\n mock_handle_event.assert_called_with(akg, evt)\n\n @patch(\"Bcfg2.Server.Plugins.Cfg.CfgAuthorizedKeysGenerator.ClientMetadata\")\n def test_get_data(self, mock_ClientMetadata):\n Bcfg2.Options.setup.sshkeys_category = \"category\"\n akg = self.get_obj()\n akg.XMLMatch = Mock()\n\n def ClientMetadata(host, profile, groups, *args):\n rv = Mock()\n rv.hostname = host\n rv.profile = profile\n rv.groups = groups\n return rv\n\n mock_ClientMetadata.side_effect = ClientMetadata\n\n def build_metadata(host):\n rv = Mock()\n rv.hostname = host\n rv.profile = host\n return rv\n\n akg.core.build_metadata = Mock()\n akg.core.build_metadata.side_effect = build_metadata\n\n def Bind(ent, md):\n ent.text = \"%s %s\" % (md.profile, ent.get(\"name\"))\n return ent\n\n akg.core.Bind = Mock()\n akg.core.Bind.side_effect 
= Bind\n metadata = Mock()\n metadata.profile = \"profile\"\n metadata.group_in_category.return_value = \"profile\"\n entry = lxml.etree.Element(\"Path\", name=\"/root/.ssh/authorized_keys\")\n\n def reset():\n mock_ClientMetadata.reset_mock()\n akg.XMLMatch.reset_mock()\n akg.core.build_metadata.reset_mock()\n akg.core.Bind.reset_mock()\n metadata.reset_mock()\n\n pubkey = \"/home/foo/.ssh/id_rsa.pub\"\n spec = lxml.etree.Element(\"AuthorizedKeys\")\n lxml.etree.SubElement(spec, \"Allow\", attrib={\"from\": pubkey})\n akg.XMLMatch.return_value = spec\n self.assertEqual(akg.get_data(entry, metadata), \"profile %s\" % pubkey)\n akg.XMLMatch.assert_called_with(metadata)\n self.assertEqual(akg.core.Bind.call_args[0][0].get(\"name\"), pubkey)\n self.assertEqual(akg.core.Bind.call_args[0][1], metadata)\n\n reset()\n group = \"somegroup\"\n spec = lxml.etree.Element(\"AuthorizedKeys\")\n lxml.etree.SubElement(spec, \"Allow\",\n attrib={\"from\": pubkey, \"group\": group})\n akg.XMLMatch.return_value = spec\n self.assertEqual(akg.get_data(entry, metadata),\n \"%s %s\" % (group, pubkey))\n akg.XMLMatch.assert_called_with(metadata)\n self.assertItemsEqual(mock_ClientMetadata.call_args[0][2], [group])\n self.assertEqual(akg.core.Bind.call_args[0][0].get(\"name\"), pubkey)\n self.assertIn(group, akg.core.Bind.call_args[0][1].groups)\n\n reset()\n host = \"baz.example.com\"\n spec = lxml.etree.Element(\"AuthorizedKeys\")\n allow = lxml.etree.SubElement(spec, \"Allow\",\n attrib={\"from\": pubkey, \"host\": host})\n lxml.etree.SubElement(allow, \"Option\", name=\"foo\", value=\"foo\")\n lxml.etree.SubElement(allow, \"Option\", name=\"bar\")\n lxml.etree.SubElement(allow, \"Option\", name=\"baz\", value=\"baz=baz\")\n akg.XMLMatch.return_value = spec\n params, actual_host, actual_pubkey = akg.get_data(entry,\n metadata).split()\n self.assertEqual(actual_host, host)\n self.assertEqual(actual_pubkey, pubkey)\n self.assertItemsEqual(params.split(\",\"), [\"foo=foo\", \"bar\",\n \"baz=baz=baz\"])\n akg.XMLMatch.assert_called_with(metadata)\n akg.core.build_metadata.assert_called_with(host)\n self.assertEqual(akg.core.Bind.call_args[0][0].get(\"name\"), pubkey)\n self.assertEqual(akg.core.Bind.call_args[0][1].hostname, host)\n\n reset()\n spec = lxml.etree.Element(\"AuthorizedKeys\")\n text = lxml.etree.SubElement(spec, \"Allow\")\n text.text = \"ssh-rsa publickey /foo/bar\\n\"\n lxml.etree.SubElement(text, \"Option\", name=\"foo\")\n akg.XMLMatch.return_value = spec\n self.assertEqual(akg.get_data(entry, metadata),\n \"foo %s\" % text.text.strip())\n akg.XMLMatch.assert_called_with(metadata)\n self.assertFalse(akg.core.build_metadata.called)\n self.assertFalse(akg.core.Bind.called)\n\n reset()\n lxml.etree.SubElement(spec, \"Allow\", attrib={\"from\": pubkey})\n akg.XMLMatch.return_value = spec\n self.assertItemsEqual(akg.get_data(entry, metadata).splitlines(),\n [\"foo %s\" % text.text.strip(),\n \"profile %s\" % pubkey])\n akg.XMLMatch.assert_called_with(metadata)\n\n reset()\n metadata.group_in_category.return_value = ''\n spec = lxml.etree.Element(\"AuthorizedKeys\")\n lxml.etree.SubElement(spec, \"Allow\", attrib={\"from\": pubkey})\n akg.XMLMatch.return_value = spec\n self.assertEqual(akg.get_data(entry, metadata), '')\n akg.XMLMatch.assert_called_with(metadata)\n self.assertFalse(akg.core.build_metadata.called)\n self.assertFalse(akg.core.Bind.called)\n 
self.assertFalse(mock_ClientMetadata.called)\n","repo_name":"Bcfg2/bcfg2","sub_path":"testsuite/Testsrc/Testlib/TestServer/TestPlugins/TestCfg/TestCfgAuthorizedKeysGenerator.py","file_name":"TestCfgAuthorizedKeysGenerator.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"19"} +{"seq_id":"39932428889","text":"\nRUN_BOTH = False\n\nimport theano\nimport theano.tensor as T\nimport numpy as np\n\nif RUN_BOTH:\n import synkhronos as synk\n synk.fork()\nelse:\n import theano.gpuarray\n theano.gpuarray.use(\"cuda\")\n\nx = T.matrix('x')\ny = T.matrix('y')\nv = T.vector('v')\ns = theano.shared(np.ones([1, 5], dtype='float32'), name='s')\n\nz = T.sum(x.dot(y), axis=0)\n\nif RUN_BOTH:\n f_synk = synk.function([x, y], z, broadcast_inputs=[y])\n g_synk = synk.function([v], updates={s: s + v}, broadcast_inputs=[v])\n synk.distribute()\n\nf_theano = theano.function([x, y], z)\ng_theano = theano.function([v], updates={s: s + v})\n\nx_dat = 0.01 * np.ones([1000, 10], dtype='float32')\nx_dat1 = x_dat[:400]\nx_dat2 = x_dat[400:]\ny_dat = np.ones([10, 5], dtype='float32')\n\n\nr_theano = f_theano(x_dat, y_dat)\nprint(\"result of f_theano: \", r_theano)\nr_t_1 = f_theano(x_dat1, y_dat)\nr_t_2 = f_theano(x_dat2, y_dat)\nassert np.allclose(r_theano, r_t_1 + r_t_2)\nprint(\"\\nbare theano function f: \")\ntheano.printing.debugprint(f_theano)\nif RUN_BOTH:\n r_as_theano = f_synk.as_theano(x_dat, y_dat)\n r_synk = f_synk(x_dat, y_dat)\n r_1 = f_synk.as_theano(x_dat1, y_dat)\n r_2 = f_synk.as_theano(x_dat2, y_dat)\n assert np.allclose(r_theano, r_as_theano)\n assert np.allclose(r_synk, r_theano)\n assert np.allclose(r_1 + r_2, r_theano)\n print(\"\\nsynk-wrapped theano function f: \")\n theano.printing.debugprint(f_synk.theano_function)\n\nprint(\"\\nAll tests on functions 'f' passed.\\n\")\n\nprint(\"s before: \", s.get_value())\ng_theano(r_theano)\ns_after = s.get_value()\nprint(\"s after: \", s_after)\n\nif RUN_BOTH:\n g_synk(r_theano)\n print(\"s local after synk: \", s.get_value())\n s_gather = synk.gather(shared_vars=s, nd_up=1)\n print(\"s gathered: \", s_gather)\n\n\n","repo_name":"astooke/Test","sub_path":"synkhronos/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7531725976","text":"import json\nimport os\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\n# test_dirs = [\"debug_results/test_CBayes-func_schwefel-dim_1-kern_Matern32-runs_5-iters_150-init_samp_50-noise_0.100000-exp_bias_0.250000_03-18-22_12:12:41\",\n# \"debug_results/test_PyRef-impl_skopt-func_schwefel-dim_1-kern_Matern32-runs_5-iters_150-init_samp_50-noise_0.1-exp_bias_0.25_03-18-22_12:25:06\"]\ntest_dirs = None\n\nif test_dirs is None:\n cluster_run_dirs = [\"results/debug/\",\n \"results/debug/\"]\n\nconfigs = []\nresults = []\nrun_results = []\nrun_df = pd.DataFrame()\n\nfor i, d in enumerate(test_dirs):\n with open(os.path.join(d, \"config.json\")) as f:\n configs.append(json.load(f))\n with open(os.path.join(d, \"log.json\")) as f:\n results.append(json.load(f))\n \n n_runs = configs[-1][\"n_runs\"]\n res = results[-1]\n # import pdb; pdb.set_trace()\n model = configs[-1][\"impl\"] #configs[-1][\"tag\"]\n excluded_func = []\n for f, fv in res.items():\n if f in excluded_func: continue\n temp = dict()\n temp[\"Relative Error\"] = np.array(fv[\"relative 
errors\"])[:, -1]\n temp[\"Max Sample Run Times\"] = np.array(fv[\"sample times\"])[:, -5:].mean(axis=1)\n temp[\"Function\"] = [f] * n_runs\n temp[\"Model\"] = [model] * n_runs\n # other config parameters for comparison\n for k in [\"exp_bias\", \"n_iters\", \"n_init_samp\", \"n_init_samples\", \"noise_level\", \"kern\", \"kernel\"]:\n if k not in configs[-1].keys():\n continue\n if k == \"n_init_samples\": # handle special case in naming differences\n temp[\"n_init_samp\"] = [configs[-1][k]] * n_runs\n elif k == \"kernel\": # handle special case in naming differences\n temp[\"kern\"] = [configs[-1][k]] * n_runs\n else:\n temp[k] = [configs[-1][k]] * n_runs\n run_df = run_df.append(pd.DataFrame(temp))\n \n# for i, r in enumerate(results):\n# print(configs[i][\"system\"])\n\nprint(run_df.columns)\n\n# sns.violinplot(x=\"Function\",\n# y=\"Relative Error\",\n# hue=\"Model\",\n# data=run_df\n# )\n\n# sns.boxplot(x=\"Function\",\n# y=\"Relative Error\",\n# hue=\"Model\",\n# data=run_df,\n# # notch=True,\n# # bootstrap=2000\n# )\n# plt.title(\"Relative Errors\")\n\n# print(run_df.loc[:, [\"Model\", \"kern\"]])\n\nfor eb in run_df[\"exp_bias\"].unique():\n for nl in run_df[\"noise_level\"].unique():\n for k in run_df[\"kern\"].unique():\n for iters in run_df[\"n_iters\"].unique():\n for init in run_df[\"n_init_samp\"].unique():\n plt.figure()\n sns.boxplot(x=\"Function\",\n y=\"Relative Error\",\n hue=\"Model\",\n data=run_df,\n # notch=True,\n # bootstrap=2000\n )\n title = f\"Relative Errors for exp_bias: {eb}, noise: {nl},\\nkern: {k}, samples: {iters}, init_samples: {init}\"\n print(title)\n plt.title(title)\n\n# plt.figure()\n# sns.boxplot(x=\"Function\",\n# y=\"Max Sample Run Times\",\n# hue=\"Model\",\n# data=run_df\n# )\n\nplt.show()\n","repo_name":"pennmem/BayesGPc","sub_path":"BO_test_analyses.py","file_name":"BO_test_analyses.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42272833803","text":"import os\nfrom flask import Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import func\nfrom flask_cors import CORS\nimport random\nimport sys\n\nfrom models import setup_db, Question, Category\n\nQUESTIONS_PER_PAGE = 10\n\ndef paginate_questions(request, selection):\n page = request.args.get('page', 1, type=int)\n start = (page - 1) * QUESTIONS_PER_PAGE\n end = start + QUESTIONS_PER_PAGE\n\n questions = [question.format() for question in selection]\n current_questions = questions[start:end]\n return current_questions\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__)\n app.config.from_object('config')\n setup_db(app)\n CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n '''\n @TODO: Set up CORS. Allow '*' for origins. 
\n Delete the sample route after completing the TODOs\n '''\n # CORS Headers \n @app.after_request\n def after_request(response):\n response.headers.add(\n 'Access-Control-Allow-Headers', \n 'Content-Type,Authorization,true'\n )\n response.headers.add(\n 'Access-Control-Allow-Methods', \n 'GET,PUT,POST,DELETE,OPTIONS'\n )\n return response\n\n # REST ENDPOINT GET - All categories\n @app.route('/categories', methods=['GET'])\n def retrieve_categories():\n categories = Category.query.all()\n dict_categories = {\n category.id: category.type for category in categories\n }\n\n return jsonify({\n 'success': True,\n 'categories': dict_categories,\n 'total_categories': len(categories)\n })\n\n # REST ENDPOINT GET - Questions with pagination \n @app.route('/questions', methods=['GET'])\n def retrieve_questions():\n questions = Question.query.order_by(Question.id).all()\n current_questions = paginate_questions(request, questions)\n current_category = request.args.get('categories', None, type=int)\n\n # Get categories\n categories = Category.query.all()\n\n # Format the categories to a dict. A array will not be taken from the FE\n dict_categories = {\n category.id: category.type for category in categories\n }\n\n if len(current_questions) == 0:\n abort(404)\n\n return jsonify({\n 'success': True,\n 'questions': current_questions,\n 'total_questions':len(questions),\n 'categories': dict_categories,\n 'current_category' : None\n })\n\n @app.route('/questions/', methods=['DELETE'])\n def delete_questions(question_id):\n # look if id is exiting\n try:\n question = Question.query.filter(\n Question.id == question_id).one_or_none()\n #if not existing - 404 error\n if question is None:\n abort(404)\n #delete from db\n question.delete()\n\n return jsonify({\n 'success': True,\n 'deleted': question_id,\n })\n\n except Exception as e:\n print(e)\n abort(422)\n\n # REST ENDPOINT: POST New question or search for question\n @app.route('/questions', methods=['POST'])\n def create_question():\n body = request.get_json()\n search_term = body.get('searchTerm', None)\n \n # if search_term is contained in request, then it is a search\n if search_term is not None:\n return search_questions(request, body)\n\n # else a new question is created if all required data is conainted\n new_question = body.get('question', None)\n new_answer = body.get('answer', None)\n new_category = body.get('category', None)\n new_diffculty = body.get('difficulty', None)\n\n try:\n question = Question(question=new_question, \n answer=new_answer, \n category=new_category,\n difficulty=new_diffculty\n )\n question.insert()\n\n return jsonify({\n 'success': True,\n 'created': question.id,\n })\n\n except Exception as e:\n print(e)\n abort(422)\n \n # Search for questions\n def search_questions(request, body):\n\n search_term = body.get('searchTerm', None)\n questions = Question.query.filter(\n Question.question.ilike('%'+search_term+'%')\n ).all()\n current_questions = paginate_questions(request, questions)\n\n # Get categories\n categories = Category.query.all()\n\n # Format the categories to a dict. 
A array will not be taken from the FE\n dict_categories = {\n category.id: category.type for category in categories\n }\n\n return jsonify({\n 'success': True,\n 'questions': current_questions,\n 'total_questions':len(questions),\n 'categories': dict_categories,\n })\n\n # REST ENDPOINT: GET Questions for a specific category\n @app.route('/categories//questions', methods=['GET'])\n def retrieve_questions_by_category(category_id):\n selection = Question.query.filter(\n Question.category==category_id).order_by(Question.id).all()\n current_questions = paginate_questions(request, selection)\n\n # Get categories\n categories = Category.query.all()\n\n # Format the categories to a dict. A array will not be taken from the FE\n dict_categories = {\n category.id: category.type for category in categories\n }\n\n if len(selection) == 0:\n abort(404)\n\n return jsonify({\n 'success': True,\n 'questions': current_questions,\n 'total_questions':len(selection),\n 'categories': dict_categories,\n 'current_category' : category_id\n })\n\n @app.route('/quizzes', methods=['POST'])\n def play_game():\n #get request data\n body = request.get_json()\n quiz_category = body.get('quiz_category')\n previous_questions = body.get('previous_questions')\n\n # 404 when category or previous question message is missing\n if quiz_category is None or previous_questions is None:\n return abort(422)\n\n # Select categories for all categories\n if int(quiz_category['id']) == 0:\n question = Question.query.filter(\n Question.id.notin_(previous_questions)\n ).order_by(func.random()).first()\n # Filter for Category \n else:\n question = Question.query.filter(\n Question.category==int(quiz_category['id'])).filter(\n Question.id.notin_(previous_questions)\n ).order_by(func.random()).first()\n # if no questions is left return \"None\" as question\n if question is None:\n return jsonify({\n 'success': True,\n 'question': None\n })\n # if a question is left, send it\n else: \n return jsonify({\n 'success': True,\n 'question': question.format()\n })\n\n\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n \"success\": False, \n \"error\": 404,\n \"message\": \"resource not found\"\n }), 404\n\n @app.errorhandler(422)\n def unprocessable(error):\n return jsonify({\n \"success\": False, \n \"error\": 422,\n \"message\": \"unprocessable\"\n }), 422\n\n @app.errorhandler(400)\n def bad_request(error):\n return jsonify({\n \"success\": False, \n \"error\": 400,\n \"message\": \"bad request\"\n }), 400\n \n return app\n\n ","repo_name":"maxlcode/Trivia","sub_path":"backend/flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12375530971","text":"'''\n문제\n일직선 상의 마을에 여러 채의 집이 위치해 있다.\n이중에서 특정 위치의 집에 특별히 한 개의 안테나를 설치하기로 결정했다. 효\n율성을 위해 안테나로부터 모든 집까지의 거리의 총 합이 최소가 되도록 설치하려고 한다.\n이 때 안테나는 집이 위치한 곳에만 설치할 수 있고, 논리적으로 동일한 위치에 여러 개의 집이 존재하는 것이 가능하다.\n\n집들의 위치 값이 주어질 때, 안테나를 설치할 위치를 선택하는 프로그램을 작성하시오.\n\n예를 들어 N=4이고, 각 위치가 1, 5, 7, 9일 때를 가정하자.\n\n이 경우 5의 위치에 설치했을 때, 안테나로부터 모든 집까지의 거리의 총 합이 (4+0+2+4)=10으로, 최소가 된다.\n\n입력\n첫째 줄에 집의 수 N이 자연수로 주어진다. (1≤N≤200,000) 둘째 줄에 N채의 집에 위치가 공백을 기준으로 구분되어 1이상 100,000이하의 자연수로 주어진다.\n\n출력\n첫째 줄에 안테나를 설치할 위치의 값을 출력한다. 
단, 안테나를 설치할 수 있는 위치 값으로 여러 개의 값이 도출될 경우 가장 작은 값을 출력한다.\n\n4\n5 1 7 9 -> 5\n\n4 1234\n'''\nimport sys\ninput = sys.stdin.readline\nN = int(input()) # 집의 수 N\npoint = list(map(int, input().split())) # 집에 위치\npoint.sort()\n\nleft = 0\nright = N-1\nmid = (left + right) //2\nans = mid\ntotal = 0\nfor i in range(N):\n total += abs(point[mid] - point[i])\nMIN = total\nwhile left <= right:\n mid = (left + right) //2\n total = 0\n for i in range(N):\n total += abs(point[mid] - point[i])\n if total <= MIN:\n ans = mid\n MIN = total\n right = mid - 1\n else:\n left = mid + 1\n\nprint(point[ans])\n","repo_name":"Dingadung/algorithm-study","sub_path":"coding-test-study/phthon-study/baek-joon/코취/18310-안테나 copy.py","file_name":"18310-안테나 copy.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"71095459242","text":"import csv\nimport os\nimport time\nfrom urllib.request import urlretrieve\n\n\ndef download_url(url, folder):\n \"Download a file to a particular folder.\"\n filename = url.split('_')[-1]\n local_filename, headers = urlretrieve(url, folder + filename)\n print('Downloaded:', local_filename)\n\n\ndef load_image_data(filename):\n \"Load URLs for the images.\"\n image_data = dict()\n with open(filename) as f:\n reader = csv.DictReader(f)\n for entry in reader:\n coco_id = entry['coco_id']\n image_data[coco_id] = entry['coco_url']\n return image_data\n\n\ndef imgid2filename(coco_id):\n \"Generate filename.\"\n return coco_id.zfill(12) + '.jpg'\n\n\ndef download_images(image_data, seconds=1):\n \"Download all the images.\"\n for image, url in image_data.items():\n filename = imgid2filename(image)\n folder = './static/COCO-images/'\n path_to_image = folder + filename\n # Check if file exists. 
If not, download it.\n if not os.path.isfile(path_to_image):\n download_url(url, folder)\n # Be nice to the server :)\n time.sleep(seconds)\n\nif __name__ == '__main__':\n image_data = load_image_data('./data/images.csv')\n download_images(image_data)\n","repo_name":"evanmiltenburg/DIDEC-written","sub_path":"download_images.py","file_name":"download_images.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"19432253699","text":"\"\"\"\nDFS와 BFS\n\nhttps://www.acmicpc.net/problem/1260\n\"\"\"\n\nimport sys\nfrom collections import deque\n\nsys.stdin = open(\"input.txt\", \"rt\")\ninput = sys.stdin.readline\n\n\ndef dfs(v):\n for i in range(1, n + 1):\n if graph[v][i] == 1 and visited[i] == 0:\n visited[i] = 1\n result1.append(i) # 리스트에 추가\n dfs(i)\n\n\ndef bfs():\n queue = deque()\n queue.append(v)\n visited[v] = 1\n while queue:\n tmp = queue.popleft()\n result2.append(tmp) # 리스트에 추가\n for i in range(1, n + 1):\n if graph[tmp][i] == 1 and visited[i] == 0:\n visited[i] = 1\n queue.append(i)\n\n\nif __name__ == \"__main__\":\n n, m, v = map(int, input().split())\n graph = [[0] * (n + 1) for _ in range(n + 1)]\n result1 = []\n result2 = []\n\n for i in range(m):\n v1, v2 = map(int, input().split())\n graph[v1][v2] = 1\n graph[v2][v1] = 1\n\n # DFS\n visited = [0] * (n + 1)\n visited[v] = 1\n result1.append(v)\n dfs(v)\n\n # BFS\n visited = [0] * (n + 1)\n bfs()\n\n print(*result1)\n print(*result2)\n","repo_name":"MinjungShin/algorithm","sub_path":"baekjoon/graph_traversal/01_1260.py","file_name":"01_1260.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"26270397884","text":"import glob\nimport json\nimport logging\nimport os\n\nimport torch\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom torch.utils.data import Dataset, DataLoader\nfrom tqdm import tqdm\nfrom transformers import BertTokenizer\n\nlogging.getLogger(\n \"transformers.tokenization_utils_base\").setLevel(logging.ERROR)\n\n\ndef vectorize_labels(all_labels):\n \"\"\"\n Combine labels across all data and reformat the labels e.g. [[1, 2], ..., [123, 343, 4] ] --> [[0, 1, 1, ... 
0], ...]\n Only used for multi-label classification\n :param all_labels: dict with labels with keys 'train', 'dev', 'test'\n :return: dict of vectorized labels per split and total number of labels\n \"\"\"\n all_set = []\n for split in all_labels:\n for labels in all_labels[split]:\n all_set.extend(labels)\n all_set = list(set(all_set))\n\n mlb = MultiLabelBinarizer()\n mlb.fit([all_set])\n num_labels = len(mlb.classes_)\n\n print(f'Total number of labels: {num_labels}')\n\n result = {}\n for split in all_labels:\n result[split] = mlb.transform(all_labels[split])\n\n return result, num_labels\n\n\ndef prepare_eurlex_data(inverted=True, eur_path='./data/EURLEX57K'):\n \"\"\"\n Load EURLEX-57K dataset and prepare the datasets\n :param inverted: whether to invert the section order or not\n :param eur_path: path to the EURLEX files\n :return: dicts of lists of documents and labels and number of labels\n \"\"\"\n if not os.path.exists(eur_path):\n raise Exception(\"Data path not found: {}\".format(eur_path))\n\n text_set = {'train': [], 'dev': [], 'test': []}\n label_set = {'train': [], 'dev': [], 'test': []}\n\n for split in ['train', 'dev', 'test']:\n file_paths = glob.glob(os.path.join(eur_path, split, '*.json'))\n for file_path in tqdm(sorted(file_paths), leave=False):\n text, tags = read_eurlex_file(file_path, inverted)\n text_set[split].append(text)\n label_set[split].append(tags)\n\n vectorized_labels, num_labels = vectorize_labels(label_set)\n\n return text_set, vectorized_labels, num_labels\n\n\ndef read_eurlex_file(eur_file_path, inverted):\n \"\"\"\n Read each json file and return lists of documents and labels\n :param eur_file_path: path to a json file\n :param inverted: whether to invert the section order or not\n :return: list of documents and labels\n \"\"\"\n tags = []\n with open(eur_file_path) as file:\n data = json.load(file)\n sections = []\n text = ''\n if inverted:\n sections.extend(data['main_body'])\n sections.append(data['recitals'])\n sections.append(data['header'])\n\n else:\n sections.append(data['header'])\n sections.append(data['recitals'])\n sections.extend(data['main_body'])\n\n text = '\\n'.join(sections)\n\n for concept in data['concepts']:\n tags.append(concept)\n\n return text, tags\n\n\nclass EurLexDataset(Dataset):\n def __init__(self, root: str, split: str = \"train\", num_points: int = -1, shuffle: bool = False):\n super().__init__()\n self.name = \"eurlex_inverted\"\n self.tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n eur_path = os.path.join(root, \"EURLEX57K\")\n text_set, label_set, num_labels = prepare_eurlex_data(\n inverted=True, eur_path=eur_path)\n self.text, self.labels = text_set[split], label_set[split]\n self.num_labels = num_labels\n self.shuffle = shuffle\n self.num_points = num_points\n\n del text_set\n del label_set\n \n def __len__(self):\n return len(self.text)\n\n def __getitem__(self, index):\n text = str(self.text[index])\n text = \" \".join(text.split())\n\n inputs = self.tokenizer.encode_plus(\n text=text,\n text_pair=None,\n add_special_tokens=False,\n truncation=False,\n return_token_type_ids=False,\n )\n ids = torch.tensor(inputs['input_ids'])\n mask = torch.tensor(inputs['attention_mask'])\n if self.shuffle:\n # permute the order of tokens\n perm_idx = torch.randperm(ids.size(0))\n ids = ids[perm_idx]\n mask = mask[perm_idx]\n # truncate the sentence\n if self.num_points > 0:\n ids = ids[:self.num_points]\n mask = mask[:self.num_points]\n\n labels = torch.tensor(self.labels[index]).float()\n\n return ids, 
mask, labels\n\n\nif __name__ == \"__main__\":\n train_dataset = EurLexDataset(\"../dataset\", split=\"train\")\n # val_dataset = EurLexDataset(\"../dataset\", split=\"dev\")\n # test_dataset = EurLexDataset(\"../dataset\", split=\"test\")\n def collate_fn(data):\n def merge(sequences):\n lengths = [seq.size(0) for seq in sequences]\n padded_seqs = torch.zeros(len(sequences), max(lengths)).long()\n for i, seq in enumerate(sequences):\n end = lengths[i]\n padded_seqs[i, :end] = seq[:end]\n return padded_seqs\n\n input_ids, input_mask, labels = zip(*data)\n \n input_ids = merge(input_ids)\n input_mask = merge(input_mask)\n labels = torch.stack(labels, dim=0)\n\n return input_ids, input_mask, labels\n dataloader = DataLoader(train_dataset, collate_fn=collate_fn, batch_size=8, shuffle=True, pin_memory=True)\n\n for batch in dataloader:\n print(len(batch))","repo_name":"jeffwillette/umbc","sub_path":"data/eurlex.py","file_name":"eurlex.py","file_ext":"py","file_size_in_byte":5407,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"19"} +{"seq_id":"70313601963","text":"from django.conf.urls import patterns, url, include\nfrom django.utils.translation import ugettext_lazy as _\nfrom wi.forms.storage_image import ConvertImageForm, EditDiskForm\nfrom wi.utils.decorators import user_permission\nfrom wi.utils.views import direct_to_template, simple_generic_id, \\\n form_generic_id\n\nresources_patterns = patterns('wi.views.user.storage_image',\n\n url(r'^disks/$', user_permission(direct_to_template), {'template_name': 'resources/disks.html'}, name='res_disks'),\n url(r'^ajax/disk_table/$', 'res_ajax_get_disk_table', name='res_ajax_get_disk_table'),\n url(r'^ajax/upload_disk_http/$', 'res_ajax_upload_disk_http', name='res_ajax_upload_disk_http'),\n url(r'^ajax/add_disk/$', 'res_ajax_add_disk', name='res_ajax_add_disk'),\n url(r'^ajax/edit_disk/(?P\\d+)/$', user_permission(form_generic_id),\n {'template_name': 'generic/form.html',\n 'success_msg': (lambda desc, data: _('You have successfully edited selected disk.') % {'desc': desc}),\n 'ask_msg': (lambda desc: _('Edit disk data:') % {'desc': desc}),\n 'confirmation': _('Save'),\n 'request_url_post': 'user/storage_image/edit/',\n 'request_url_get': 'user/storage_image/get_by_id/',\n 'request_url_both': {'disk_controllers': 'user/storage_image/get_disk_controllers/'},\n 'id_key': 'storage_image_id',\n 'form_class': EditDiskForm},\n name='res_ajax_edit_disk'),\n url(r'^ajax/delete_disk/(?P\\d+)/$', user_permission(simple_generic_id),\n {'template_name': 'generic/simple.html',\n 'success_msg': (lambda desc: _('You have successfully removed disk volume %(desc)s.') % {'desc': desc}),\n 'ask_msg': (lambda desc: _('Do you really want to delete disk volume %(desc)s?') % {'desc': desc}),\n 'request_url': 'user/storage_image/delete/',\n 'id_key': 'storage_image_id',\n },\n name='res_ajax_delete_disk'),\n\n url(r'^ajax/change_to_image/(?P\\d+)/$', user_permission(form_generic_id),\n {'template_name': 'generic/form.html',\n 'success_msg': (lambda desc, data: _('You have successfully changed disk %(desc)s to a VM image.') % {'desc': desc}),\n 'ask_msg': (lambda desc: _('Do you want to change disk %(desc)s to a VM image?') % {'desc': desc}),\n 'request_url_post': 'user/storage_image/convert_to_system_image/',\n 'request_url_both': {'disk_controllers': 'user/system_image/get_disk_controllers/',\n 'video_devices': 'user/system_image/get_video_devices/',\n 'network_devices': 'user/system_image/get_network_devices/', },\n 
'confirmation': _('Change'),\n 'id_key': 'storage_image_id',\n 'form_class': ConvertImageForm, },\n name='img_ajax_change_to_image'),\n )\n\nurlpatterns = patterns('',\n url(r'^resources/', include(resources_patterns)),\n)\n","repo_name":"cc1-cloud/cc1","sub_path":"src/wi/urls/user/storage_image.py","file_name":"storage_image.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"19"} +{"seq_id":"6344257761","text":"#This is a Python implementation of a basic Pokemon battle system.\n\nimport random\n\n\nclass Pokemon:\n _battle_table = [[1, .5, 2], [2, 1, .5], [.5, 2, 1]]\n\n def __init__(self, name, type): #The code defines a Pokemon class and its attributes such as name, type, and hp points.\n self._name = name\n self._type = type\n self._hp = 25\n\n def hp(self):\n return self._hp\n\n def get_normal_menu(self):\n return \"1. Slam\\n2. Tackle\"\n\n def _normal_move(self, opponent, move):\n if move == \"slam\":\n return self._slam(opponent)\n elif move == \"tackle\":\n return self._tackle(opponent)\n else:\n return \"\"\n\n def _slam(self, opponent):\n dmg = random.randint(1, 8)\n opponent_type = opponent._type\n multiplier = Pokemon._battle_table[self._type][opponent_type]\n total_dmg = int(dmg * multiplier)\n opponent._take_damage(total_dmg)\n return f\"{self._name} uses Slam on {opponent._name}. It does {total_dmg} damage. Effective!\" if multiplier == 2 else f\"{self._name} uses Slam on {opponent._name}. It does {total_dmg} damage. Not effective.\",total_dmg\n\n def _tackle(self, opponent):\n dmg = random.randint(3, 6)\n opponent_type = opponent._type\n multiplier = Pokemon._battle_table[self._type][opponent_type]\n total_dmg = int(dmg * multiplier)\n opponent._take_damage(total_dmg)\n return f\"{self._name} uses Tackle on {opponent._name}. It does {total_dmg} damage. Effective!\" if multiplier == 2 else f\"{self._name} uses Tackle on {opponent._name}. It does {total_dmg} damage. 
Not effective.\",total_dmg\n\n def get_special_menu(self): #The code uses a battle table to calculate the effectiveness of a move against a specific type of Pokemon.\n if type == 0:\n return f'Fire.get_special_menu()'\n elif type == 1:\n return f'Water.get_special_menu()'\n elif type == 2:\n return f'Grass.get_special_menu()'\n \n\n def _special_move(self, opponent, move):\n if type == 0:\n return f'Fire._special_move(self, opponent, move)'\n elif type == 1:\n return f'Water._special_move(self, opponent, move)'\n elif type == 2:\n return f'Grass._special_move(self, opponent, move)'\n \n\n def attack(self, opponent, move_type, move): #It also includes methods to attack an opponent using normal or special moves.\n if move_type == \"normal\":\n return self._normal_move(opponent, move)\n elif move_type == \"special\":\n return self._special_move(opponent, move)\n else:\n return \"\"\n\n def __str__(self):\n return f\"{self._name}: {self._hp}/25\"\n\n def _take_damage(self, dmg): #The damage inflicted on the opponent depends on the type of move and the type of Pokemon being attacked.\n self._hp -= dmg\n if self._hp < 0:\n self._hp = 0\n","repo_name":"Smitlila/The_Pokemon_Game","sub_path":"pokemon.py","file_name":"pokemon.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"12843279018","text":"import bpy\nfrom mathutils import Vector, Euler, Matrix\nimport random\n\nimport blender.render.metaballs\nimport procedural.miscellaneous.detailshandler\n\nimport imp\nimp.reload(blender.render.metaballs)\nimp.reload(procedural.miscellaneous.detailshandler)\n\nfrom blender.render.metaballs import MetaballGenerator\nfrom procedural.miscellaneous.detailshandler import DetailsHandler\n\nMAX_DRAWABLE_VERTS = 5000\nMAX_SKINNABLE_VERTS = 1000\n\nclass TurtleRenderer:\n \"\"\"\n Given a Turtle representation, renders the result as a 3D mesh.\n \"\"\"\n\n def __init__(self, seed = None, verbose = False):\n self.verbose = verbose\n\n self.showSkin = False\n self.fastSkin = True\n self.simplify = False\n self.limitDrawing = True # If True, the skinning and drawing will be limited based on the number of vertices, to avoid hangs.\n\n self.showLeaves = False\n\n self.showCanopy = False\n self.metaBallRadius = 1.5\n\n self.leaf_detail_index = 0\n self.bulb_detail_index = 0\n self.flower_detail_index = 0\n self.fruit_detail_index = 0\n\n self.leaf_material_index = 0\n self.trunk_material_index = 0\n\n self.details_scale = 1\n self.randomize_details = False # Will randomly choose from the list of details\n\n self.rnd = random.Random()\n if seed is not None: self.rnd.seed(seed)\n\n self.nInstances = 1\n\n def loadParameters(self,turtleParameters):\n self.details_scale = turtleParameters.details_scale\n self.showCanopy = turtleParameters.use_canopy\n self.trunk_material_index = turtleParameters.trunk_material_choice\n self.leaf_material_index = turtleParameters.leaf_material_choice\n self.leaf_detail_index = turtleParameters.leaf_choice\n self.bulb_detail_index = turtleParameters.bulb_choice\n self.flower_detail_index = turtleParameters.flower_choice\n self.fruit_detail_index = turtleParameters.fruit_choice\n\n def drawMesh(self, context, turtleResult, origin_offset = (0,0,0), suffix = \"\", randomSeed = None, multipleInstances = False,\n overridenContext = None):\n if overridenContext is None: scene = context.scene\n else: scene = overridenContext['scene']\n\n verts = turtleResult.verts\n edges = turtleResult.edges\n 
radii = turtleResult.radii\n leaves = turtleResult.leaves\n bulbs = turtleResult.bulbs\n flowers = turtleResult.flowers\n fruits = turtleResult.fruits\n instance_index = turtleResult.instance_index\n idd = turtleResult.idd\n if randomSeed: self.rnd.seed(randomSeed+instance_index)\n\n if len(verts) == 0:\n if overridenContext is None: context.area.header_text_set(\"WARNING: Mesh drawing aborted due no vertices supplied.\")\n else: context['area'].header_text_set(\"WARNING: Mesh drawing aborted due no vertices supplied.\")\n print(\"WARNING: Mesh drawing aborted due no vertices supplied.\")\n return\n\n if self.limitDrawing and len(verts) > MAX_DRAWABLE_VERTS:\n if overridenContext is None: context.area.header_text_set(\"WARNING: Mesh drawing aborted due to huge number of vertices: \" + str(len(verts)))\n else: context['area'].header_text_set(\"WARNING: Mesh drawing aborted due to huge number of vertices: \" + str(len(verts)))\n print(\"WARNING: Mesh drawing aborted due to huge number of vertices: \" + str(len(verts)))\n return\n\n if suffix == \"\": name = str(idd) + \"_\" + str(instance_index)\n else: name = suffix\n\n if multipleInstances: origin_offset = (origin_offset[0]-((int)(instance_index/5))*1+self.rnd.uniform(-0.5,0.5)*1.0,\n origin_offset[1]+(instance_index%5)*1+self.rnd.uniform(-0.5,0.5)*1.0,\n origin_offset[2])\n\n me = bpy.data.meshes.new('PlantFormMesh' + name)\n ob = bpy.data.objects.new(\"PlantForm\" + name, me)\n me.from_pydata(verts, edges, [])\n me.update()\n ob.location = scene.cursor_location + Vector(origin_offset)\n scene.objects.link(ob)\n\n # Add details\n #print(\"Turtle renderer: Show detaiils\")\n if self.showLeaves:\n if self.randomize_details: leaf_name = DetailsHandler.LEAF_NAMES[random.randint(0,len(DetailsHandler.LEAF_NAMES)-1)]\n else: leaf_name = DetailsHandler.LEAF_NAMES[self.leaf_detail_index]\n self.addDetails(\"leaf\",leaves,leaf_name,self.details_scale,ob)\n\n if self.randomize_details: bulb_name = DetailsHandler.BULB_NAMES[random.randint(0,len(DetailsHandler.BULB_NAMES)-1)]\n else: bulb_name = DetailsHandler.BULB_NAMES[self.bulb_detail_index]\n self.addDetails(\"bulb\",bulbs,bulb_name,self.details_scale,ob)\n\n if self.randomize_details: flower_name = DetailsHandler.FLOWER_NAMES[random.randint(0,len(DetailsHandler.FLOWER_NAMES)-1)]\n else: flower_name = DetailsHandler.FLOWER_NAMES[self.flower_detail_index]\n self.addDetails(\"flower\",flowers,flower_name,self.details_scale,ob)\n\n if self.randomize_details: fruit_name = DetailsHandler.FRUIT_NAMES[random.randint(0,len(DetailsHandler.FRUIT_NAMES)-1)]\n else: fruit_name = DetailsHandler.FRUIT_NAMES[self.fruit_detail_index]\n self.addDetails(\"fruit\",fruits,fruit_name,self.details_scale,ob)\n\n # Metaballs around leaves to simulate canopy\n #print(\"Turtle renderer: Show canopy\")\n if self.showCanopy:\n if len(leaves) > 0:\n mballGenerator = MetaballGenerator()\n for leafQuad in leaves:\n pos = self.toBlenderVector(leafQuad.pos)\n mball = mballGenerator.addMetaball()\n mball.co = pos\n mball.radius = self.metaBallRadius\n mballGenerator.mballCurrentObject.parent = ob # The current mball element's object is in this variable\n\n scene.objects.active = mballGenerator.mballCurrentObject # Select the object as active\n scene.objects.active.select = True # Must force selection\n bpy.ops.object.convert(target='MESH', keep_original=False)\n\n if len(bpy.context.object.data.vertices) > 0:\n # Set the foliage material\n foliage_material_name = \"\"\n if foliage_material_name != \"\":\n mat = 
bpy.data.materials[foliage_material_name]\n scene.objects.active.data.materials.append(mat)\n\n # We also unwrap the resulting mesh for texture mapping\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.faces_shade_flat() # Flat shaded\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.uv.unwrap()\n bpy.ops.object.mode_set(mode='OBJECT')\n\n if self.showSkin:\n if self.limitDrawing and len(verts) > MAX_SKINNABLE_VERTS:\n if overridenContext is None: context.area.header_text_set(\"WARNING: Mesh skinning blocked due to large number of vertices: \" + str(len(verts)))\n else: context['area'].header_text_set(\"WARNING: Mesh skinning blocked due to large number of vertices: \" + str(len(verts)))\n print(\"WARNING: Mesh skinning blocked due to large number of vertices: \" + str(len(verts)))\n return\n\n else:\n\n if self.verbose: print(\"Turtle renderer: Applying skin modifier\")\n current_modifier_index = 0\n\n # Apply a skin modifier\n # Note that this will create A LOT of vertices (simplification will help later)\n scene.objects.active = ob # Select the object as active\n bpy.ops.object.modifier_add(type='SKIN')\n scene.objects.active.modifiers[current_modifier_index].use_smooth_shade=True\n current_modifier_index+=1\n\n verts = scene.objects.active.data.vertices\n skinverts = scene.objects.active.data.skin_vertices[0].data\n\n # Set the radius of the branches\n if self.verbose: print(\"Turtle renderer: Setting radius\")\n for i,v in enumerate(skinverts):\n r = radii[i]\n\n \"\"\"\n height = verts[i].co[2]\n if height > 0: r = 1.0/height\n else: r = 1.0\n if (r < 0.1): r = 0.1\n if (r > 1.0): r = 1.0\n #print(r)\n \"\"\"\n\n v.radius = [r* 0.01, r* 0.01] # r = 1 -> small radius\n\n if not self.fastSkin:\n if self.verbose: print(\"Turtle renderer: Making it nicer\")\n # Additional modifications to make the mesh nicer\n\n # Add a SubSurf modifier to obtain a smoother mesh\n bpy.ops.object.modifier_add(type='SUBSURF')\n scene.objects.active.modifiers[current_modifier_index].levels = 1\n current_modifier_index+=1\n\n # Edge Split makes for a more cartoony appearance\n bpy.ops.object.modifier_add(type='EDGE_SPLIT')\n current_modifier_index+=1\n #bpy.ops.object.modifier_apply(modifier=\"EdgeSplit\"); current_modifier_index-=1\n\n # Simplification\n #if not self.fastSkin:\n if self.simplify:\n if self.verbose: print(\"Turtle renderer: Simplification\")\n bpy.ops.object.modifier_add(type='DECIMATE')\n scene.objects.active.modifiers[current_modifier_index].ratio=0.1\n scene.objects.active.modifiers[current_modifier_index].decimate_type = 'DISSOLVE' # We use planar decimation, more cartoonish\n scene.objects.active.modifiers[current_modifier_index].angle_limit = 5.0/180.0*3.14159 # 5 degrees is enough\n current_modifier_index+=1\n #bpy.ops.object.modifier_apply(modifier=\"Decimate\"); current_modifier_index-=1\n\n # We will apply modifier to obtain the final mesh.\n self.applyModifiers = True\n if self.applyModifiers:\n if self.verbose: print(\"Turtle renderer: Apply modifiers\")\n\n bpy.ops.object.mode_set(mode='OBJECT')\n bpy.ops.object.modifier_apply(modifier=\"Skin\")\n if not self.fastSkin: bpy.ops.object.modifier_apply(modifier=\"Subsurf\")\n if not self.fastSkin: bpy.ops.object.modifier_apply(modifier=\"EdgeSplit\")\n if self.simplify: bpy.ops.object.modifier_apply(modifier=\"Decimate\")\n\n if self.simplify:\n # We can also simplify further by removing doubles\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all()\n bpy.ops.mesh.remove_doubles() # We use the default threshold 
(0.0001). We can put here: threshold=0.01 if we prefer.\n bpy.ops.mesh.normals_make_consistent(inside=False)\n bpy.ops.object.mode_set(mode='OBJECT')\n\n\n if self.verbose: print(\"Turtle renderer: Trunk material\")\n # Set the trunk material\n trunk_material_name = DetailsHandler.nameOfMaterial(self.trunk_material_index)\n mat = bpy.data.materials[trunk_material_name]\n scene.objects.active.data.materials.append(mat)\n\n scene.objects.active = scene.objects.active #mballGenerator.mballCurrentObject # Select the object as active\n scene.objects.active.select = True # Must force selection\n\n \"\"\"\n if self.applyModifiers:\n # We also unwrap the resulting mesh for texture mapping\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.faces_shade_flat() # Flat shaded\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.uv.unwrap()\n bpy.ops.object.mode_set(mode='OBJECT')\n \"\"\"\n\n\n def addDetails(self,type_name,details_list,mesh_name,mesh_scale,parent):\n context = bpy.context\n\n qv = ((0.5,0,0),(0.5,1,0),(-0.5,1,0),(-0.5,0,0))\n if mesh_name is \"\" or (not mesh_name in bpy.data.objects):\n q = bpy.data.meshes.new('PlantForm-'+type_name)\n q.from_pydata(qv, [], [(0,1,2,3)])\n q.update()\n q.uv_textures.new()\n else:\n q = bpy.data.objects[mesh_name].data # .data will get the mesh linked to the object\n\n for tmpQuad in details_list:\n obj,base = self.add_obj(q, context)\n eul = self.toBlenderEuler(tmpQuad.eul)\n pos = self.toBlenderVector(tmpQuad.pos)\n r = eul.to_matrix()\n r.resize_4x4()\n obj.matrix_world = Matrix.Translation(pos)*r\n obj.parent = parent\n obj.scale = (mesh_scale,mesh_scale,mesh_scale)\n\n @staticmethod\n def add_obj(obdata, context):\n scene = context.scene\n obj_new = bpy.data.objects.new(obdata.name, obdata)\n base = scene.objects.link(obj_new)\n return obj_new,base\n\n def toBlenderVector(self, myPos):\n return Vector((myPos.x,myPos.y,myPos.z))\n\n def toBlenderEuler(self, myEul):\n return Euler((myEul.x,myEul.y,myEul.z))\n\n","repo_name":"CatmanIta/x-PlantForm-IGA","sub_path":"blender/render/turtlerenderer.py","file_name":"turtlerenderer.py","file_ext":"py","file_size_in_byte":13768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"18939402749","text":"#!/usr/bin/python3\ndef weight_average(my_list=[]):\n if not isinstance(my_list, list) or len(my_list) == 0:\n return 0\n avrg = 0\n som = 0\n for couple in my_list:\n avrg += (couple[0] * couple[1])\n som += couple[1]\n return (avrg / som)\n","repo_name":"ahmedzitouni586/holbertonschool-higher_level_programming","sub_path":"0x04-python-more_data_structures/100-weight_average.py","file_name":"100-weight_average.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"3013104330","text":"\"\"\"\nBackend for the GUI app:\n - Converting the Image colors \n\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport math\nimport skimage\n\nMIN_BRIGHTNESS= 255\nMIN_contrast = 127\n\nMAX_BRIGHTNESS = 510\nMAX_contrast = 254\n\ndef read_img(path):\n \"\"\"return the read image\"\"\"\n\n return cv2.imread(path)\n\ndef test(img):\n \"\"\"Testing the img\"\"\"\n\n cv2.imwrite('wtf.png', img)\n cv2.imshow('test', img)\n\n\ndef conv_grey(img_path):\n \"\"\"Convert Image from default(RGB) to grey scale\"\"\"\n\n image = read_img(img_path)\n grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n return grey \n\ndef controller(img, brightness=255,\n contrast=127):\n \n brightness = 
int((brightness - 0) * (255 - (-255)) / (MAX_BRIGHTNESS - 0) + (-255))\n \n contrast = int((contrast - 0) * (127 - (-127)) / (MAX_contrast - 0) + (-127))\n \n if brightness != 0:\n \n if brightness > 0:\n \n shadow = brightness\n max = 255\n \n else:\n shadow = 0\n max = 255 + brightness\n \n al_pha = (max - shadow) / 255\n ga_mma = shadow\n \n # The function addWeighted calculates\n # the weighted sum of two arrays\n cal = cv2.addWeighted(img, al_pha, img, 0, ga_mma)\n \n else:\n cal = img\n \n if contrast != 0:\n Alpha = float(131 * (contrast + 127)) / (127 * (131 - contrast))\n Gamma = 127 * (1 - Alpha)\n \n # The function addWeighted calculates\n # the weighted sum of two arrays\n cal = cv2.addWeighted(cal, Alpha, cal, 0, Gamma)\n \n # # putText renders the specified text string in the image.\n # cv2.putText(cal, 'B:{},C:{}'.format(brightness,\n # contrast), (10, 30),\n # cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n \n return cal\n \ndef histogram_eq(img_path):\n \"\"\"Return the img after doing histogram equalization\"\"\"\n\n img = cv2.imread(img_path, 0)\n\n eq = cv2.equalizeHist(img)\n\n return eq\n\n #Add noise to the image\ndef random_noise(img_path):\n img = cv2.imread(img_path)\n \n noisy_image = skimage.util.random_noise(img, mode='gaussian')\n \n return noisy_image\n\ndef s_and_p(img_path):\n img = cv2.imread(img_path)\n \n noisy_image = skimage.util.random_noise(img, mode='s&p')\n \n return noisy_image\n \ndef poisson(img_path):\n img = cv2.imread(img_path)\n \n noisy_image = skimage.util.random_noise(img, mode='poisson')\n \n return noisy_image\n \ndef lp_filter(img_path):\n \"\"\"Using boxFilter as a low pass filter\"\"\"\n\n # reading the img\n img = cv2.imread(img_path, 1)\n\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n box_filter = cv2.boxFilter(img, -1, (15, 15))\n\n return box_filter\n\n\ndef hp_filter(img_path):\n \"\"\"Using Img - The GaussianBlur to show the edges\"\"\"\n\n # helper function\n def hp(img, sigma=3):\n return img - cv2.GaussianBlur(img, (0, 0), sigma) + 127\n\n img = read_img(img_path)\n\n mod_hp = hp(img) \n\n return mod_hp\n\ndef median_filter(img_path):\n \"\"\"Using the in-built medianBlur in the openCV\"\"\"\n\n img = read_img(img_path)\n\n median = cv2.medianBlur(img, 5)\n\n return median\n\n\ndef avg_filter(img_path):\n \"\"\"Using the blur in the openCV to apply avareging filter\"\"\"\n \n img = read_img(img_path)\n\n avg = cv2.blur(img, (5, 5))\n\n return avg\n\ndef log_filter(img_path):\n \"\"\"Applying lap filter\"\"\"\n\n # reading \n img = read_img(img_path)\n\n\n # gray scale \n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n # Removing noise\n img = cv2.GaussianBlur(img, (3,3), 0)\n\n # convolute with proper kernels \n laplacian = cv2.Laplacian(img, cv2.CV_64F)\n\n return laplacian\n\ndef sobel_filter(img_path, x=1, y=1):\n # reading \n img = read_img(img_path)\n\n\n # gray scale \n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n # x and y\n sobel = cv2.Sobel(img, cv2.CV_64F, x, y, ksize=5)\n\n return sobel\n\ndef circle_hough(img_path):\n # reading \n img = read_img(img_path)\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n gray = cv2.medianBlur(gray, 5)\n \n cimg = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)\n\n circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1,120, param1=100, param2=30, minRadius=0, maxRadius=0)\n \n if circles is not None:\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n # outer circle\n cv2.circle(img, (i[0], i[1]), i[2], (0,255,0), 2)\n\n # center of the circle\n cv2.circle(img, 
(i[0], i[1]), 2, (0,0,255), 3)\n\n \n return img\n\n\ndef line_hough(img_path):\n\n # reading \n img = read_img(img_path)\n\n # gray scale \n gray = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n # Edge detection\n edges = cv2.Canny(gray, 50, 150, apertureSize=3)\n \n # Standard Hough Line Transform\n lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)\n \n if lines is not None:\n for line in lines:\n rho, theta = line[0]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n # x1 stores the rounded off value of (r * cos(theta) - 1000 * sin(theta))\n x1 = int(x0 + 1000 * (-b))\n # y1 stores the rounded off value of (r * sin(theta)+ 1000 * cos(theta))\n y1 = int(y0 + 1000 * (a))\n # x2 stores the rounded off value of (r * cos(theta)+ 1000 * sin(theta))\n x2 = int(x0 - 1000 * (-b))\n # y2 stores the rounded off value of (r * sin(theta)- 1000 * cos(theta))\n y2 = int(y0 - 1000 * (a))\n cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n\n \n return img\n\ndef erosion(img_path):\n # reading \n img = read_img(img_path)\n\n # gray scale \n gray = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n for i in range(0, 3):\n eroded = cv2.erode(gray.copy(), None, iterations=i + 1)\n \n return eroded\n\ndef dilation(img_path):\n # reading \n img = read_img(img_path)\n\n # gray scale \n gray = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n for i in range(0, 3):\n dilated = cv2.dilate(gray.copy(), None, iterations=i + 1)\n\n \n return dilated\n\ndef open_(img_path):\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n\n _, mask = cv2.threshold(img, 220, 255, cv2.THRESH_BINARY_INV)\n kernal = np.ones((5,5), np.uint8)\n opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernal)\n\n return opening\n\ndef close_(img_path):\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n\n _, mask = cv2.threshold(img, 220, 255, cv2.THRESH_BINARY_INV)\n kernal = np.ones((5,5), np.uint8)\n closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernal)\n\n return closing\n\n# #Read picture\n# img = cv2.imread('cat.jpg', 0)\n# #Add Gaussian noise\n# img1 = random_noise(img,'gaussian', mean=0.1,var=0.01)\n# img1 = np.uint8(img1*255)\n# #\n# cv2.imshow('img', img)\n# cv2.imshow('img1', img1)\n","repo_name":"AYehia0/ImageToolKit","sub_path":"opencv_back.py","file_name":"opencv_back.py","file_ext":"py","file_size_in_byte":6887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"14467874833","text":"from sklearn import tree\nimport graphviz \n\ntraining_data = [\n ['ensolarado', 50, 'sim'],\n ['ensolarado', 50, 'nao'],\n ['chuvoso', 10, 'sim'],\n ['chuvoso', 50, 'nao'],\n ['ensolarado', 10, 'sim'],\n]\n\nX = [[1, 50], [1, 50], [2, 10], [2, 50], [1, 10]]\nY = [1, 2, 1, 2, 1]\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X, Y)\n#tree.plot_tree(clf)\n\ndot_data = tree.export_graphviz(clf, out_file=None, feature_names=[\"tempo\", \"vento\"], class_names=[\"Sim\", \"Não\"], filled=True, rounded=True, special_characters=True) \ngraph = graphviz.Source(dot_data) \ngraph ","repo_name":"regisalbuquerque/decision_trees","sub_path":"cart_scikit.py","file_name":"cart_scikit.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"22449739770","text":"#--本篇极为复杂,请细心阅读各行--\nimport time\nwith open(\"data.txt\", mode = \"r\") as f:\n default = list(f)\nIDs = default[::2] #data.txt中单数行为账号\npassword = default[1::2] #data.txt中双数行为密码\niop = zip(IDs,password) #将账号与密码合并成为字典\niop = dict(iop)\nloginIDs = 
iop.keys() #字典中的账号\nloginkeys = iop.values() #字典中的密码\n\ntime.sleep(2)\nprint(\"\\nThis is a Login system.\\n\")\n#帐号判定\n\na = 0\nwhile a == 0:\n time.sleep(1.5)\n inputs = input(\"Please enter your ID :\")\n str(inputs)\n userid = inputs\n inputs = (inputs + \"\\n\")\n if inputs in loginIDs:\n print(\"This ID does exist.\")\n break\n elif inputs not in loginIDs:\n print(\"This ID does not exist.\")\n continue\niap = iop.items() #把{帐号:密码}(字典) 转变为 [(账号,密码)](列表)\nidkeylist = list(iap)\n#iap即id and password\nprint(\"------------\")\n#密码判定\n\nwhile a == 0:\n inputss = input(\"Please enter your password :\")\n str(inputss)\n inputss = (inputss + \"\\n\")\n useriap = (inputs,inputss) #useriap即输入的账号与密码组合,\n#iap会是[(acc1,key1),(acc2,key2)...]\n if useriap == idkeylist[2]:\n print(\"Admin login.\")\n a = a + 2\n break\n elif useriap not in iap:\n print(\"Wrong password.\")\n continue\n elif useriap in iap:\n print(\"Login succes.\")\n break\n\nwhile a == 2:\n adminchoice = input(\"1-Check user info\\n2-Check usertxt\\n3-Exit :\")\n str(adminchoice)\n if adminchoice == \"1\":\n for userinfo in idkeylist:\n userinfo = \"\".join(userinfo)\n userinfo = userinfo.strip(\"\\n\")\n print(userinfo)\n elif adminchoice == \"2\":\n admin_checkno = input(\"User number :\")\n admin_choose_txt = int(admin_checkno) - 1\n admin_check = open(IDs[admin_choose_txt] + \".txt\", mode = \"r\")\n admin_check_txt = admin_check.read()\n print(admin_check_txt)\n admin_check.close()\n break\n elif adminchoice == \"3\":\n break\n#询问是否要修改用户的txt\n\nwhile a == 0:\n readorwrite = input(\"1-Read txt\\n2-Write txt\\n3-Exit\\n4-Back to home :\\n\")\n str(readorwrite)\n if readorwrite == \"1\":\n \treadtxt = open(userid + \".txt\", mode = \"r\", encoding = \"utf-8\")\n \ttxt = readtxt.read()\n \tprint(txt)\n \tif txt == \"\":\n print(\"It is a empty txt.\")\n readtxt.close()\n elif readorwrite == \"2\":\n writetxt = open(userid + \".txt\", mode = \"a+\", encoding = \"utf-8\")\n userwrite = input(\"Please enter what you want to write :\")\n print(\"--------------------\")\n str(userwrite)\n userwrite = (userwrite + \"\\n\")\n print(\"This is your writing :\" , userwrite)\n print(\"\\n\")\n writetxt.write(userwrite)\n writetxt.close()\n elif readorwrite == \"3\":\n break\n elif readorwrite == \"4\":\n import home\n break\n else:\n print(\"Our system still need to update.\")\n continue\n#by zgy\nf.close()","repo_name":"Vanxh007/idk","sub_path":"loginID2.py","file_name":"loginID2.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"73663805804","text":"# Определить min элемент в каждой строке матрицы D(M,N),\r\n# запомнить их в массиве G.\r\n# В массиве G поменять местами первый и последний положительные элементы.\r\n\r\n# Унтилова Арина ИУ7-16Б\r\n\r\nM1=(input('Введите количество строк матрицы: ')) \r\nwhile True :\r\n k=0\r\n for i in range(len(M1)): \r\n if ('0' < M1[i] <= '9'):\r\n k+=1\r\n \r\n if (k!=len(M1)):\r\n M1=(input('Введите повторно количество строк матрицы:: '))\r\n else:\r\n break\r\nN1=(input('Введите количество столбцов матрицы: ')) \r\nwhile True :\r\n k=0\r\n for i in range(len(N1)): \r\n if ('0' < N1[i] <= '9'):\r\n k+=1\r\n \r\n if (k!=len(N1)):\r\n N1=(input('Введите повторно количество столбцов матрицы: '))\r\n else:\r\n break\r\nN=int(N1)\r\nM=int(M1)\r\nD=[[0]*N for i in range (M)] # Ввод элементов матрицы\r\nfor i in range (M):\r\n for j in range (N):\r\n print ('Введите элемент № ',j+1,' строки № ',i+1) \r\n 
a1=(input())\r\n k3=0\r\n while True :\r\n k2=0\r\n if a1[0]=='-':\r\n k=1\r\n else:\r\n k=0\r\n for q in range(len(a1)):\r\n if ('0' <= (a1[q]) <= '9'):\r\n k+=1\r\n if (a1[q]=='.'):\r\n k2+=1\r\n if k2==1:\r\n k+=1\r\n for m in range (len(a1)-1):\r\n if ((a1[m]=='e') and (a1[m+1]=='-')) or ((a1[m]=='e') and (a1[m+1]=='+')):\r\n k3+=1\r\n if (k3==1):\r\n k+=2 \r\n\r\n if (k!=len(a1)):\r\n print('Введите повторно элемент № ',j+1,' строки № ',i+1) \r\n a1=(input())\r\n else:\r\n D[i][j]=a1\r\n break\r\n \r\nprint('\\nНачальная матрица:')\r\nfor q in D:\r\n print (q)\r\n\r\nG=[0]*M\r\nl=0\r\n\r\nfor i in range (M): # Поиск наименьших элементов каждой строки\r\n minzn=D[i][0] \r\n for j in range (N):\r\n if (D[i][j]0:\r\n k+=1\r\nif(k==0):\r\n print ('\\nВ массиве G нет положительных элементов')\r\nelse:\r\n for i1 in range (M): # Запоминание номера первого положительного элемента массива\r\n if float(G[i1])>0:\r\n n1=i1\r\n break\r\n \r\n for i2 in range (M): # Запоминание номера последнего положительного элемента массива\r\n if float(G[i2])>0:\r\n n2=i2\r\n G[n1],G[n2]=G[n2],G[n1]\r\n print('\\nПреобразованный массив G: ')\r\n print (G)\r\n\r\n \r\n \r\n\r\n","repo_name":"tursunovJr/bmstu-python","sub_path":"1 course/ЛР(доработки)/lr7_zadacha1.py","file_name":"lr7_zadacha1.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7699388300","text":"\n# A class named chess has been defined\nclass Chess():\n\n n = input(\"Enter the size of the chess board!\") # n is the input used to enter the board dimension\n print(\"The size of the chess board is \"+n+\" x \"+ n)\n n = int(n) #being converted to integer\n bp_x = input(\"Enter the x coordinate of Black pawn :\") #bp_x is the black pawn's x coordinate\n bp_x = int(bp_x)\n bp_y = input(\"Enter the y coordinate of Black pawn :\") #bp_y is the black pawn's x coordinate\n bp_y = int(bp_y)\n if bp_x > n or bp_y > n: #checking if the coordinates are greater than that of the given dimension\n raise Exception('The Value of X and Y coordinates must be less than n')\n else:\n pass\n\n wp_x = input(\"Enter the x coordinate of white pawn :\") # similarly for white pawn's position\n wp_x = int(wp_x)\n wp_y = input(\"Enter the y coordinate of white pawn :\")\n wp_y = int(wp_y)\n if wp_x > n or wp_y > n:\n raise Exception('The Value of X and Y coordinates must be less than n')\n else:\n pass\n\n # bp_x = input(\"Enter the x coordinate of Black pawn :\")\n bp = [bp_x, bp_y] #to print them in coordinates took them in a list\n wp = [wp_x, wp_y]\n\n\n # print(\"Black pawn is in \" + \"(\" + bp_x + \",\" + bp_y + \")\")\n # print(\"White pawn is in \" + \"(\" + wp_x + \",\" + wp_y + \")\")\n\n print(\"Black pawn is in : \")\n print(bp)\n print(\"White pawn is in : \")\n print(wp)\n\n\n if abs(wp[0]-bp[0]) == abs(wp[1]-bp[1]): # if the pawns have same values or their difference in their\n print(\"The least number of steps for white pawn to reach black pawn is :\");print(abs(wp[0]-bp[0]))#values are same then the number of steos needed is equal to the difference between them\n else: #when they are not equal\n if bp_x >= bp_y: # we need to determine the highest number of the x and y coordinates in both coordinates\n if bp_x >= wp_x and bp_x >= wp_y: #after that which ever is highest, it should be subtracted with the same coordinate of the other pair.\n steps = abs(bp_x - wp_x) #example lets say there is x1 , y1 and x2 and y2 . 
lets say x1 is greatest of all it should be subtracted with x2\n elif bp_x >= wp_x and bp_x <= wp_y: #to arrive at the least number of steps considering the fact that the pawns can move in diagonally.\n steps = abs(wp_y - bp_y)\n elif bp_x <= wp_x and bp_x >= wp_y:\n steps = abs(wp_x - bp_x)\n elif bp_x <= wp_x and bp_x <= wp_y:\n if wp_x >= wp_y:\n steps = abs(wp_x - bp_x)\n else:\n steps= abs(wp_y - bp_y)\n else:\n if bp_y >= wp_x and bp_y >= wp_y:\n steps = abs(bp_y - wp_y)\n elif bp_y >= wp_x and bp_y <= wp_y:\n steps = abs(wp_y - bp_y)\n elif bp_y <= wp_x and bp_y >= wp_y:\n steps = abs(wp_x - bp_x)\n elif bp_y <= wp_x and bp_y <= wp_y:\n if wp_x >= wp_y:\n steps = abs(wp_x - bp_x)\n else:\n steps= abs(wp_y - bp_y)\n\n\n print(\"The least number of steps for white pawn to reach black pawn is :\")\n print(steps)\n","repo_name":"anoopprasadh/Chess","sub_path":"chess_time.py","file_name":"chess_time.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"42910641038","text":"# -*- coding: UTF-8 -*-\n# @Create : 2021/5/11 11:14 上午\n# @Author : yh\n# @Remark : 在此定义所有的消息队列操作方式\n\n# 定义所有打开方式\nBSMQ_OF_OPENEXIST = 0x00000001 # 打开已存在的mq\nBSMQ_OF_CREATENEW = 0x00000002 # 创建新的mq\nBSMQ_OF_OPENMULTI = 0x00000004\n\n# 定义创建mq的风格\nBSMQ_OT_COMMONMQ = 0x01 # 普通队列\nBSMQ_OT_PRIORITYMQ = 0x02 # 优先队列\nBSMQ_OT_TEMPMQ = 0x04\nBSMQ_OT_ENCRYPT = 0x08\nBSMQ_OT_INDEXRECORD = 0x10\n\nBS_MQ_MAXNAME = 100\nBS_MQ_MAXPWD = 20\n\n# 表示最多支持多少个优先级\nBS_MQ_MAXPRIORITY = 9\n# 普通优先级\nBS_MQ_COMMONPRIORITY = (BS_MQ_MAXPRIORITY / 2 + 1)\n\nBS_MQ_MAXRECORDID = 30\n\n# 从mq取数的等待时间设置为无限等待\nBS_TIMER_INFINITE = 0xFFFFFFFF\n","repo_name":"yuanhao1998/Mxsoftpy","sub_path":"db_def/def_mq.py","file_name":"def_mq.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"} +{"seq_id":"3235419299","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom IPython.core.pylabtools import figsize\nimport theano.tensor as tt # todo 这句会报错,网上有类似问题但是没有解决方案\nimport pymc3 as pm\nfrom scipy.stats.mstats import mquantiles\n\n\ndef logistic(x, beta, alpha=0):\n return 1.0 / (1.0 + np.exp(np.dot(beta, x) + alpha))\n\n\ndef show_rings_temperature(challenger_data):\n figsize(12.5, 3.5)\n np.set_printoptions(precision=3, suppress=True)\n\n #drop the NA values\n challenger_data = challenger_data[~np.isnan(challenger_data[:, 1])]\n\n #plot it, as a function of tempature (the first column)\n # print(\"Temp (F), O-Ring failure?\")\n # print(challenger_data)\n\n plt.scatter(challenger_data[:, 0], challenger_data[:, 1], s=75, color=\"k\",\n alpha=0.5)\n plt.yticks([0, 1])\n plt.ylabel(\"Damage Incident?\")\n plt.xlabel(\"Outside temperature (Fahrenheit)\")\n plt.title(\"Defects of the Space Shuttle O-Rings vs temperature\")\n\n\ndef show_alpha_beta(beta_samples, alpha_samples):\n figsize(12.5, 6)\n\n # histogram of the samples:\n plt.subplot(211)\n plt.title(r\"Posterior distributions of the variables $\\alpha, \\beta$\")\n plt.hist(beta_samples, histtype='stepfilled', bins=70, alpha=0.85,\n label=r\"posterior of $\\beta$\", color=\"#7A68A6\", density=True)\n plt.legend()\n\n plt.subplot(212)\n plt.hist(alpha_samples, histtype='stepfilled', bins=70, alpha=0.85,\n label=r\"posterior of $\\alpha$\", color=\"#A60628\", density=True)\n plt.legend()\n plt.show()\n\n\ndef show_posterior(t, p_t, mean_prob_t, temperature, D):\n figsize(12.5, 4)\n\n plt.plot(t, mean_prob_t, lw=3, 
label=\"average posterior \\nprobability of defect\")\n # 随便从采样中取两条线,有 20000 条可取。这里取的第 1 条和倒数第 2 条\n plt.plot(t, p_t[0, :], ls=\"--\", label=\"realization from posterior\")\n plt.plot(t, p_t[-2, :], ls=\"--\", label=\"realization from posterior\")\n\n # 打点原始数据\n plt.scatter(temperature, D, color=\"k\", s=50, alpha=0.5)\n\n plt.title(\"Posterior expected value of probability of defect; plus realizations\")\n plt.legend(loc=\"lower left\")\n plt.ylim(-0.1, 1.1)\n plt.xlim(t.min(), t.max())\n plt.ylabel(\"probability\")\n plt.xlabel(\"temperature\")\n plt.show()\n\n\ndef show_prob_range_by_temp(p_t, t, mean_prob_t, temperature, D):\n # vectorized bottom and top 2.5% quantiles for \"confidence interval\"\n # 取 p_t 的 [0.025, 0.975] 共 95% 的数据。即舍弃极端值\n qs = mquantiles(p_t, [0.025, 0.975], axis=0)\n\n # *qs ?? 这TM是什么写法?指针?\n # 这个是画了一个条带,*qs 应该是一维参数展开,意味着一维子元素作为参数在此位置罗列\n plt.fill_between(t[:, 0], *qs, alpha=0.7, color=\"#7A68A6\")\n\n # 这条是 “下限”\n plt.plot(t[:, 0], qs[0], label=\"95% CI\", color=\"#7A68A6\", alpha=0.7)\n\n # 这条是均值,跟上面的图是一样的\n plt.plot(t, mean_prob_t, lw=1, ls=\"--\", color=\"k\", label=\"average posterior \\nprobability of defect\")\n\n plt.xlim(t.min(), t.max())\n plt.ylim(-0.02, 1.02)\n plt.legend(loc=\"lower left\")\n plt.scatter(temperature, D, color=\"k\", s=50, alpha=0.5)\n plt.xlabel(\"temp, $t$\")\n\n plt.ylabel(\"probability estimate\")\n plt.title(\"Posterior probability estimates given temp. $t$\")\n plt.show()\n\n\ndef single_temp_prob(beta_samples, alpha_samples, temp=31):\n figsize(12.5, 2.5)\n\n # 只取一个点,返回的结果是单点的可能性列表,所以下图的 x 轴不再用温度而是直接用概率\n prob_single = logistic(temp, beta_samples, alpha_samples)\n\n # plt.xlim(0.995, 1)\n plt.hist(prob_single, bins=2000, density=True, histtype='stepfilled')\n plt.title(\"Posterior distribution of probability of defect, given $t = \" + str(temp) + \"$\")\n plt.xlabel(\"probability of defect occurring in O-ring\")\n plt.show()\n\n\nchallenger_data = np.genfromtxt(\"data/challenger_data.csv\", skip_header=1,\n usecols=[1, 2], missing_values=\"NA\",\n delimiter=\",\")\n\n# 查看数据基本情况\nshow_rings_temperature(challenger_data)\n\ntemperature = challenger_data[:, 0] # 取第一列,温度\nD = challenger_data[:, 1] # defect or not?\n\n# notice the`value` here. We explain why below.\nwith pm.Model() as model:\n # pymc 定义正态分布的方式,使用 mu 和 tau 指定参数\n beta = pm.Normal(\"beta\", mu=0, tau=0.001, testval=0)\n alpha = pm.Normal(\"alpha\", mu=0, tau=0.001, testval=0)\n # p 是 logistic 函数,即目标函数,而 alpha 和 beta 是参数,这两个参数用正态分布来估计\n p = pm.Deterministic(\"p\", 1.0 / (1. 
+ tt.exp(beta * temperature + alpha)))\n\n# connect the probabilities in `p` with our observations through a Bernoulli random variable.\nwith model:\n # p 的定义使用了 temperature,在这里和 D 关联了起来\n observed = pm.Bernoulli(\"bernoulli_obs\", p, observed=D)\n\n # Mysterious code to be explained in Chapter 3\n start = pm.find_MAP() # 最大后验估计?\n step = pm.Metropolis()\n trace = pm.sample(120000, step=step, start=start)\n # 这个是从样本里每 2 个取一个。看来这个 trace 里的东西随便取?\n burned_trace = trace[100000::2]\n\nalpha_samples = burned_trace[\"alpha\"][:, None] # best to make them 1d,意思是将它们降为 1 维?\nbeta_samples = burned_trace[\"beta\"][:, None]\n\n# 查看 alpha 和 beta 的采样结果\nshow_alpha_beta(beta_samples, alpha_samples)\n\nt = np.linspace(temperature.min() - 5, temperature.max() + 5, 50)[:, None]\n# t.T 表示对 t 进行转置,t 是列向量\n# 打点画线,t.T 作为自变量,beta_samples 和 alpha_samples 各有 20000 组,得到的 p_t 也是有 20000 组,可以画 20000 条线\np_t = logistic(t.T, beta_samples, alpha_samples)\n\nmean_prob_t = p_t.mean(axis=0)\n\n# 查看 p 的后验分布\nshow_posterior(t, p_t, mean_prob_t, temperature, D)\n\n# 查看在温度坐标下的概率范围\nshow_prob_range_by_temp(p_t, t, mean_prob_t, temperature, D)\n\n#\nsingle_temp_prob(beta_samples, alpha_samples, 65)\n","repo_name":"LeetJoe/miscscripts","sub_path":"matplotlib/ex_challenger_space_craft.py","file_name":"ex_challenger_space_craft.py","file_ext":"py","file_size_in_byte":6109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"34730453164","text":"from odoo import fields, models\n\n\nclass AbstractPowerLevel(models.AbstractModel):\n\n _name = \"abstract.power.level\"\n _inherit = [\"mozaik.abstract.model\"]\n _description = \"Abstract Power Level\"\n _order = \"sequence, name\"\n _unicity_keys = \"name\"\n _log_access = True\n\n name = fields.Char(\n required=True,\n index=True,\n tracking=True,\n )\n sequence = fields.Integer(\n required=True,\n tracking=True,\n group_operator=\"min\",\n default=5,\n )\n assembly_category_ids = fields.One2many(\n \"abstract.assembly.category\",\n \"power_level_id\",\n string=\"Assembly Categories\",\n domain=[(\"active\", \"=\", True)],\n )\n assembly_category_inactive_ids = fields.One2many(\n \"abstract.assembly.category\",\n \"power_level_id\",\n string=\"Assembly Categories (Inactive)\",\n domain=[(\"active\", \"=\", False)],\n )\n","repo_name":"mozaik-association/mozaik","sub_path":"mozaik_structure/models/abstract_power_level.py","file_name":"abstract_power_level.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"19"} +{"seq_id":"41893906949","text":"import hikari\nimport lightbulb\nimport math\nfrom ai import llm, duckduckgo_agent\n\nwith open(\"token.key\") as token:\n bot = lightbulb.BotApp(token=token.read())\n\ndef response_generator(input, response):\n response_length = len(response)\n num_embeds = math.floor(response_length/4096)\n content = \"\"\n embeds = []\n prompt = input\n \n if len(prompt) > 256:\n prompt = prompt[:256]\n \n if num_embeds == 10:\n for i in range(10):\n embeds.append(hikari.Embed(title=f\"Prompt: {prompt} ({i+1}/10)\", description=response[response_length*i:response_length*(i+1)]))\n content = \"The generated response was too long for Discord, sorry about that!\"\n \n elif (0 < num_embeds < 10):\n for i in range(num_embeds):\n embeds.append(hikari.Embed(title=f\"Prompt: {prompt} ({i+1}/{num_embeds})\", description=response[response_length*i:response_length*(i+1)]))\n else:\n embeds.append(hikari.Embed(title=f\"Prompt: 
{prompt}\", description=response))\n \n return content, embeds\n\n\n@bot.command\n@lightbulb.option(\"text\", \"What you want to ask NdyAI\")\n@lightbulb.command(\"ask\", \"Ask NdyAI a question\", auto_defer = True)\n@lightbulb.implements(lightbulb.SlashCommand)\nasync def ask(ctx: lightbulb.Context) -> None:\n try:\n response = llm(ctx.options.text)\n except:\n await ctx.respond(embed=hikari.Embed(title=\"Failed to generate response\", description=\"Something went wrong. Please try agian!\"))\n return\n \n content, embeds = response_generator(ctx.options.text, response)\n \n await ctx.respond(content=content, embeds=embeds)\n \n@bot.command\n@lightbulb.option(\"text\", \"What you want to ask NdyAI\")\n@lightbulb.command(\"search\", \"Takes much longer but has access to the internet\", auto_defer = True)\n@lightbulb.implements(lightbulb.SlashCommand)\nasync def search(ctx: lightbulb.Context) -> None:\n try:\n response = duckduckgo_agent.run(ctx.options.text)\n except:\n await ctx.respond(embed=hikari.Embed(title=\"Failed to generate response\", description=\"Something went wrong. Please try agian!\"))\n return\n \n content, embeds = response_generator(ctx.options.text, response)\n \n await ctx.respond(content=content, embeds=embeds)\n \nbot.run()","repo_name":"Ndymario/Ndy-AI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"7381002678","text":"x_train = 0.5\ny_train = 0.8\n\nweight = 0.5\nlr = 0.01\nepoch = 100\n\n\n# mse 조절값의 대한 내용\n# erro = loss \n# loss와 optimizer의 동작내용\nfor iteration in range(epoch):\n y_predict = x_train*weight\n error = (y_predict - y_train) **2\n\n print('Error : ' + str(error) + \"\\ty-predict\" + str(y_predict))\n\n up_y_predict = x_train * (weight + lr)\n up_error = (y_train - up_y_predict ) **2\n\n down_y_predict = x_train * (weight - lr) \n down_error = (y_train - down_y_predict) **2\n\n if(down_error <= up_error):\n weight = weight - lr\n if(down_error > up_error) :\n weight = weight + lr","repo_name":"G-sup/academy-study","sub_path":"keras2/keras69_4_lr.py","file_name":"keras69_4_lr.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"6298812994","text":"#! 
python3\n# -*- coding: utf-8 -*-\n\"\"\"\nMath's nightmare\n\tSettings:\n\tmedium quality/regular in a new window, i3 wm with game in LHS halved window\n\tExtra time given at end of round 4 and round 7\n\"\"\"\n\nimport Image\nimport pytesseract\nimport pyautogui\nimport pyscreenshot as ImageGrab\nimport logging\nimport sys\nimport time\n\n\n\n#Filters image to remove background noise and leave only the number\ndef cleanImage(img):\n\t\tpixdata = img.load()\n\t\tfor y in xrange(img.size[1]):\n\t\t for x in xrange(img.size[0]):\n\t\t #print(pixdata[x, y])\n\t\t if pixdata[x, y][1] > 90: \n\t\t pixdata[x, y] = (255, 255, 255, 255)\n\t\t if pixdata[x, y][0] == 117: \n\t\t pixdata[x, y] = (255, 255, 255, 255)\n\t\treturn img\n\ndef analyzeBubble(startingX, startingY, boxNum):\n\tmode = \"div\" # or \"random\"\n\ttry:\n\t\t#grab first number\n\t\tim=ImageGrab.grab(bbox=(startingX,startingY,startingX + 37,startingY + 16)) # X1,Y1,X2,Y2\n\t\tim = cleanImage(im)\n\t\tim.save('debugImg/num1' + 'boxNum' + str(boxNum) + '.png')\n\t\tnum1=pytesseract.image_to_string(im, config='-psm 8 digits').replace(\" \", \"\").replace(\"-\", \"\").replace(\".\", \"\")\n\n\t\t#grab operator guess if we're doing random (otherwise will default to division)\n\t\tif mode == \"random\":\n\t\t\tim=ImageGrab.grab(bbox=(startingX,startingY,startingX + 60, startingY + 22)) # X1,Y1,X2,Y2\n\t\t\tim = cleanImage(im)\n\t\t\tim.save('debugImg/op' + 'boxNum' + str(boxNum) + '.png')\n\t\t\topGuess = pytesseract.image_to_string(im, config='-psm 6')\n\t\t\tprint('opguess is ' + opGuess)\n\t\t\top = \"\"\n\t\t\tif \"x\" in opGuess.lower():\n\t\t\t\top = \"mult\"\t\n\t\t\telif \"+\" in opGuess:\n\t\t\t\top = \"add\"\t\n\n\n\n\t\t#grab second number\n\t\tim=ImageGrab.grab(bbox=(startingX,startingY + 21,startingX + 37,startingY + 21 + 18)) # X1,Y1,X2,Y2\n\t\tpixdata = im.load()\n\t\tim=cleanImage(im)\n\t\tim.save('debugImg/num2' + 'boxNum' + str(boxNum) + '.png')\n\t\tnum2=pytesseract.image_to_string(im, config='-psm 8 digits').replace(\" \", \"\").replace(\"-\", \"\").replace(\".\", \"\")\n\n\n\t\t# compute result (division)\n\t\tif mode == \"div\":\n\t\t\tresult=int(num1) / int(num2)\n\t\t\tlogging.debug('bnum %s performed following calculation: %s / %s = %d', boxNum, num1, num2, result)\n\t\t\tpyautogui.click(startingX + 23, startingY + 55)\n\t\t\tpyautogui.typewrite(str(result), interval=0.01)\n\t\t\tim = pyautogui.screenshot()\n\t\t\tpix = im.getpixel((startingX + 23, startingY + 55))\n\t\t\tif pix != (175, 193, 221):\n\t\t\t\tlogging.debug('Correct calculation %d %d %d', pix[0], pix[1], pix[2])\n\t\t\telse:\n\t\t\t\t# attempt to fix bad calculation\n\t\t\t\tlogging.debug('INCORRECT calculation %d %d %d', pix[0], pix[1], pix[2])\n\t\t\t\tlogging.debug('Incorrect calculation, taking corrective measures')\n\t\t\t\tim=ImageGrab.grab(bbox=(startingX,startingY-5,startingX + 37,startingY + 20)) # X1,Y1,X2,Y2\n\t\t\t\tim = cleanImage(im)\n\t\t\t\tim.save('debugImg/num1' + 'boxNum' + str(boxNum) + '-reiter.png')\n\t\t\t\tnum1=pytesseract.image_to_string(im, config='-psm 8 digits').replace(\" \", \"\").replace(\"-\", \"\").replace(\".\", \"\")\n\t\t\t\tresult=int(num1) / int(num2)\n\t\t\t\tlogging.debug('new guess: bnum %s performed following calculation: %s / %s = %d', boxNum, num1, num2, result)\n\t\t\t\tpyautogui.click(startingX + 23, startingY + 55)\n\t\t\t\tpyautogui.typewrite(str(result), interval=0.01)\n\n\t\t# compute result (random mode)\n\t\telif mode == \"random\":\n\t\t\tif op == \"mult\":\n\t\t\t\tresult=int(num1) * 
int(num2)\n\t\t\t\tlogging.debug('bnum %s performed following calculation: %s * %s = %d', boxNum, num1, num2, result)\n\t\t\t\tpyautogui.click(startingX + 23, startingY + 55)\n\t\t\t\tpyautogui.typewrite(str(result), interval=0.01)\n\t\t\telif op == \"add\":\n\t\t\t\tresult=int(num1) + int(num2)\n\t\t\t\tlogging.debug('bnum %s performed following calculation: %s + %s = %d', boxNum, num1, num2, result)\n\t\t\t\tpyautogui.click(startingX + 23, startingY + 55)\n\t\t\t\tpyautogui.typewrite(str(result), interval=0.01)\n\t\t\t\tim = pyautogui.screenshot()\n\t\t\t\tif im.getpixel((startingX + 23, startingY + 55))!= (175, 193, 221):\n\t\t\t\t\tresult=int(num1) / int(num2)\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.typewrite(str(result), interval=0.01)\n\t\t\telse:\n\t\t\t\tresult=int(num1) / int(num2)\n\t\t\t\tlogging.debug('bnum %s performed following calculation: %s * %s = %d', boxNum, num1, num2, result)\n\t\t\t\tpyautogui.click(startingX + 23, startingY + 55)\n\t\t\t\tpyautogui.typewrite(str(result), interval=0.01)\n\t\t\t\tim = pyautogui.screenshot()\n\t\t\t\tif im.getpixel((startingX + 23, startingY + 55))!= (175, 193, 221):\n\t\t\t\t\tresult=int(num1) - int(num2)\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.typewrite(str(result), interval=0.01)\n\t\t\t\tim = pyautogui.screenshot()\n\t\t\t\tif im.getpixel((startingX + 23, startingY + 55))!= (175, 193, 221):\n\t\t\t\t\tresult=int(num1) + int(num2)\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.press('backspace')\n\t\t\t\t\tpyautogui.typewrite(str(result), interval=0.01)\n\t\t\t\n\texcept Exception: #swallow exception. The show must go on!\n\t\tpass\n\t\treturn\n\n\n\n#logging.disable(logging.DEBUG) # uncomment to block debug log messages\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\ntry:\n\t#initialize game setup\n\tstartButton = pyautogui.locateOnScreen('res/startGame.png')\n\tpyautogui.click(startButton[0]+20, startButton[1])\n\ttime.sleep(1)\n\t#select division\n\tdiv = pyautogui.locateOnScreen('res/div.png')\n\tpyautogui.click(div[0]+50, div[1]+5)\n\t\n\t#select brain tree difficulty\n\tdiff = pyautogui.locateOnScreen('res/brain.png')\n\tpyautogui.click(diff[0]+20, diff[1])\n\t\n\t#start game (just down from difficulty button)\n\tpyautogui.click(diff[0]+20, diff[1]+70)\n\ttime.sleep(2.5)\nexcept Exception: #swallow exception. 
The show must go on!\n\tpass\n\nsleepTime = .9\nroundCount = 0\nwhile True:\n\troundCount += 1\n\tif roundCount == 7:\n\t\tpyautogui.typewrite('letimiyasleep')\n\tboxNum = 1\n\tanalyzeBubble(727, 145, boxNum)\n\tboxNum = boxNum + 1 \n\ttime.sleep(sleepTime)\n\tanalyzeBubble(727, 66, boxNum) #box 2\n\tboxNum = boxNum + 1 \n\ttime.sleep(sleepTime)\n\tanalyzeBubble(644, 57, boxNum) #box 3\n\tboxNum = boxNum + 1 \n\ttime.sleep(sleepTime)\n\tanalyzeBubble(642, 142, boxNum) #box 4\n\tboxNum = boxNum + 1 \n\ttime.sleep(sleepTime)\n\tanalyzeBubble(590, 208, boxNum) #box 5\n\tboxNum = boxNum + 1 \n\ttime.sleep(sleepTime)\n\tanalyzeBubble(555, 133, boxNum) #box 6\n\tboxNum = boxNum + 1 \n\ttime.sleep(sleepTime)\n\tanalyzeBubble(563, 55, boxNum) #box 7 \n\tboxNum = boxNum + 1 \n\ttime.sleep(sleepTime)\n\tanalyzeBubble(487, 88, boxNum) #box 8\n\tboxNum = boxNum + 1 \n\ttime.sleep(sleepTime)\n\tanalyzeBubble(482, 171, boxNum) #box 9\n\tboxNum = boxNum + 1 \n\ttime.sleep(1)\n\t#click begin new round button\n\tpyautogui.click(487, 269) \n\ttime.sleep(1.75)\n","repo_name":"alexkohler/mathsNightmareAutomation","sub_path":"math.py","file_name":"math.py","file_ext":"py","file_size_in_byte":6698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
{"seq_id":"1772785603","text":"#!/usr/bin/env python\nimport sys\nimport serial\nimport time\nimport roslib; roslib.load_manifest('beginner_tutorials')\nimport rospy\nfrom std_msgs.msg import String\nfrom FroboMsgs.msg import fpga_data\nfrom FroboMsgs.msg import pwm_o\n\nser = serial.Serial(\n'/dev/ttyUSB0',\nbaudrate=115200,\nparity=serial.PARITY_NONE,\nstopbits=serial.STOPBITS_ONE,\nbytesize=serial.EIGHTBITS)\nser.isOpen()\nstr = \"Connection open\"\nrospy.loginfo(str)\n\ndef pwmCallback(msg):\n #20Hex -> 10m/s\n speed_desired_left = hex(msg.speed_left)[2:]\n speed_desired_right = hex(msg.speed_right)[2:]\n dir_left = hex(msg.direction_left)[2:]\n dir_right = hex(msg.direction_right)[2:]\n e_left = hex(msg.enable_left)[2:]\n e_right = hex(msg.enable_right)[2:]\n \n in_cmd = \"w04 \" + speed_desired_left.zfill(4) + speed_desired_right.zfill(4) + dir_left + dir_right + e_left + e_right\n ser.write(in_cmd)\n\ndef talker():\n pub = rospy.Publisher('encoder_l', fpga_data)\n rospy.Subscriber('pwm', pwm_o, pwmCallback)\n rospy.init_node('seriel_data')\n # while not rospy.is_shutdown():\n \n ser.write(b'r00')\n encoder_le = ser.read(size=9)\n str = \"left encoder %s\" % encoder_le\n #rospy.loginfo(str)\n fpga_data.encoder_l = int(encoder_le,16)\n \n ser.write(b'r01')\n encoder_ri = ser.read(size=9)\n str = \"right encoder %s\" % encoder_ri\n #rospy.loginfo(str)\n fpga_data.encoder_r = int(encoder_ri,16)\n \n # ser.write(b'r04')\n # pwm = ser.read(size=9)\n # str = \"pwm value %s\" % pwm\n # rospy.loginfo(str)\n #fpga_data.pwm_value = int(pwm,16)\n # str = \"hello worlds %s\" % rospy.get_time()\n #rospy.loginfo(str)\n pub.publish(fpga_data)\n # rospy.sleep(0.02)\n\n\"\"\"\ndef talker():\n input = hex(40)[2:]\n print input.zfill(4)\n #print input\n input2 = hex(33)[2:]\n print input2.zfill(4)\n input3 = input.zfill(4) + input2.zfill(4)\n print input3\n #print input\n # while not rospy.is_shutdown():\n #     in_cmd = \"w05 \" + input.zfill(8)\n #     ser.write(in_cmd)\n #     rospy.sleep(1.0)\n\"\"\"\nif __name__ == '__main__':\n try:\n talker()\n rospy.spin()\n except rospy.ROSInterruptException:\n pass\n\n\n\n # pos = ser.read(size=9) #between 20000 - 6666 hex\n # pos = hex(int(pos,16))\n \n # in_cmd = bytes('w05 ' + perror.zfill(8), 
'UTF-8')\n\n \n # ser.flushOutput()\n \n","repo_name":"klauskryhlmand/fuerte_workspace","sub_path":"sandbox/beginner_tutorials/scripts/talker.py","file_name":"talker.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"1082020670","text":"#Question 1\r\n# Recursive function to solve the tower of hanoi\r\nprint('-------------------------Question 1-------------------------')\r\n\r\ndef TowerOfHanoi(n , source, destination, auxiliary):\r\n\tif n==1:\r\n\t\tprint (\"Move disk 1 from source\",source,\"to destination\",destination)\r\n\t\treturn\r\n\tTowerOfHanoi(n-1, source, auxiliary, destination)\r\n\tprint (\"Move disk\",n,\"from source\",source,\"to destination\",destination)\r\n\tTowerOfHanoi(n-1, auxiliary, destination, source)\r\nn = 4\r\nTowerOfHanoi(n,'A','B','C') # A, B, C are the names of the rods\r\n\r\n#Question 2\r\n#To print Pascal's Triangle\r\nprint('-------------------------Question 2-------------------------')\r\n#Using Loop\r\nrows = int(input(\"Enter number of rows: \"))\r\ncoef = 1\r\n\r\nfor i in range(1, rows+1):\r\n for space in range(1, rows-i+1):\r\n print(\" \",end=\"\")\r\n for j in range(0, i):\r\n if j==0 or i==0:\r\n coef = 1\r\n else:\r\n coef = coef * (i - j)//j\r\n print(coef, end = \" \")\r\n print()\r\n#Using recursive function\r\ndef pascals_triangle(n):\r\n if n == 1:\r\n return [[1]] # Base case termination condition\r\n else:\r\n result = pascals_triangle(n-1) # Recursive call\r\n # Calculate current row using info from previous row\r\n current_row = [1]\r\n previous_row = result[-1] # Take from end of result\r\n for i in range(len(previous_row)-1):\r\n current_row.append(previous_row[i] + previous_row[i+1])\r\n current_row += [1]\r\n result.append(current_row)\r\n return result\r\n\r\nrows=int(input('Enter rows: '))\r\ntriangle = pascals_triangle(rows)\r\nfor row in triangle:\r\n print(row)\r\n \r\n#Question 3\r\n#Built in functions\r\nprint('-------------------------Question 3-------------------------')\r\n\r\na=int(input(\"Enter 1st number: \"))\r\nb=int(input(\"Enter 2nd number: \"))\r\ntup = divmod(a,b)#to print quotient and remainder\r\nprint(tup)\r\n# code to check whether the tuple is callable or not\r\nprint(callable(tup))\r\n# code to find whether all elements are zero\r\nif tup == (0,0):\r\n print ('All values are zeros')\r\nelse :\r\n print ('Values are non-zero')\r\ntup1= tup+(4,5,6)\r\n#keep only the values which are greater than 4\r\nnumbers = filter(lambda n: n > 4, tup1)\r\nlist1=list(numbers)\r\nprint('values greater than 4 :',list1)\r\nset1 = set(list1)\r\nprint(set1)\r\nnewset = frozenset(set1)\r\nprint('New immutable set ',newset)\r\nprint ('Maximum value in set is ',max(newset))\r\nprint('The hash value of set is ',hash(newset))\r\n\r\n\r\n#Question 4\r\n#To create a class\r\nprint('-------------------------Question 4-------------------------')\r\nclass Student:\r\n def __init__(self,name,rollnumber):\r\n self.name = name\r\n self.rollnumber = rollnumber\r\n def object(self):\r\n print ('Name ='+self.name)\r\n print ('Rollnumber =',self.rollnumber)\r\nc = Student(\"Naheed Anjum\",21104002)\r\nc.object()\r\ndel c #Deleting object c\r\n\r\n#Question 5\r\n#Program to store details of three employees: name and salary using class.\r\nprint('-------------------------Question 5-------------------------')\r\n\r\nclass details :\r\n def __init__(self,Employee,Name,Salary):\r\n self.Employee = Employee\r\n self.Name = Name\r\n self.Salary = 
Salary\r\n def record(self):\r\n print(self.Employee,end=\" \")\r\n print(self.Name,end=\" \")\r\n print(self.Salary,end=\" \")\r\n print()\r\np1 = details(1,'Mehak',40000)\r\np1.record()\r\np2 = details(2,'Ashok',50000)\r\np2.record()\r\np3 = details(3,'Viren',60000)\r\np3.record()\r\np1.Salary=70000\r\nprint('Updated details of Mehak:',end=\" \")\r\np1.record()\r\ndel p3\r\n\r\n#Question 6\r\n#Friendship Test\r\nprint('-------------------------Question 6-------------------------')\r\n\r\ndef code(gword):\r\n print ('Word spoken by George:',gword)\r\n bword = input('Word formed by Barbie:')\r\n length = len(bword)\r\n count = 0\r\n for i in bword :\r\n if i in gword :\r\n count = count + 1\r\n else :\r\n break\r\n if count == length :\r\n print('Barbie is a good friend')\r\n else :\r\n print('Barbie is not a good friend')\r\n \r\nGeorge = input(\"Enter word said by George :\")\r\ncode(George)\r\n","repo_name":"NaheedAnju/Assignment-2-Intoduction-To-Computing","sub_path":"Assignment 4 CODES Naheed Anjum.py","file_name":"Assignment 4 CODES Naheed Anjum.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"5770114914","text":"import random\nfrom . import game_utils\nimport numpy as np\nimport os\nimport sys\nimport math\nsys.path.insert(1, os.path.join(sys.path[0], '...'))\nfrom tft_fight_predictor.teamfight_predictor import TftFightPredictor\nfrom copy import deepcopy\n\n\nSET_DATA = {\n \"ROLL_ODDS\": {\n 1: [100, 0, 0, 0, 0],\n 2: [100, 0, 0, 0, 0],\n 3: [75, 25, 0, 0, 0],\n 4: [55, 30, 15, 0, 0],\n 5: [45, 33, 20, 2, 0],\n 6: [25, 40, 30, 5, 0],\n 7: [19, 30, 35, 15, 1],\n 8: [15, 20, 35, 25, 5],\n 9: [10, 15, 30, 30, 15]\n }\n}\n\n# Index is the (stage - 1), value is how much reward for a win\nSTAGE_WIN_REWARD = [\n 0,# stage 1\n .02,\n .05,\n .07,\n .09,\n .11,\n .13,\n .15,\n .17,\n .19,\n .21,\n .23,\n .24,\n]\n\nACTIONS_MAP = {\n \"BUY_SHOP_POS_1\": 0,\n \"BUY_SHOP_POS_2\": 1,\n \"BUY_SHOP_POS_3\": 2,\n \"BUY_SHOP_POS_4\": 3,\n \"BUY_SHOP_POS_5\": 4,\n \"SELL_BENCH_POS_1\": 5,\n \"SELL_BENCH_POS_2\": 6,\n \"SELL_BENCH_POS_3\": 7,\n \"SELL_BENCH_POS_4\": 8,\n \"SELL_BENCH_POS_5\": 9,\n \"SELL_BENCH_POS_6\": 10,\n \"SELL_BENCH_POS_7\": 11,\n \"SELL_BENCH_POS_8\": 12,\n \"SELL_BENCH_POS_9\": 13,\n \"SELL_CHAMPION_POS_1\": 14,\n \"SELL_CHAMPION_POS_2\": 15,\n \"SELL_CHAMPION_POS_3\": 16,\n \"SELL_CHAMPION_POS_4\": 17,\n \"SELL_CHAMPION_POS_5\": 18,\n \"SELL_CHAMPION_POS_6\": 19,\n \"SELL_CHAMPION_POS_7\": 20,\n \"SELL_CHAMPION_POS_8\": 21,\n \"SELL_CHAMPION_POS_9\": 22,\n \"BENCH_1_TO_BOARD\": 23,\n \"BENCH_2_TO_BOARD\": 24,\n \"BENCH_3_TO_BOARD\": 25,\n \"BENCH_4_TO_BOARD\": 26,\n \"BENCH_5_TO_BOARD\": 27,\n \"BENCH_6_TO_BOARD\": 28,\n \"BENCH_7_TO_BOARD\": 29,\n \"BENCH_8_TO_BOARD\": 30,\n \"BENCH_9_TO_BOARD\": 31,\n \"BOARD_1_TO_BENCH\": 32,\n \"BOARD_2_TO_BENCH\": 33,\n \"BOARD_3_TO_BENCH\": 34,\n \"BOARD_4_TO_BENCH\": 35,\n \"BOARD_5_TO_BENCH\": 36,\n \"BOARD_6_TO_BENCH\": 37,\n \"BOARD_7_TO_BENCH\": 38,\n \"BOARD_8_TO_BENCH\": 39,\n \"BOARD_9_TO_BENCH\": 40,\n\n \"REROLL\": 41,\n \"BUY_EXP\": 42,\n \"READY_NEXT_STAGE\": 43,\n\n \"ITEM_1_TO_BOARD_1\": 44,\n \"ITEM_1_TO_BOARD_2\": 45,\n \"ITEM_1_TO_BOARD_3\": 46,\n \"ITEM_1_TO_BOARD_4\": 47,\n \"ITEM_1_TO_BOARD_5\": 48,\n \"ITEM_1_TO_BOARD_6\": 49,\n \"ITEM_1_TO_BOARD_7\": 50,\n \"ITEM_1_TO_BOARD_8\": 51,\n \"ITEM_1_TO_BOARD_9\": 52,\n \"ITEM_2_TO_BOARD_1\": 53,\n \"ITEM_2_TO_BOARD_2\": 54,\n \"ITEM_2_TO_BOARD_3\": 55,\n 
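# the item-to-board keys below follow the pattern 44 + 9*(item_slot-1) + (board_slot-1)\n 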
\"ITEM_2_TO_BOARD_4\": 56,\n \"ITEM_2_TO_BOARD_5\": 57,\n \"ITEM_2_TO_BOARD_6\": 58,\n \"ITEM_2_TO_BOARD_7\": 59,\n \"ITEM_2_TO_BOARD_8\": 60,\n \"ITEM_2_TO_BOARD_9\": 61,\n \"ITEM_3_TO_BOARD_1\": 62,\n \"ITEM_3_TO_BOARD_2\": 63,\n \"ITEM_3_TO_BOARD_3\": 64,\n \"ITEM_3_TO_BOARD_4\": 65,\n \"ITEM_3_TO_BOARD_5\": 66,\n \"ITEM_3_TO_BOARD_6\": 67,\n \"ITEM_3_TO_BOARD_7\": 68,\n \"ITEM_3_TO_BOARD_8\": 69,\n \"ITEM_3_TO_BOARD_9\": 70,\n \"ITEM_4_TO_BOARD_1\": 71,\n \"ITEM_4_TO_BOARD_2\": 72,\n \"ITEM_4_TO_BOARD_3\": 73,\n \"ITEM_4_TO_BOARD_4\": 74,\n \"ITEM_4_TO_BOARD_5\": 75,\n \"ITEM_4_TO_BOARD_6\": 76,\n \"ITEM_4_TO_BOARD_7\": 77,\n \"ITEM_4_TO_BOARD_8\": 78,\n \"ITEM_4_TO_BOARD_9\": 79,\n \"ITEM_5_TO_BOARD_1\": 80,\n \"ITEM_5_TO_BOARD_2\": 81,\n \"ITEM_5_TO_BOARD_3\": 82,\n \"ITEM_5_TO_BOARD_4\": 83,\n \"ITEM_5_TO_BOARD_5\": 84,\n \"ITEM_5_TO_BOARD_6\": 85,\n \"ITEM_5_TO_BOARD_7\": 86,\n \"ITEM_5_TO_BOARD_8\": 87,\n \"ITEM_5_TO_BOARD_9\": 88,\n \"ITEM_6_TO_BOARD_1\": 89,\n \"ITEM_6_TO_BOARD_2\": 90,\n \"ITEM_6_TO_BOARD_3\": 91,\n \"ITEM_6_TO_BOARD_4\": 92,\n \"ITEM_6_TO_BOARD_5\": 93,\n \"ITEM_6_TO_BOARD_6\": 94,\n \"ITEM_6_TO_BOARD_7\": 95,\n \"ITEM_6_TO_BOARD_8\": 96,\n \"ITEM_6_TO_BOARD_9\": 97,\n}\n\nclass GameManager():\n def __init__(self, players):\n self.players = players\n self.stage = 1\n self.round = 1\n self.champion_pool = self.create_champion_pool()\n self.fight_predictor = TftFightPredictor()\n # Placements (index0 = first place, index 7 = last place). Elements are player objects\n self.placements = [] \n\n def create_champion_pool(self):\n champion_pool = {\n 1: [],\n 2: [],\n 3: [],\n 4: [],\n 5: [],\n }\n\n # TIER 1: 29 of each champ\n for champ in game_utils.get_cost_x_champions(1):\n for i in range(29):\n champion_pool[1].append(Champion(champ['championId'], champ['name'], 1, champ['cost'], champ['traits']))\n\n for champ in game_utils.get_cost_x_champions(2):\n for i in range(22):\n champion_pool[2].append(Champion(champ['championId'], champ['name'], 1, champ['cost'], champ['traits']))\n \n for champ in game_utils.get_cost_x_champions(3):\n for i in range(18):\n champion_pool[3].append(Champion(champ['championId'], champ['name'], 1, champ['cost'], champ['traits']))\n \n for champ in game_utils.get_cost_x_champions(4):\n for i in range(12):\n champion_pool[4].append(Champion(champ['championId'], champ['name'], 1, champ['cost'], champ['traits']))\n \n for champ in game_utils.get_cost_x_champions(5):\n for i in range(10):\n champion_pool[5].append(Champion(champ['championId'], champ['name'], 1, champ['cost'], champ['traits']))\n\n return champion_pool\n\n def increment_stage_round(self):\n if self.stage == 1:\n if self.round == 3:\n self.stage += 1\n self.round = 1\n else:\n self.round += 1\n\n else:\n if self.round == 6:\n self.stage += 1\n self.round = 1\n else:\n self.round += 1\n\n self.distribute_stage_items_and_gold()\n\n # Each player receives 2 exp at end of round\n for player in self.players:\n player.add_exp(2)\n\n def distribute_stage_items_and_gold(self):\n if self.stage in [1,2,3,4] and self.round == 3:\n # Carousel. 
TODO: Let agent decide their item and champ\n for player in self.players:\n player.add_item_to_inventory(game_utils.get_random_item_component())\n\n if self.stage in [1,2,3,4] and self.round == 6:\n # Creep round: Drop 2 items\n for player in self.players:\n player.add_item_to_inventory(game_utils.get_random_item_component())\n player.add_item_to_inventory(game_utils.get_random_item_component())\n\n def place_item_on_champion(self, player, item_index, board_index):\n item = player.items[item_index]\n champ = player.board[board_index]\n champ.place_item_on_champion(item)\n player.items[item_index] = 0\n player.items.sort(reverse=True)\n\n def distribute_income(self):\n if self.stage == 1:\n for player in self.players: \n player.gold += 3\n else:\n for player in self.players:\n player.gold += player.income\n\n def check_game_over(self):\n alive_players = 0\n for player in self.players:\n if not player.is_eliminated:\n alive_players += 1\n\n if alive_players <= 1:\n # Game is over, add the last alive player to placements and return\n # True\n for player in self.players:\n if not player.is_eliminated:\n self.placements.insert(0, player)\n\n return True\n else:\n return False\n\n def roll_players_shop(self, player):\n shop = []\n players_odds = SET_DATA[\"ROLL_ODDS\"][player.level]\n\n # TODO: player can't roll units they have 3 star of\n\n # First roll rarity, then roll a champion in that rarity\n for i in range(5):\n champ_level = random.choices([1, 2, 3, 4, 5], players_odds)\n champ = random.choice(self.champion_pool[champ_level[0]])\n shop.append(champ)\n\n player.shop = shop\n\n def roll_all_players_shops(self):\n for player in self.players:\n self.roll_players_shop(player)\n\n def simulate_combat_step(self):\n rewards = [0,0,0,0,0,0,0,0]\n\n if self.is_creep_round:\n for player in self.players:\n player.ready = True\n return rewards\n \n alive_players = [player for player in self.players if not player.is_eliminated]\n random.shuffle(alive_players)\n \n # Create ghost player if odd number of players\n if (len(alive_players) % 2) != 0:\n ghost_player = deepcopy(alive_players[-1])\n ghost_player.is_ghost = True\n alive_players.append(ghost_player)\n\n mm_pairs = [alive_players[i:i + 2] for i in range(0, len(alive_players), 2)] # create pairs\n\n for pair in mm_pairs:\n player_one = pair[0]\n player_two = pair[1]\n\n p1_win_probability, p2_win_probability = self.fight_predictor.predict_tft_fight(\n player_one,\n player_two\n )\n if p1_win_probability > .5:\n winner_probability = p1_win_probability\n winner = player_one\n loser = player_two\n else:\n winner_probability = p2_win_probability\n winner = player_two\n loser = player_one\n\n winner.update_streak(True)\n winner.gold += 1\n rewards[winner.id] += STAGE_WIN_REWARD[self.stage-1]\n\n\n # Calculate health loss: stage damage plus damage scaled by units lost\n loser.update_streak(False)\n health_loss = 0\n health_loss += self.stage_damage\n # Approximate number of units lost by. 
Examples:\n # .8 * 4 units on board = 3.2 = 3 unit loss\n # .5 * 4 units on board = 2 unit loss\n # .8 * 8 units on board = 6.4 = 6 unit loss\n # .5 * 8 units on board = 4 = 4 unit loss\n units_lost_by = max(1, math.floor(winner_probability * winner.num_units_on_board))\n health_loss += self.get_damage_for_x_unit_loss(units_lost_by)\n loser.health -= health_loss\n # penalize the loser symmetrically to the winner's reward\n rewards[loser.id] -= STAGE_WIN_REWARD[self.stage-1]\n\n if loser.is_eliminated and not loser.is_ghost:\n self.placements.insert(0,loser)\n # TODO: ADD PLAYER UNITS BACK TO POOL\n\n player_one.ready = False\n player_two.ready = False\n player_one.actions_since_last_ready = 0\n player_two.actions_since_last_ready = 0\n\n return rewards\n\n def purchase_champion_at_shop_index(self, player, shop_index):\n # TODO: Player can buy champ if bench is full but buying will\n # upgrade the champion\n if player.bench_is_full and player.board_is_full:\n raise Exception(\"Tried to buy champ with full bench and board\")\n\n players_shop = player.shop\n champion = deepcopy(players_shop[shop_index])\n\n if player.gold >= champion.cost:\n # Purchase the hero: \n # deduct gold, add to players bench, and remove from champion pool\n player.gold = player.gold - champion.cost\n player.shop[shop_index] = None\n\n if not player.board_is_full:\n player.add_champion_to_board(champion)\n elif not player.bench_is_full:\n player.add_champion_to_bench(champion)\n\n self.remove_champion_from_pool(champion)\n self.maybe_upgrade_champions_for_player(player, champion)\n else:\n raise Exception(\"tried to buy champ couldn't afford\")\n\n return\n\n def remove_champion_from_pool(self, champion):\n for i, champ in enumerate(self.champion_pool[champion.cost]):\n if champ.champion_id == champion.champion_id:\n del self.champion_pool[champion.cost][i]\n return\n\n raise Exception(\"Did not find champ in pool to remove!\")\n\n def maybe_upgrade_champions_for_player(self, player, champion):\n bench = player.bench\n board = player.board\n\n num_champs = 0\n bench_locations = []\n board_locations = []\n for index, c in enumerate(bench):\n if c and champion.is_same_level_and_champ(c):\n num_champs += 1\n bench_locations.append(index)\n\n for index, c in enumerate(board):\n if c and champion.is_same_level_and_champ(c):\n num_champs += 1\n board_locations.append(index)\n\n if num_champs == 3:\n for b in bench_locations:\n player.bench[b] = None\n for b in board_locations:\n player.board[b] = None\n\n upgraded_champ = Champion(champion.champion_id, champion.name, champion.level + 1, champion.cost, champion.traits)\n player.add_champion_to_bench(upgraded_champ)\n self.maybe_upgrade_champions_for_player(player, champion) # Check again for double upgrade\n\n else:\n return\n\n def sell_champion_at_bench_index(self, player, bench_index):\n champion = player.bench[bench_index]\n if champion:\n player.bench[bench_index] = None\n player.gold += champion.sell_value\n\n for item in champion.champions_items:\n if item != 0:\n if 0 in player.items:\n 
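# return the sold champion's items to the first free inventory slot\n 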
player.items[player.items.index(0)] = item\n player.items.sort(reverse=True)\n else:\n raise Exception(\"No place to add item to bench\")\n\n self.champion_pool[champion.cost] += champion.champions_to_return_to_pool_when_sold # add units back to pool\n else:\n raise Exception(\"tried to sell hero at board index where none existed\", player.id)\n\n def place_champion_from_bench_to_board(self, player, bench_index):\n # TODO: swapping requires an empty board/bench slot\n if self.board_is_full:\n raise Exception(\"Should not be able to swap champion to board if full\")\n champion = player.bench[bench_index]\n player.bench[bench_index] = None\n player.add_champion_to_board(champion)\n\n def place_champion_from_board_to_bench(self, player, board_index):\n # TODO: swapping requires an empty bench/bench slot\n if self.bench_is_full:\n raise Exception(\"Should not be able to swap champion to bench if full\")\n champion = player.board[board_index]\n player.board[board_index] = None\n player.add_champion_to_bench(champion)\n\n # action - int - action integer\n def execute_agent_action(self, player, action):\n if action == ACTIONS_MAP[\"BUY_SHOP_POS_1\"]:\n self.purchase_champion_at_shop_index(player,0)\n\n elif action == ACTIONS_MAP[\"BUY_SHOP_POS_2\"]:\n self.purchase_champion_at_shop_index(player,1)\n\n elif action == ACTIONS_MAP[\"BUY_SHOP_POS_3\"]:\n self.purchase_champion_at_shop_index(player,2)\n\n elif action == ACTIONS_MAP[\"BUY_SHOP_POS_4\"]:\n self.purchase_champion_at_shop_index(player,3)\n\n elif action == ACTIONS_MAP[\"BUY_SHOP_POS_5\"]:\n self.purchase_champion_at_shop_index(player,4) \n\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_1\"]:\n self.sell_champion_at_bench_index(player, 0)\n\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_2\"]:\n self.sell_champion_at_bench_index(player, 1)\n\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_3\"]:\n self.sell_champion_at_bench_index(player, 2)\n\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_4\"]:\n self.sell_champion_at_bench_index(player, 3)\n\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_5\"]:\n self.sell_champion_at_bench_index(player, 4)\n\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_6\"]:\n self.sell_champion_at_bench_index(player, 5) \n\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_7\"]:\n self.sell_champion_at_bench_index(player, 6)\n\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_8\"]:\n self.sell_champion_at_bench_index(player, 7)\n\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_9\"]:\n self.sell_champion_at_bench_index(player, 8)\n\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_1\"]:\n self.sell_champion_at_board_index(player, 0)\n\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_2\"]:\n self.sell_champion_at_board_index(player, 1)\n\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_3\"]:\n self.sell_champion_at_board_index(player, 2)\n\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_4\"]:\n self.sell_champion_at_board_index(player, 3)\n\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_5\"]:\n self.sell_champion_at_board_index(player, 4)\n\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_6\"]:\n self.sell_champion_at_board_index(player, 5)\n\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_7\"]:\n self.sell_champion_at_board_index(player, 6)\n\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_8\"]:\n self.sell_champion_at_board_index(player, 7)\n\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_9\"]:\n self.sell_champion_at_board_index(player, 8)\n\n elif action == ACTIONS_MAP[\"BENCH_1_TO_BOARD\"]:\n 
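# bench -> board moves: the action index selects which bench slot to move\n 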
self.place_champion_from_bench_to_board(player, 0)\n\n elif action == ACTIONS_MAP[\"BENCH_2_TO_BOARD\"]:\n self.place_champion_from_bench_to_board(player, 1)\n\n elif action == ACTIONS_MAP[\"BENCH_3_TO_BOARD\"]:\n self.place_champion_from_bench_to_board(player, 2)\n\n elif action == ACTIONS_MAP[\"BENCH_4_TO_BOARD\"]:\n self.place_champion_from_bench_to_board(player, 3)\n\n elif action == ACTIONS_MAP[\"BENCH_5_TO_BOARD\"]:\n self.place_champion_from_bench_to_board(player, 4)\n\n elif action == ACTIONS_MAP[\"BENCH_6_TO_BOARD\"]:\n self.place_champion_from_bench_to_board(player, 5)\n\n elif action == ACTIONS_MAP[\"BENCH_7_TO_BOARD\"]:\n self.place_champion_from_bench_to_board(player, 6)\n\n elif action == ACTIONS_MAP[\"BENCH_8_TO_BOARD\"]:\n self.place_champion_from_bench_to_board(player, 7)\n\n elif action == ACTIONS_MAP[\"BENCH_9_TO_BOARD\"]:\n self.place_champion_from_bench_to_board(player, 8)\n\n elif action == ACTIONS_MAP[\"BOARD_1_TO_BENCH\"]:\n self.place_champion_from_board_to_bench(player, 0)\n\n elif action == ACTIONS_MAP[\"BOARD_2_TO_BENCH\"]:\n self.place_champion_from_board_to_bench(player, 1)\n\n elif action == ACTIONS_MAP[\"BOARD_3_TO_BENCH\"]:\n self.place_champion_from_board_to_bench(player, 2)\n\n elif action == ACTIONS_MAP[\"BOARD_4_TO_BENCH\"]:\n self.place_champion_from_board_to_bench(player, 3)\n\n elif action == ACTIONS_MAP[\"BOARD_5_TO_BENCH\"]:\n self.place_champion_from_board_to_bench(player, 4)\n\n elif action == ACTIONS_MAP[\"BOARD_6_TO_BENCH\"]:\n self.place_champion_from_board_to_bench(player,5)\n\n elif action == ACTIONS_MAP[\"BOARD_7_TO_BENCH\"]:\n self.place_champion_from_board_to_bench(player,6) \n\n elif action == ACTIONS_MAP[\"BOARD_8_TO_BENCH\"]:\n self.place_champion_from_board_to_bench(player,7)\n\n elif action == ACTIONS_MAP[\"BOARD_9_TO_BENCH\"]:\n self.place_champion_from_board_to_bench(player,8)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_1\"]:\n self.place_item_on_champion(player,0,0)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_2\"]:\n self.place_item_on_champion(player,0,1)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_3\"]:\n self.place_item_on_champion(player,0,2)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_4\"]:\n self.place_item_on_champion(player,0,3)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_5\"]:\n self.place_item_on_champion(player,0,4)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_6\"]:\n self.place_item_on_champion(player,0,5)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_7\"]:\n self.place_item_on_champion(player,0,6)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_8\"]:\n self.place_item_on_champion(player,0,7)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_9\"]:\n self.place_item_on_champion(player,0,8)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_1\"]:\n self.place_item_on_champion(player,1,0)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_2\"]:\n self.place_item_on_champion(player,1,1)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_3\"]:\n self.place_item_on_champion(player,1,2)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_4\"]:\n self.place_item_on_champion(player,1,3)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_5\"]:\n self.place_item_on_champion(player,1,4)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_6\"]:\n self.place_item_on_champion(player,1,5)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_7\"]:\n self.place_item_on_champion(player,1,6)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_8\"]:\n self.place_item_on_champion(player,1,7)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_9\"]:\n 
self.place_item_on_champion(player,1,8)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_1\"]:\n self.place_item_on_champion(player,2,0)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_2\"]:\n self.place_item_on_champion(player,2,1)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_3\"]:\n self.place_item_on_champion(player,2,2)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_4\"]:\n self.place_item_on_champion(player,2,3)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_5\"]:\n self.place_item_on_champion(player,2,4)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_6\"]:\n self.place_item_on_champion(player,2,5)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_7\"]:\n self.place_item_on_champion(player,2,6)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_8\"]:\n self.place_item_on_champion(player,2,7)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_9\"]:\n self.place_item_on_champion(player,2,8)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_1\"]:\n self.place_item_on_champion(player,3,0)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_2\"]:\n self.place_item_on_champion(player,3,1)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_3\"]:\n self.place_item_on_champion(player,3,2)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_4\"]:\n self.place_item_on_champion(player,3,3)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_5\"]:\n self.place_item_on_champion(player,3,4)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_6\"]:\n self.place_item_on_champion(player,3,5)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_7\"]:\n self.place_item_on_champion(player,3,6)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_8\"]:\n self.place_item_on_champion(player,3,7)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_9\"]:\n self.place_item_on_champion(player,3,8)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_1\"]:\n self.place_item_on_champion(player,4,0)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_2\"]:\n self.place_item_on_champion(player,4,1)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_3\"]:\n self.place_item_on_champion(player,4,2)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_4\"]:\n self.place_item_on_champion(player,4,3)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_5\"]:\n self.place_item_on_champion(player,4,4)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_6\"]:\n self.place_item_on_champion(player,4,5)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_7\"]:\n self.place_item_on_champion(player,4,6)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_8\"]:\n self.place_item_on_champion(player,4,7)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_9\"]:\n self.place_item_on_champion(player,4,8)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_1\"]:\n self.place_item_on_champion(player,5,0)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_2\"]:\n self.place_item_on_champion(player,5,1)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_3\"]:\n self.place_item_on_champion(player,5,2)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_4\"]:\n self.place_item_on_champion(player,5,3)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_5\"]:\n self.place_item_on_champion(player,5,4)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_6\"]:\n self.place_item_on_champion(player,5,5)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_7\"]:\n self.place_item_on_champion(player,5,6)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_8\"]:\n self.place_item_on_champion(player,5,7)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_9\"]:\n self.place_item_on_champion(player,5,8)\n\n elif action == ACTIONS_MAP[\"REROLL\"]:\n self.roll_players_shop(player)\n\n elif action 
== ACTIONS_MAP[\"BUY_EXP\"]:\n player.buy_exp()\n\n elif action == ACTIONS_MAP[\"READY_NEXT_STAGE\"]:\n player.ready = True\n\n else:\n print(\"Unrecognized action:\", action)\n\n @property\n def is_all_players_ready(self):\n return all([True if player.ready else False for player in self.players])\n\n @property\n def is_creep_round(self):\n if self.stage == 1 and self.round in [1,2,3]:\n return True\n \n elif self.round == 6:\n return True\n \n return False\n\n @property\n def stage_damage(self):\n stage_to_damage = [0,0,2,3,5,8,15,15,25,35,55,75,100]\n return stage_to_damage[self.stage-1]\n \n def get_damage_for_x_unit_loss(self, num_units):\n x_unit_lost_damage = [0, 2, 4, 6, 8, 10, 11, 12, 13, 14, 15]\n return x_unit_lost_damage[num_units]\n\n def print_board_state(self):\n print(\"====================\")\n print(\"Game State\")\n print(\"====================\")\n print(f\"Stage: {self.stage}-{self.round}\")\n print(\"====================\")\n for player in self.players:\n player.print_player()\n print(\"--------------------\")\n\nclass Player():\n def __init__(self, id):\n self.id = id\n self.gold = 3\n self.level = 1\n self.exp = 0\n self.shop = [None]*5\n self.board = [None]*9\n self.bench = [None]*9\n self.health = 100\n self.ready = False\n self.items = [0]*14 # only first six items are shown to agents and interacted with\n self.is_ghost = False\n self.streak = 0 # negative is lose streak, positive is win streak\n\n # Metric count actions since last ready, maybe use this \n # to deprioritize taking useless actions back and forth?\n self.actions_since_last_ready = 0\n\n def buy_exp(self):\n if self.gold < 4:\n raise Exception(\"Tried to buy exp without enough gold\")\n elif self.level == 9:\n raise Exception(\"Tried to buy exp at level 9\")\n else:\n self.gold -= 4\n self.add_exp(4)\n\n def add_exp(self, exp):\n self.exp = self.exp + exp\n if self.level == 1:\n self.level = 2\n self.exp = 0\n\n elif self.level == 2 and self.exp >= 2:\n self.level = 3\n self.exp = self.exp - 2\n\n elif self.level == 3 and self.exp >= 6:\n self.level = 4\n self.exp = self.exp - 6\n\n elif self.level == 4 and self.exp >= 10:\n self.level = 5\n self.exp = self.exp - 10\n\n elif self.level == 5 and self.exp >= 20:\n self.level = 6\n self.exp = self.exp - 20\n\n elif self.level == 6 and self.exp >= 36:\n self.level = 7\n self.exp = self.exp - 36\n\n elif self.level == 7 and self.exp >= 56:\n self.level = 8\n self.exp = self.exp - 56\n\n elif self.level == 8 and self.exp >= 80:\n self.level = 9\n self.exp = self.exp - 80\n\n def update_streak(self, is_win):\n if is_win:\n if self.streak <= 0:\n self.streak = 1\n else:\n self.streak += 1\n else:\n if self.streak >= 0:\n self.streak = -1\n else:\n self.streak -= 1\n\n def add_item_to_inventory(self, item):\n if 0 in self.items:\n self.items[self.items.index(0)] = item\n else:\n print(\"Players inventory is full, throwing away item\")\n return\n\n\n def add_champion_to_bench(self, champion):\n for i, bench_occupant in enumerate(self.bench):\n if bench_occupant == None:\n self.bench[i] = champion\n return\n\n def add_champion_to_board(self, champion):\n for i, board_occupant in enumerate(self.board):\n if board_occupant == None:\n self.board[i] = champion\n return\n\n @property\n def bench_is_full(self):\n return (None not in self.bench)\n \n @property\n def board_is_full(self):\n champions_on_board = 0\n for c in self.board:\n if c == None:\n champions_on_board += 1\n\n if champions_on_board >= self.level:\n return True\n\n @property\n def income(self):\n 
income = 5\n if abs(self.streak) == 5:\n income += 3\n elif abs(self.streak) == 4:\n income += 2\n elif abs(self.streak) > 1:\n income += 1\n\n # interest with max at 5\n income += min((self.gold // 10), 5)\n return income\n \n @property\n def is_eliminated(self):\n return self.health <= 0\n \n @property\n def num_units_on_board(self):\n return len([i for i in self.board if i])\n \n def print_player(self):\n print(f\"Player: {self.id} | Level: {self.level} | Exp: {self.exp} | Health: {self.health} | Gold: {self.gold} | Streak: {self.streak}\")\n print(\"Board:\", [str(c) for c in self.board])\n print(\"Bench:\", [str(c) for c in self.bench])\n print(\"Shop:\", [str(c) for c in self.shop])\n print(\"Items\", [i for i in self.items])\n print(f\"Ready: {self.ready}\")\n\n\nclass Champion():\n def __init__(self, champion_id, name, level, cost, traits):\n self.champion_id = champion_id\n self.level = level\n self.name = name\n self.cost = cost\n self.traits = traits\n self.items = [0] * 3 # array of item_ids (1532)\n\n def __str__(self):\n return f\"Level {self.level} {self.champion_id} {self.items}\"\n\n def is_same_level_and_champ(self, champ):\n return self.level == champ.level and self.champion_id == champ.champion_id\n\n def place_item_on_champion(self, item):\n for index, citem in enumerate(self.items):\n if citem in [1,2,3,4,5,6,7,8,9]:\n if item in [1,2,3,4,5,6,7,8,9]:\n combined = game_utils.combine_items(citem, item)\n self.items[index] = combined\n return\n\n elif citem == 0:\n self.items[index] = item\n return\n\n raise Exception(\"Could not add item to champ\")\n\n @property\n def champions_items(self):\n return [i for i in self.items if i != 0]\n\n @property\n def sell_value(self):\n if self.level > 1:\n return self.cost - 1\n else:\n return self.cost\n \n @property\n def champions_to_return_to_pool_when_sold(self):\n champions = []\n if self.level == 1:\n champions.append(Champion(self.champion_id, self.name, 1, self.cost, self.traits))\n elif self.level == 2:\n for i in range(3):\n champions.append(Champion(self.champion_id, self.name, 1, self.cost, self.traits))\n elif self.level == 3:\n for i in range(9):\n champions.append(Champion(self.champion_id, self.name, 1, self.cost, self.traits))\n\n return champions\n\n\n\ndef is_action_legal(player, action):\n # Player puchasing champ must have a bench slot and enough gold. 
TODO:\n # technically they can buy if bench is full and buying champ levels champ up\n if action == ACTIONS_MAP[\"BUY_SHOP_POS_1\"]:\n return ((not player.bench_is_full or not player.board_is_full) and player.shop[0] != None and player.shop[0].cost <= player.gold)\n elif action == ACTIONS_MAP[\"BUY_SHOP_POS_2\"]:\n return ((not player.bench_is_full or not player.board_is_full) and player.shop[1] != None and player.shop[1].cost <= player.gold)\n elif action == ACTIONS_MAP[\"BUY_SHOP_POS_3\"]:\n return ((not player.bench_is_full or not player.board_is_full) and player.shop[2] != None and player.shop[2].cost <= player.gold)\n elif action == ACTIONS_MAP[\"BUY_SHOP_POS_4\"]:\n return ((not player.bench_is_full or not player.board_is_full) and player.shop[3] != None and player.shop[3].cost <= player.gold)\n elif action == ACTIONS_MAP[\"BUY_SHOP_POS_5\"]:\n return ((not player.bench_is_full or not player.board_is_full) and player.shop[4] != None and player.shop[4].cost <= player.gold)\n\n # Player can sell bench at position if it is not empty\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_1\"]:\n return (player.bench[0] != None)\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_2\"]:\n return (player.bench[1] != None)\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_3\"]:\n return (player.bench[2] != None)\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_4\"]:\n return (player.bench[3] != None)\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_5\"]:\n return (player.bench[4] != None)\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_6\"]:\n return (player.bench[5] != None)\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_7\"]:\n return (player.bench[6] != None)\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_8\"]:\n return (player.bench[7] != None)\n elif action == ACTIONS_MAP[\"SELL_BENCH_POS_9\"]:\n return (player.bench[8] != None)\n\n # Player can sell champion at board position x if not empty\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_1\"]:\n return (player.board[0] != None)\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_2\"]:\n return (player.board[1] != None)\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_3\"]:\n return (player.board[2] != None)\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_4\"]:\n return (player.board[3] != None)\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_5\"]:\n return (player.board[4] != None)\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_6\"]:\n return (player.board[5] != None)\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_7\"]:\n return (player.board[6] != None)\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_8\"]:\n return (player.board[7] != None)\n elif action == ACTIONS_MAP[\"SELL_CHAMPION_POS_9\"]:\n return (player.board[8] != None)\n\n # Can move champion on bench to board if board is not full\n # and the bench champion exists\n elif action == ACTIONS_MAP[\"BENCH_1_TO_BOARD\"]:\n return (not player.board_is_full and player.bench[0] != None)\n elif action == ACTIONS_MAP[\"BENCH_2_TO_BOARD\"]:\n return (not player.board_is_full and player.bench[1] != None)\n elif action == ACTIONS_MAP[\"BENCH_3_TO_BOARD\"]:\n return (not player.board_is_full and player.bench[2] != None)\n elif action == ACTIONS_MAP[\"BENCH_4_TO_BOARD\"]:\n return (not player.board_is_full and player.bench[3] != None)\n elif action == ACTIONS_MAP[\"BENCH_5_TO_BOARD\"]:\n return (not player.board_is_full and player.bench[4] != None)\n elif action == ACTIONS_MAP[\"BENCH_6_TO_BOARD\"]:\n return (not player.board_is_full and player.bench[5] != None)\n elif action == ACTIONS_MAP[\"BENCH_7_TO_BOARD\"]:\n return (not 
player.board_is_full and player.bench[6] != None)\n elif action == ACTIONS_MAP[\"BENCH_8_TO_BOARD\"]:\n return (not player.board_is_full and player.bench[7] != None)\n elif action == ACTIONS_MAP[\"BENCH_9_TO_BOARD\"]:\n return (not player.board_is_full and player.bench[8] != None)\n\n # Can move champion on bench to board if bench is not full\n # and the board champion exists\n elif action == ACTIONS_MAP[\"BOARD_1_TO_BENCH\"]:\n return (not player.bench_is_full and player.board[0] != None)\n elif action == ACTIONS_MAP[\"BOARD_2_TO_BENCH\"]:\n return (not player.bench_is_full and player.board[1] != None)\n elif action == ACTIONS_MAP[\"BOARD_3_TO_BENCH\"]:\n return (not player.bench_is_full and player.board[2] != None)\n elif action == ACTIONS_MAP[\"BOARD_4_TO_BENCH\"]:\n return (not player.bench_is_full and player.board[3] != None)\n elif action == ACTIONS_MAP[\"BOARD_5_TO_BENCH\"]:\n return (not player.bench_is_full and player.board[4] != None)\n elif action == ACTIONS_MAP[\"BOARD_6_TO_BENCH\"]:\n return (not player.bench_is_full and player.board[5] != None)\n elif action == ACTIONS_MAP[\"BOARD_7_TO_BENCH\"]:\n return (not player.bench_is_full and player.board[6] != None)\n elif action == ACTIONS_MAP[\"BOARD_8_TO_BENCH\"]:\n return (not player.bench_is_full and player.board[7] != None)\n elif action == ACTIONS_MAP[\"BOARD_9_TO_BENCH\"]:\n return (not player.bench_is_full and player.board[8] != None)\n \n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_1\"]:\n return _can_player_place_item_on_board_unit(player, 0, 0)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_2\"]:\n return _can_player_place_item_on_board_unit(player, 0, 1)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_3\"]:\n return _can_player_place_item_on_board_unit(player, 0, 2)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_4\"]:\n return _can_player_place_item_on_board_unit(player, 0, 3) \n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_5\"]:\n return _can_player_place_item_on_board_unit(player, 0, 4)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_6\"]:\n return _can_player_place_item_on_board_unit(player, 0, 5)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_7\"]:\n return _can_player_place_item_on_board_unit(player, 0, 6)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_8\"]:\n return _can_player_place_item_on_board_unit(player, 0, 7)\n elif action == ACTIONS_MAP[\"ITEM_1_TO_BOARD_9\"]:\n return _can_player_place_item_on_board_unit(player, 0, 8)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_1\"]:\n return _can_player_place_item_on_board_unit(player, 1, 0)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_2\"]:\n return _can_player_place_item_on_board_unit(player, 1, 1)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_3\"]:\n return _can_player_place_item_on_board_unit(player, 1, 2)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_4\"]:\n return _can_player_place_item_on_board_unit(player, 1, 3)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_5\"]:\n return _can_player_place_item_on_board_unit(player, 1, 4)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_6\"]:\n return _can_player_place_item_on_board_unit(player, 1, 5)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_7\"]:\n return _can_player_place_item_on_board_unit(player, 1, 6)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_8\"]:\n return _can_player_place_item_on_board_unit(player, 1, 7)\n elif action == ACTIONS_MAP[\"ITEM_2_TO_BOARD_9\"]:\n return _can_player_place_item_on_board_unit(player, 1, 8)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_1\"]:\n return 
_can_player_place_item_on_board_unit(player, 2, 0)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_2\"]:\n return _can_player_place_item_on_board_unit(player, 2, 1)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_3\"]:\n return _can_player_place_item_on_board_unit(player, 2, 2)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_4\"]:\n return _can_player_place_item_on_board_unit(player, 2, 3)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_5\"]:\n return _can_player_place_item_on_board_unit(player, 2, 4)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_6\"]:\n return _can_player_place_item_on_board_unit(player, 2, 5)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_7\"]:\n return _can_player_place_item_on_board_unit(player, 2, 6)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_8\"]:\n return _can_player_place_item_on_board_unit(player, 2, 7)\n elif action == ACTIONS_MAP[\"ITEM_3_TO_BOARD_9\"]:\n return _can_player_place_item_on_board_unit(player, 2, 8)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_1\"]:\n return _can_player_place_item_on_board_unit(player, 3, 0)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_2\"]:\n return _can_player_place_item_on_board_unit(player, 3, 1)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_3\"]:\n return _can_player_place_item_on_board_unit(player, 3, 2)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_4\"]:\n return _can_player_place_item_on_board_unit(player, 3, 3)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_5\"]:\n return _can_player_place_item_on_board_unit(player, 3, 4)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_6\"]:\n return _can_player_place_item_on_board_unit(player, 3, 5)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_7\"]:\n return _can_player_place_item_on_board_unit(player, 3, 6)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_8\"]:\n return _can_player_place_item_on_board_unit(player, 3, 7)\n elif action == ACTIONS_MAP[\"ITEM_4_TO_BOARD_9\"]:\n return _can_player_place_item_on_board_unit(player, 3, 8)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_1\"]:\n return _can_player_place_item_on_board_unit(player, 4, 0)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_2\"]:\n return _can_player_place_item_on_board_unit(player, 4, 1)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_3\"]:\n return _can_player_place_item_on_board_unit(player, 4, 2)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_4\"]:\n return _can_player_place_item_on_board_unit(player, 4, 3)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_5\"]:\n return _can_player_place_item_on_board_unit(player, 4, 4)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_6\"]:\n return _can_player_place_item_on_board_unit(player, 4, 5)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_7\"]:\n return _can_player_place_item_on_board_unit(player, 4, 6)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_8\"]:\n return _can_player_place_item_on_board_unit(player, 4, 7)\n elif action == ACTIONS_MAP[\"ITEM_5_TO_BOARD_9\"]:\n return _can_player_place_item_on_board_unit(player, 4, 8)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_1\"]:\n return _can_player_place_item_on_board_unit(player, 5, 0)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_2\"]:\n return _can_player_place_item_on_board_unit(player, 5, 1)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_3\"]:\n return _can_player_place_item_on_board_unit(player, 5, 2)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_4\"]:\n return _can_player_place_item_on_board_unit(player, 5, 3)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_5\"]:\n return _can_player_place_item_on_board_unit(player, 5, 
4)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_6\"]:\n return _can_player_place_item_on_board_unit(player, 5, 5)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_7\"]:\n return _can_player_place_item_on_board_unit(player, 5, 6)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_8\"]:\n return _can_player_place_item_on_board_unit(player, 5, 7)\n elif action == ACTIONS_MAP[\"ITEM_6_TO_BOARD_9\"]:\n return _can_player_place_item_on_board_unit(player, 5, 8)\n\n elif action == ACTIONS_MAP[\"REROLL\"]:\n return (player.gold >= 2)\n\n elif action == ACTIONS_MAP[\"BUY_EXP\"]:\n return (player.gold >= 4 and player.level < 9)\n\n elif action == ACTIONS_MAP[\"READY_NEXT_STAGE\"]:\n return True\n else:\n raise Exception(\"UNRECOGNIZED ACTION\", action)\n\ndef _can_player_place_item_on_board_unit(player, item_index, board_index):\n item = player.items[item_index]\n champ = player.board[board_index]\n\n if item == 0:\n return False\n if champ == None:\n return False\n\n for c_i in champ.items:\n if c_i == 0: # empty slot\n return True\n elif c_i < 10: #component\n if item < 10: #component\n return True\n return False\n","repo_name":"oncepatriot/TFT-AI","sub_path":"app/environments/teamfighttactics/teamfighttactics/envs/game_engine.py","file_name":"game_engine.py","file_ext":"py","file_size_in_byte":45963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"40705424674","text":"import sys\n# raise the allowed recursion depth\nsys.setrecursionlimit(10**5+10)\n# speed up input processing\nN, M = [int(_) for _ in input().split()]\n\n# store the adjacency relation as an adjacency list\nlst_edge = [[] for _ in range(N)]\n\nfor _ in range(M):\n # make the indices 0-based\n x, y = [int(_)-1 for _ in input().split()] \n lst_edge[x].append(y)\n \n# dp[v] := maximum length of a directed path starting at node v\n# initialize with -1 = unvisited\ndp = [-1] * N\n\n# memoized recursion\ndef rec(v):\n # already computed\n if dp[v] != -1:\n return dp[v]\n ans = 0\n lst_nv = lst_edge[v] # nodes reachable from node v\n for nv in lst_nv:\n ans = max(ans, rec(nv) + 1) # advance one step\n dp[v] = ans # maximum directed-path length starting at node v\n return dp[v]\n \n# compute the answer over every node\nans = 0\nfor v in range(N):\n ans = max(ans, rec(v))\n \nprint(ans)\n ","repo_name":"hasesuns/atcoder","sub_path":"submitted/dp/g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"25905838155","text":"import matplotlib.pyplot as plt\n\nx=[1,2,3]\ny=[2,4,1]\n#plotting the line 1 points\nplt.plot(x,y,label=\"line 1\")\n\nx1=[1,2,3]\ny1=[4,1,3]\nplt.plot(x1,y1,label=\"line 2\")\n\n#naming x axis\nplt.xlabel(\"x - axis\")\nplt.ylabel(\"y - axis\")\n\nplt.title(\"My First Graph\")\n#show a legend \nplt.legend()\n\nplt.show()\n","repo_name":"Sreelekshmi92/Full_Stack","sub_path":"Python/Day2/lineplot.py","file_name":"lineplot.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
{"seq_id":"42967633900","text":"from flask import Flask, render_template, redirect\nfrom flask_pymongo import PyMongo\nimport scrape_mars\n\napp = Flask(__name__)\n\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/mars_app\"\nmongo = PyMongo(app)\n\n# prevent cached responses\nif app.config[\"DEBUG\"]:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate, public, max-age=0\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n# clear db 
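on startup so each scrape run starts from a fresh collection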
\nmongo.db.drop_collection(\"mars_db\")\n\n@app.route(\"/\")\ndef index():\n # find one document from our mongo db and return it.\n mars_db = mongo.db.mars_db.find_one()\n # pass that listing to render_template\n return render_template(\"index.html\", mars_db=mars_db)\n\n\n@app.route(\"/scrape\")\ndef scraper():\n # create a listings database\n mars_db = mongo.db.mars_db\n # call the scrape function in our scrape_phone file. This will scrape and save to mongo.\n mars_data = scrape_mars.scrape()\n # update our listings with the data that is being scraped.\n mars_db.replace_one({}, mars_data, upsert=True)\n # return a message to our page so we know it was successful.\n return redirect(\"/\", code=302)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"chadbarlow/gt-data-bootcamp_challenge-12_web-scraping-and-mongodb","sub_path":"mission-to-mars/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"} +{"seq_id":"9060461043","text":"from oslo_policy import policy\n\nfrom cinder.policies import base\n\n\nCREATE_POLICY = 'group:create_group_snapshot'\nDELETE_POLICY = 'group:delete_group_snapshot'\nUPDATE_POLICY = 'group:update_group_snapshot'\nGET_POLICY = 'group:get_group_snapshot'\nGET_ALL_POLICY = 'group:get_all_group_snapshots'\n\n\ngroup_snapshots_policies = [\n policy.DocumentedRuleDefault(\n name=GET_ALL_POLICY,\n check_str=base.RULE_ADMIN_OR_OWNER,\n description=\"List group snapshots.\",\n operations=[\n {\n 'method': 'GET',\n 'path': '/group_snapshots'\n },\n {\n 'method': 'GET',\n 'path': '/group_snapshots/detail'\n }\n ]),\n policy.DocumentedRuleDefault(\n name=CREATE_POLICY,\n check_str=\"\",\n description=\"Create group snapshot.\",\n operations=[\n {\n 'method': 'POST',\n 'path': '/group_snapshots'\n }\n ]),\n policy.DocumentedRuleDefault(\n name=GET_POLICY,\n check_str=base.RULE_ADMIN_OR_OWNER,\n description=\"Show group snapshot.\",\n operations=[\n {\n 'method': 'GET',\n 'path': '/group_snapshots/{group_snapshot_id}'\n }\n ]),\n policy.DocumentedRuleDefault(\n name=DELETE_POLICY,\n check_str=base.RULE_ADMIN_OR_OWNER,\n description=\"Delete group snapshot.\",\n operations=[\n {\n 'method': 'DELETE',\n 'path': '/group_snapshots/{group_snapshot_id}'\n }\n ]),\n policy.DocumentedRuleDefault(\n name=UPDATE_POLICY,\n check_str=base.RULE_ADMIN_OR_OWNER,\n description=\"Update group snapshot.\",\n operations=[\n {\n 'method': 'PUT',\n 'path': '/group_snapshots/{group_snapshot_id}'\n }\n ]),\n]\n\n\ndef list_rules():\n return group_snapshots_policies\n","repo_name":"inspur-storage/cinder","sub_path":"cinder/policies/group_snapshots.py","file_name":"group_snapshots.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"} +{"seq_id":"74469884842","text":"import bs4\nfrom os import system, walk\n\nbaseURL = 'https://2020.igem.org/wiki/index.php?action=edit&title=Team:Fudan'\n\n\ndef fetch_create(filename):\n pageHead = '''\n\n\n \n \n \n %s\n\n\n
\n
\n root folder\n root_path = Path(perception_layer_path).parent.absolute()\n data_directory = os.path.join(root_path, 'hri/disambiguate/disambiguate/data')\n image_path = os.path.join(data_directory, image_name)\n\n return image_path\n\n\ndef test_maskRCNN():\n source_img = 'current_scene.jpg'\n image_path = get_image_path(source_img)\n\n target_image = np.array([])\n\n detectron = maskRCNN.MaskRCNN_detectron()\n output_maskRCNN = detectron.detect(target_image, image_path)\n\n target_image = skimage.io.imread(image_path)\n detectron.print_results_on_image(target_image, output_maskRCNN)\n\n assert 'banana' in output_maskRCNN['names']\n assert 'fork' in output_maskRCNN['names']\n assert 'knife' in output_maskRCNN['names']\n\n oracle_banana_box1 = [247, 288, 393, 452]\n oracle_banana_box2 = [760, 257, 919, 393]\n\n for i, name in enumerate(output_maskRCNN['names']):\n if name == 'banana':\n box = np.array(output_maskRCNN['rois'][i])\n bool1 = np.isclose(box, oracle_banana_box1, atol=10)\n bool1 = compress_bool_array(bool1)\n bool2 = np.isclose(box, oracle_banana_box2, atol=10)\n bool2 = compress_bool_array(bool2)\n\n assert bool1 is True or bool2 is True\n\n\ndef test_maskRCNN_rbgTOdepth():\n source_img = 'color_img.jpg'\n image_path = get_image_path(source_img)\n\n target_image = np.array([])\n\n detectron = maskRCNN.MaskRCNN_detectron()\n output_maskRCNN = detectron.detect(target_image, image_path)\n\n target_image = skimage.io.imread(image_path)\n detectron.print_results_on_image(target_image, copy(output_maskRCNN))\n\n assert 'banana' in output_maskRCNN['names']\n\n params = Parameters()\n params.normalize = False\n params.debug = True\n params.use_max = True\n\n points = []\n rotations = []\n\n classes = output_maskRCNN['names']\n for i, name in enumerate(classes):\n box = list(output_maskRCNN['rois'][i])\n mask = output_maskRCNN['masks'][:, :, i]\n\n point, rotation = compute_mask_frame(classes, name, box, mask, params)\n points.append(point)\n rotations.append(rotation)\n print(name)\n print(point, rotation)\n\n pil_image = Image.open(image_path)\n draw = ImageDraw.Draw(pil_image)\n for i, point in enumerate(points):\n corner1 = (point[0] - 5, point[1] - 5)\n corner2 = (point[0] + 5, point[1] + 5)\n draw.ellipse((corner1, corner2), fill='black', width=5)\n\n target_img = 'color_img_hr.jpg'\n save_path = get_image_path(target_img)\n pil_image.save(save_path)\n\n\ndef test_mask_heuristic():\n # TODO: maybe the method is more optimal if we send in the normalized mask.\n # To normalize a mask it should suffice to take the values in Masks that are\n # within the Bounding Box, since every value in Masks is a pixel of the image.\n source_img = 'current_scene.jpg'\n image_path = get_image_path(source_img)\n\n target_image = np.array([])\n\n detectron = maskRCNN.MaskRCNN_detectron()\n output_maskRCNN = detectron.detect(target_image, image_path)\n\n params = Parameters()\n params.normalize = False\n params.debug = True\n params.use_max = True\n\n points = []\n rotations = []\n\n classes = output_maskRCNN['names']\n for i, name in enumerate(output_maskRCNN['names']):\n box = list(output_maskRCNN['rois'][i])\n mask = output_maskRCNN['masks'][:, :, i]\n\n point, rotation = compute_mask_frame(classes, name, box, mask, params)\n points.append(point)\n rotations.append(rotation)\n print(point, rotation)\n\n\n pil_image = Image.open(image_path)\n draw = ImageDraw.Draw(pil_image)\n for i, point in enumerate(points):\n corner1 = (point[0] - 5, point[1] - 5)\n corner2 = (point[0] + 5, point[1] + 5)\n 
draw.ellipse((corner1, corner2), fill='black', width=5)\n\n target_img = 'current_scene_hr.jpg'\n save_path = get_image_path(target_img)\n pil_image.save(save_path)\n","repo_name":"matiov/disambiguate-BT-execution","sub_path":"perception_layer/object_recognition/object_detection/test/test_maskRCNN.py","file_name":"test_maskRCNN.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"40022200270","text":"#使用Python发送HTML格式的邮件\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom smtplib import SMTP_SSL\n\n#163邮箱smtp服务器\nhost_server = 'smtp.163.com'\n\n#发件人邮箱\nsender_163_mail='a19979313119@163.com'\n\n#pwd为授权码\npwd='qq1145530136'\n\n#收件人邮箱\nreceiver = '1145530136@qq.com'\n\n#邮件正文内容\nmail_content=\"你好,

这是使用python登录qq邮箱发送HTML格式邮件的测试:

\" \\\n \"

易百教程

\"\n\n#邮件标题\nmail_title='张忠华的邮件'\n\n\n#ssl登录\nsmtp = SMTP_SSL(host_server)\n\n#set_debuglevel()是用来调试的。参数值为1表示开启调试模式,参数值为0关闭调试模式\nsmtp.set_debuglevel(1)\nsmtp.ehlo()\nsmtp.login(sender_163_mail,pwd)\n\nmsg=MIMEText(mail_content,'html','utf-8')\nmsg[\"Subject\"] = Header(mail_title, 'utf-8')\nmsg[\"From\"] = sender_163_mail\nmsg[\"To\"] = Header(\"接收者测试\", 'utf-8') ## 接收者的别名\n\nsmtp.sendmail(sender_163_mail, receiver, msg.as_string())\nsmtp.quit()\n","repo_name":"zhangzhonghua1999/webapp","sub_path":"hua/python-1/SMTP-3.py","file_name":"SMTP-3.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74021999786","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\n__author__ = \"bibow\"\n\nfrom graphene import (\n ObjectType,\n Field,\n List,\n String,\n Int,\n Decimal,\n DateTime,\n Boolean,\n)\nfrom silvaengine_utility import JSON\n\n\nclass SelectValueType(ObjectType):\n value = String()\n value_id = String()\n\n\nclass FunctionRequestType(ObjectType):\n function_name = String()\n request_id = String()\n record_type = String()\n variables = String()\n status = String()\n data = List(JSON)\n internal_ids = List(String)\n log = String()\n request_page_size = Int()\n request_page_number = Int()\n total_records = Int()\n created_at = DateTime()\n updated_at = DateTime()\n","repo_name":"ideabosque/netsuite_graphql_engine","sub_path":"netsuite_graphql_engine/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"37534133185","text":"import argparse\nimport datetime\nimport os\nimport sqlite3\nimport time\n\nimport ccxt\nimport pandas as pd\n\n\ndef download_binance_futures_data(market, db_path=\"binance_futures.db\", symbols=\"all\"):\n # DB 초기화\n db = sqlite3.connect(db_path)\n\n binance = ccxt.binance(\n {\n \"options\": {\n \"defaultType\": market\n },\n \"enableRateLimit\": True\n }\n )\n\n if symbols == \"all\": # download all ticker\n symbols = [mkt[\"symbol\"] for mkt in binance.fetch_markets()]\n \n else: # download only specific ticker\n symbols = symbols.split(\",\")\n print(f\"downloading data for {len(symbols)} symbols : {symbols}\")\n\n for symbol in symbols:\n\n db.execute(f\"\"\"\n CREATE TABLE IF NOT EXISTS _{symbol.replace(\"/\", \"\")} (\n timestamp int, \n open float, \n high float, \n low float, \n close float, \n volume float\n )\"\"\")\n t = time.time()\n\n # make table and commit\n db.commit()\n\n prev_data = db.execute(f\"SELECT * FROM _{symbol.replace('/', '')}\").fetchall()\n\n # check previous data\n # new_data starts from (latest_previous_data + 1) timestamp \n then = datetime.datetime(2021, 1, 1)\n timestamp = int(time.mktime(then.timetuple()) * 1000) if not prev_data else prev_data[-1][0] + 1\n\n downloaded = 0 # 로깅용\n\n while True:\n # download ohlcv data\n tohlcv = binance.fetch_ohlcv(\n symbol=symbol,\n timeframe=args.timeframe,\n params={\"startTime\": timestamp},\n limit=1500 \n )\n\n # no more data to download => break loop\n if not tohlcv:\n break\n\n # save to db\n for timestamp, open, high, low, close, volume in tohlcv:\n db.execute(f\"\"\"\n INSERT INTO _{symbol.replace('/', '')} VALUES (\n {timestamp}, {open}, {high}, {low}, {close}, {volume}\n )\"\"\")\n\n db.commit()\n\n # prepare timestamp in the next loop\n timestamp = tohlcv[-1][0] + 1\n\n # amount of data downloaded\n downloaded += 
len(tohlcv)\n\n # time passed since start_time\n delta_t = time.time() - t\n\n # logging\n print(\n f\"\"\"downloaded {downloaded} rows for {symbol} in {round(delta_t)} seconds, download speed is {round(downloaded / delta_t)} row per second\"\"\",\n end=\"\\r\")\n print(\n f\"\"\"downloaded {downloaded} rows for {symbol} in {round(delta_t)} seconds, download speed is {round(downloaded / delta_t)} row per second\"\"\")\n\n db.commit()\n\n\ndef read_binance_futures_data(db_path, symbol, timeframe):\n \n symbol = symbol.replace(\"/\", \"\")\n db = sqlite3.connect(db_path)\n\n # read all the data\n data = db.execute(f\"SELECT * FROM _{symbol}\").fetchall()\n\n # make dataframe from the raw data\n data = pd.DataFrame(\n data, columns=[\"timestamp\", \"open\", \"high\", \"low\", \"close\", \"volume\"])\n\n # timestamp to datetime, and set as index\n data.index = pd.to_datetime(data[\"timestamp\"] * 1000000)\n\n del data[\"timestamp\"]\n\n return data\n\n\ndef export_data(db_path, symbols, timeframes, export_dir):\n timeframes = timeframes.split(\",\")\n\n # what symbol to export\n if symbols == \"all\":\n binance = ccxt.binance(\n {\n \"options\": {\n \"defaultType\": \"future\"\n },\n \"enableRateLimit\": True\n }\n )\n symbols = [mkt[\"symbol\"] for mkt in binance.fetch_markets()]\n\n else:\n symbols = symbols.split(\",\")\n\n # exporting loop\n for symbol in symbols:\n for timeframe in timeframes:\n # fetch data\n df = read_binance_futures_data(db_path)\n\n # export path: export_dir/symbol_timeframe.csv\n export_path = os.path.join(export_dir, f\"{symbol.replace(' / ', '')}_{timeframe}.csv\")\n\n # export in csv format\n df.to_csv(export_path)\n\n print(f\"exported data to {export_path}\")\n\n\nif __name__ == \"__main__\":\n # cli 인터페이스\n\n # argparse\n parser = argparse.ArgumentParser()\n\n # argument #0 market \"spot\" or \"future\"\n parser.add_argument(\"--market\", default=\"future\", type=str)\n parser.add_argument(\"--db_path\", default=\"binance_futures.db\", type=str)\n\n # argument #2 symbols: \"all\" or specific ticker. 
ex) \"BTC/USDT\" \n parser.add_argument(\"--symbols\", default='all', type=str)\n\n # path to export data\n parser.add_argument(\"--export_dir\", default=None, type=str)\n\n # timeframe to export\n parser.add_argument(\"--export_timeframes\", default=\"1T\", type=str)\n\n args = parser.parse_args()\n\n if not args.market in [\"future\", \"spot\"]:\n raise ValueError(f\"market should be 'spot' or 'future', got {args.market}\")\n\n # download\n if args.export_dir is None:\n download_binance_futures_data(args.market, args.db_path, args.symbols)\n # export\n else:\n export_data(args.db_path, args.symbols, args.export_timeframes, args.export_dir)\n","repo_name":"jsrimr/udacity-final-project","sub_path":"data_downloader.py","file_name":"data_downloader.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"34487382605","text":"import json\nfrom typing import Any, Dict, Optional, Union\n\nimport pymongo\nfrom bson import json_util\n\nimport zmq\n\n\nclass MongoZMQ:\n \"\"\"\n ZMQ server that adds/fetches documents (ie dictionaries) to a MongoDB.\n\n NOTE: mongod must be started before using this class\n \"\"\"\n\n def __init__(\n self, db_name: str, table_name: str, bind_addr: str = \"tcp://127.0.0.1:5000\"\n ):\n \"\"\"\n bind_addr: address to bind zmq socket on\n db_name: name of database to write to (created if doesn't exist)\n table_name: name of mongodb 'table' in the db to write to (created if doesn't exist)\n \"\"\"\n self._bind_addr = bind_addr\n self._db_name = db_name\n self._table_name = table_name\n self._conn: pymongo.MongoClient = pymongo.MongoClient()\n self._db = self._conn[self._db_name]\n self._table = self._db[self._table_name]\n\n def _doc_to_json(self, doc: Any) -> str:\n return json.dumps(doc, default=json_util.default)\n\n def add_document(self, doc: Dict) -> Optional[str]:\n \"\"\"\n Inserts a document (dictionary) into mongo database table\n \"\"\"\n print(f'adding document {doc}')\n try:\n self._table.insert(doc)\n except Exception as e:\n return 'Error: %s' % e\n return None\n\n def get_document_by_keys(self, keys: Dict[str, Any]) -> Union[Dict, str, None]:\n \"\"\"\n Attempts to return a single document from database table that matches\n each key/value in keys dictionary.\n \"\"\"\n print('attempting to retrieve document using keys: %s' % keys)\n try:\n return self._table.find_one(keys)\n except Exception as e:\n return 'Error: %s' % e\n\n def start(self) -> None:\n context = zmq.Context()\n socket = context.socket(zmq.ROUTER)\n socket.bind(self._bind_addr)\n while True:\n msg = socket.recv_multipart()\n print(\"Received msg: \", msg)\n if len(msg) != 3:\n error_msg = 'invalid message received: %s' % msg\n print(error_msg)\n reply = [msg[0], error_msg]\n socket.send_multipart(reply)\n continue\n id = msg[0]\n operation = msg[1]\n contents = json.loads(msg[2])\n # always send back the id with ROUTER\n reply = [id]\n if operation == 'add':\n self.add_document(contents)\n reply.append(\"success\")\n elif operation == 'get':\n doc = self.get_document_by_keys(contents)\n json_doc = self._doc_to_json(doc)\n reply.append(json_doc)\n else:\n print('unknown request')\n socket.send_multipart(reply)\n\n\ndef main() -> None:\n MongoZMQ('ipcontroller', 'jobs').start()\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"zeromq/pyzmq","sub_path":"examples/mongodb/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":3466,"dataset":"github-code","pt":"37"} +{"seq_id":"24491251074","text":"#\n# @lc app=leetcode.cn id=1502 lang=python3\n#\n# [1502] 判断能否形成等差数列\n#\n\n# @lc code=start\nclass Solution:\n def canMakeArithmeticProgression(self, arr: List[int]) -> bool:\n arr.sort()\n n=len(arr)\n if n==2:return True\n d=arr[1]-arr[0]\n for i in range(2,n):\n if arr[i]-arr[i-1]!=d:\n return False\n return True\n# @lc code=end\n\n","repo_name":"zerubbabel/leetcode","sub_path":"1502.判断能否形成等差数列.py","file_name":"1502.判断能否形成等差数列.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4491353943","text":"\nfrom numpy import \\\n array, zeros, dot, hstack, \\\n identity\nfrom scipy.linalg import \\\n inv\nfrom traits.api import \\\n Array, Float, \\\n Instance, Int\n\nfrom ibvpy.fets.fets_eval import FETSEval\nfrom ibvpy.mats.mats_eval import MATSEval\n\n\n#-------------------------------------------------------------------------\n# FETS2D4Q9U - 9 nodes subparametric quadrilateral (2D, quadratic, Lagrange familiy)\n#-------------------------------------------------------------------------\n#-------------------------------------------------------------------------\n# Element Information:\n#-------------------------------------------------------------------------\n#\n# The order of the field approximation is higher then the order of the geometry\n# approximation (subparametric element).\n# The implemented shape functions are derived (in femple) based\n# on the following ordering of the nodes of the parent element.\n#\n# _node_coord_map_dof = Array( Float, (9,2),\n# [[ -1.,-1. ],\n# [ 1.,-1. ],\n# [ 1., 1. ],\n# [ -1., 1. ],\n# [ 0.,-1. ],\n# [ 1., 0. ],\n# [ 0., 1. ],\n# [ -1., 0. ],\n# [ 0., 0. 
]])\n#\n# The ordering of the nodes of the parent element used for the geometry approximation\n# is defined in '_node_coord_map_geo' (see code below)\n# and the (linear) shape functions are derived by formula\n#\n#-------------------------------------------------------------------------\n#\nclass FETS2D4Q9U(FETSEval):\n debug_on = True\n\n mats_eval = Instance(MATSEval)\n\n # Dimensional mapping\n dim_slice = slice(0, 2)\n\n n_e_dofs = Int(2 * 9)\n t = Float(1.0, label='thickness')\n\n # Integration parameters\n #\n ngp_r = 3\n ngp_s = 3\n\n dof_r = Array(value=[[-1., -1.],\n [1., -1.],\n [1., 1.],\n [-1., 1.],\n [0., -1.],\n [1., 0.],\n [0., 1.],\n [-1., 0.],\n [0., 0.]])\n geo_r = Array(value=[[-1, -1], [1, -1], [1, 1], [-1, 1]])\n #\n vtk_cell_types = 'QuadraticQuad'\n vtk_r = Array(value=[[-1., -1.],\n [0., -1.],\n [1., -1.],\n [-1., 0.],\n [0., 0.],\n [1., 0.],\n [-1., 1.],\n [0., 1.],\n [1., 1.]])\n vtk_cells = [[0, 2, 8, 6, 1, 5, 7, 3, 4]]\n\n n_nodal_dofs = 2\n\n # Ordering of the nodes of the parent element used for the geometry\n # approximation\n _node_coord_map_geo = Array(Float, (4, 2),\n [[-1., -1.],\n [1., -1.],\n [1., 1.],\n [-1., 1.]])\n\n #---------------------------------------------------------------------\n # Method required to represent the element geometry\n #---------------------------------------------------------------------\n def get_N_geo_mtx(self, r_pnt):\n '''\n Return the value of shape functions for the specified local coordinate r\n '''\n cx = self._node_coord_map_geo\n N_geo_mtx = array(\n [[1 / 4. * (1 + r_pnt[0] * cx[i, 0]) * (1 + r_pnt[1] * cx[i, 1]) for i in range(0, 4)]])\n return N_geo_mtx\n\n def get_dNr_geo_mtx(self, r_pnt):\n '''\n Return the matrix of shape function derivatives.\n Used for the conrcution of the Jacobi matrix.\n\n @TODO - the B matrix is used\n just for uniaxial bar here with a trivial differential\n operator.\n '''\n cx = self._node_coord_map_geo\n dNr_geo_mtx = array([[1 / 4. * cx[i, 0] * (1 + r_pnt[1] * cx[i, 1]) for i in range(0, 4)],\n [1 / 4. * cx[i, 1] * (1 + r_pnt[0] * cx[i, 0]) for i in range(0, 4)]])\n return dNr_geo_mtx\n\n #---------------------------------------------------------------------\n # Method delivering the shape functions for the field variables and their derivatives\n #---------------------------------------------------------------------\n def get_N_mtx(self, r_pnt):\n '''\n Returns the matrix of the shape functions used for the field approximation\n containing zero entries. The number of rows corresponds to the number of nodal\n dofs. 
The matrix is evaluated for the specified local coordinate r.\n '''\n N_dof = zeros((1, 9), dtype='float_')\n N_dof[0, 0] = (\n r_pnt[0] * r_pnt[1] * (-1 + r_pnt[1]) * (-1 + r_pnt[0])) / 0.4e1\n N_dof[0, 1] = (\n r_pnt[0] * r_pnt[1] * (-1 + r_pnt[1]) * (1 + r_pnt[0])) / 0.4e1\n N_dof[0, 2] = (\n r_pnt[0] * r_pnt[1] * (1 + r_pnt[1]) * (1 + r_pnt[0])) / 0.4e1\n N_dof[0, 3] = (\n r_pnt[0] * r_pnt[1] * (1 + r_pnt[1]) * (-1 + r_pnt[0])) / 0.4e1\n N_dof[0, 4] = - (r_pnt[1] * (-1 + r_pnt[0])\n * (1 + r_pnt[0]) * (-1 + r_pnt[1])) / 0.2e1\n N_dof[0, 5] = - (r_pnt[0] * (-1 + r_pnt[1])\n * (1 + r_pnt[1]) * (1 + r_pnt[0])) / 0.2e1\n N_dof[0, 6] = - (r_pnt[1] * (-1 + r_pnt[0])\n * (1 + r_pnt[0]) * (1 + r_pnt[1])) / 0.2e1\n N_dof[0, 7] = - (r_pnt[0] * (-1 + r_pnt[1])\n * (1 + r_pnt[1]) * (-1 + r_pnt[0])) / 0.2e1\n N_dof[0, 8] = (-1 + r_pnt[1]) * (1 + r_pnt[1]) * \\\n (-1 + r_pnt[0]) * (1 + r_pnt[0])\n I_mtx = identity(self.n_nodal_dofs, float)\n N_mtx_list = [I_mtx * N_dof[0, i] for i in range(0, N_dof.shape[1])]\n N_mtx = hstack(N_mtx_list)\n return N_mtx\n\n def get_dNr_mtx(self, r_pnt):\n '''\n Return the derivatives of the shape functions used for the field approximation\n '''\n dNr_mtx = zeros((2, 9), dtype='float_')\n dNr_mtx[0, 0] = (r_pnt[1] * (-1 + r_pnt[1]) * (-1 + r_pnt[0])\n ) / 4.0 + (r_pnt[0] * r_pnt[1] * (-1 + r_pnt[1])) / 4.0\n dNr_mtx[0, 1] = (r_pnt[1] * (-1 + r_pnt[1]) * (1 + r_pnt[0])\n ) / 4.0 + (r_pnt[0] * r_pnt[1] * (-1 + r_pnt[1])) / 4.0\n dNr_mtx[0, 2] = (r_pnt[1] * (1 + r_pnt[1]) * (1 + r_pnt[0])\n ) / 4.0 + (r_pnt[0] * r_pnt[1] * (1 + r_pnt[1])) / 4.0\n dNr_mtx[0, 3] = (r_pnt[1] * (1 + r_pnt[1]) * (-1 + r_pnt[0])\n ) / 4.0 + (r_pnt[0] * r_pnt[1] * (1 + r_pnt[1])) / 4.0\n dNr_mtx[0, 4] = - (r_pnt[1] * (-1 + r_pnt[1]) * (1 + r_pnt[0])) / \\\n 0.2e1 - (r_pnt[1] * (-1 + r_pnt[1]) * (-1 + r_pnt[0])) / 2.0\n dNr_mtx[0, 5] = - ((-1 + r_pnt[1]) * (1 + r_pnt[1]) * (1 + r_pnt[0])\n ) / 2.0 - (r_pnt[0] * (-1 + r_pnt[1]) * (1 + r_pnt[1])) / 2.0\n dNr_mtx[0, 6] = - (r_pnt[1] * (1 + r_pnt[1]) * (1 + r_pnt[0])\n ) / 2.0 - (r_pnt[1] * (1 + r_pnt[1]) * (-1 + r_pnt[0])) / 2.0\n dNr_mtx[0, 7] = - ((-1 + r_pnt[1]) * (1 + r_pnt[1]) * (-1 + r_pnt[0])\n ) / 2.0 - (r_pnt[0] * (-1 + r_pnt[1]) * (1 + r_pnt[1])) / 2.0\n dNr_mtx[0, 8] = (-1 + r_pnt[1]) * (1 + r_pnt[1]) * (1 +\n r_pnt[0]) + (-1 + r_pnt[1]) * (1 + r_pnt[1]) * (-1 + r_pnt[0])\n dNr_mtx[1, 0] = (r_pnt[0] * (-1 + r_pnt[1]) * (-1 + r_pnt[0])\n ) / 4.0 + (r_pnt[0] * r_pnt[1] * (-1 + r_pnt[0])) / 4.0\n dNr_mtx[1, 1] = (r_pnt[0] * (-1 + r_pnt[1]) * (1 + r_pnt[0])\n ) / 4.0 + (r_pnt[0] * r_pnt[1] * (1 + r_pnt[0])) / 4.0\n dNr_mtx[1, 2] = (r_pnt[0] * (1 + r_pnt[1]) * (1 + r_pnt[0])\n ) / 4.0 + (r_pnt[0] * r_pnt[1] * (1 + r_pnt[0])) / 4.0\n dNr_mtx[1, 3] = (r_pnt[0] * (1 + r_pnt[1]) * (-1 + r_pnt[0])\n ) / 4.0 + (r_pnt[0] * r_pnt[1] * (-1 + r_pnt[0])) / 4.0\n dNr_mtx[1, 4] = - ((-1 + r_pnt[0]) * (1 + r_pnt[0]) * (-1 + r_pnt[1])\n ) / 2.0 - (r_pnt[1] * (-1 + r_pnt[0]) * (1 + r_pnt[0])) / 2.0\n dNr_mtx[1, 5] = - (r_pnt[0] * (1 + r_pnt[1]) * (1 + r_pnt[0])\n ) / 2.0 - (r_pnt[0] * (-1 + r_pnt[1]) * (1 + r_pnt[0])) / 2.0\n dNr_mtx[1, 6] = - ((-1 + r_pnt[0]) * (1 + r_pnt[0]) * (1 + r_pnt[1])\n ) / 2.0 - (r_pnt[1] * (-1 + r_pnt[0]) * (1 + r_pnt[0])) / 2.0\n dNr_mtx[1, 7] = - (r_pnt[0] * (1 + r_pnt[1]) * (-1 + r_pnt[0])\n ) / 2.0 - (r_pnt[0] * (-1 + r_pnt[1]) * (-1 + r_pnt[0])) / 2.0\n dNr_mtx[1, 8] = (-1 + r_pnt[0]) * (1 + r_pnt[0]) * (1 +\n r_pnt[1]) + (-1 + r_pnt[0]) * (1 + r_pnt[0]) * (-1 + r_pnt[1])\n return dNr_mtx\n\n def get_B_mtx(self, r_pnt, 
X_mtx):\n J_mtx = self.get_J_mtx(r_pnt, X_mtx)\n dNr_mtx = self.get_dNr_mtx(r_pnt)\n dNx_mtx = dot(inv(J_mtx), dNr_mtx)\n Bx_mtx = zeros((3, 18), dtype='float_')\n for i in range(0, 9):\n Bx_mtx[0, i * 2] = dNx_mtx[0, i]\n Bx_mtx[1, i * 2 + 1] = dNx_mtx[1, i]\n Bx_mtx[2, i * 2] = dNx_mtx[1, i]\n Bx_mtx[2, i * 2 + 1] = dNx_mtx[0, i]\n return Bx_mtx\n\n#----------------------- example with the new domain --------\nif __name__ == '__main__':\n from ibvpy.api import \\\n TStepper as TS, RTDofGraph, RTraceDomainListField, TLoop, \\\n TLine, BCDofGroup\n\n #from lib.mats.mats2D.mats_cmdm2D.mats_mdm2d import MACMDM\n from ibvpy.mats.mats2D.mats2D_sdamage.mats2D_sdamage import MATS2DScalarDamage\n #from ibvpy.mats.mats2D.mats2D_elastic.mats2D_elastic import MATS2DElastic\n from ibvpy.mesh.fe_grid import FEGrid\n\n fets_eval = FETS2D4Q9U(mats_eval=MATS2DScalarDamage())\n #fets_eval = FETS2D4Q9U(mats_eval=MATS2DElastic())\n\n # Discretization\n #\n domain = FEGrid(coord_max=(3., 3., 0.),\n shape=(3, 3),\n fets_eval=fets_eval)\n print('n_dofs', domain.n_dofs)\n\n right_dof = 2\n ts = TS(\n sdomain=domain,\n # conversion to list (square brackets) is only necessary for slicing of\n # single dofs, e.g \"get_left_dofs()[0,1]\"\n\n bcond_list=[BCDofGroup(var='u', value=0., dims=[0],\n get_dof_method=domain.get_left_dofs),\n BCDofGroup(var='u', value=0., dims=[1],\n get_dof_method=domain.get_bottom_left_dofs),\n BCDofGroup(var='u', value=0.002, dims=[0],\n get_dof_method=domain.get_right_dofs)],\n\n rtrace_list=[RTDofGraph(name='Fi,right over u_right (iteration)',\n var_y='F_int', idx_y=right_dof,\n var_x='U_k', idx_x=right_dof),\n # RTraceDomainListField(name = 'Stress' ,\n # var = 'sig_app', idx = 0,\n # record_on = 'update'),\n RTraceDomainListField(name='Displacement',\n var='u', idx=0),\n # RTraceDomainListField(name = 'N0' ,\n # var = 'N_mtx', idx = 0,\n # record_on = 'update')\n\n ]\n )\n\n # Add the time-loop control\n\n global tloop\n tloop = TLoop(tstepper=ts,\n DT=0.5,\n tline=TLine(min=0.0, max=1.0, step=0.1))\n\n import cProfile\n cProfile.run('tloop.eval()', 'tloop_prof')\n\n import pstats\n p = pstats.Stats('tloop_prof')\n p.strip_dirs()\n print('cumulative')\n p.sort_stats('cumulative').print_stats(20)\n print('time')\n p.sort_stats('time').print_stats(20)\n\n # Put the whole stuff into the simulation-framework to map the\n # individual pieces of definition into the user interface.\n #\n from ibvpy.plugins.ibvpy_app import IBVPyApp\n app = IBVPyApp(ibv_resource=tloop)\n app.main()\n","repo_name":"simvisage/bmcs","sub_path":"ibvpy/fets/fets2D/fets2D4q9u.py","file_name":"fets2D4q9u.py","file_ext":"py","file_size_in_byte":12334,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"72456063152","text":"import yaml\nimport json\nimport copy\nimport requests\nimport httplib\nimport datetime\nfrom .utils import marc_rels, inverse_marc_rels, plural, reverse_name\n\nclass TypedSubject(tuple):\n pass\n \ndef subject_constructor(loader, node):\n return TypedSubject((node.tag[1:] , loader.construct_scalar(node)))\ndef subject_representer(dumper, subject):\n return dumper.represent_scalar(u'!%s'% subject[0], subject[1])\n\n\nyaml.SafeLoader.add_constructor(u'!lcsh', subject_constructor)\nyaml.SafeLoader.add_constructor(u'!lcc', subject_constructor)\nyaml.SafeLoader.add_constructor(u'!bisacsh', subject_constructor)\nyaml.SafeDumper.add_representer(TypedSubject, subject_representer)\n\nPANDATA_STRINGFIELDS = [\n '_repo',\n 'description',\n 
'funding_info',\n 'gutenberg_issued',\n 'language',\n 'publication_date_original',\n 'publisher_original',\n 'rights',\n 'rights_url',\n 'title',\n ]\n \nPANDATA_AGENTFIELDS = [\n 'authors',\n 'editors_of_a_compilation',\n 'translators',\n 'illustrators',\n ]\nPANDATA_LISTFIELDS = PANDATA_AGENTFIELDS + [\n 'subjects', 'covers', 'edition_list',\n ]\nPANDATA_DICTFIELDS = [\n 'identifiers', 'creator', 'contributor', 'edition_identifiers',\n ]\n \ndef edition_name_from_repo(repo):\n if '_' in repo:\n return '_'.join(repo.split('_')[0:-1])\n return repo\n\ndef get_one(maybe_a_list):\n if isinstance(maybe_a_list, list):\n return str(maybe_a_list[0]) #use first name if available\n else:\n return str(maybe_a_list)\n \n# wrapper class for the json object \nclass Pandata(object):\n def __init__(self, datafile=None):\n if datafile:\n if isinstance(datafile, Pandata):\n self.metadata=copy.deepcopy(datafile.metadata) # copy the metadata\n elif datafile.startswith('https://') or datafile.startswith('https://'):\n r = requests.get(datafile)\n if r.status_code == httplib.OK:\n self.metadata = yaml.safe_load( r.content)\n else:\n self.metadata = yaml.safe_load(file(datafile, 'r').read())\n self.set_edition_id()\n else:\n self.metadata = {}\n \n def __getattr__(self, name):\n if name in PANDATA_STRINGFIELDS:\n value = self.metadata.get(name, '')\n if isinstance(value, str):\n return value\n if name in PANDATA_LISTFIELDS:\n return self.metadata.get(name, [])\n if name in PANDATA_DICTFIELDS:\n return self.metadata.get(name, {})\n return self.metadata.get(name, None)\n \n def load(self, yaml_string):\n self.metadata = yaml.safe_load(yaml_string)\n self.set_edition_id()\n \n def set_edition_id(self):\n # set a (hopefully globally unique) edition identifier\n if not self.metadata.has_key('edition_identifiers'):\n self.metadata['edition_identifiers'] = {}\n base=self.url\n if not base:\n try:\n base = unicode(self.identifiers.keys[0])+':'+unicode(self.identifiers.values[0])\n except:\n base = u'repo:' + unicode(self._repo)\n self.metadata['edition_identifiers']['edition_id'] = base + '#' + self._edition\n \n def agents(self, agent_type): \n if self.creator.get(agent_type,None):\n agents=[self.creator.get(agent_type,None)]\n elif self.creator.get(plural(agent_type),None):\n agents=self.creator.get(plural(agent_type),None)\n elif self.contributor.get(agent_type,None):\n agents=[self.contributor.get(agent_type,None)]\n elif self.contributor.get(plural(agent_type),None):\n agents=self.contributor.get(plural(agent_type),None)\n else:\n agents = []\n return agents\n \n # the edition should be able to report ebook downloads, which should have format and url attributes\n # TODO - fill in URL based on a standard place in repo\n def downloads(self):\n return []\n\n # the edition should be able to report an \"ebook via\" url\n def download_via_url(self):\n return []\n\n # these should be last name first\n def authnames(self):\n return [auth.get('agent_name','') for auth in self.agents(\"author\")]\n \n # some logic to decide\n @property\n def publication_date(self):\n if self.metadata.get(\"publication_date\",None):\n return self.metadata[\"publication_date\"]\n elif self.metadata.get(\"gutenberg_issued\",None):\n return self.metadata[\"gutenberg_issued\"]\n else:\n return str(datetime.datetime.now().date())\n \n # gets the right edition. 
stub method for compatibility with marc converter\n @staticmethod \n def get_by_isbn(isbn):\n return None\n\n \n def get_one_identifier(self, id_name):\n if self.metadata.get(id_name,''):\n return get_one(self.metadata[id_name]) \n if self.identifiers.get(id_name,''):\n return get_one(self.identifiers[id_name]) \n if self.edition_identifiers.has_key(id_name):\n return get_one(self.edition_identifiers[id_name]) \n return '' \n\n @property\n def isbn(self):\n return self.get_one_identifier('isbn')\n\n @property\n def _edition(self):\n if self.metadata.get(\"_edition\", ''):\n return unicode(self.metadata[\"_edition\"])\n elif self.get_one_identifier('isbn'):\n return unicode(self.get_one_identifier('isbn')) #use first isbn if available\n elif self._repo:\n return edition_name_from_repo(self._repo)\n else:\n return 'book' #this will be the default file name\n\n def get_edition_list(self):\n yield self\n for edition in self.edition_list:\n new_self = Pandata(self)\n for key in edition.keys():\n new_self.metadata[key] = edition[key]\n new_self.set_edition_id()\n yield new_self\n \n def dump_file(self, file_name):\n with open(file_name,'w+') as f:\n f.write(self.__unicode__())\n \n def __unicode__(self):\n return yaml.safe_dump(self.metadata,default_flow_style=False,allow_unicode=True)\n ","repo_name":"gitenberg-dev/metadata","sub_path":"gitenberg/metadata/pandata.py","file_name":"pandata.py","file_ext":"py","file_size_in_byte":6257,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"} +{"seq_id":"9311774832","text":"import json\r\n#population data\r\nwith open('E:/Ramya/brush ups/Basics/COVID/Data_Collection/populationCensus.json','rb') as rf:\r\n populationData=json.load(rf)\r\nfor p in populationData:\r\n del p['rank'],p['Growth'],p['Pop2018'],p['Pop2010'],p['growthSince2010'],p['Percent'],p['density']\r\n\r\n#state abbreviations data \r\nwith open('E:/Ramya/brush ups/Basics/COVID/Data_Collection/stateAbbreviations.json','r') as rf:\r\n statenamesData=json.load(rf)\r\n for s in statenamesData:\r\n del s['Abbrev']\r\n\r\n#adding state code to populationsdata\r\nfor p in populationData:\r\n for s in statenamesData:\r\n if p['State']==s['State']:\r\n p['code']=s['Code']\r\n\r\n#covid data\r\nwith open('E:/Ramya/brush ups/Basics/COVID/Data_Collection/cleaned_covid_allstates_current.json','r') as rf:\r\n covidData=json.load(rf)\r\n\r\n\r\n#statewise ranking based on number of positive covids\r\n\r\ncovidPosRanking=sorted(covidData,key=lambda entry: entry['positiveCount'],reverse=True)\r\nprint(f'STATE POSITIVE CASES')\r\nfor i in covidPosRanking:\r\n print (f\"{i['state']} {i['positiveCount']}\")\r\n\r\n\r\n#per capita calculation\r\n\r\nignoreStates=[\"VI\",\"GU\",\"MP\",\"AS\"]\r\n\r\ndef calculatePerCapita(metric,name):\r\n for entry in covidData:\r\n if entry['state'] in ignoreStates:\r\n entry[name]= float(0) \r\n else:\r\n for p in populationData: \r\n if p['code']==entry['state']:\r\n entry[name]=entry[metric]/p['Pop'] \r\n\r\n \r\n#positive cases per capita calculation\r\n\r\ncalculatePerCapita('positiveCount','posPerCapita')\r\ncovidPosPerCapita=sorted(covidData,key=lambda entry: entry['posPerCapita'],reverse=True)\r\nprint(f'STATE PERCAPITA COUNT for every 100,000 people ')\r\nfor i in covidPosPerCapita:\r\n print (f\"{i['state']} {round(i['posPerCapita'],5)} {round(i['posPerCapita']*100000,5)}\")\r\n\r\n# percent needing help and are hospitalized\r\nprint(\"Percent of covid patients hospitalised state wise\")\r\nfor state in covidData:\r\n if 
state['positiveCount']!=0:\r\n print (f\"{state['state']} {(state['hospitalizedCurrently']/state['positiveCount'])*100}\")\r\n\r\n\r\n \r\n \r\n\r\n","repo_name":"RamyaNagarajan88/Covid_Analysis","sub_path":"basic_program/BasiccovidAnalysis.py","file_name":"BasiccovidAnalysis.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39158669924","text":"class Solution:\n def majorityElement(self, nums: List[int]) -> int:\n dic = dict()\n for each in nums:\n if each in dic:\n dic[each] += 1\n if dic[each] >= (len(nums)//2)+1:\n return each\n else:\n dic[each] = 1\n if len(nums) == 1:\n return nums[0]","repo_name":"saro-mano/Leetcode","sub_path":"169. Majority Element.py","file_name":"169. Majority Element.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74083646829","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom home import views as v\n\n# Create your views here.\ndef register(response):\n if response.method == \"POST\":\n form = UserCreationForm(response.POST)\n if form.is_valid():\n form.save()\n v.create(response)\n return redirect(\"login/\")\n else:\n form = UserCreationForm\n\n return render(response, \"users/register.html\", {\"form\":form})\n","repo_name":"njmoore018/to-do-list","sub_path":"mysite/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33767627314","text":"import pandas as pd\r\nimport numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.datasets import fetch_openml\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom PIL import Image\r\nimport PIL.ImageOps\r\n\r\nX,y= fetch_openml('mnist_784', version=1, return_X_y=True)\r\nclasses = ['0','1','2','3','4','5','6','7','8','9']\r\nprint(pd.Series(y).value_counts())\r\n\r\nx_train,x_test,y_train,y_test = train_test_split(X,y,test_size=0.3,train_size=0.7,random_state=0)\r\nx_train_scale = x_train/255\r\nx_test_scale = x_test/225\r\n\r\nclassifier = LogisticRegression(solver='saga',multi_class='multinomial').fit(x_train_scale,y_train)\r\n\r\ny_pred= classifier.predict(x_test_scale)\r\naccuracy = accuracy_score(y_test,y_pred)\r\nconfusion_m = confusion_matrix(y_test,y_pred)\r\nprint(accuracy)\r\nprint(confusion_m)\r\n\r\ncam = cv2.VideoCapture(0)\r\n\r\nwhile(True):\r\n try:\r\n grey= cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n height,weight = grey.shape\r\n upper_left = (int(width / 2 - 56), int(height / 2 - 56)) \r\n bottom_right = (int(width / 2 + 56), int(height / 2 + 56))\r\n cv2.rectangle(grey,upper_left,bottom_right,(0,255,0),2)\r\n roi = grey[upper_left[1]:bottom_right[1], upper_left[0]:bottom_right[0]]\r\n im_pil = Image.fromarray(roi)\r\n img_bw = im_pil.covert('L')\r\n img_bw_resize = img_bw.resize((28,28),Image.ANTIALIAS)\r\n img_bw_resize_inverted = PIL.ImageOps.invert(img_bw_resize)\r\n pixel_filter = 20\r\n min_pixel = np.percentile(image_bw_resized_inverted, pixel_filter)\r\n image_bw_resized_inverted_scaled = np.clip(image_bw_resized_inverted-min_pixel, 0, 255) \r\n max_pixel = 
np.max(image_bw_resized_inverted) \r\n image_bw_resized_inverted_scaled = np.asarray(image_bw_resized_inverted_scaled)/max_pixel \r\n test_sample = np.array(image_bw_resized_inverted_scaled).reshape(1,784)\r\n test_pred = classifier.predict(test_sample)\r\n print('predicted class',test_pred)\r\n cv2.imshow('frame',grey)\r\n except Exception as e:\r\n pass\r\ncam.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"AnushkaPanda/C-123","sub_path":"c-123.py","file_name":"c-123.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34925613207","text":"import sys\n \nfrom pyspark import SparkContext, SparkConf\n\n\ndef print_kmer(a, k):\n x = len(a)\n i = 0\n for i in range(x):\n if (i + k) > x:\n break\n print(a[i:i+k])\n\ndef find_kmer(a, k):\n list_kmer = []\n x = len(a)\n i = 0\n for i in range(x):\n if (i + k) > x:\n break\n list_kmer.append(a[i:i+k])\n return list_kmer\n\ndef count_kmer(a,k):\n # Start by making counts an empty dictionary, using {}\n counts = {}\n big_list = find_kmer(a, k)\n # Now loop through the values in big_list and count them\n for num in big_list:\n \n # Check to see if this key is already in the dictionary\n # If not, add it with an initial count of zero\n if not (num in counts):\n counts[num] = 0\n \n # Now that we are sure the key is in the dictionary, we can increment the count\n counts[num] += 1\n\n # After the loop is finished, counts should contain the right counts for each number seen\n return counts\n\n \nif __name__ == \"__main__\":\n\t\n\t# create Spark context with necessary configuration\n\tsc = SparkContext(\"local\",\"Kmer\")\n\t\n\t# read data from text file and split each line into words\n\twords = sc.textFile(\"ecoli.fa\").flatMap(lambda line: find_kmer(line, 9))\n\t\n\t# count the occurrence of each word\n\twordCounts = words.map(lambda word: (word, 1)).reduceByKey(lambda a,b:a +b)\n\n\t# save the counts to output\n\twordCounts.saveAsTextFile(\"output_kmer/\")\n","repo_name":"thuyduongbka/cntt.httt.hust","sub_path":"Tính toán phân tán - IT4865/ex9_kmer.py","file_name":"ex9_kmer.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"36414041403","text":"import numpy as np\n\ndef get_custom_props():\n return np.array([\n [0.7, 0.3],\n [0.2, 0.8]\n ])\n\ndef get_probabilities(arr):\n row_sums = np.sum(arr, axis=1)\n probabilities = 1 / row_sums.astype(float)\n return arr * probabilities[:, np.newaxis]\n\n\ndef get_vector(arr):\n return np.full((1, arr.shape[0]), float(1/arr.shape[0]))\n\n\ndef calc(arr, size):\n new_matrix = get_probabilities(arr)\n # new_matrix = get_custom_props()\n vector = get_vector(new_matrix)\n\n vector_previous = vector\n\n while True:\n vector_new = np.dot(vector_previous, new_matrix)\n if np.allclose(vector_previous, vector_new):\n return vector_new\n\n vector_previous = vector_new\n\n\n\nif __name__ == '__main__':\n init_arr = np.array([\n [0, 1, 0, 0],\n [1, 1, 0, 1],\n [1, 0, 0, 1],\n [0, 0, 1, 1]\n ])\n\n sym_mat_size = init_arr.shape[0]\n print(calc(init_arr, sym_mat_size))\n","repo_name":"jimdixx/PageRanking","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31748933024","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import Response, request, 
jsonify\nfrom csv import DictReader, DictWriter\nfrom json import dumps as dumps\nfrom copy import deepcopy as deepcopy\n\ndb = []\ncurrent_id = 12 # TODO get from last item in csv file\nfieldnames = [\"id\", \"title\", \"media\", \"description\", \"numerical\", \"external_link\", \"list\"]\n\nNEWEST_ITEMS = 10\n\napp = Flask(__name__)\n\n\n# Load entries in \"database\" into memory\ndef load_data():\n with open(\"data.csv\", \"r\") as csvfile:\n reader = DictReader(csvfile)\n for row in reader:\n row[\"list\"] = eval(row[\"list\"]) # String to Dict\n db.append(row)\n\n\nload_data()\n\n\n@app.route('/')\ndef go_to_home():\n # TODO Explore a more efficient way of updating the data loaded in memory\n db.clear()\n load_data()\n # with open(\"data.csv\", \"r\") as csvfile:\n # reader = DictReader(csvfile)\n # for row in reader:\n # row[\"list\"] = eval(row[\"list\"]) # String to Dict\n # db.append(row)\n newest = db[- NEWEST_ITEMS:]\n # print(type(newest))\n # print(type(newest[0]))\n # print(type(dumps(newest)))\n # print(dumps(newest, indent=2))\n return render_template(\"home.html\", newest=newest)\n\n\n@app.route('/view/')\ndef go_to_view(id_str=None):\n print(\"VIEW\")\n id_nbr = int(id_str)\n details = {}\n for item in db: # TODO Use hashmap or another search method instead for efficiency.\n if int(item[\"id\"]) == id_nbr:\n # print(\"item[\\\"id\\\"]: \" + item[\"id\"]) # Debugging\n # print(\"id_nbr: \" + id_str) # Debugging\n # print(item) # Debugging\n details = deepcopy(item)\n break\n # TODO Last time I left here\n # for review in details[\"list\"]:\n\n print(type(details[\"list\"])) # Debugging\n print(details[\"list\"]) # Debugging\n details[\"list\"] = dumps(details[\"list\"]) # Dict to String\n print(type(details[\"list\"])) # Debugging\n print(details[\"list\"]) # Debugging\n return render_template(\"view_details.html\", details=details)\n\n@app.route('/create')\ndef go_to_create_item():\n return render_template(\"create.html\")\n\n\n@app.route('/add_item', methods=['POST'])\ndef add_item():\n print(\"ADD ITEM\")\n global current_id\n global db\n item_row = {}\n\n json_data = request.get_json()\n # print(json_data) # Debugging\n current_id += 1\n\n # Creating row to be stored\n for field_name in fieldnames:\n # print(field_name)\n if field_name == \"id\":\n # print(\"I am inside the id conditional statement\") # Debugging\n item_row[field_name] = current_id\n elif field_name == \"list\":\n # print(\"I am inside the list conditional statement\") # Debugging\n print(type(json_data[\"list_elem\"]))\n print(json_data[\"list_elem\"])\n print(dumps(json_data[\"list_elem\"]))\n user_id = json_data[\"list_elem\"].keys();\n # Using string instead of boolean simplifies storage and retrieval from a csv file\n json_data[\"list_elem\"][list(user_id)[0]][\"mark_as_deleted\"] = \"False\"\n item_row[field_name] = dumps(json_data[\"list_elem\"]) # Dict to String\n elif field_name is None or json_data[field_name] is None:\n item_row[field_name] = None\n else:\n # print(\"I am inside the else statement\") # Debugging\n item_row[field_name] = json_data[field_name]\n\n try:\n with open(\"data.csv\", \"a\") as csvfile:\n writer = DictWriter(csvfile, fieldnames=fieldnames)\n writer.writerow(item_row)\n except FileNotFoundError as e:\n print(e, \"Creating new file\")\n with open(\"data.csv\", \"w\") as output_file:\n writer = DictWriter(output_file, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerow(item_row)\n except Exception as e:\n current_id -= 1\n # TODO return something that tells 
the user the record could not be created\n raise e\n print(\"APPEND TO DB\")\n # db.append(item_row) # NOOO this is the thing that is causing inconsistencies.\n print(type(item_row[\"list\"]))\n print(item_row[\"list\"])\n item_row[\"list\"] = eval(item_row[\"list\"])\n db.append(item_row)\n print(type(item_row[\"list\"]))\n print(item_row[\"list\"])\n\n # try:\n # print(type(item_row[\"list\"]))\n # print(item_row[\"list\"])\n # item_row[\"list\"] = eval(item_row[\"list\"])\n # db.append(item_row)\n # print(type(item_row[\"list\"]))\n # print(item_row[\"list\"])\n # except NameError as e:\n # if\n new_link = \"view/\" + str(current_id)\n # print(new_link) # Debugging\n # send back the link to the new page so the client can be redirected there\n return jsonify(link=new_link)\n\n\n@app.route('/search/', methods=['GET'])\ndef search(search_str=None):\n\n print(search_str) # Debugging\n\n search_str_lower = search_str.lower()\n title_results = []\n content_results = []\n for item in db:\n if search_str_lower in item[\"title\"].lower():\n title_results.append(item)\n elif search_str_lower in item[\"description\"].lower():\n content_results.append(item)\n\n print(title_results) # Debugging\n print(content_results) # Debugging\n\n return render_template(\"search_results.html\", search_str=search_str,\n title_results=title_results, content_results=content_results)\n\n\n@app.route('/auto/')\ndef provide_autocomplete(fragment_str=None):\n search_results = []\n for item in db:\n if fragment_str in item[\"title\"].lower() or fragment_str in item[\"description\"].lower():\n search_results.append(item[\"title\"])\n\n print(search_results)\n return jsonify(search_results=search_results)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Luigi-PastorePica/AppsIList","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72140604911","text":"import cv2\nimport pickle \nimport os\nimport numpy as np\nimport imutils \nfrom imutils import paths\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential \nfrom keras.layers.convolutional import Conv2D , MaxPooling2D\nfrom keras.layers.core import Flatten , Dense\n\n#desired width and height in pixel \ndef resize_to_fit(image, width, height):\n\t# grab the dimension of the image\n\t(h,w) = image.shape[:2]\n\t# if the width is greater than the height then resize along the width\n\tif (w > h) :\n\t\timage = imutils.resize(image, width=width)\n\telse :\n\t\timage = imutils.resize(image, height=height)\n\n\t#determine the padding values for the width and height to\n\t#obtain the target dimension\n\tpadW = int((width - image.shape[1]) / 2.0)\n\tpadH = int((height - image.shape[0])/ 2.0)\n\n\t#pad the image then apply once more resizing to handle any\n\t#rounding issues\n\timage = cv2.copyMakeBorder(image, padH, padH, padW, padW, cv2.BORDER_REPLICATE)\n\timage = cv2.resize(image, (width, height))\n\t#return the pre-processed image \n\treturn image\n\n\nletter_image_folder = \"extracted_letter_images\"\nmodel_filename = \"captcha_model.hdf5\"\nmodel_labels_filename = \"model_labels.dat\"\n\ndata = []\nlabels = []\n\n#loop over the input images\nfolder_dir = os.path.dirname(os.path.abspath(__file__)) + \"/\" + letter_image_folder \nfor letter_text_folder in os.listdir(folder_dir):\n\tif letter_text_folder == \".DS_Store\" 
:\n\t\tcontinue \n\tnext_path = folder_dir + \"/\" + letter_text_folder \n\tfor letter_image in os.listdir(next_path):\n\t\tif letter_image == \".DS_Store\" :\n\t\t\tcontinue \n\t\t#load the image and convert it into grayscale\n\t\timage = cv2.imread(next_path+\"/\"+letter_image)\n\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\t\t#resize the image so that it fits in a 20*20 pixel box\n\t\timage = resize_to_fit(image,20,20)\n\t\t#add a third channel dimension to the image to make keras happy\n\t\timage = np.expand_dims(image, axis=2)\n\t\tdata.append(image)\n\t\tlabels.append(letter_text_folder)\n\n#scale the raw pixel intensities to range of [0,1] - this improves training\ndata = np.array(data , dtype=\"float\") / 255.0\nlabels = np.array(labels)\n\n#split the data into train and test sets \nx_train, x_test , y_train, y_test = train_test_split(data,labels, test_size=0.25, random_state = 0)\n#convert the letter_text intp one-hot encoding that keras can work with it \nlb = LabelBinarizer().fit(y_train)\ny_train = lb.transform(y_train)\ny_test = lb.transform(y_test)\n\n#save the one-hot encoding of lettertext\n#this will be required to decode what its prediction means\nwith open(model_labels_filename , \"wb\") as aditya:\n\tpickle.dump(lb,aditya)\n\n#now build neural network\nmodel = Sequential()\n\n#first covolutional layer with max pooling \nmodel.add(Conv2D(20, (5,5), padding=\"same\", input_shape = (20,20,1), activation=\"relu\"))\nmodel.add(MaxPooling2D(pool_size = (2,2), strides=(2,2)))\n\n#second convolutional layer with max pooling \nmodel.add(Conv2D(50, (5,5), padding=\"same\",activation=\"relu\"))\nmodel.add(MaxPooling2D(pool_size = (2,2), strides=(2,2)))\n\n#hidden layer with 500 nodes\nmodel.add(Flatten())\nmodel.add(Dense(500, activation=\"relu\"))\n\n#output layer with 32 nodes\nmodel.add(Dense(32, activation=\"softmax\"))\n\n#ask keras to build the TensorFlow model behind the scenes \nmodel.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n#train the neural network \nmodel.fit(x_train,y_train, validation_data = (x_test,y_test), batch_size=32, epochs=10, verbose=1)\n\n#save the train model to disk \nmodel.save(model_filename)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"AdityaAtri/captcha-breaker","sub_path":"model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"} +{"seq_id":"28783770234","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.db import connection\n# from django.http import HttpResponseRedirect, HttpResponse, JsonResponse\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.authentication import TokenAuthentication\nfrom user.utils import ConnectDB\n\n# from datetime import datetime\nimport requests\n\n\nclass PeminjamanAPI(APIView):\n permission_classes = (IsAuthenticated,)\n authentication_classes = (TokenAuthentication,)\n\n def get(self, request):\n user = request.user\n if user.email == 'ANGGOTA':\n data = ConnectDB.getDataWithQuery(\n '''\n SELECT m.*\n FROM anggota a, peminjaman m, person p\n WHERE p.ktp = a.ktp AND a.no_kartu = m.no_kartu_anggota AND p.ktp = %s\n ''', [user.username])\n return Response(data)\n else:\n return Response([{}])\n\n\ndef peminjaman_view(request):\n person = 
ConnectDB.getUserDataWithApi(request)\n response = ConnectDB.getPersonalDataWithApi(request, 'peminjaman', '/peminjaman/api/')\n response.update(person)\n return render(request, 'peminjaman.html', response)\n\ndef peminjaman_add(request):\n if (request.method == \"POST\"):\n no_kartu_anggota = request.POST.get('id_voucher', None)\n datetime_pinjam = request.POST.get('nama', None)\n datetime_kembali = request.POST.get('kategori', None)\n biaya = request.POST.get('nilai_poin', None)\n denda = request.POST.get('deskripsi', None)\n nomor_sepeda = request.POST.get('no_kartu_anggota', None)\n id_stasiun = request.POST.get('no_kartu_anggota', None)\n with connection.cursor() as cursor:\n cursor.execute(\n '''\n INSERT INTO peminjaman VALUES(%s, %s, %s, %s, %s, %s, %s)\n ''', [no_kartu_anggota, datetime_pinjam, datetime_kembali, biaya, denda, nomor_sepeda,id_stasiun])\n return HttpResponse(\"SUCCESS 200\")\n else:\n return HttpResponse(\"HTTP 204\")","repo_name":"suprissu/bikesharing","sub_path":"peminjaman/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"2993968293","text":"import argparse\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Callable\n\n\ndef parse_input(file_path) -> dict[str, list[str]]:\n graph = defaultdict(lambda: list())\n with open(file_path) as f:\n for line in f:\n node0, node1 = line.strip(\"\\n\").split(\"-\")\n if node1 == \"start\":\n node0, node1 = node1, node0\n graph[node0].append(node1)\n if node0 != \"start\" and node1 != \"end\":\n graph[node1].append(node0)\n return graph\n\n\ndef path_cond_part1(node: str, path: list[str]) -> bool:\n if node.isupper() or node not in path:\n return True\n\n\ndef path_cond_part2(node: str, path: list[str]) -> bool:\n if node.islower() and node in path:\n new_path = path + [node]\n occurances = {n: new_path.count(n) for n in new_path if n.islower()}\n if max(list(occurances.values())) > 2 or list(occurances.values()).count(2) > 1:\n return False\n return True\n\n\ndef find_all_paths(\n graph: dict[str, list[str]],\n path_cond: Callable[[str, list[str]], bool],\n start: str = \"start\",\n path: list[str] = [],\n) -> list[list[str]]:\n path = path + [start]\n if start == \"end\":\n return [path]\n paths = list()\n for node in graph[start]:\n if path_cond(node=node, path=path):\n [paths.append(p) for p in find_all_paths(graph, path_cond, node, path)]\n return paths\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Advent of Code - Day 12: Passage Pathing\")\n parser.add_argument(\"-i\", help=\"Input file path\")\n args = parser.parse_args()\n file_path = Path(args.i) if args.i else Path(\"example_input.txt\")\n assert file_path.exists()\n\n graph = parse_input(file_path)\n paths_part1 = find_all_paths(graph, path_cond_part1)\n print(f\"Answer part 1: #Paths: {len(paths_part1)}\")\n\n paths_part2 = find_all_paths(graph, path_cond_part2)\n print(f\"Answer part 2: #Paths: {len(paths_part2)}\")\n","repo_name":"mhaselmann/advent-of-code","sub_path":"2021/12_passage_pathing/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"20939377843","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\n\n\"\"\"\nthe input x in both networks should be [o, g], 
where o is the observation and g is the goal.\n\n\"\"\"\n\n# define the actor network\nclass actor(nn.Module):\n def __init__(self, env_params):\n super(actor, self).__init__()\n self.max_action = env_params['action_max']\n self.fc1 = nn.Linear(env_params['obs'] + env_params['goal'], 256)\n self.fc2 = nn.Linear(256, 256)\n self.fc3 = nn.Linear(256, 256)\n self.action_out = nn.Linear(256, env_params['action'])\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n actions = self.max_action * torch.tanh(self.action_out(x))\n\n return actions\n\n\n\nclass SacActor(nn.Module):\n def __init__(self, env_params, min_log_sigma=-20.0, max_log_sigma=2.0):\n super(SacActor, self).__init__()\n self.max_action = env_params['action_max']\n self.fc1 = nn.Linear(env_params['obs'] + env_params['goal'], 256)\n self.fc2 = nn.Linear(256, 256)\n self.fc3 = nn.Linear(256, 256)\n self.fc_mu = nn.Linear(256, env_params['action'])\n self.fc_sigma = nn.Linear(256, env_params['action'])\n self.min_log_sigma = min_log_sigma\n self.max_log_sigma = max_log_sigma\n # self.action_out = nn.Linear(256, env_params['action'])\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n mu = self.fc_mu(x)\n log_sigma = self.fc_sigma(x)\n log_sigma = torch.clamp(log_sigma, self.min_log_sigma, self.max_log_sigma)\n return mu, log_sigma\n\n def act(self, mu, log_sigma):\n sigma = torch.exp(log_sigma)\n \n dist = Normal(mu, sigma)\n # * reparameterization trick: recognize the difference of sample() and rsample()\n action = dist.rsample()\n tanh_action = torch.tanh(action)\n # * the log-probabilities of actions can be calculated in closed forms\n log_prob = dist.log_prob(action)\n log_prob = (log_prob - torch.log(1 - torch.tanh(action).pow(2))).sum(-1)\n return tanh_action, log_prob\n\nclass critic(nn.Module):\n def __init__(self, env_params):\n super(critic, self).__init__()\n self.max_action = env_params['action_max']\n self.fc1 = nn.Linear(env_params['obs'] + env_params['goal'] + env_params['action'], 256)\n self.fc2 = nn.Linear(256, 256)\n self.fc3 = nn.Linear(256, 256)\n self.q_out = nn.Linear(256, 1)\n\n def forward(self, x, actions):\n x = torch.cat([x, actions / self.max_action], dim=1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n q_value = self.q_out(x)\n\n return q_value\n\nclass V_critic(nn.Module):\n def __init__(self, env_params):\n super(V_critic, self).__init__()\n self.fc1 = nn.Linear(env_params['obs'] + env_params['goal'], 256)\n self.fc2 = nn.Linear(256, 256)\n self.fc3 = nn.Linear(256, 256)\n self.v_out = nn.Linear(256, 1)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n v_value = self.v_out(x)\n \n return v_value\n\nclass BVNCritic(nn.Module):\n def __init__(self, env_params, latent_dim):\n super(BVNCritic, self).__init__()\n self.max_action = env_params['action_max']\n\n self.F = nn.Sequential(nn.Linear(env_params['obs']+env_params['action'], 176),\n nn.ReLU(),\n nn.Linear(176, 176),\n nn.ReLU(),\n nn.Linear(176, latent_dim))\n self.Phi = nn.Sequential(nn.Linear(env_params['obs']+env_params['goal'], 176),\n nn.ReLU(),\n nn.Linear(176, 176),\n nn.ReLU(),\n nn.Linear(176, latent_dim))\n self.goal_dim = env_params['goal']\n\n def forward(self, x, actions):\n g = x[:, -self.goal_dim:]\n s = x[:, :-self.goal_dim]\n F_in = torch.cat([s, actions / self.max_action], dim=1)\n Phi_in = torch.cat([s, g], dim=1)\n F_out = 
self.F(F_in).unsqueeze(-1)\n Phi_out = self.Phi(Phi_in).unsqueeze(-1)\n \n q_out = torch.matmul(torch.transpose(F_out, 1, 2), Phi_out)\n return q_out.squeeze(-1)\n\nclass MRNCritic(nn.Module):\n # Metric Residual Networks for Sample Efficient Goal-Conditioned Reinforcement Learning\n # https://github.com/Cranial-XIX/metric-residual-network\n # Q = - (d_sym + d_asym)\n # d_sym = (phi(x)-phi(y)).pow(2).mean(-1)\n # d_asym = max(h(x)-h(y))\n def __init__(self, env_params, emb_dim, hidden_dim):\n super(MRNCritic, self).__init__()\n\n self.max_action = env_params['action_max']\n self.embedding_dim = emb_dim\n self.hidden_dim = hidden_dim\n self.goal_dim = env_params['goal']\n self.act_dim = env_params['action']\n self.obs_dim = env_params['obs']\n self.f_emb = nn.Sequential(nn.Linear(self.obs_dim + self.act_dim, self.hidden_dim),\n nn.ReLU(inplace=True),\n nn.Linear(self.hidden_dim, self.hidden_dim),\n nn.ReLU(inplace=True))\n self.phi_emb = nn.Sequential(nn.Linear(self.obs_dim + self.act_dim + self.goal_dim, self.hidden_dim),\n nn.ReLU(inplace=True),\n nn.Linear(self.hidden_dim, self.hidden_dim),\n nn.ReLU(inplace=True))\n self.sym = nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim),\n nn.ReLU(inplace=True),\n nn.Linear(self.hidden_dim, self.embedding_dim))\n self.asym = nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim),\n nn.ReLU(inplace=True),\n nn.Linear(self.hidden_dim, self.embedding_dim))\n\n\n def forward(self, x, actions):\n g = x[:, -self.goal_dim:]\n s = x[:, :-self.goal_dim]\n x1 = torch.cat([s, actions / self.max_action], dim=-1)\n x2 = torch.cat([s, actions / self.max_action, g], dim=-1)\n fh = self.f_emb(x1)\n phih = self.phi_emb(x2)\n\n sym1 = self.sym(fh)\n sym2 = self.sym(phih)\n asym1 = self.asym(fh)\n asym2 = self.asym(phih)\n dist_s = (sym1 - sym2).pow(2).mean(-1, keepdims=True)\n res = F.relu(asym1 - asym2)\n dist_a = res.max(-1)[0].view(-1, 1)\n q = - (dist_a + dist_s)\n return q\n \n def evaluate(self, x, actions):\n g = x[:, -self.goal_dim:]\n s = x[:, :-self.goal_dim]\n x1 = torch.cat([s, actions / self.max_action], dim=-1)\n x2 = torch.cat([s, actions / self.max_action, g], dim=-1)\n fh = self.f_emb(x1)\n phih = self.phi_emb(x2)\n\n sym1 = self.sym(fh)\n sym2 = self.sym(phih)\n asym1 = self.asym(fh)\n asym2 = self.asym(phih)\n dist_s = (sym1 - sym2).pow(2).mean(-1, keepdims=True)\n res = F.relu(asym1 - asym2)\n dist_a = res.max(-1)[0].view(-1, 1)\n q = - (dist_a + dist_s)\n return q, -dist_a, -dist_s\n \n\n\n\n\nclass ResidualBlock(nn.Module):\n \"\"\"A single residual block.\"\"\"\n def __init__(self, inchannel, outchannel, stride=1, shortcut=None):\n\n super().__init__()\n self.left = nn.Sequential(\n nn.Conv2d(inchannel,outchannel,3,stride,1,bias=False),\n nn.BatchNorm2d(outchannel),\n nn.ReLU(),\n nn.Conv2d(outchannel,outchannel,3,1,1,bias=False), # this convolution keeps w and h unchanged\n nn.BatchNorm2d(outchannel)\n )\n self.right = shortcut\n\n def forward(self, input):\n out = self.left(input)\n residual = input if self.right is None else self.right(input)\n out += residual\n return F.relu(out)\n\n\n\nclass ResNet(nn.Module):\n \"\"\"The main ResNet.\"\"\"\n\n def __init__(self, num_class=1000):\n super().__init__()\n # a few plain convolution layers up front\n self.pre = nn.Sequential(\n nn.Conv2d(3, 64, 7, 2, 3, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d(3, 2, 1)\n )\n\n # repeated layers, each containing several residual blocks; the first block changes w and c, the remaining blocks are shape-preserving\n # after the first residual block the size is (w-1)/s + 1 (each block has a left branch with k=3, p=1 and a right shortcut with k=1, p=0)\n self.layer1 = self._make_layer(64, 128, 3) # s defaults to 1, so layer1 only changes the channel count\n self.layer2 = self._make_layer(128, 256, 4, 
stride=2) # (w-1)/s + 1\n self.layer3 = self._make_layer(256, 512, 6, stride=2)\n self.layer4 = self._make_layer(512, 512, 3, stride=2)\n self.fc = nn.Linear(512, num_class)\n\n def _make_layer(self, inchannel, outchannel, block_num, stride=1):\n shortcut = nn.Sequential(\n nn.Conv2d(inchannel, outchannel, 1, stride, bias=False),\n nn.BatchNorm2d(outchannel)\n )\n\n layers = []\n layers.append(ResidualBlock(inchannel, outchannel, stride, shortcut))\n\n # subsequent blocks keep the channels and w/h the same; a ResidualBlock does not change w or h\n for i in range(1, block_num):\n layers.append(ResidualBlock(outchannel, outchannel))\n\n return nn.Sequential(*layers)\n\n def forward(self, input):\n x = self.pre(input)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = F.avg_pool2d(x, 7) # for a 224x224 input the feature map is exactly 7x7 here, so pooling by 7 reduces it to 1x1\n # inputs smaller than 224 also work (pooling by 7 still yields 1), but inputs larger than 224 may not\n x = x.view(x.size(0), -1)\n return self.fc(x)\n\n\n\n","repo_name":"poisonwine/Goal-Conditioned-Exploration","sub_path":"rl_modules/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
{"seq_id":"15451912437","text":"#!/usr/bin/python3\n\n\"\"\"\nThis Python script takes in a URL, sends a request to the URL\nand displays the body of the response.\n\nIf the HTTP status code is greater than or equal to 400,\nprint: Error code:\nfollowed by the value of the HTTP status code.\n\nYou must use the packages requests and sys\nYou are not allowed to import packages other than requests and sys\nYou don’t need to check arguments passed to the script (number or type)\nPlease test your script in the sandbox provided, using the web server\nrunning on port 5000\n\nUsage: ./7-error_code.py <URL>\n\"\"\"\nimport requests\nfrom sys import argv\n\nif __name__ == '__main__':\n r = requests.get(argv[1])\n status = r.status_code\n print(r.text) if status < 400 else print(\n \"Error code: {}\".format(r.status_code))\n","repo_name":"agun36/alx-higher_level_programming","sub_path":"0x11-python-network_1/7-error_code.py","file_name":"7-error_code.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
{"seq_id":"1081569130","text":"from django.shortcuts import get_object_or_404\nfrom ncapp.serializers import ClinicSerializer, ArtSerializer\nfrom ncapp.models.clinic import Clinic\nfrom ncapp.models.patient import Patient\nfrom ncapp.models.art import Art\nfrom ncapp.models.site import Site\nfrom ncapp.models.regimen import Regimen\nfrom ncapp.models.support_group import SupportGroup\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom django.utils import timezone\nfrom ncapp.models.actor import Actor\nfrom rest_framework import permissions\n\n\nclass ArtViewSet(viewsets.ViewSet):\n \"\"\"\n A simple ViewSet for listing or retrieving ART dispensations.\n \"\"\"\n queryset = Art.objects.all()\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ArtSerializer\n def get_queryset(self):\n userQ = Actor.objects.all()\n user = get_object_or_404(userQ, pk=self.request.user.pk)\n queryset = self.queryset.filter(nurse_id = user.nurse.pk).all()\n return queryset\n\n def list(self, request):\n\n self.check_permissions(request=request)\n queryset = self.queryset\n serializer = ArtSerializer(queryset, many=True, context={'request': request})\n return Response(serializer.data)\n\n def retrieve(self, 
request, pk=None):\n queryset = Art.objects.all()\n art = get_object_or_404(queryset, pk=pk)\n serializer = ArtSerializer(art, context={'request': request})\n return Response(serializer.data)\n\n def create(self, request):\n self.check_permissions(request=request)\n todayArtVisits = self.queryset.filter(clinic=request.data['clinic'])\n\n if todayArtVisits.count() > 0:\n return Response(status=403, data={\"error\" : \"cannot add art dispensation of the same patient twice for the same visit date\"})\n\n\n userQ = Actor.objects.all()\n user = get_object_or_404(userQ, pk=request.user.pk)\n regimen = get_object_or_404(Regimen.objects.all(), pk=request.data['art_given'])\n clinic = get_object_or_404(Clinic.objects.all(), pk=request.data['clinic'])\n\n queryset = Art(\n wt=request.data['wt'],\n ht=request.data['ht'],\n sbp_dbp=request.data['sbp_dbp'],\n side_effect=request.data['side_effect'],\n tb_status=request.data['tb_status'],\n dose_missed=request.data['dose_missed'],\n pill_count=request.data['pill_count'],\n art_given=regimen,\n number_of_regimen_pills=request.data['number_of_regimen_pills'],\n pyridoxine=request.data['pyridoxine'],\n inh=request.data['inh'],\n bp_drug=request.data['bp_drug'],\n number_of_tabs=request.data['number_of_tabs'],\n fp_meth=request.data['fp_meth'],\n number_of_condoms=request.data['number_of_condoms'],\n adverse_outcome=request.data['adverse_outcome'],\n clinic=clinic,\n )\n queryset.save()\n \n return Response(status=200, data={\"success\" : \"Filled ART Dispensation Successfully\"})\n \n \n def update(self, request, pk=None):\n pass\n\n def partial_update(self, request, pk=None):\n pass\n\n def destroy(self, request, pk=None):\n queryset = Art.objects.all()\n art = get_object_or_404(queryset, pk=pk)\n art.delete()\n return Response(status=200, data={\"success\" : \"Deleted Visit Successfully\"})\n \n\n\n \n","repo_name":"PrinceMwase/ncap","sub_path":"ncapp/modelviews/art.py","file_name":"art.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"36195266398","text":"\"\"\"Write a program that asks how many km a rented car was driven and for how many days it was rented.\nCompute the amount to pay, given that the car costs R$ 60.00 per day and R$ 0.15 per km driven\"\"\"\n\nprint('=====1st TEST====')\ndias = int(input('How many days rented: '))\nvalordias = dias * 60.00\n\nkm = float(input('How many km driven: '))\nvalorkm = km * 0.15\n\nprint('Total to pay is R${:.2f} \\n'.format(valordias + valorkm))\n\n\nprint('====2nd TEST====')\ndias = int(input('How many days rented: '))\nkm = float(input('How many km driven: '))\ntotal = (dias * 60.00) + (km * 0.15)\nprint('Total to pay is R${:.2f}'.format(total))\n\n","repo_name":"AleLucasG/Estudos-Python-I","sub_path":"ex015.py","file_name":"ex015.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"14277907510","text":"import data_manipulations.data_cleaner as data_cleaner\nimport data_analysis.projects_analytics as projects_analytics\n\nfrom data_loaders.files_loader import load_data_from_csv\nfrom data_extractors.files_extractor import save_to_parquet\nfrom config import PROJECTS_STUDIES_COHORTS_FILE, SUBJECTS_SAMPLES_FILE, SAMPLE_RUN_RESULTS_FILE\n\n# --------------------------------------------\n# Load data\n# 
--------------------------------------------\nprint('Loading data...')\n\nprojects_studies_cohorts = load_data_from_csv(PROJECTS_STUDIES_COHORTS_FILE)\nsubject_samples = load_data_from_csv(SUBJECTS_SAMPLES_FILE)\nsamples_results = load_data_from_csv(SAMPLE_RUN_RESULTS_FILE)\n\nif any(df is None for df in [projects_studies_cohorts, subject_samples, samples_results]):\n print('One or more datasets could not be loaded. Exiting the pipeline.')\n exit(1)\n\n# Creating a dictionary to map original column names to their desired new names.\n# This is to make the column names more consistent and easier to manage.\nrename_dict = {\n 'cancer_detected_(yes_no)': 'is_cancer_detected',\n ' detection_value ': 'detection_value',\n 'sample_status(running/finished/failed)': 'sample_status',\n 'fail_reason(technical/quality)': 'fail_reason',\n 'date of run': 'date_of_run'\n }\n \nsamples_results.rename(columns=rename_dict, inplace=True)\n\nprint('Data loaded successfully.')\n\n# --------------------------------------------\n# Clean data\n# --------------------------------------------\nprint('Start cleaning data...')\n\nprojects_studies_cohorts = data_cleaner.basic_cleanup(projects_studies_cohorts)\nsubject_samples = data_cleaner.clean_subject_samples_df(subject_samples)\nsamples_results = data_cleaner.clean_samples_results_df(samples_results)\n\nprint('Data cleaned successfully.')\n\n# --------------------------------------------\n# Merge data\n# --------------------------------------------\nprint('Merging the dataframes...')\n\nmerged_df = projects_studies_cohorts.merge(subject_samples, on=['project_code', 'study_code', 'study_cohort_code'], how='left')\\\n .merge(samples_results, on='sample_id', how='left')\n\nprint('Dataframes merged successfully.')\n\n# --------------------------------------------\n# Save the cleaned and merged dataframe to a Parquet file\n# --------------------------------------------\nprint('Saving the merged dataframe to a Parquet file...')\n\noutput_file_path = save_to_parquet(merged_df, 'output.parquet')\n\nprint(f'Data saved successfully to {output_file_path}.')\n\n# --------------------------------------------\n# Generate project summaries\n# --------------------------------------------\nprint('Generating project summaries...')\n\nproject_summaries_path = projects_analytics.generate_project_summaries(merged_df)\n\nprint(f'Project summaries generated successfully. 
Saved to {project_summaries_path}.')\nprint('Pipeline finished successfully.')\n\n\n","repo_name":"Aldebrand/c2i","sub_path":"data_pipeline.py","file_name":"data_pipeline.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"29673770118","text":"import turtle\n\nturtle.pencolor('green')\n\nfor i in range(60):\n turtle.forward(30)\n turtle.left(360 / 3 + 10)\nturtle.done()\n\n# turtle.color('red', 'yellow')\n\n# turtle.begin_fill()\n# for i in range(5 * 3):\n# turtle.forward(100 + i * 10)\n# turtle.right(360 / 5 * 2)\n# turtle.end_fill()\n\n# turtle.done()\n\n# turtle.begin_fill()\n# for _ in range(4):\n# turtle.forward(100)\n# turtle.right(90)\n# turtle.end_fill()\n\n# turtle.done()\n","repo_name":"07130918/Python","sub_path":"section19_graphics/turtle_training.py","file_name":"turtle_training.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"28835786852","text":"#### console application ---> manage crowd-funding projects\r\nfrom book_crud import create_project, display_all_projects, delete_project, register_user, check_login, update_project, search_by_start_date, search_by_end_date\r\nfrom colorama import Fore, Back, Style\r\ndef enter():\r\n while True:\r\n choice = input(f\"{Fore.GREEN}Please choose from this list:{Style.RESET_ALL} \\n 1-Register \\n 2-Login \\n 3-Exit \\n{Fore.GREEN}Enter your choice:{Style.RESET_ALL} \")\r\n if choice == '1':\r\n register_user()\r\n elif choice == '2':\r\n f = check_login()\r\n if f == True:\r\n mainmenu()\r\n else:\r\n enter()\r\n elif choice == '3':\r\n print(Fore.YELLOW + \"***** Thanks for using our App *****\")\r\n print(Style.RESET_ALL)\r\n exit()\r\n\r\ndef mainmenu():\r\n while True:\r\n choice = input(f\"{Fore.GREEN}please choose from this list:{Style.RESET_ALL}\\n 1-Create new project \\n 2-List all projects \\n 3-Edit the project \\n 4-Delete project \\n 5-Search by start date \\n 6-Search by end date \\n 7-Exit \\n{Fore.GREEN}Enter your choice:{Style.RESET_ALL} \")\r\n if choice=='1':\r\n create_project()\r\n elif choice=='2':\r\n display_all_projects()\r\n elif choice=='3':\r\n update_project()\r\n elif choice=='4':\r\n delete_project()\r\n elif choice=='5':\r\n search_by_start_date()\r\n elif choice=='6':\r\n search_by_end_date()\r\n elif choice=='7':\r\n print(Fore.YELLOW + \"***** Thanks for using our App *****\")\r\n print(Style.RESET_ALL)\r\n exit()\r\nprint(f\"{Fore.MAGENTA} Welcome To CROWD_FUNDING Console App \\n{Fore.MAGENTA} Telecom Applications Development\\n{Fore.CYAN} ITI\\n {Fore.YELLOW}CREATED BY: Esraa Nasser \")\r\nprint(Style.RESET_ALL)\r\nenter()\r\nmainmenu()","repo_name":"EsraaNasserHelal/crowd-fundraising","sub_path":"main_menu.py","file_name":"main_menu.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"73136744430","text":"import requests\nfrom PIL import Image\nfrom StringIO import StringIO\nfrom uuid import uuid4\n\nTHUMBNAIL_SIZE = 128, 128\n\ndef process_image(file_path, name=None):\n if not name:\n name = uuid4()\n img = Image.open(file_path)\n thumbnail = img.copy()\n # PIL's thumbnail() resizes in place and returns None, so call it on the copy\n thumbnail.thumbnail(THUMBNAIL_SIZE)\n # Output everything to StringIO objects\n orig_sio = StringIO()\n img.save(orig_sio, format='JPEG')\n thumb_sio = StringIO()\n thumbnail.save(thumb_sio, format='JPEG')\n return {\n 'name': name,\n 'thumbnail': 
thumb_sio,\n 'original': orig_sio\n }\n\n\ndef process_external_file(url, name=None):\n if not name:\n name = uuid4()\n response = requests.get(url)\n if response.status_code == 200:\n img = Image.open(StringIO(response.content))\n thumbnail = img.copy()\n thumbnail.thumbnail(THUMBNAIL_SIZE)\n orig_sio = StringIO()\n thumb_sio = StringIO()\n img.save(orig_sio, format='JPEG')\n thumbnail.save(thumb_sio, format='JPEG')\n # Process the image, generate thumbnails and a processed\n # file, then return them\n # read response.content\n return {\n 'name': name,\n 'thumbnail': thumb_sio,\n 'original': orig_sio\n }\n else:\n return None\n","repo_name":"mochify/poctopus","sub_path":"proctopy/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"32723775505","text":"import unittest\nfrom src.supply_system import SupplySystem\nfrom src.delivery_system import DeliveryAddress\n\n\nclass SupplySystemTest(unittest.TestCase):\n\n def setUp(self):\n self.ssystem = SupplySystem()\n\n def test_handshake(self):\n actual = self.ssystem.handshake()\n expected = 'approved'\n self.assertEqual(actual, expected)\n\n def test_supply(self):\n address = DeliveryAddress(20, \"Israel\", \"Beer Sheva\", \"Rager Blvd 12\", \"8458527\")\n actual = self.ssystem.supply(\"Israel Israelovice\", address)\n expected = 'approved'\n self.assertEqual(actual, expected)\n","repo_name":"orsaada/ecommerce-site","sub_path":"Tests/unit tests/test_supply_system.py","file_name":"test_supply_system.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"37486528265","text":"from flask_wtf import FlaskForm\nfrom wtforms import StringField\n\n\nclass UserCategoryForm(FlaskForm):\n streetware = StringField(\"Streetware\")\n formalwear = StringField(\"Formal Ware\")\n dark = StringField(\"Dark\")\n boho = StringField(\"Boho\")\n old_money = StringField(\"Old Money\")\n athleisure = StringField(\"Athleisure\")","repo_name":"renahime/NotPinterest","sub_path":"app/forms/user_category.py","file_name":"user_category.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1920241259","text":"#===============================================================================\n#\n# File Name : parseCheckM.py\n# Description : Given sampleid argument, opens CheckM.stdout. Finds bins with\n# completeness >50% and contamination <5%. Writes passing bin\n# names to outfile. 
Called by s17_parseCheckM.sh in parent dir.\n# Usage : python p09_parseCheckM.py sampleid\n# Author : Aura Ferreiro, alferreiro@wustl.edu\n# Version : 1.0\n# Created On : 2021-01-15\n# Last Modified: 2021-01-15\n#===============================================================================\n\n\nimport sys\n\nsampleid = sys.argv[1]\ncheckMfilepath = 'CheckM_out/'+sampleid+'_CheckM/'+sampleid+'_checkm.stdout'\noutpath = 'CheckM_out/'+sampleid+'_goodbins.txt'\n\ncheckMfile = open(checkMfilepath, 'r')\noutfile = open(outpath, 'w')\n\n\nfor line in checkMfile:\n line2 = line.strip()\n if line2.startswith('bin.'):\n linev = line2.split()\n if float(linev[12]) > 50 and float(linev[13]) < 5:\n outfile.write(sampleid+'_'+linev[0]+'.fa\\n')\n\n\ncheckMfile.close()\noutfile.close()\n\n\n\n\n","repo_name":"alferreiro/MetagenomicAnalyses_ExampleBashScripts","sub_path":"pythonscripts/p09_parseCheckM.py","file_name":"p09_parseCheckM.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"38287187776","text":"\"\"\"\nhttps://leetcode.com/problems/find-if-path-exists-in-graph/\n\nhttps://zhuanlan.zhihu.com/p/93647900\n\"\"\"\n\n\ndef valid_path(n: int, edges: list[list[int]], source: int, destination: int) -> bool:\n \"\"\"\n Union-Find (disjoint set)\n \"\"\"\n union_find = {\n i: i for i in range(n)\n }\n\n def find_ancestor(x: int) -> int:\n if union_find[x] == x:\n return x\n else:\n return find_ancestor(union_find[x])\n\n for edge in edges:\n union_find[find_ancestor(edge[0])] = find_ancestor(edge[1])\n\n return find_ancestor(source) == find_ancestor(destination)\n\n\n# print(\n# valid_path(\n# n=3,\n# edges=[\n# [0, 1], [1, 2], [2, 0]\n# ],\n# source=0,\n# destination=2\n# )\n# )\n# print(\n# valid_path(\n# n=6,\n# edges=[\n# [0, 1],\n# [0, 2],\n# [3, 5],\n# [5, 4],\n# [4, 3]\n# ],\n# source=0,\n# destination=5\n# )\n# )\n# print(\n# valid_path(\n# n=10,\n# edges=[[4, 3], [1, 4], [4, 8], [1, 7], [6, 4], [4, 2], [7, 4], [4, 0], [0, 9], [5, 4]],\n# source=5,\n# destination=9\n# )\n# )\nprint(\n valid_path(\n n=10,\n edges=[[2, 6], [4, 7], [1, 2], [3, 5], [7, 9], [6, 4], [9, 8], [0, 1], [3, 0]],\n source=5,\n destination=9\n )\n)\n","repo_name":"AnhaoROMA/leetcode","sub_path":"UnionFind/1971 find if path exists in graph.py","file_name":"1971 find if path exists in graph.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"40033907064","text":"import tornado.ioloop\nimport tornado.web\nimport threading\nimport asyncio\nimport json\n\ngethistory_s_r = None\njineng_s_r = None\nreadlog_s_r = None\n\n\n'''\nhttp://localhost:8888/\n'''\n\nclass BaseHandler(tornado.web.RequestHandler):\n def get_current_user(self):\n return self.get_secure_cookie(\"user\")\n\n\nclass MainHandler(BaseHandler):\n \n def get(self):\n global history\n history = None\n if not self.current_user:\n self.redirect(\"/login\")\n return\n self.render('index.html')\n\nclass DsHandler(BaseHandler):\n def get(self):\n if not self.current_user:\n self.redirect(\"/login\")\n return\n self.render('ds.html')\n\nclass LogHandler(BaseHandler):\n def get(self):\n if not self.current_user:\n self.redirect(\"/login\")\n return\n self.render('log.html', log=readlog_s_r)\n\nclass StudyHandler(BaseHandler):\n def get(self):\n if not self.current_user:\n self.redirect(\"/login\")\n return\n self.render('study.html')\n\n\nclass DhHandler(BaseHandler):\n 
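# Renders dh.html with the conversation history supplied by gethistory_s_r.\n 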
def get(self):\n if not self.current_user:\n self.redirect(\"/login\")\n return\n self.render('dh.html',history=gethistory_s_r.getHistory())\n\nclass HistoryHandler(BaseHandler):\n def get(self):\n if not self.current_user:\n res = {'code': 1, 'message': 'illegal visit'}\n else:\n res = {'code': 0, 'message': 'ok', 'history': json.dumps(gethistory_s_r.getHistory())}\n self.write(json.dumps(res))\n self.finish()\n\nclass GetLogHandler(BaseHandler):\n def get(self):\n if not self.current_user:\n res = {'code': 1, 'message': 'illegal visit'}\n else:\n res = {'code': 0, 'message': 'ok', 'log': readlog_s_r}\n self.write(json.dumps(res))\n self.finish()\n\nclass ChatHandler(BaseHandler):\n def post(self):\n if not self.current_user:\n res = {'code': 1, 'message': 'illegal visit'}\n print('chl1..........')\n else:\n global jineng_s_r\n query = self.get_argument('query', '')\n sc_s = str(query)\n if sc_s !='':\n jineng_s_r.jineng(sc_s)\n print('chl3..........')\n res = {'code': 0, 'message': 'ok'}\n self.write(json.dumps(res))\n self.finish()\n\nclass ListHandler(BaseHandler):\n def get(self):\n if not self.current_user:\n self.redirect(\"/login\")\n return\n self.render('list.html')\n\nclass LoginHandler(BaseHandler):\n def get(self):\n\n if self.current_user:\n self.redirect('/')\n return\n '''\n self.write('
<html><body><form action=\"/login\" method=\"post\">'\n 'Name: <input type=\"text\" name=\"name\">'\n 'Password: <input type=\"password\" name=\"password\">'\n '<input type=\"submit\" value=\"Sign in\">'\n '</form></body></html>
')\n '''\n self.render('login.html')\n\n\n def post(self):\n\n if '12345' == self.get_argument('password', default=''):# change this password as needed\n self.set_secure_cookie(\"user\", self.get_argument(\"name\"))\n self.redirect(\"/\")\n else:\n self.write('Incorrect username or password, please try again')\n\n\nclass ds():\n def get(self):\n self.render('ds.html')\n \n\nsettings = {\n \"cookie_secret\": \"__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__\",\n \"template_path\": \"server/template\",\n \"static_path\": \"server/static\",\n #\"debug\": False # set this to False for a production release\n}\ndef make_app():\n return tornado.web.Application([\n (r\"/\", MainHandler),\n (r\"/login\", LoginHandler),\n (r\"/ds\", DsHandler),\n (r\"/log\", LogHandler),\n (r\"/study\", StudyHandler),\n (r\"/dh\", DhHandler),\n (r\"/list\", ListHandler),\n (r\"/history\", HistoryHandler),\n (r\"/chat\", ChatHandler),\n (r\"/getlog\", GetLogHandler)\n ], **settings)\n\napp = make_app()\n\ndef start_server():\n asyncio.set_event_loop(asyncio.new_event_loop())\n app.listen(8888)# port, change as needed\n tornado.ioloop.IOLoop.current().start()\n\ndef run(jineng_r, gethistory_r, readlog_r):\n global jineng_s_r\n global gethistory_s_r\n global readlog_s_r\n jineng_s_r = jineng_r\n gethistory_s_r = gethistory_r\n readlog_s_r = readlog_r\n threading.Thread(target=start_server).start()\n\ndef hread(readlog_r):\n global readlog_s_r\n readlog_s_r = readlog_r","repo_name":"waterflames-team/lingkong-robot","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
{"seq_id":"73408979631","text":"import csv\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef summarize_results(solutions, path):\r\n costs = np.array([cost for sol, cost in solutions])\r\n best_sol, best_cost = min(solutions, key=lambda x: x[1])\r\n worst_sol, worst_cost = max(solutions, key=lambda x: x[1])\r\n avg_cost = np.mean(costs)\r\n\r\n print(\"Best cost: \" + str(best_cost))\r\n print(\"Worst cost: \" + str(worst_cost))\r\n print(\"Mean cost after 200 solutions: \" + str(avg_cost))\r\n\r\n show_solution(path, best_sol, title=\"Best Tour\")\r\n\r\n\r\ndef get_coords_n_costs(path: str):\r\n with open(path, 'r') as f:\r\n reader = csv.reader(f, delimiter=';')\r\n data = list(reader)\r\n\r\n data = np.array(data).astype(int)\r\n\r\n coords = data[:, :2]\r\n costs = data[:, 2]\r\n\r\n return coords, costs\r\n\r\n\r\ndef get_dist_matrix(path: str):\r\n coords, costs = get_coords_n_costs(path)\r\n\r\n distances = np.round(np.sqrt(np.sum((coords[:, None, :] - coords[None, :, :]) ** 2, axis=-1))).astype(float)\r\n distances[distances == 0] = np.inf\r\n\r\n return distances + costs\r\n\r\n\r\ndef calculate_cost(solution, matrix):\r\n return sum(matrix[solution[i-1], solution[i]] for i in range(len(solution)))\r\n\r\n\r\ndef show_solution(path, solution, title):\r\n coords, node_costs = get_coords_n_costs(path)\r\n # plt.figure(figsize=(10, 10))\r\n plt.scatter(coords[:, 0], coords[:, 1], c=node_costs, cmap='plasma')\r\n\r\n best_tour_coords = np.append(solution, solution[0])\r\n plt.plot(coords[best_tour_coords, 0], coords[best_tour_coords, 1], 'r-')\r\n\r\n plt.colorbar(label='Cost')\r\n plt.title(title)\r\n plt.show()\r\n\r\n\r\ndef find_regret_with_solution(solution, vertex_id, matrix):\r\n costs = []\r\n solutions = []\r\n for i in range(len(solution)+1):\r\n new_sol = solution[:i] + [vertex_id] + solution[i:]\r\n solutions.append(new_sol)\r\n costs.append(calculate_cost(new_sol, 
matrix))\r\n return get_regret_n_sol(costs, solutions)\r\n\r\n\r\ndef get_regret_n_sol(costs, solutions):\r\n first = np.argmin(costs)\r\n cost1 = costs[first]\r\n sol = solutions[first]\r\n costs = np.delete(costs, first)\r\n second = np.argmin(costs)\r\n cost2 = costs[second]\r\n return cost2 - cost1, sol\r\n\r\n\r\ndef weighted_regret(matrix, start_v, weight):\r\n n = math.ceil(matrix.shape[0] / 2)\r\n \r\n next_v = np.argmin(matrix[start_v])\r\n\r\n cycle = [start_v, next_v]\r\n current_cost = calculate_cost(cycle, matrix)\r\n\r\n unvisited = np.ones(len(matrix), dtype='bool')\r\n unvisited[start_v] = False\r\n unvisited[next_v] = False\r\n\r\n for _ in range(n - 2):\r\n scores = -np.ones(shape=unvisited.shape) * np.inf\r\n new_costs = np.zeros(shape=unvisited.shape)\r\n new_sols = np.zeros(shape=unvisited.shape, dtype=np.ndarray)\r\n\r\n for vertex_id in np.where(unvisited == True)[0]:\r\n regret, solution = find_regret_with_solution(cycle, vertex_id, matrix)\r\n new_cost = calculate_cost(solution, matrix)\r\n increase = new_cost - current_cost\r\n \r\n score = weight * regret - (1 - weight) * increase\r\n scores[vertex_id] = score\r\n new_sols[vertex_id] = solution\r\n new_costs[vertex_id] = new_cost\r\n\r\n highest_score_id = np.argmax(scores)\r\n cycle = new_sols[highest_score_id]\r\n unvisited[highest_score_id] = False\r\n current_cost = new_costs[highest_score_id]\r\n\r\n return cycle, current_cost\r\n\r\n\r\ndef random_solution(matrix):\r\n n = math.ceil(matrix.shape[0] / 2)\r\n\r\n sol = np.array(np.random.choice(matrix.shape[0], size=n, replace=False))\r\n\r\n cost = calculate_cost(sol, matrix)\r\n\r\n return sol, cost\r\n\r\n\r\n# m_id - id of vertex martix-wise, s_id - id of vertex solution-wise,\r\ndef calculate_delta(solution, matrix, m_id_in, s_id_out):\r\n prev_vertex = solution[s_id_out - 1]\r\n next_vertex = solution[(s_id_out + 1) % len(solution)]\r\n\r\n cost_out = matrix[prev_vertex][solution[s_id_out]] + matrix[solution[s_id_out]][next_vertex]\r\n\r\n cost_in = matrix[prev_vertex][m_id_in] + matrix[m_id_in][next_vertex]\r\n\r\n if math.isnan(cost_in - cost_out) or cost_in-cost_out==np.inf:\r\n return 0\r\n\r\n return cost_in - cost_out\r\n\r\n\r\ndef calculate_delta_edge(solution, matrix, start, end):\r\n prev_vertex = solution[start - 1]\r\n next_vertex = solution[(end + 1) % len(solution)]\r\n\r\n cost_out = matrix[solution[start]][prev_vertex] + matrix[solution[end]][next_vertex]\r\n\r\n cost_in = matrix[solution[end]][prev_vertex] + matrix[solution[start]][next_vertex]\r\n\r\n if math.isnan(cost_in - cost_out) or cost_in-cost_out==np.inf:\r\n return 0\r\n\r\n return cost_in - cost_out\r\n\r\n\r\ndef get_neighbourhood_2n(solution, matrix):\r\n neighbors = []\r\n solution_length = len(solution)\r\n matrix_shape = matrix.shape[0]\r\n\r\n # intra-route\r\n for i in range(solution_length-1):\r\n for j in range(i + 1, solution_length):\r\n neighbor = solution.copy()\r\n neighbor[i], neighbor[j] = neighbor[j], neighbor[i]\r\n delta = calculate_delta(solution, matrix, neighbor[i], j) + calculate_delta(solution, matrix, neighbor[j], i)\r\n neighbors.append((neighbor, delta))\r\n \r\n all_nodes = set(range(matrix_shape))\r\n available_nodes = all_nodes - set(solution)\r\n\r\n # inter-route\r\n for i in range(solution_length):\r\n for node in available_nodes:\r\n neighbor = solution.copy()\r\n neighbor[i] = node\r\n delta = calculate_delta(solution, matrix, node, i)\r\n neighbors.append((neighbor, delta))\r\n\r\n return neighbors\r\n\r\n\r\ndef 
get_neighbourhood_2e(solution, matrix):\r\n neighbors = []\r\n solution_length = len(solution)\r\n matrix_shape = matrix.shape[0]\r\n\r\n # intra-route\r\n for i in range(solution_length-1):\r\n for j in range(i + 1, solution_length):\r\n if i == 0 and j == solution_length - 1:\r\n continue\r\n\r\n neighbor = np.concatenate((solution[:i], solution[i:j+1][::-1], solution[j+1:]))\r\n\r\n delta = calculate_delta_edge(solution, matrix, i, j)\r\n neighbors.append((neighbor, delta))\r\n\r\n\r\n all_nodes = set(range(matrix_shape))\r\n available_nodes = all_nodes - set(solution)\r\n\r\n # inter-route\r\n for i in range(solution_length):\r\n for node in available_nodes:\r\n neighbor = solution.copy()\r\n neighbor[i] = node\r\n delta = calculate_delta(solution, matrix, node, i)\r\n neighbors.append((neighbor, delta))\r\n\r\n return neighbors\r\n\r\n\r\ndef steepest_2n(matrix, starting_sol):\r\n best_sol = starting_sol\r\n best_delta = 0\r\n neighbourhood = get_neighbourhood_2n(starting_sol, matrix)\r\n\r\n while len(neighbourhood):\r\n deltas = np.array([delta for _, delta in neighbourhood])\r\n best_index = np.argmin(deltas)\r\n probably_best_sol, best_delta = neighbourhood[best_index]\r\n\r\n if best_delta >= 0:\r\n break\r\n \r\n best_sol, best_delta = neighbourhood[best_index]\r\n\r\n neighbourhood = get_neighbourhood_2n(best_sol, matrix)\r\n\r\n return best_sol, calculate_cost(best_sol, matrix)\r\n\r\n\r\ndef steepest_2e(matrix, starting_sol):\r\n best_sol = np.array(starting_sol)\r\n best_delta = 0\r\n neighbourhood = get_neighbourhood_2e(starting_sol, matrix)\r\n\r\n while len(neighbourhood):\r\n deltas = np.array([delta for _, delta in neighbourhood])\r\n best_index = np.argmin(deltas)\r\n probably_best_sol, best_delta = neighbourhood[best_index]\r\n\r\n if best_delta >= 0:\r\n break\r\n \r\n best_sol, best_delta = neighbourhood[best_index]\r\n neighbourhood = get_neighbourhood_2e(best_sol, matrix)\r\n\r\n return best_sol, calculate_cost(best_sol, matrix)\r\n\r\n\r\n\r\ndef greedy_2n(matrix, starting_sol):\r\n best_sol = starting_sol\r\n best_delta = 0\r\n neighbourhood = get_neighbourhood_2n(starting_sol, matrix)\r\n np.random.shuffle(neighbourhood)\r\n i = 0\r\n while i < len(neighbourhood):\r\n probably_best_sol, best_delta = neighbourhood[i]\r\n if best_delta >= 0:\r\n i += 1\r\n else:\r\n best_sol, best_delta = neighbourhood[i]\r\n neighbourhood = get_neighbourhood_2n(best_sol, matrix)\r\n np.random.shuffle(neighbourhood)\r\n i = 0\r\n\r\n return best_sol, calculate_cost(best_sol, matrix)\r\n\r\n\r\ndef greedy_2e(matrix, starting_sol):\r\n best_sol = starting_sol\r\n best_delta = 0\r\n neighbourhood = get_neighbourhood_2e(starting_sol, matrix)\r\n np.random.shuffle(neighbourhood)\r\n\r\n i = 0\r\n while i < len(neighbourhood):\r\n probably_best_sol, best_delta = neighbourhood[i]\r\n if best_delta >= 0:\r\n i += 1\r\n else:\r\n best_sol, best_delta = neighbourhood[i]\r\n neighbourhood = get_neighbourhood_2e(best_sol, matrix)\r\n np.random.shuffle(neighbourhood)\r\n i = 0\r\n\r\n return best_sol, calculate_cost(best_sol, matrix)\r\n\r\n\r\ndef run_greedy_2n_r_experiment(path: str):\r\n matrix = get_dist_matrix(path)\r\n solutions = []\r\n\r\n for v in range(2):\r\n solutions.append(greedy_2n(matrix, random_solution(matrix)[0]))\r\n\r\n summarize_results(solutions, path)\r\n\r\n\r\ndef run_greedy_2n_bgch_experiment(path: str):\r\n matrix = get_dist_matrix(path)\r\n solutions = []\r\n\r\n for v in range(200):\r\n solutions.append(greedy_2n(matrix, weighted_regret(matrix, v, 
0.5)[0]))\r\n\r\n summarize_results(solutions, path)\r\n\r\n\r\ndef run_steepest_2n_r_experiment(path: str):\r\n matrix = get_dist_matrix(path)\r\n solutions = []\r\n\r\n for v in range(200):\r\n solutions.append(steepest_2n(matrix, random_solution(matrix)[0]))\r\n\r\n summarize_results(solutions, path)\r\n\r\n\r\ndef run_steepest_2n_bgch_experiment(path: str):\r\n matrix = get_dist_matrix(path)\r\n solutions = []\r\n\r\n for v in range(200):\r\n solutions.append(steepest_2n(matrix, weighted_regret(matrix, v, 0.5)[0]))\r\n\r\n summarize_results(solutions, path)\r\n\r\n\r\ndef run_greedy_2e_r_experiment(path: str):\r\n matrix = get_dist_matrix(path)\r\n solutions = []\r\n\r\n for v in range(2):\r\n solutions.append(greedy_2e(matrix, random_solution(matrix)[0]))\r\n\r\n summarize_results(solutions, path)\r\n\r\n\r\ndef run_greedy_2e_bgch_experiment(path: str):\r\n matrix = get_dist_matrix(path)\r\n solutions = []\r\n\r\n for v in range(200):\r\n solutions.append(greedy_2e(matrix, weighted_regret(matrix, v, 0.5)[0]))\r\n\r\n summarize_results(solutions, path)\r\n\r\n\r\ndef run_steepest_2e_r_experiment(path: str):\r\n matrix = get_dist_matrix(path)\r\n solutions = []\r\n\r\n for v in range(200):\r\n solutions.append(steepest_2e(matrix, random_solution(matrix)[0]))\r\n\r\n summarize_results(solutions, path)\r\n\r\n\r\ndef run_steepest_2e_bgch_experiment(path: str):\r\n matrix = get_dist_matrix(path)\r\n solutions = []\r\n\r\n for v in range(200):\r\n solutions.append(steepest_2e(matrix, weighted_regret(matrix, v, 0.5)[0]))\r\n\r\n summarize_results(solutions, path)\r\n","repo_name":"Oskshust/evo_comp","sub_path":"3/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":10993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74905113389","text":"#!/usr/bin/env python\nimport os\nimport subprocess\nimport sys\nimport time\n\ndef readFromFile(list,file):\n\tfile = open(file)\n\tfor line in file:\n\t\tline=line.strip('\\n')\n\t\thost=line.split(\":\")[0]\n\t\tport=line.split(\":\")[1].split(\"=\")[1]\n\t\tlist.append(\"%s:%s\"%(host,port))\n\ndef initReplicaSet(replicaSetName,mongdNodes,arbiterNodes):\n\tif len(mongdNodes)==0:\n\t\treturn\n\tprimary=mongdNodes[0]\n\tprint(\"will use %s as primary in %s\"%(primary,replicaSetName))\n\tsubprocess.check_call('''mongo %s --eval \"printjson(rs.initiate())\"'''%(primary),shell=True)\n\tfor worder in mongdNodes[1:]:\n\t\tsubprocess.check_call('''mongo %s --eval 'printjson(rs.add(\"%s\"))' '''%(primary,worder),shell=True)\n\tfor arbiter in arbiterNodes:\n\t\tsubprocess.check_call('''mongo %s --eval 'printjson(rs.addArb(\"%s\"))' '''%(primary,arbiter),shell=True)\n\t\t\nshard1MongdNodes=[]\nshard1ArbiterNodes=[]\nshard2MongdNodes=[]\nshard2ArbiterNodes=[]\nshard3MongdNodes=[]\nshard3ArbiterNodes=[]\n\nreadFromFile(shard1MongdNodes,\"shard1_mongod.properties\")\nreadFromFile(shard1ArbiterNodes,\"shard1_arbiter.properties\")\nreadFromFile(shard2MongdNodes,\"shard2_mongod.properties\")\nreadFromFile(shard2ArbiterNodes,\"shard2_arbiter.properties\")\nreadFromFile(shard3MongdNodes,\"shard3_mongod.properties\")\nreadFromFile(shard3ArbiterNodes,\"shard3_arbiter.properties\")\n\nprint(\"shard1 workers:\")\nprint(shard1MongdNodes)\nprint(\"\")\nprint(\"shard1 arbiters:\")\nprint(shard1ArbiterNodes)\nprint(\"\")\nprint(\"shard2 workers:\")\nprint(shard2MongdNodes)\nprint(\"\")\nprint(\"shard2 arbiters:\")\nprint(shard2ArbiterNodes)\nprint(\"\")\nprint(\"shard3 
workers:\")\nprint(shard3MongdNodes)\nprint(\"\")\nprint(\"shard3 arbiters:\")\nprint(shard3ArbiterNodes)\nprint(\"\")\n\ninitReplicaSet(\"s1\",shard1MongdNodes,shard1ArbiterNodes)\ninitReplicaSet(\"s2\",shard2MongdNodes,shard2ArbiterNodes)\ninitReplicaSet(\"s3\",shard3MongdNodes,shard3ArbiterNodes)\n\nport=sys.argv[1]\ntime.sleep(5)\nif len(shard1MongdNodes)>0:\n\ttime.sleep(1)\n\tsubprocess.check_call('''mongo localhost:%s --eval 'printjson(sh.addShard(\"s1/%s\"))' '''%(port,shard1MongdNodes[0]),shell=True)\n\t\nif len(shard2MongdNodes)>0:\t\n\ttime.sleep(1)\n\tsubprocess.check_call('''mongo localhost:%s --eval 'printjson(sh.addShard(\"s2/%s\"))' '''%(port,shard2MongdNodes[0]),shell=True)\n\nif len(shard3MongdNodes)>0:\n\ttime.sleep(1)\n\tsubprocess.check_call('''mongo localhost:%s --eval 'printjson(sh.addShard(\"s3/%s\"))' '''%(port,shard3MongdNodes[0]),shell=True)","repo_name":"howard-repos/jdt-csd","sub_path":"mongodb-basic/src/main/resources/scripts/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"38"} +{"seq_id":"73652997229","text":"import pygame\nimport os\nimport schedule\nimport requests\nfrom BigCookie import BigCookie\nfrom CookieScanner import CookieScanner\nfrom Buildings import BuildingGroup, Cursors, Grandmas, Factories, Farms, Mines\nfrom Buildings import productions, buildings\n\n\n# ---\n\ndef load_image(name, colorkey=None):\n fullname = os.path.join('data', name)\n image = pygame.image.load(fullname).convert()\n if colorkey is not None:\n if colorkey == -1:\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey)\n else:\n image = image.convert_alpha()\n return image\n\n\ndef human_readable(num): # a function for a much easier measurement of numbers\n str_num = ''\n decimals = num - int(num)\n num -= decimals\n num = int(num)\n while num // 1000 > 0:\n str_num = ',' + str(num % 1000).rjust(3, '0') + str_num\n num //= 1000\n if decimals == 0:\n return str(num) + str_num\n else:\n return str(num) + str_num + str(round(decimals, 2))[1:]\n\n\ndouble_click_power_cost = 100\n# ---\n\nclass Background(pygame.sprite.Sprite):\n def __init__(self, group, image):\n pygame.sprite.Sprite.__init__(self, group)\n self.image = pygame.transform.scale(image, (1100, 750))\n self.rect = self.image.get_rect()\n self.rect.x = 0\n self.rect.y = 0\n\n\npygame.init()\n\nfont = pygame.font.SysFont('Calibri', 24, True, False)\nmedium_font = pygame.font.SysFont('Calibri', 30, False, False)\nbigger_font = pygame.font.SysFont('Calibri', 36, True, False)\n\nscreen = pygame.display.set_mode((1100, 750), pygame.FULLSCREEN, pygame.RESIZABLE)\nbuildings_surface = pygame.Surface((200, 500))\n\nall_sprites = pygame.sprite.Group()\nbackground = Background(all_sprites, load_image('Background.jpg'))\nbig_cookie = BigCookie(all_sprites, load_image('BigCookie.png', -1))\ncookie_scan = CookieScanner()\nclick_power_text = font.render('Double Click Power', False, (0, 0, 0))\ncookie_amount = font.render('Amount: ' + str(big_cookie.cookies_amount), False, (0, 0, 0))\nquit_text = font.render('Quit', False, (255, 0, 0))\nstore_text = bigger_font.render('Store', False, (0, 0, 0))\nplayer_input_box = pygame.Rect(100, 100, 140, 32)\nplayer_name = ''\nrunning = True\nactive = False\ncolor = (200, 200, 200)\nfps = 60\nclock = pygame.time.Clock()\ncursors = Cursors(big_cookie)\ngrandmas = Grandmas()\nfarms = Farms()\nmines = Mines()\nfactories = Factories()\nbuilding_group = 
BuildingGroup(big_cookie)\nbuilding_group.append(cursors, grandmas, farms, mines, factories)\nget_pressed = pygame.mouse.get_pressed\nschedule.every(0.1).seconds.do(building_group.automatic_cookie_production)\n\n\nwhile running: # game loop\n for event in pygame.event.get():\n mouse_pos = pygame.mouse.get_pos()\n if event.type == pygame.QUIT:\n running = False\n break\n if event.type == pygame.MOUSEBUTTONDOWN:\n # If the user clicked on the input_box rect.\n if player_input_box.collidepoint(mouse_pos):\n # Toggle the active variable.\n active = not active\n # Change the current color of the input box.\n color = (255, 255, 255) if active else (200, 200, 200)\n if event.type == pygame.KEYDOWN:\n if active:\n if event.key == pygame.K_RETURN:\n player_name = ''\n elif event.key == pygame.K_BACKSPACE:\n player_name = player_name[:-1]\n else:\n player_name += event.unicode\n if big_cookie.detect_click(mouse_pos) and any(get_pressed()): # condition for pressing big cookie\n big_cookie.transform_image(220)\n big_cookie.change_pos(440, 30)\n big_cookie.add_cookies()\n else:\n big_cookie.transform_image(200)\n big_cookie.change_pos(450, 40)\n if 1050 < mouse_pos[0] and mouse_pos[1] < 20 and any(get_pressed()): # condition for pressing quit button\n amount = big_cookie.cookies_amount\n cookies_per_click = big_cookie.cookies_per_click\n cookies_per_second = building_group.total_production\n cookie_scan.set_new_record(amount, cookies_per_click, int(cookies_per_second))\n running = False\n break\n if 900 < mouse_pos[0] and 140 <= mouse_pos[1] <= 200 and any(get_pressed()): # condition for pressing double click power button\n if big_cookie.cookies_amount >= double_click_power_cost:\n big_cookie.add_cookies(-double_click_power_cost)\n big_cookie.cookies_per_click *= 2\n productions['cursor'] = big_cookie.cookies_per_click / 10\n cursors.produce = productions['cursor']\n double_click_power_cost *= 5\n if 900 < mouse_pos[0] and 210 < mouse_pos[1] < 760 and any(get_pressed()): # condition for pressing buildings\n for i in range(5):\n if 210 + i * 100 <= mouse_pos[1] <= 310 + i * 100:\n building_group.buy_specific_building(i)\n\n screen.fill((255, 255, 255))\n all_sprites.draw(screen)\n building_group.update_total_production()\n buildings_surface.fill((192, 192, 192))\n pygame.draw.rect(screen, (0, 0, 0), (1050, 0, 50, 25), 2)\n pygame.draw.rect(screen, (255, 215, 0), (900, 140, 200, 60))\n pygame.draw.rect(screen, (0, 0, 0), (900, 140, 200, 60), 3)\n pygame.draw.rect(screen, color, (100, 100, 140, 32), 3)\n cookies_amount = human_readable(int(big_cookie.cookies_amount))\n cookie_amount_text = font.render('Amount: ' + cookies_amount, False, (0, 0, 0))\n cursor_production = productions['cursor'] * cursors.n\n cookies_per_second = human_readable(building_group.total_production + cursor_production)\n cookies_ps_text = font.render('Cookies Per Second: ' + cookies_per_second, False, (0, 0, 0))\n click_power_cost_text = font.render(human_readable(double_click_power_cost), False, (0, 0, 0))\n player_name_text = font.render('Player', False, (0, 0, 0))\n player_name_input_text = font.render(player_name, False, (0, 0, 0))\n screen.blit(click_power_text, (903, 150))\n screen.blit(click_power_cost_text, (903, 175))\n screen.blit(cookie_amount_text, (0, 0))\n screen.blit(cookies_ps_text, (0, 30))\n screen.blit(quit_text, (1055, 0))\n screen.blit(store_text, (900, 100))\n screen.blit(player_name_text, (103, 75))\n screen.blit(player_name_input_text, (103, 103))\n\n for i in range(5): # drawing a table and content of 
buildings\n pygame.draw.rect(buildings_surface, (191, 96, 0), (0, i * 100, 200, 100), 10)\n building_amount = str(building_group.get_building_amount(i))\n info = buildings[i] + ' x' + building_amount\n production = '+' + human_readable(productions[buildings[i]])\n production_text = font.render(production, False, (0, 0, 0))\n building_name = medium_font.render(info, False, (0, 0, 0))\n cost = building_group.get_building_cost(i)\n building_cost = font.render('Cost: ' + human_readable(cost), False, (0, 0, 0))\n buildings_surface.blit(building_name, (10, 5 + i * 100))\n buildings_surface.blit(production_text, (10, 40 + i * 100))\n buildings_surface.blit(building_cost, (10, 70 + i * 100))\n\n screen.blit(buildings_surface, (900, 210))\n schedule.run_pending()\n clock.tick(fps)\n pygame.display.flip()\n\n# sending the round's record\n# human_readable() inserts thousands separators, so strip them before converting back to int\nrequests.post(url='https://cookie-clicker-records.herokuapp.com/add_record', data={'player': player_name,\n 'total amount': int(cookies_amount.replace(',', '')),\n 'cookies per second': int(float(cookies_per_second.replace(',', ''))),\n 'cookies per click': int(big_cookie.cookies_per_click)})\n","repo_name":"Yerbold/yandex_cookieclicker","sub_path":"starter.py","file_name":"starter.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"23867722536","text":"# install humanfriendly if necessary\n\nimport numpy as np, humanfriendly as hf, random\nimport time\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import RandomizedSearchCV,\\\n cross_val_score\nfrom sklearn.ensemble import RandomForestClassifier,\\\n ExtraTreesClassifier\n\ndef get_scores(model, xtrain, ytrain, xtest, ytest):\n train = model.score(xtrain, ytrain)\n test = model.score(xtest, ytest)\n name = model.__class__.__name__\n return (name, train, test)\n\ndef get_cross(model, data, target, groups=10):\n return cross_val_score(model, data, target, cv=groups)\n\ndef prep_data(data, target):\n d = [data[i] for i, _ in enumerate(data)]\n t = [target[i] for i, _ in enumerate(target)]\n return list(zip(d, t))\n\ndef create_sample(d, n, replace='yes'):\n if replace == 'yes': s = random.sample(d, n)\n else: s = [random.choice(d) for i, _ in enumerate(d)\n if i < n]\n Xs = [row[0] for i, row in enumerate(s)]\n ys = [row[1] for i, row in enumerate(s)]\n return np.array(Xs), np.array(ys)\n\ndef see_time(note):\n end = time.perf_counter()\n elapsed = end - start\n print (note,\n hf.format_timespan(elapsed, detailed=True))\n\nif __name__ == \"__main__\":\n br = '\\n'\n X_file = 'data/X_mnist'\n y_file = 'data/y_mnist'\n X = np.load('data/X_mnist.npy')\n y = np.load('data/y_mnist.npy')\n X = X.astype(np.float32)\n data = prep_data(X, y)\n sample_size = 7000\n Xs, ys = create_sample(data, sample_size)\n rf = RandomForestClassifier(random_state=0,\n n_estimators=100)\n print (rf, br)\n params = {'class_weight': ['balanced'],\n 'max_depth': [10, 30]}\n random = 
RandomizedSearchCV(rf, param_distributions = params,\n cv=3, n_iter=2, random_state=0)\n start = time.perf_counter()\n random.fit(Xs, ys)\n see_time('RandomizedSearchCV total tuning time:')\n bp = random.best_params_\n print (bp, br)\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, random_state=0)\n rf = RandomForestClassifier(**bp, random_state=0,\n n_estimators=100)\n start = time.perf_counter()\n rf.fit(X_train, y_train)\n rf_scores = get_scores(rf, X_train, y_train,\n X_test, y_test)\n see_time('total time:')\n print (rf_scores[0] + ' (train, test):')\n print (rf_scores[1], rf_scores[2], br)\n et = ExtraTreesClassifier(random_state=0, n_estimators=200)\n print (et, br)\n params = {'class_weight': ['balanced'],\n 'max_depth': [10, 30]}\n random = RandomizedSearchCV(et, param_distributions = params,\n cv=3, n_iter=2, random_state=0)\n start = time.perf_counter()\n random.fit(Xs, ys)\n see_time('RandomizedSearchCV total tuning time:')\n bp = random.best_params_\n print (bp, br)\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, random_state=0)\n et = ExtraTreesClassifier(**bp, random_state=0,\n n_estimators=200)\n start = time.perf_counter()\n et.fit(X_train, y_train)\n et_scores = get_scores(et, X_train, y_train,\n X_test, y_test)\n see_time('total time:')\n print (et_scores[0] + ' (train, test):')\n print (et_scores[1], et_scores[2], br)\n print ('cross-validation (et):')\n start = time.perf_counter()\n scores = get_cross(rf, X, y)\n see_time('total time:')\n print (np.mean(scores), br)\n file = 'data/bp_mnist_et'\n np.save(file, bp)\n # need allow_pickle=True parameter\n bp = np.load('data/bp_mnist_et.npy', allow_pickle=True)\n bp = bp.tolist()\n print ('best parameters:')\n print (bp)\n","repo_name":"Apress/hands-on-scikit-learn-for-mach-learning-apps","sub_path":"chapters_py/ch6/2_mnist_p1.py","file_name":"2_mnist_p1.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"38"} +{"seq_id":"20083465575","text":"import datetime\ndate = input(\"dd/mm/yy : \")\nd, m, y = date.split('/')\n\nvalid = True\ntry: \n date = datetime.datetime(int(y), int(m), int(d))\nexcept: \n print(\"Invalid Date\")\n valid = False\n\nif valid: \n nextday = date + datetime.timedelta(days=1)\n print(nextday.date())\n\n\n","repo_name":"anuragrao04/University_Code","sub_path":"Sem1/Lab-4/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27096430162","text":"#! /usr/bin/env python\nfrom unittest import result\nimport cv2\nimport numpy as np\nimport scipy.spatial as spatial\nimport logging\nimport matplotlib.pyplot as plt\nimport math\nfrom PIL import Image, ImageEnhance\n\n\n## 3D Transform\ndef bilinear_interpolate(img, coords):\n \"\"\" Interpolates over every image channel\n http://en.wikipedia.org/wiki/Bilinear_interpolation\n :param img: max 3 channel image\n :param coords: 2 x _m_ array. 
1st row = xcoords, 2nd row = ycoords\n :returns: array of interpolated pixels with same shape as coords\n \"\"\"\n int_coords = np.int32(coords)\n x0, y0 = int_coords\n dx, dy = coords - int_coords\n\n # 4 Neighour pixels\n # if (y0 < img.shape[0] and x0img.shape[0]:\n logging.error(\"Jaw part out of image\")\n else:\n return True\n return False\n\n\ndef face_swap(src_face, dst_face, src_points, dst_points, dst_shape, dst_img, seg_dst, end=68):\n h_face, w_face = dst_face.shape[:2]\n h_img, w_img = dst_img.shape[:2]\n\n id_brow = [2, 3]\n id_eye_l = [4]\n id_eye_r = [5]\n id_lip = [7, 9]\n\n ## 3d warp\n warped_src_face = warp_image_3d(src_face, src_points[:end], dst_points[:end], (h_face, w_face))\n \n ## Mask for blending\n mask = mask_from_points((h_face, w_face), dst_points)\n mask_src = np.mean(warped_src_face, axis=2) > 0\n mask = np.asarray(mask * mask_src, dtype=np.uint8)\n\n ## Poisson Blending\n r = cv2.boundingRect(mask)\n center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))\n output = cv2.seamlessClone(warped_src_face, dst_face, mask, center, cv2.MIXED_CLONE)\n\n brow_mask = get_mask((h_img, w_img), seg_dst, id_brow)\n lip_mask = get_mask((h_img, w_img), seg_dst, id_lip)\n eyeball_mask_l = get_mask((h_img, w_img), seg_dst, id_eye_l)\n eyeball_mask_r = get_mask((h_img, w_img), seg_dst, id_eye_r)\n eye_mask = get_eye_region((h_img, w_img), eyeball_mask_l, eyeball_mask_r, dst_points)\n mask_copy = brow_mask + lip_mask + eye_mask\n\n x, y, w, h = dst_shape\n warped_face = np.zeros_like(dst_img, dtype='uint8')\n warped_face[y:y + h, x:x + w] = warped_src_face\n seg_copy = cv2.bitwise_and(warped_face, warped_face, mask=mask_copy)\n\n dst_img_cp = dst_img.copy()\n dst_img_cp[y:y + h, x:x + w] = output\n dst_img_cp[seg_copy > 0] = seg_copy[seg_copy > 0]\n dst_img_cp[dst_img_cp == 0] = dst_img[dst_img_cp == 0]\n\n return dst_img_cp\n\n\ndef face_blend(src_face, dst_face, src_points, dst_points, dst_shape, dst_img, copy_mask, end=68):\n h_face, w_face = dst_face.shape[:2]\n\n ## 3d warp\n warped_src_face = warp_image_3d(src_face, src_points[:end], dst_points[:end], (h_face, w_face))\n # cv2.imshow(\"warped_src_face1\", warped_src_face)\n # warped_src_face_pil = Image.fromarray(warped_src_face)\n # warped_src_face_pil = ImageEnhance.Contrast(warped_src_face_pil)\n # warped_src_face_pil = warped_src_face_pil.enhance(1.5)\n # warped_src_face = np.array(warped_src_face_pil)\n # cv2.imshow(\"warped_src_face2\", warped_src_face)\n # cv2.waitKey(0)\n\n ## Mask for blending\n mask = mask_from_points((h_face, w_face), dst_points)\n mask_src = np.mean(warped_src_face, axis=2) > 0\n mask = np.asarray(mask * mask_src, dtype=np.uint8)\n \n ## Poisson Blending\n x, y, w, h = dst_shape\n r = cv2.boundingRect(mask)\n center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))\n output = cv2.seamlessClone(warped_src_face, dst_face, mask, center, cv2.MIXED_CLONE)\n gt_blend = dst_img.copy()\n gt_blend[y:y + h, x:x + w] = output\n\n # # BLEND LIPS\n lip_r = cv2.boundingRect(copy_mask)\n lip_center = ((lip_r[0] + int(lip_r[2] / 2), lip_r[1] + int(lip_r[3] / 2)))\n warped_face = np.zeros_like(dst_img, dtype='uint8')\n warped_face[y:y + h, x:x + w] = warped_src_face\n output = cv2.seamlessClone(warped_face, gt_blend, copy_mask, lip_center, cv2.NORMAL_CLONE)\n \n # # LIPS NOT BLENDED\n # x, y, w, h = dst_shape\n # warped_face = np.zeros_like(dst_img, dtype='uint8')\n # warped_face[y:y + h, x:x + w] = warped_src_face\n # seg_copy = cv2.bitwise_and(warped_face, warped_face, mask=copy_mask)\n # gt_blend[seg_copy > 
0] = seg_copy[seg_copy > 0]\n\n return gt_blend\n\ndef face_copy(src_face, dst_face, src_points, dst_points, dst_shape, dst_img, copy_mask, end=68):\n h_face, w_face = dst_face.shape[:2]\n\n ## 3d warp\n warped_src_face = warp_image_3d(src_face, src_points[:end], dst_points[:end], (h_face, w_face))\n mask_copy = copy_mask\n\n x, y, w, h = dst_shape\n warped_face = np.zeros_like(dst_img, dtype='uint8')\n warped_face[y:y + h, x:x + w] = warped_src_face\n seg_copy = cv2.bitwise_and(warped_face, warped_face, mask=mask_copy)\n\n gt_copy = dst_img.copy()\n gt_copy[seg_copy > 0] = seg_copy[seg_copy > 0]\n\n return gt_copy\n\ndef get_mask(size, seg, classes_list):\n mask = np.zeros(size, dtype=np.uint8)\n for i in classes_list:\n mask[seg == i] = 255\n return mask","repo_name":"raraspradnya/GANMakeupTransferVariousSkin","sub_path":"groundtruth/face_swap.py","file_name":"face_swap.py","file_ext":"py","file_size_in_byte":14311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27153998701","text":"from pymongo import MongoClient\n\nclient = MongoClient()\ndb = client[\"phase3\"]\n\nprint(\"Following topics are available in the repository: \")\nfor collection_ in db.list_collection_names():\n if \"csv\" in collection_:\n continue\n print(collection_)\n\ntopic = input(\n \"Enter the name of the topic for which you want the sentiment analysis: \\n\")\n\ncollection = topic.replace(' ', '_') + \"_csv\"\n\ntotal_count = db[collection].count_documents({})\npositive_count = db[collection].count_documents({\"sentiment\": \"positive\"})\nnegative_count = db[collection].count_documents({\"sentiment\": \"negative\"})\nneutral_count = db[collection].count_documents({\"sentiment\": \"neutral\"})\n\npositive_count_percentage = (positive_count/total_count)*100\nnegative_count_percentage = (negative_count/total_count)*100\nneutral_count_percentage = (neutral_count/total_count)*100\n\nprint('{}% were positive, {}% were negative, and {}% were neutral tweets about {} which has a total of {} tweets'.format(\n positive_count_percentage, negative_count_percentage, neutral_count_percentage, topic, total_count))\n","repo_name":"kebab-mai-haddi/assange","sub_path":"display-sentiment.py","file_name":"display-sentiment.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11509219695","text":"import os\nimport sys\n#SCHEDULER = [\"neuralnet\"]\n\nDIRECTORY=\"/storage/others/tapan/gpgpu-simDefault/gpgpu-sim/ispass2009-benchmarks\"\nKERNEL = [\"scalarProd\"]\n\ndef runKernel(benchmarkName, scheduler):\n direc = DIRECTORY + '/' + benchmarkName\n os.chdir(direc)\n print(\"Running the kernel: \" + benchmarkName)\n os.system(\"bash rungpgpusim \" + scheduler + \" >> \" + \"terminallog.txt\")\n return\n\ndef parseFile(benchmarkName):\n direc = DIRECTORY + '/' + benchmarkName + '/gpgpusim.log'\n file = open(direc, 'r')\n readLine = file.readlines()\n resultList = []\n #print(readLine)\n for lines in readLine:\n if \"gpu_sim_cycle\" in lines:\n resultList.append((int(lines.split(' ')[-1].strip('\\n')), lines.split(' ')[0].strip('\\n')))\n if resultList == []:\n print(\"Error in parsing file\")\n file.close()\n return resultList\n\ndef outputlog(benchmarkName, scheduler, index, file1):\n print(\"Save the log\")\n getres = parseFile(benchmarkName)\n #file1 = open(benchmarkName + '_' + scheduler + '.txt', 'w')\n file1.write(\"Index \" + str(index)+ \" :\\n\")\n for (res, kern) in 
getres:\n file1.write(kern + ' : ' + str(res) + '\\n')\n print(str(index) + \" : \" + kern + ' : ' + str(res) + '\\n')\n file1.write('\\n#########################\\n\\n')\n file1.close()\n return getres[0][0]\n\nif __name__ == \"__main__\":\n scheduler = \"neuralnet_\" + KERNEL[0] + \"_\" + sys.argv[1] + \"_\" + sys.argv[2]\n file1 = open(KERNEL[0] + '_' + \"NN\" + '.txt', 'w')\n runKernel(KERNEL[0], scheduler)\n fitness = outputlog(KERNEL[0], \"neuralnet\", 1, file1)\n os.chdir(\"/storage/others/tapan/gpgpu-simDefault/gpgpu-sim/v3.x/ANN_DATA/genetic_algorithm/kernel/\" + KERNEL[0])\n file2 = open('data/' + 'generation_' + sys.argv[1] + '/fitness_' + sys.argv[2] + '.txt', 'w')\n file2.write(str(fitness))\n file1.close()\n file2.close()\n\n","repo_name":"tapan0007/Genetic-Algorithm-for-Scheduling-problems","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"43118860938","text":"#!/usr/bin/python\n\n\"\"\"\n VODie\n kitesurfing@kitesurfing.ie\n\"\"\"\n\nimport re\nimport sys\nimport MenuConstants\n\n# Channel Constants\nCHANNEL = 'Irish Radios'\nLOGOICON = 'http://www.rte.ie/radio/images/logo.gif'\n\nRADIOS = [\n {'id': 'rte_radio_1',\n 'name': 'RTE Radio 1',\n 'logo': 'http://www.rte.ie/radio/images/logo_radio1.gif',\n 'stream': 'http://dynamic.rte.ie/av/live/radio/radio1.asx'},\n {'id': 'rte_radio_1_extra',\n 'name': 'RTE Radio 1 Extra',\n 'logo': 'http://www.rte.ie/radio/images/logo_extra.gif',\n 'stream': 'http://dynamic.rte.ie/av/live/radio/radio1extra.asx'},\n {'id': 'rte_2fm',\n 'name': 'RTE 2FM',\n 'logo': 'http://www.rte.ie/radio/images/logo_2fm.gif',\n 'stream': 'http://dynamic.rte.ie/av/live/radio/2fm.asx'},\n {'id': 'rte_radio_1',\n 'name': 'RTE Lyric FM',\n 'logo': 'http://www.rte.ie/radio/images/logo_lyric.gif',\n 'stream': 'http://dynamic.rte.ie/av/live/radio/lyric.asx'},\n {'id': 'rte_radio_rnag',\n 'name': 'RTE Radio na Gaeltachta',\n 'logo': 'http://www.rte.ie/radio/images/logo_rnag.gif',\n 'stream': 'http://dynamic.rte.ie/av/live/radio/rnag.asx'},\n {'id': 'rte_2xm',\n 'name': 'RTE 2XM',\n 'logo': 'http://www.rte.ie/radio/images/logo_2xm.gif',\n 'stream': 'http://dynamic.rte.ie/av/live/radio/0304.asx'},\n {'id': 'rte_choice',\n 'name': 'RTE Choice',\n 'logo': 'http://www.rte.ie/radio/images/logo_choice.gif',\n 'stream': 'http://dynamic.rte.ie/av/live/radio/0910.asx'},\n {'id': 'rte_junior',\n 'name': 'RTE Junior',\n 'logo': 'http://www.rte.ie/radio/images/logo_junior.gif',\n 'stream': 'http://dynamic.rte.ie/av/live/radio/0708.asx'},\n {'id': 'rte_gold',\n 'name': 'RTE Gold',\n 'logo': 'http://www.rte.ie/radio/images/logo_gold.gif',\n 'stream': 'http://dynamic.rte.ie/av/live/radio/0102.asx'},\n {'id': 'rte_pulse',\n 'name': 'RTE Pulse',\n 'logo': 'http://www.rte.ie/radio/images/logo_pulse.gif',\n 'stream': 'http://dynamic.rte.ie/av/live/radio/1112.asx'},\n {'id': 'q102',\n 'name': \"Dublin's Q102\",\n 'logo': 'http://www.q102.ie/Images/Listen/Q102_logo_listenlive.jpg',\n 'stream': 'http://q102-128.media.vistatec.ie/listen.pls'},\n {'id': 'fm104',\n 'name': \"FM104 Dublin's Hit Music Station\",\n 'logo': 'http://www.fm104.ie/Images/Layout/fm104_logo.png',\n 'stream': 'http://fm104-128.media.vistatec.ie/listen.pls'},\n {'id': 'spin1038',\n 'name': \"Spin 103.8\",\n 'logo': 'http://spin1038.com/wp-content/themes/spin1038/assets/img/logos/spin.png',\n 'url': 'http://media.spin1038.com/get_settings.php',\n 'stream': 
'http://208.72.155.250:8413/listen.pls'},\n    {'id': '98fm',\n     'name': \"Dublin's 98FM\",\n     'logo': 'http://98fm.s3.amazonaws.com/wp-content/themes/98fm/assets/img/logos/98fm_logo.png',\n     'url': 'http://media.98fm.com/get_settings.php',\n     'stream': 'http://208.72.155.250:8000/listen.pls'},\n    {'id': 'newstalk',\n     'name': \"Newstalk Live From Ireland\",\n     'logo': 'http://www.newstalk.ie/wp-content/themes/newstalk/assets/img/logo.gif',\n     'url': 'http://media.newstalk.ie/get_settings.php',\n     'stream': 'http://208.72.155.18:8080/listen.pls'},\n    ]\n\n\nclass RTERadio:\n\n    def getChannelDetail(self):\n        return {'Channel' : CHANNEL,\n                'Thumb' : LOGOICON,\n                'Title' : CHANNEL,\n                'mode' : MenuConstants.MODE_MAINMENU,\n                'Plot' : CHANNEL\n                }\n\n    def getMainMenu(self):\n        for radio in RADIOS:\n            yield {'Channel' : CHANNEL,\n                   'Thumb' : radio['logo'],\n                   'url' : radio['stream'],\n                   'Title' : radio['name'],\n                   'mode' : MenuConstants.MODE_PLAYRADIO}\n","repo_name":"liamf/VODieR","sub_path":"plugin.video.vodie/resources/lib/RTERadioScraper.py","file_name":"RTERadioScraper.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"38"}
{"seq_id":"18794778127","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom googletrans import Translator\nimport pyperclip\n\n# Creating the Frontend by using Tkinter\napp = tk.Tk()\napp.title('BHAVESH SINGH Translator All Languages')\napp.geometry('500x500')\napp.configure(bg=\"lightgreen\")\n\n# String Variables\nent_var = StringVar()\nout_var = StringVar()\nlang_selec = StringVar()\n\n\n# Class for Translation\nclass Trans():\n    def trans(self):\n        inn = ent_var.get()\n        if inn != \"\":\n            selected_language = lang_selec.get()\n\n            if selected_language == \"Arabic\":\n                out = Translator().translate(inn, dest='ar')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"Chinese\":\n                out = Translator().translate(inn, dest='zh-TW')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"French\":\n                out = Translator().translate(inn, dest='fr')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"English\":\n                out = Translator().translate(inn, dest='en')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"German\":\n                out = Translator().translate(inn, dest='de')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"Gujarati\":\n                out = Translator().translate(inn, dest='gu')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"Hindi\":\n                out = Translator().translate(inn, dest='hi')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"Japanese\":\n                out = Translator().translate(inn, dest='ja')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"Kannada\":\n                out = Translator().translate(inn, dest='kn')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"Korean\":\n                out = Translator().translate(inn, dest='ko')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"Latin\":\n                out = Translator().translate(inn, dest='la')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"Marathi\":\n                out = Translator().translate(inn, dest='mr')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"Punjabi\":\n                out = Translator().translate(inn, dest='pa')\n                out_var.set(out.text)\n                self.show()\n\n            elif selected_language == \"Russian\":\n                out = Translator().translate(inn, dest='ru')\n                out_var.set(out.text)\n                
self.show()\n\n elif selected_language == \"Spanish\":\n out = Translator().translate(inn, dest='es')\n out_var.set(out.text)\n self.show()\n\n elif selected_language == \"Tamil\":\n out = Translator().translate(inn, dest='ta')\n out_var.set(out.text)\n self.show()\n\n elif selected_language == \"Telugu\":\n out = Translator().translate(inn, dest='te')\n out_var.set(out.text)\n self.show()\n\n else:\n messagebox.showerror(\"Error\", \"Language not available Currently. Please select from the given list\")\n else:\n messagebox.showerror(\"Error\", \"Input Text Can't Be Empty\")\n\n def show(self):\n # Toplayer for the output.\n top = Toplevel()\n top.title('Shiva Translator Made By Bhavesh Singh')\n top.geometry('500x550')\n top.configure(bg=\"orange\")\n\n def copy():\n pyperclip.copy(out_var.get())\n # To copy the output directly to the clipboad.\n\n button = tk.Button(top, text='Copy', width=35, height=3, command=copy,\n activebackground=\"lightgreen\", activeforeground=\"black\")\n button.place(relx=0.5, rely=0.7, anchor=CENTER)\n\n msg1 = tk.Label(top, text='Translation', )\n msg1.place(relx=0.5, rely=0.4, anchor=CENTER)\n\n user_pass = Entry(top,\n textvariable=out_var, )\n user_pass.place(relx=0.5, rely=0.5, anchor=CENTER, width=400, height=50)\n\n\n# Creating the object for the Trans Class\nobj = Trans()\n\nmsg1 = tk.Label(app, text='Input text', )\nmsg1.place(relx=0.5, rely=0.1, anchor=S)\n\nin_text = Entry(app, textvariable=ent_var)\nin_text.place(relx=0.5, rely=0.2, anchor=CENTER, width=350, height=30)\n\nmsg2 = tk.Label(app, text='Language')\nmsg2.place(relx=0.5, rely=0.4, anchor=S)\n\n# Creating the language selection.\nlanguages = [\"Hindi\", \"Telugu\", \"Spanish\", \"Russian\", \"Punjabi\", \"Marathi\", \"Latin\", \"Korean\", \"Kannada\", \"Japanese\",\n \"Tamil\", \"Gujarati\", \"German\", \"English\", \"French\", \"Chinese\", \"Arabic\"]\n\n# SpinBox to select the choice of the language.\nlang_selec_box = Spinbox(app, values=languages, textvariable=lang_selec)\nlang_selec_box.place(relx=0.5, rely=0.5, anchor=CENTER, width=350, height=30)\n\n# Button to start the Trans function.\ntranslate_button = tk.Button(app, text='Translate', width=35, height=3, command=obj.trans,\n activebackground=\"dark grey\", activeforeground=\"red\")\ntranslate_button.place(relx=0.5, rely=0.7, anchor=CENTER)\n\napp.mainloop()\n\n\n","repo_name":"thakurbhavesh/Google","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"14757558788","text":"def czy_na_prostej(xp, yp, a, b):\n z = a * xp + b\n return yp == z\n\n\ndef main():\n xp = int(input(\"Współrzędna x: \"))\n yp = int(input(\"Współrzędna y: \"))\n a = float(input(\"Współczynnik a: \"))\n b = float(input(\"Współrzędna b: \"))\n \n if czy_na_prostej(xp, yp, a, b):\n print(\"Punkt na prostej\")\n else:\n print(\"Punkt poza prostą\")\n return 0\n \n \nmain()\n","repo_name":"lo1cgsan/rok202021","sub_path":"2BP4/python_2/punkt_prosta.py","file_name":"punkt_prosta.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"9169081024","text":"\"\"\"\nCreated on 4. 3. 
2019\nSVM classifier plugin for ClassMark.\n\n:author: Martin Dočekal\n:contact: xdocek09@stud.fit.vutbr.cz\n\"\"\"\nfrom functools import partial\n\nfrom classmark.core.plugins import Classifier, PluginAttribute, PluginAttributeIntChecker\nfrom classmark.core.preprocessing import BaseNormalizer, NormalizerPlugin, \\\n    MinMaxScalerPlugin, StandardScalerPlugin, RobustScalerPlugin\nfrom sklearn.svm import LinearSVC, SVC\nfrom typing import List, Tuple\nimport numpy as np\n\n\nclass SVM(Classifier):\n    \"\"\"\n    SVM classifier plugin for ClassMark.\n    \"\"\"\n\n    def __init__(self, normalizer: BaseNormalizer = None,\n                 kernel: str = \"linear\", showImpFeatures: int = 0):\n        \"\"\"\n        Classifier initialization.\n        \n        :param normalizer: Normalizer used for input data. If None then normalization/scaling is omitted.\n        :type normalizer: None | BaseNormalizer\n        :param kernel: Kernel type that should be used.\n        :type kernel: str\n        :param showImpFeatures: Show top-k important features for predictions.\n            0 deactivates this.\n        :type showImpFeatures: int\n        \"\"\"\n\n        self._normalizer = PluginAttribute(\"Normalize\", PluginAttribute.PluginAttributeType.SELECTABLE_PLUGIN, None,\n                                           [None, NormalizerPlugin, MinMaxScalerPlugin, StandardScalerPlugin,\n                                            RobustScalerPlugin])\n        self._normalizer.value = normalizer\n\n        self._kernel = PluginAttribute(\"Kernel\", PluginAttribute.PluginAttributeType.SELECTABLE, str,\n                                       [\"linear\", \"poly\", \"rbf\", \"sigmoid\"])\n        self._kernel.value = kernel\n\n        self._showImpFeatures = PluginAttribute(\"Top-k features (linear only)\",\n                                                PluginAttribute.PluginAttributeType.VALUE,\n                                                PluginAttributeIntChecker(minV=0))\n\n        # let's make sure that top-k features will be used only with linear kernel\n\n        self._showImpFeatures.value = showImpFeatures\n\n    @staticmethod\n    def getName():\n        return \"Support Vector Machines\"\n\n    @staticmethod\n    def getNameAbbreviation():\n        return \"SVM\"\n\n    @staticmethod\n    def getInfo():\n        return \"\"\n\n    def train(self, data, labels):\n        if self._normalizer.value is not None:\n            data = self._normalizer.value.fitTransform(data)\n\n        # The documentation says:\n        # Prefer dual=False when n_samples > n_features.\n        if self._kernel.value == \"linear\":\n            # this should be faster\n            self._cls = LinearSVC(dual=data.shape[0] <= data.shape[1])\n        else:\n            self._cls = SVC(kernel=self._kernel.value)\n\n        self._cls.fit(data, labels)\n\n    def classify(self, data):\n        if self._normalizer.value is not None:\n            data = self._normalizer.value.transform(data)\n        return self._cls.predict(data)\n\n    def classifyShowTopFeatures(self, data, featuresNames: np.array):\n        \"\"\"\n        Classify label on provided data and provides top important features that were used for decision.\n\n        Each classifier determines the number of most important features itself.\n        Consider adding a user editable attribute for it.\n\n        :param data: Data for classification.\n        :type data: scipy.sparse matrix\n        :param featuresNames: Name for each feature in an input data vector that was passed to the model during training.\n        :type featuresNames: np.array\n        :return: Predicted labels, array of features names with array of importance scores. 
Both arrays\n (names, importance) are in descending order according to importance\n :rtype: Tuple[ArrayLike, ArrayLike, ArrayLike]\n \"\"\"\n predictions = self.classify(data)\n predFeaturesNames = []\n predFeaturesImportance = []\n\n usedClasses = self._cls.classes_.tolist()\n if len(usedClasses) == 2:\n # binnary classifiers must be handled differently\n\n # The decision function is for SVM:\n # sgn(w^t * x + b)\n # Let's get importance score for each sample and feature\n impScores = data.multiply(self._cls.coef_.ravel()).tocsr()\n\n for p, imp in zip(predictions, impScores):\n imp = imp.toarray().ravel()\n\n if p != usedClasses[1]:\n # negative prediction sgn(w^t * x + b) = -1\n # because importance should be increasing as the feature is more important we should flip\n # signs\n imp = -imp\n\n impSortedIndices = np.argsort(imp)[::-1][:self._showImpFeatures.value]\n\n predFeaturesNames.append(featuresNames[impSortedIndices])\n predFeaturesImportance.append(imp[impSortedIndices])\n\n else:\n # we have for each class pair a classifier\n impScores = [data.multiply(classCoef).tocsr() for classCoef in self._cls.coef_]\n\n # Predicted class p is not direct index even though labels are 0 .. n_classes -1 and classes_ are sorted\n # with np.unique, because in some iteration step there may be a missing class so we need mapping.\n\n class2Index = {c: i for i, c in enumerate(self._cls.classes_)}\n\n for i, p in enumerate(predictions):\n imp = impScores[class2Index[p]][i].toarray().ravel()\n impSortedIndices = np.argsort(imp)[::-1][:self._showImpFeatures.value]\n predFeaturesNames.append(featuresNames[impSortedIndices])\n predFeaturesImportance.append(imp[impSortedIndices])\n\n return predictions, np.array(predFeaturesNames), np.array(predFeaturesImportance)\n\n def featureImportanceShouldBeShown(self) -> bool:\n return self._kernel.value == \"linear\" and self._showImpFeatures.value > 0\n","repo_name":"mdocekal/ClassMark","sub_path":"plugins/classifiers/plugin_svm/svm/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":5956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"3322535495","text":"import sys\nsys.path.append('/home/patricknaughton01/Documents/SLAM/Unit A/CourseFiles/Unit_A')\nimport matplotlib.pyplot as plt\nimport time\n\nfrom math import sin, cos, pi\nfrom lego_robot import *\n\nMIN_VALID_DIST = 20.0\nCYLINDER_OFFSET = 90.0\nDEPTH_THRESHOLD = 100.0\n\n\ndef main():\n scans = read_scans(\"scan.txt\")\n plt.figure()\n for i in range(278):\n test_scan = scans[i]\n deriv = compute_derivative(test_scan, MIN_VALID_DIST)\n plt.subplot(211)\n plt.plot(test_scan)\n plt.plot(deriv)\n cylinders = scan_derivative(deriv, test_scan, DEPTH_THRESHOLD, MIN_VALID_DIST)\n for cylinder in cylinders:\n plt.plot(cylinder[0], cylinder[1], marker=\"o\")\n plt.subplot(212)\n axes = plt.gca()\n axes.set_xlim([-1000, 5000])\n axes.set_ylim([-1000, 5000])\n plt.plot(0, 0, color='green', marker='x')\n coordinates = get_cylinders_cartesian(cylinders)\n for coor in coordinates:\n plt.plot(coor[0], coor[1], marker=\"o\")\n plt.show(block=False)\n plt.pause(0.1)\n plt.clf()\n \n\ndef get_cylinders_cartesian(cylinders):\n \"\"\"Return a list of the cartesian coordinates of all the \n cylinders in the robot's frame.\n :param cylinders: A list of cylinders (represented as an\n ordered pair: (scan_index, scan_depth))\n :return: a list of x,y ordered pairs that each represent\n the location of a cylinder in the robot's frame.\n \"\"\"\n coordinates = 
[]\n log_file = LegoLogfile() # Helper class from the lego_robot module\n for cylinder in cylinders:\n angle = log_file.beam_index_to_angle(cylinder[0])\n depth = cylinder[1] - CYLINDER_OFFSET # Cylinder offset was determined experimentally\n coordinates.append((depth*cos(angle), depth*sin(angle)))\n return coordinates\n\n\ndef read_scans(file_name):\n \"\"\"Read in a list of scans from the file with the\n specified file name.\n :param file_name: The file to read from\n :return: a list of lidar scans where a scan is \n represented by a list of numbers (distances)\n \"\"\"\n try:\n in_file = open(file_name, \"r\")\n lines = in_file.readlines()\n in_file.close()\n scans = []\n for line in lines:\n str_nums = line.split(\" \")[3:]\n scan = []\n for str_scan in str_nums:\n scan.append(int(str_scan))\n scans.append(scan)\n return scans\n except Exception:\n print(\"File \" + file_name + \" could not be read\")\n sys.exit(1)\n\n\ndef scan_derivative(deriv, distances, min_jump, min_depth):\n \"\"\"Scan deriv (the derivative of a lidar scan) to find\n the scan index of the center of a cylinder. A cylinder\n is marked by a negative derivative of magnitude larger\n than 100 followed by a positive derivative of magnitude\n larger than 100. Multiple negative or positive spikes\n in a row are ignored (they occur when two cylinders\n overlap). Only the most recent spike will be counted.\n :param deriv: A list that represents the derivative of\n the scan function\n :param distances: A list of the original distances\n from which the derivative came\n :param min_jump: The minimum jump in the derivative\n necessary for us to consider this the edge of\n a cylinder\n :param min_depth: Minimum depth that the scanner can\n read. Filters out bad (invalid) scans.\n :return: A list of lists of scan indicies and average\n distances\n \"\"\"\n on_cylinder = False\n scan_points = []\n scan_depths = []\n cylinders = []\n for i in range(len(deriv)):\n if deriv[i] <= -min_jump:\n on_cylinder = True\n scan_points = []\n scan_depths = []\n if on_cylinder and distances[i] > min_depth:\n scan_points.append(i)\n scan_depths.append(distances[i])\n \n if deriv[i] >= min_jump:\n if on_cylinder:\n cylinders.append([int(average(scan_points)), average(scan_depths)])\n on_cylinder = False\n return cylinders\n \n \ndef average(arr):\n \"\"\"Return the average of all the values in arr\n :param arr: an array of values (ints or floats) to \n take the average of\n :return: float representing the average of all\n the values in arr\n \"\"\"\n avg = 0.0\n n = len(arr)\n for num in arr:\n avg += num/float(n)\n return avg\n\n\ndef compute_derivative(distances, min_valid):\n \"\"\"Compute the derivative of the distances found\n by a lidar scan as a function of the scan index.\n :param distances: a list of distances at different\n points in the scan\n :param min_valid: The minimum value a distance can \n be and still be considered valid (filters\n out some noise)\n :return: a list the same length as distances that \n represents the derivative of the distances function\n \"\"\"\n deriv = [0.0]\n for i in range(1, len(distances)-1):\n left = distances[i+1]\n right = distances[i-1]\n # If this is a valid scan\n if left > min_valid and right > min_valid:\n deriv.append((left-right)/2.0)\n else:\n deriv.append(0)\n deriv.append(0.0)\n return deriv\n \n\nif __name__ == \"__main__\":\n 
main()\n\n","repo_name":"patricknaughton01/SLAM","sub_path":"Unit_A/scan/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":5314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"35488955782","text":"import os\n\nimport torch\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport numpy as np\n\nfrom networks.MyMLP import MyMLP\nfrom func.load_data import prepare_dataloaders\nfrom func.preprocess import inverse_scaler\nimport ConfigSpace as CS\nimport ConfigSpace.hyperparameters as CSH\nfrom hpbandster.core.worker import Worker\n\nclass PyTorchWorker(Worker):\n '''My BOHB worker'''\n def __init__(self, input_size, output_size, train_tuple, validation_tuple, test_tuple, **kwargs):\n super().__init__(**kwargs)\n '''Initialize the data sturctures , input and output sizes'''\n self.train_tuple = train_tuple\n self.validation_tuple = validation_tuple\n self.test_tuple = test_tuple\n self.input_size = input_size\n self.output_size = output_size\n\n def compute(self, config: CS.Configuration, budget, working_directory:str, *args, **kwargs) -> dict:\n \"\"\"\n Simple MLP\n The input parameter \"config\" (dictionary) contains the sampled configurations passed by the bohb optimizer\n \"\"\"\n myMLP = MyMLP(n_layers = config[\"num_layers\"], dropout_rate =config[\"dropout_rate\"] , n_inputs = self.input_size, n_outputs = self.output_size)\n model = myMLP.model\n print(model)\n criterion = nn.MSELoss()\n train_loader = prepare_dataloaders(X_hp=self.train_tuple[0], X_mf=self.train_tuple[1], y= self.train_tuple[2], X_scaling=\"minmax\", y_scaling=\"minmax\", batch_size=config[\"batch_size\"], typeD = \"tensor\")\n validation_loader = prepare_dataloaders(X_hp=self.validation_tuple[0], X_mf=self.validation_tuple[1], y= self.validation_tuple[2], X_scaling=\"minmax\",y_scaling=\"minmax\",batch_size=config[\"batch_size\"], typeD = \"tensor\")\n test_loader = prepare_dataloaders(X_hp=self.test_tuple[0], X_mf=self.test_tuple[1], y= self.test_tuple[2], X_scaling=\"minmax\",y_scaling=\"minmax\" ,batch_size=config[\"batch_size\"], typeD = \"tensor\")\n if config['optimizer'] == 'Adam':\n optimizer = optim.Adam(model.parameters(), lr=config['lr'])\n else:\n optimizer = optim.SGD(model.parameters(), lr=config['lr'], momentum=config['sgd_momentum'])\n\n for _epoch in range(int(budget)):\n loss = 0\n model.train()\n for _idx , data in enumerate(train_loader):\n inputs, labels = data\n #print(\"Input shape: \", inputs.shape)\n #print(\"label shape: \", labels.shape)\n optimizer.zero_grad()\n output = model(inputs)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n train_loss = self.evaluate_loss(model, train_loader, criterion)\n validation_loss = self.evaluate_loss(model, validation_loader, criterion)\n test_loss = self.evaluate_loss(model, test_loader, criterion)\n\n return ({\n 'loss': validation_loss,\n 'info': {\n 'test accuracy': test_loss,\n 'train accuracy': train_loss,\n 'validation accuracy': validation_loss,\n 'model': str(model)\n }\n })\n\n def evaluate_loss(self, model: nn.Module, data_loader: DataLoader, criterion: nn.MSELoss) -> float:\n test_losses = []\n model.eval()\n with torch.no_grad():\n for _idx , data in enumerate(data_loader):\n inputs, labels = data\n outputs = model(inputs)\n rescaled_outputs = inverse_scaler(outputs, method=\"minmax\")\n loss = criterion(rescaled_outputs, labels)\n test_losses.append(loss.item())\n mean_loss = 
np.mean(test_losses) \n return(mean_loss)\n\n\n @staticmethod\n def get_configspace() -> CS.ConfigurationSpace:\n \"\"\"\n It builds the configuration space with the needed hyperparameters.\n It is easily possible to implement different types of hyperparameters.\n Beside float-hyperparameters on a log scale, it is also able to handle categorical input parameter.\n :return: ConfigurationsSpace-Object\n \"\"\"\n cs = CS.ConfigurationSpace()\n lr = CSH.UniformFloatHyperparameter(name='lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True)\n # Add different optimizers as categorical hyperparameters.\n # SGD has a conditional parameter 'momentum'.\n optimizer = CSH.CategoricalHyperparameter(name='optimizer', choices=['Adam', 'SGD'])\n sgd_momentum = CSH.UniformFloatHyperparameter(name='sgd_momentum', lower=0.0, upper=0.99, default_value=0.9, log=False)\n cs.add_hyperparameters([lr, optimizer, sgd_momentum])\n # The hyperparameter sgd_momentum will be used,if the configuration\n # contains 'SGD' as optimizer.\n cond = CS.EqualsCondition(sgd_momentum, optimizer, 'SGD')\n cs.add_condition(cond)\n #Number of layers in the MLP\n num_layers = CSH.UniformIntegerHyperparameter(name='num_layers', lower=3, upper=8)\n cs.add_hyperparameters([num_layers])\n batch_size = CSH.UniformIntegerHyperparameter(name='batch_size', lower=1, upper=10)\n cs.add_hyperparameters([batch_size])\n dropout_rate = CSH.UniformFloatHyperparameter(name='dropout_rate', lower=0.0, upper=0.9, default_value=0.5, log=False)\n cs.add_hyperparameters([dropout_rate])\n return cs","repo_name":"saiprasadbarke/LCBench_DL","sub_path":"func/NN_HPO.py","file_name":"NN_HPO.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"215502108","text":"# korisnik unosi n, a zatim i n celih brojeva u listu.\n# ispitatai da li je lista rastuca (neopadajuca)\n\n# 1 2 3 4 5 -> rastuca lista i neopadajuca lista\n# 1 1 2 3 4 -> neopadajuca lista, koja nije rastuca\n\n# 0 1 2 3 4\n# 5 7 8 9 9\n# i i+1\n# [0] <= [1]\n# [1] <= [2]\n# [2] <= [3]\n# [3] <= [4]\n\n# i se krece do predposlednjeg elementa \n# ako se makar jednom desi da je trenutni element na poziciji i \n# veci od sledeceg elementa na poziciji i+1 \n# onda lista vise nije neopadajuca\n\n# koristimo algoritam sa pretpostavkom, pretpostavimo da je neopadajuca lista\n# ako se makar jednom desi da nije -> menjamo flag na false i iskacemo iz petlje \nn = int(input(\"Unesite ceo broj:\"))\nbrojevi = []\nfor i in range(n):\n unos = int(input(\"Unesite broj u listu:\"))\n brojevi.append(unos)\nprint(brojevi)\npretpostavka_da_je_neopadajuci = True\nfor i in range(n-1): # i uzima sve pozicije od 0 do n-2 \n if brojevi[i] > brojevi[i+1]:\n pretpostavka_da_je_neopadajuci = False\n # print(\"Nije\")\n # exit()\n \n break\n# print(\"jeste\")\n#if USLOV: -> if True ili False\nif pretpostavka_da_je_neopadajuci:\n print(\"Jeste\")\nelse:\n print(\"Nije\")\n \n\nfor i in range(n-1): # i uzima sve pozicije od 0 do n-2 \n if brojevi[i] > brojevi[i+1]:\n pretpostavka_da_je_neopadajuci = False\n print(\"Nije\")\n # exit()\n \n break\nelse:\n print(\"Jeste\")\n \n","repo_name":"boksanns94/ComTrade_CODE_Python_Basic","sub_path":"05. 
Petlje i liste u Pythonu 3/5z rastuca_lista.py","file_name":"5z rastuca_lista.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39518078044","text":"import six\n\n\nclass FormatParams(object):\n max_line_length = 79\n wrap_paren = True\n indent = 4\n hanging_indent = 'never'\n use_black = False\n\n def __new__(cls, *args, **kwargs):\n if not kwargs and len(args) == 1 and isinstance(args[0], cls):\n return args[0]\n self = object.__new__(cls)\n # TODO: be more careful here\n dicts = []\n for arg in args:\n if arg is None:\n pass\n elif isinstance(arg, cls):\n dicts.append(arg.__dict__)\n else:\n raise TypeError\n if kwargs:\n dicts.append(kwargs)\n for kwargs in dicts:\n for key, value in six.iteritems(kwargs):\n if hasattr(self, key):\n setattr(self, key, value)\n else:\n raise ValueError(\"bad kwarg %r\" % (key,))\n return self\n\n\ndef fill(tokens, sep=(\", \", \"\"), prefix=\"\", suffix=\"\", newline=\"\\n\",\n max_line_length=80):\n r\"\"\"\n Given a sequences of strings, fill them into a single string with up to\n ``max_line_length`` characters each.\n\n >>> fill([\"'hello world'\", \"'hello two'\"],\n ... prefix=(\"print \", \" \"), suffix=(\" \\\\\", \"\"),\n ... max_line_length=25)\n \"print 'hello world', \\\\\\n 'hello two'\\n\"\n\n :param tokens:\n Sequence of strings to fill. There must be at least one token.\n :param sep:\n Separator string to append to each token. If a 2-element tuple, then\n indicates the separator between tokens and the separator after the last\n token. Trailing whitespace is removed from each line before appending\n the suffix, but not from between tokens on the same line.\n :param prefix:\n String to prepend at the beginning of each line. If a 2-element tuple,\n then indicates the prefix for the first line and prefix for subsequent\n lines.\n :param suffix:\n String to append to the end of each line. If a 2-element tuple, then\n indicates the suffix for all lines except the last, and the suffix for\n the last line.\n :return:\n Filled string.\n \"\"\"\n N = max_line_length\n assert len(tokens) > 0\n if isinstance(prefix, tuple):\n first_prefix, cont_prefix = prefix\n else:\n first_prefix = cont_prefix = prefix\n if isinstance(suffix, tuple):\n nonterm_suffix, term_suffix = suffix\n else:\n nonterm_suffix = term_suffix = suffix\n if isinstance(sep, tuple):\n nonterm_sep, term_sep = sep\n else:\n nonterm_sep = term_sep = sep\n lines = [first_prefix + tokens[0]]\n for token, is_last in zip(tokens[1:], [False]*(len(tokens)-2) + [True]):\n suffix = term_suffix if is_last else nonterm_suffix\n sep = (term_sep if is_last else nonterm_sep).rstrip()\n # Does the next token fit?\n if len(lines[-1] + nonterm_sep + token + sep + suffix) <= N:\n # Yes; add it.\n lines[-1] += nonterm_sep + token\n else:\n # No; break into new line.\n lines[-1] += nonterm_sep.rstrip() + nonterm_suffix + newline\n lines.append(cont_prefix + token)\n lines[-1] += term_sep.rstrip() + term_suffix + newline\n return ''.join(lines)\n\n\ndef pyfill(prefix, tokens, params=FormatParams()):\n \"\"\"\n Fill a Python statement.\n\n >>> print(pyfill('print ', [\"foo.bar\", \"baz\", \"quux\", \"quuuuux\"]), end='')\n print foo.bar, baz, quux, quuuuux\n >>> print(pyfill('print ', [\"foo.bar\", \"baz\", \"quux\", \"quuuuux\"],\n ... 
FormatParams(max_line_length=15, hanging_indent='auto')), end='')\n print (foo.bar,\n baz,\n quux,\n quuuuux)\n >>> print(pyfill('print ', [\"foo.bar\", \"baz\", \"quux\", \"quuuuux\"],\n ... FormatParams(max_line_length=14, hanging_indent='auto')), end='')\n print (\n foo.bar,\n baz, quux,\n quuuuux)\n\n :param prefix:\n Prefix for first line.\n :param tokens:\n Sequence of string tokens\n :type params:\n `FormatParams`\n :rtype:\n ``str``\n \"\"\"\n N = params.max_line_length\n if params.wrap_paren:\n # Check how we will break up the tokens.\n len_full = sum(len(tok) for tok in tokens) + 2 * (len(tokens)-1)\n if len(prefix) + len_full <= N:\n # The entire thing fits on one line; no parens needed. We check\n # this first because breaking into lines adds paren overhead.\n #\n # Output looks like:\n # from foo import abc, defgh, ijkl, mnopq, rst\n return prefix + \", \".join(tokens) + \"\\n\"\n if params.hanging_indent == \"never\":\n hanging_indent = False\n elif params.hanging_indent == \"always\":\n hanging_indent = True\n elif params.hanging_indent == \"auto\":\n # Decide automatically whether to do hanging-indent mode. If any\n # line would exceed the max_line_length, then do hanging indent;\n # else don't.\n #\n # In order to use non-hanging-indent mode, the first line would\n # have an overhead of 2 because of \"(\" and \",\". We check the\n # longest token since even if the first token fits, we still want\n # to avoid later tokens running over N.\n maxtoklen = max(len(token) for token in tokens)\n hanging_indent = (len(prefix) + maxtoklen + 2 > N)\n else:\n raise ValueError(\"bad params.hanging_indent=%r\"\n % (params.hanging_indent,))\n if hanging_indent:\n # Hanging indent mode. We need a single opening paren and\n # continue all imports on separate lines.\n #\n # Output looks like:\n # from foo import (\n # abc, defgh, ijkl,\n # mnopq, rst)\n return (prefix + \"(\\n\"\n + fill(tokens, max_line_length=N,\n prefix=(\" \" * params.indent), suffix=(\"\", \")\")))\n else:\n # Non-hanging-indent mode.\n #\n # Output looks like:\n # from foo import (abc, defgh,\n # ijkl, mnopq,\n # rst)\n pprefix = prefix + \"(\"\n return fill(tokens, max_line_length=N,\n prefix=(pprefix, \" \" * len(pprefix)), suffix=(\"\", \")\"))\n else:\n raise NotImplementedError\n","repo_name":"deshaw/pyflyby","sub_path":"lib/python/pyflyby/_format.py","file_name":"_format.py","file_ext":"py","file_size_in_byte":6502,"program_lang":"python","lang":"en","doc_type":"code","stars":313,"dataset":"github-code","pt":"38"} +{"seq_id":"215604328","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 5 22:17:28 2021\n\n@author: Boksan\n\"\"\"\n\ndef broj_cifara(broj):\n if broj < 0: # ako korisnik unese negativan broj\n broj *= -1\n return len(str(broj))\n\ndef armstrongova_suma(broj):\n armstrongova_suma = 0\n for cifra in str(broj):\n if cifra == \"-\": # ako korisnik unese negativan broj\n continue\n armstrongova_suma += int(cifra)**broj_cifara(broj)\n return armstrongova_suma\n\ndef da_li_je_armstrongov_broj(broj):\n if broj < 0: # ako korisnik unese negativan broj\n broj *= -1\n if broj == armstrongova_suma(broj):\n return True\n else:\n return False\n\n\n\nbroj = int(input(\"Unesite broj: \"))\nif da_li_je_armstrongov_broj(broj):\n print(f\"Broj {broj} jeste armstrongov broj.\")\nelse:\n print(f\"Broj {broj} nije armstrongov broj.\")","repo_name":"boksanns94/ComTrade_CODE_Python_Basic","sub_path":"06. 
Funkcije u Pythonu/D5 Viber.3.py","file_name":"D5 Viber.3.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"bs","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37750401127","text":"import matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.ticker as tck\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image \nfrom .plt_data import plot_data\nfrom .upload import UP\nimport os\n\n\nip_global=\"192.168.56.101\"\nport_global=\"6234\"\n\npic1_mark=1\npic2_mark=0\n\ndef getPicture(_event, _name, _date1, _date2):\n # _event =\"all\"\n # _name = \"scotland\"\n # _date1 = \"2012-01\"\n # _date2 = \"2015-06\"\n\n global ip_global, port_global\n global pic1_mark, pic2_mark\n\n title = \"points changing chart of \" + _name\n\n try:\n df = plot_data(event_type=_event, name=_name, date1=_date1, date2=_date2)\n except Exception:\n return ''\n\n # line chart\n # df.plot(x='date', y='pts', kind='scatter')\n\n date_raw = df[\"date\"]\n date = [\"Before\"]\n for d in date_raw:\n if d == \"Before\":\n continue\n date.append(np.datetime64(d))\n\n points = np.array(df[\"pts\"]).tolist()\n\n fig, ax = plt.subplots(1, 1)\n\n tick_spacing = int((len(date) - 1) / 5)\n ax.xaxis.set_major_locator(tck.MultipleLocator(tick_spacing))\n\n plt.plot(date, points)\n plt.xticks(rotation=30, fontsize=8)\n plt.ylabel(\"Points\", fontsize=14)\n plt.title(title, fontsize=17)\n\n fig.savefig(fname=\"pic_raw.png\")\n\n im = Image.open('pic_raw.png')\n imBackground = im.resize((1200, 900))\n\n name = str(_event) + str(_name) + str(_date1) + str(_date2) + '.png'\n imBackground.save('../tmp/' + name,'PNG')\n \n url = \"http://\" + ip_global +':'+ port_global + \"/tmp/\" + name\n\n print(\"url:\", url)\n return url\n\nif __name__ == \"__main__\":\n url = getPicture(\"all\", \"scotland\", \"2012-01\", \"2015-06\")\n ","repo_name":"lutianyu2001/workshop_proj_2021","sub_path":"Workshop_Source_Code/FrontEnd_and_BackEnd/django/pre/matplotlib/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39796977332","text":"import binascii\n#import StringIO\n\nfrom decrypt import decrypt\nfrom encrypt import encrypt\nfrom RSA import RSACipher\n\n\n\n\"\"\"\nIMAGINE:\n You have an item and a forged carbon copy you want to securely give to someone.\n You put that item into a lockbox and you lock it with a key and hold onto the carbon copy.\n You then lock that key into a box that you can only be opened with an imprint of the recipient's hand.\n You then toss both boxes into a known location in the ocean and give the carbon copy to the recipient.\n You then tell the recipient to go to the location, salvage the boxes and use his hand to open them.\n Once he takes out he verifies its the same as the carbon copy so he knows he got the right item.\n He is the only person out of 7.5 billion people that can ever open that box even if it gets displaced.\n The item (information) will never be recovered if it gets destroyed.\n This is exactly what's happening here.\n\nProof of Concept of Adrian Ho's reply on\nhttps://www.quora.com/What-is-the-difference-between-SHA-256-AES-256-and-RSA-2048-bit-encryptions\n\nDone:\n - Tested AES256 with input password\n - Got encryption working\n - Fixed private key error. 
Flipped params\n - Got entire system working with dict\n - Added try except statements on every decryption function\n - Implement RSA Public/Private Key Generation function\n - Export classes as packages\n - Make a file for encrypt and make a file for decrypt.\n - base64 encode/decode sha256 signatures\n - File Writing\n - Compression/Decompression\n - File Reading.\n\nTo Do:\n - Encrypt AES iv and salt values\n - Add suport for any file.\n - Find better AES key than os.urandom.\n - Run test over internet by encrypting a unique file\n I have with Jack's public key and send him only\n the encrypted file, RSA encrypted AES Key, sha256\n signature, and a .pyc or executable of my decryption\n function. Never look at his private key. Don't let him\n edit any code. Don't show him any of the files beforehand.\n Make sure to post to somewhere public. Maybe also have someone\n else try and unlock it.\n\"\"\"\n\n\ndef main():\n\n file = open('assets/HappyDance.gif', 'rb')\n\n binary_data = file.read()\n\n print('Input is ' + str(len(binary_data)/1000) + ' KB')\n\n #RSACipher.create_RSA_2048_key_pair()\n\n (public_key, private_key) = RSACipher.import_keys()\n\n encrypt(binary_data, public_key)\n\n file.close()\n\n compressed_encrypted_dict = open('assets/compressed_encrypted_message.lzma', 'rb')\n encrypted_AES_key = open('assets/encrypted_AES_key.pem', 'rb')\n sha256_signature = open('assets/sha256_signature.txt', 'rb')\n\n par1 = compressed_encrypted_dict.read()\n par2 = encrypted_AES_key.read()\n par3 = sha256_signature.read()\n \n\n file = decrypt(par1, par2, par3, private_key)\n\n compressed_encrypted_dict.close()\n encrypted_AES_key.close()\n sha256_signature.close()\n\n print('Output is ' + str(len(file)/1000) + ' KB')\n\n decrypted_file = open('assets/decrypt.gif','wb')\n bytes = binascii.unhexlify((file.decode()).rstrip('\\r\\n'))\n decrypted_file.write(bytes) #.decode('utf-8'))\n decrypted_file.close()\n\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"hadysalama/PySecuritySystem","sub_path":"TheHashedDoubleLockBox.py","file_name":"TheHashedDoubleLockBox.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"20392595082","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Patch\nimport matplotlib.colors as mcl\nimport seaborn as sns\n\nimport re\n\ndef SNPvalues(sheet_name,seq):\n df = pd.read_excel(r\"C:\\Users\\user\\Desktop\\LAO234_variant table_refseq.xlsx\",header=2,sheet_name=sheet_name)\n\n columns = list(df.columns.values)\n ref = 'Reference\\n(NC_044959.1)'\n p = re.compile('Reference\\n')\n for i in columns:\n m = p.match(i)\n if m != None:\n ref = i\n else: continue\n\n # Classify the data to 3 category (REF, ALT, InDel)\n for i in df.index:\n for j in seq:\n if df.loc[i, ref] == df.loc[i, j]:\n df.loc[i, j] = 0\n elif df.loc[i, ref] != df.loc[i,j] and df.loc[i,\"Type\"] in [\"SNV\",\"MNV\"]:\n df.loc[i, j] = 7\n else: df.loc[i, j] = 12\n\n\n ax0_sum = df[seq].astype(bool).sum(axis=0)\n ax1_sum = df[seq].astype(bool).sum(axis=1)\n ax2_sum = ax1_sum + df['Reference Position']\n conc = pd.concat([df['Reference Position'], ax1_sum], axis=1, keys = ['Reference Position', sheet_name])\n\n return df['Reference Position'], conc, ax0_sum\n\n\nNCpos, NC_ax1sum, NC_ax0sum = SNPvalues(\"maptoNC\",['LAO2 general', 'LAO3 geeneral','LAO4 general'])\nMKpos, MK_ax1sum, MK_ax0sum = SNPvalues(\"maptoMK\",['LAO2 general', 'LAO3 geeneral','LAO4 
general'])\nOPpos, OP_ax1sum, OP_ax0sum = SNPvalues(\"maptoOP\",['LAO2 general', 'LAO3 geeneral','LAO4 general'])\nMTpos, MT_ax1sum, MT_ax0sum = SNPvalues(\"maptoMT\",['LAO2 general', 'LAO3 geeneral','LAO4 general'])\n\n\n# Set the style globally using sns.set_style()\nsns.set_style(\"darkgrid\")\n\n# Define a list of color palettes for each subplot\npalettes = ['red', 'blue', 'orange', 'grey']\n\nfig, ax = plt.subplots(ncols=2, nrows=2, figsize=(30, 5))\n\nfor i in range(2):\n for j in range(2):\n sns.histplot(\n NC_ax1sum if i == 0 and j == 0\n else MK_ax1sum if i == 0 and j == 1\n else OP_ax1sum if i == 1 and j == 0\n else MT_ax1sum,\n x=\"Reference Position\",\n bins=100,\n element=\"poly\",\n kde=True,\n ax=ax[i, j],\n color=palettes[i * 2 + j], # Use a different palette for each subplot\n )\n ax[i, j].set_title(\n \"NC044959\" if i == 0 and j == 0\n else \"MK_543947.1\" if i == 0 and j == 1\n else \"OP_467597.1\" if i == 1 and j == 0\n else \"MT_872723.1\",\n fontsize=14,\n )\n ax[i, j].set_xlabel(\"\") # x축 label 제거\n ax[i, j].set_ylim(0, 30)\n\nplt.tight_layout()\nplt.show()","repo_name":"vet-Q/Biotools","sub_path":"SNPvisual/snpforRefseq.py","file_name":"snpforRefseq.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6897803892","text":"characters = [\n {\n \"name\": \"Baby Mario\",\n \"characterIndex\": 1,\n \"Weight\": \"light\",\n \"bonuses\": {\n \"Weight\": 8,\n \"Handling\": 6\n }\n },\n {\n \"name\": \"Baby Luigi\",\n \"characterIndex\": 2,\n \"Weight\": \"light\",\n \"bonuses\": {\n \"Weight\": 8,\n \"Speed\": 5\n }\n },\n {\n \"name\": \"Baby Peach\",\n \"characterIndex\": 3,\n \"Weight\": \"light\",\n \"bonuses\": {\n \"Weight\": 6,\n \"Speed\": 3,\n \"Acceleration\": 3,\n \"Handling\": 3\n }\n },\n {\n \"name\": \"Baby Daisy\",\n \"characterIndex\": 4,\n \"Weight\": \"light\",\n \"bonuses\": {\n \"Speed\": 5,\n \"Weight\": 6,\n \"Mini-Turbo\": 3\n }\n },\n {\n \"name\": \"Toad\",\n \"characterIndex\": 5,\n \"Weight\": \"light\",\n \"bonuses\": {\n \"Acceleration\": 6,\n \"Drift\": 6\n }\n },\n {\n \"name\": \"Toadette\",\n \"characterIndex\": 6,\n \"Weight\": \"light\",\n \"bonuses\": {\n \"Speed\": 3,\n \"Offroad\": 6\n }\n },\n {\n \"name\": \"Koopa Troopa\",\n \"characterIndex\": 7,\n \"Weight\": \"light\",\n \"bonuses\": {\n \"Mini-Turbo\": 6,\n \"Handling\": 3\n }\n },\n {\n \"name\": \"Dry Bones\",\n \"characterIndex\": 8,\n \"Weight\": \"light\",\n \"bonuses\": {\n \"Acceleration\": 3,\n \"Drift\": 3,\n \"Mini-Turbo\": 6\n }\n },\n {\n \"name\": \"Mario\",\n \"characterIndex\": 9,\n \"Weight\": \"middle\",\n \"bonuses\": {\n \"Weight\": 6,\n \"Handling\": 2,\n \"Acceleration\": 2,\n \"Drift\": 3\n }\n },\n {\n \"name\": \"Luigi\",\n \"characterIndex\": 10,\n \"Weight\": \"middle\",\n \"bonuses\": {\n \"Speed\": 2,\n \"Weight\": 6\n }\n },\n {\n \"name\": \"Peach\",\n \"characterIndex\": 11,\n \"Weight\": \"middle\",\n \"bonuses\": {\n \"Speed\": 2,\n \"Acceleration\": 5,\n \"Drift\": 6\n }\n },\n {\n \"name\": \"Daisy\",\n \"characterIndex\": 12,\n \"Weight\": \"middle\",\n \"bonuses\": {\n \"Speed\": 4,\n \"Handling\": 2,\n \"Mini-Turbo\": 3\n }\n },\n {\n \"name\": \"Yoshi\",\n \"characterIndex\": 13,\n \"Weight\": \"middle\",\n \"bonuses\": {\n \"Weight\": 3,\n \"Drift\": 3,\n \"Offroad\": 5\n }\n },\n {\n \"name\": \"Birdo\",\n \"characterIndex\": 14,\n \"Weight\": \"middle\",\n \"bonuses\": {\n \"Weight\": 3,\n \"Mini-Turbo\": 5,\n \"Offroad\": 3\n }\n 
},\n {\n \"name\": \"Diddy Kong\",\n \"characterIndex\": 15,\n \"Weight\": \"middle\",\n \"bonuses\": {\n \"Acceleration\": 3,\n \"Drift\": 3,\n \"Mini-Turbo\": 5,\n }\n },\n {\n \"name\": \"Bowser Jr.\",\n \"characterIndex\": 16,\n \"Weight\": \"middle\",\n \"bonuses\": {\n \"Offroad\": 3,\n \"Mini-Turbo\": 3\n }\n },\n {\n \"name\": \"Wario\",\n \"characterIndex\": 17,\n \"Weight\": \"heavy\",\n \"bonuses\": {\n \"Weight\": 3,\n \"Offroad\": 3,\n \"Mini-Turbo\": 6\n }\n },\n {\n \"name\": \"Waluigi\",\n \"characterIndex\": 18,\n \"Weight\": \"heavy\",\n \"bonuses\": {\n \"Acceleration\": 6,\n \"Drift\": 5,\n \"Offroad\": 3\n }\n },\n {\n \"name\": \"Donkey Kong\",\n \"characterIndex\": 19,\n \"Weight\": \"heavy\",\n \"bonuses\": {\n \"Weight\": 3,\n \"Acceleration\": 2,\n \"Handling\": 2\n }\n },\n {\n \"name\": \"Bowser\",\n \"characterIndex\": 20,\n \"Weight\": \"heavy\",\n \"bonuses\": {\n \"Speed\": 2,\n \"Weight\": 5,\n \"Drift\": 3\n }\n },\n {\n \"name\": \"King Boo\",\n \"characterIndex\": 21,\n \"Weight\": \"heavy\",\n \"bonuses\": {\n \"Handling\": 5,\n \"Offroad\": 3,\n }\n },\n {\n \"name\": \"Rosalina\",\n \"characterIndex\": 22,\n \"Weight\": \"heavy\",\n \"bonuses\": {\n \"Speed\": 3,\n \"Handling\": 3,\n \"Mini-Turbo\": 3\n }\n },\n {\n \"name\": \"Funky Kong\",\n \"characterIndex\": 23,\n \"Weight\": \"heavy\",\n \"bonuses\": {\n \"Speed\": 4,\n \"Offroad\": 3,\n }\n },\n {\n \"name\": \"Dry Bowser\",\n \"characterIndex\": 24,\n \"Weight\": \"heavy\",\n \"bonuses\": {\n \"Offroad\": 6,\n \"Mini-Turbo\": 6\n }\n },\n {\n \"name\": \"Mii Outfit A\",\n \"characterIndex\": 25,\n \"Weight\": \"???\",\n \"bonuses\": {\n }\n },\n {\n \"name\": \"Mii Outfit B\",\n \"characterIndex\": 26,\n \"Weight\": \"???\",\n \"bonuses\": {\n }\n },\n]\n\nlight_v = [\n {\n \"name\": \"Standard Kart S\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 41,\n \"Weight\": 29,\n \"Acceleration\": 48,\n \"Handling\": 48,\n \"Drift\": 51,\n \"Offroad\": 40,\n \"Mini-Turbo\": 45\n }\n },\n {\n \"name\": \"Booster Seat\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 27,\n \"Weight\": 27,\n \"Acceleration\": 56,\n \"Handling\": 64,\n \"Drift\": 37,\n \"Offroad\": 54,\n \"Mini-Turbo\": 59\n }\n },\n {\n \"name\": \"Mini Beast\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 55,\n \"Weight\": 32,\n \"Acceleration\": 29,\n \"Handling\": 32,\n \"Drift\": 64,\n \"Offroad\": 27,\n \"Mini-Turbo\": 64\n }\n },\n {\n \"name\": \"Cheep Charger\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 34,\n \"Weight\": 24,\n \"Acceleration\": 64,\n \"Handling\": 56,\n \"Drift\": 59,\n \"Offroad\": 45,\n \"Mini-Turbo\": 54\n }\n },\n {\n \"name\": \"Tiny Titan\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 46,\n \"Weight\": 35,\n \"Acceleration\": 43,\n \"Handling\": 43,\n \"Drift\": 29,\n \"Offroad\": 64,\n \"Mini-Turbo\": 40\n }\n },\n {\n \"name\": \"Blue Falcon\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 60,\n \"Weight\": 29,\n \"Acceleration\": 35,\n \"Handling\": 29,\n \"Drift\": 43,\n \"Offroad\": 24,\n \"Mini-Turbo\": 29\n }\n },\n {\n \"name\": \"Standard Bike S\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 39,\n \"Weight\": 21,\n \"Acceleration\": 51,\n \"Handling\": 51,\n \"Drift\": 54,\n \"Offroad\": 43,\n \"Mini-Turbo\": 48\n }\n },\n {\n \"name\": \"Bullet Bike\",\n \"inside_drift\": True,\n \"stats\": {\n \"Speed\": 53,\n \"Weight\": 24,\n \"Acceleration\": 32,\n \"Handling\": 35,\n \"Drift\": 67,\n \"Offroad\": 29,\n \"Mini-Turbo\": 
67\n }\n },\n {\n \"name\": \"Bit Bike\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 25,\n \"Weight\": 18,\n \"Acceleration\": 59,\n \"Handling\": 67,\n \"Drift\": 40,\n \"Offroad\": 56,\n \"Mini-Turbo\": 62\n }\n },\n {\n \"name\": \"Quacker\",\n \"inside_drift\": True,\n \"stats\": {\n \"Speed\": 32,\n \"Weight\": 17,\n \"Acceleration\": 67,\n \"Handling\": 60,\n \"Drift\": 62,\n \"Offroad\": 48,\n \"Mini-Turbo\": 57\n }\n },\n {\n \"name\": \"Magikruiser\",\n \"inside_drift\": True,\n \"stats\": {\n \"Speed\": 43,\n \"Weight\": 24,\n \"Acceleration\": 45,\n \"Handling\": 45,\n \"Drift\": 32,\n \"Offroad\": 67,\n \"Mini-Turbo\": 43\n }\n },\n {\n \"name\": \"Jet Bubble\",\n \"inside_drift\": True,\n \"stats\": {\n \"Speed\": 48,\n \"Weight\": 27,\n \"Acceleration\": 40,\n \"Handling\": 40,\n \"Drift\": 45,\n \"Offroad\": 35,\n \"Mini-Turbo\": 37\n }\n },\n]\n\nmiddle_v = [\n {\n \"name\": \"Standard Kart M\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 46,\n \"Weight\": 45,\n \"Acceleration\": 40,\n \"Handling\": 43,\n \"Drift\": 45,\n \"Offroad\": 35,\n \"Mini-Turbo\": 40\n }\n },\n {\n \"name\": \"Classic Dragster\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 37,\n \"Weight\": 43,\n \"Acceleration\": 59,\n \"Handling\": 54,\n \"Drift\": 54,\n \"Offroad\": 40,\n \"Mini-Turbo\": 51\n }\n },\n {\n \"name\": \"Wild Wing\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 57,\n \"Weight\": 51,\n \"Acceleration\": 21,\n \"Handling\": 29,\n \"Drift\": 59,\n \"Offroad\": 24,\n \"Mini-Turbo\": 59\n }\n },\n {\n \"name\": \"Super Blooper\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 50,\n \"Weight\": 40,\n \"Acceleration\": 35,\n \"Handling\": 37,\n \"Drift\": 21,\n \"Offroad\": 54,\n \"Mini-Turbo\": 35\n }\n },\n {\n \"name\": \"Daytripper\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 34,\n \"Weight\": 45,\n \"Acceleration\": 51,\n \"Handling\": 59,\n \"Drift\": 32,\n \"Offroad\": 48,\n \"Mini-Turbo\": 54\n }\n },\n {\n \"name\": \"Sprinter\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 64,\n \"Weight\": 48,\n \"Acceleration\": 27,\n \"Handling\": 24,\n \"Drift\": 37,\n \"Offroad\": 21,\n \"Mini-Turbo\": 24\n }\n },\n {\n \"name\": \"Standard Bike M\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 43,\n \"Weight\": 37,\n \"Acceleration\": 43,\n \"Handling\": 45,\n \"Drift\": 48,\n \"Offroad\": 37,\n \"Mini-Turbo\": 43\n }\n },\n {\n \"name\": \"Mach Bike\",\n \"inside_drift\": True,\n \"stats\": {\n \"Speed\": 55,\n \"Weight\": 37,\n \"Acceleration\": 24,\n \"Handling\": 32,\n \"Drift\": 62,\n \"Offroad\": 27,\n \"Mini-Turbo\": 62\n }\n },\n {\n \"name\": \"Sugarscoot\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 32,\n \"Weight\": 32,\n \"Acceleration\": 54,\n \"Handling\": 62,\n \"Drift\": 35,\n \"Offroad\": 51,\n \"Mini-Turbo\": 56\n }\n },\n {\n \"name\": \"Zip Zip\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 41,\n \"Weight\": 35,\n \"Acceleration\": 45,\n \"Handling\": 51,\n \"Drift\": 29,\n \"Offroad\": 62,\n \"Mini-Turbo\": 45\n }\n },\n {\n \"name\": \"Sneakster\",\n \"inside_drift\": True,\n \"stats\": {\n \"Speed\": 62,\n \"Weight\": 40,\n \"Acceleration\": 29,\n \"Handling\": 27,\n \"Drift\": 40,\n \"Offroad\": 24,\n \"Mini-Turbo\": 27\n }\n },\n {\n \"name\": \"Dolphin Dasher\",\n \"inside_drift\": True,\n \"stats\": {\n \"Speed\": 48,\n \"Weight\": 43,\n \"Acceleration\": 37,\n \"Handling\": 40,\n \"Drift\": 24,\n \"Offroad\": 56,\n \"Mini-Turbo\": 37\n }\n },\n]\n\nheavy_v 
= [\n {\n \"name\": \"Standard Kart L\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 48,\n \"Weight\": 59,\n \"Acceleration\": 37,\n \"Handling\": 40,\n \"Drift\": 40,\n \"Offroad\": 35,\n \"Mini-Turbo\": 35\n }\n },\n {\n \"name\": \"Offroader\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 39,\n \"Weight\": 64,\n \"Acceleration\": 48,\n \"Handling\": 54,\n \"Drift\": 18,\n \"Offroad\": 43,\n \"Mini-Turbo\": 45\n }\n },\n {\n \"name\": \"Flame Flyer\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 62,\n \"Weight\": 59,\n \"Acceleration\": 16,\n \"Handling\": 21,\n \"Drift\": 48,\n \"Offroad\": 18,\n \"Mini-Turbo\": 48\n }\n },\n {\n \"name\": \"Piranha Prowler\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 55,\n \"Weight\": 67,\n \"Acceleration\": 29,\n \"Handling\": 35,\n \"Drift\": 35,\n \"Offroad\": 29,\n \"Mini-Turbo\": 27\n }\n },\n {\n \"name\": \"Jetsetter\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 69,\n \"Weight\": 56,\n \"Acceleration\": 21,\n \"Handling\": 17,\n \"Drift\": 27,\n \"Offroad\": 16,\n \"Mini-Turbo\": 16\n }\n },\n {\n \"name\": \"Honeycoupe\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 53,\n \"Weight\": 62,\n \"Acceleration\": 27,\n \"Handling\": 29,\n \"Drift\": 56,\n \"Offroad\": 24,\n \"Mini-Turbo\": 56\n }\n },\n {\n \"name\": \"Standard Bike L\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 46,\n \"Weight\": 54,\n \"Acceleration\": 40,\n \"Handling\": 43,\n \"Drift\": 43,\n \"Offroad\": 37,\n \"Mini-Turbo\": 37\n }\n },\n {\n \"name\": \"Flame Runner\",\n \"inside_drift\": True,\n \"stats\": {\n \"Speed\": 60,\n \"Weight\": 54,\n \"Acceleration\": 18,\n \"Handling\": 24,\n \"Drift\": 51,\n \"Offroad\": 21,\n \"Mini-Turbo\": 51\n }\n },\n {\n \"name\": \"Wario Bike\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 37,\n \"Weight\": 59,\n \"Acceleration\": 51,\n \"Handling\": 56,\n \"Drift\": 21,\n \"Offroad\": 45,\n \"Mini-Turbo\": 48\n }\n },\n {\n \"name\": \"Shooting Star\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 50,\n \"Weight\": 48,\n \"Acceleration\": 29,\n \"Handling\": 32,\n \"Drift\": 59,\n \"Offroad\": 27,\n \"Mini-Turbo\": 59\n }\n },\n {\n \"name\": \"Spear\",\n \"inside_drift\": True,\n \"stats\": {\n \"Speed\": 67,\n \"Weight\": 56,\n \"Acceleration\": 24,\n \"Handling\": 18,\n \"Drift\": 29,\n \"Offroad\": 18,\n \"Mini-Turbo\": 18\n }\n },\n {\n \"name\": \"Phantom\",\n \"inside_drift\": False,\n \"stats\": {\n \"Speed\": 43,\n \"Weight\": 51,\n \"Acceleration\": 43,\n \"Handling\": 48,\n \"Drift\": 17,\n \"Offroad\": 56,\n \"Mini-Turbo\": 40\n }\n },\n]\n","repo_name":"antmyers/mario_kart_randomizer","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":15740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10478301394","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nimport time\r\nimport urllib.request\r\nimport os\r\n\r\na = input(\"검색할 키워드를 입력하세요 : \")\r\nb = int(input(\"개수 : \"))\r\n\r\ndriver = webdriver.Chrome('크롬드라이버 경로')\r\ndriver.get('http://www.google.co.kr/imghp?hl=ko')\r\n\r\nelem = driver.find_element_by_name(\"q\")\r\nelem.send_keys(a)\r\nelem.send_keys(Keys.RETURN)\r\nimages = driver.find_elements_by_css_selector(\".rg_i.Q4LuWd\")\r\ncount = 0\r\n\r\nfor image in images:\r\n try:\r\n image.click()\r\n time.sleep(2)\r\n imgUrl = image.get_attribute(\"src\")\r\n urllib.request.urlretrieve(imgUrl, 
\"imgfile/img\" + a + str(count)+ \".jpg\")\r\n count += 1\r\n if count == b:\r\n break\r\n except:\r\n pass\r\n \r\ndriver.close()","repo_name":"Yong-Bin-Park/personal_color","sub_path":"Crawling_python/imgfile/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42047132647","text":"from turtle import Turtle, Screen\nimport turtle\nimport time\nimport random\nimport sys\nscore = 0\n\nprint(\"welcome to snake game\")\nscreen = Screen()\nscreen.bgcolor(\"black\")\nscreen.title(\" SNAKE GAME\")\nscreen.setup(500, 500)\nx = [-40, -20, 0]\nl = []\np = 3\nfor i in range(p):\n tim2 = Turtle()\n tim2.shape(\"square\")\n tim2.goto(x[i], 0)\n\n tim2.color(\"white\")\n l.append(tim2)\n\ndef snakemoveright():\n if not l[0].heading() == 180:\n l[0].setheading(0)\n forwardgoing = True\n\n while forwardgoing:\n if l[0].distance(food) < 20:\n global score\n score +=1\n refresh()\n break\n\n if l[0].xcor() > 610 or l[0].xcor()<-610:\n gameover()\n if l[0].ycor() > 610 or l[0].ycor() < -610:\n gameover()\n p = 3\n for i in range(p-1, 0, -1):\n\n x = l[i - 1].xcor()\n y = l[i - 1].ycor()\n l[i].penup()\n l[i].goto(x, y)\n\n l[0].penup()\n l[0].forward(10)\n\ndef snakemovedown():\n if not l[0].heading() == 90:\n l[0].setheading(270)\n movedown = True\n while movedown:\n if l[0].distance(food) < 20:\n\n global score\n score += 1\n refresh()\n break\n if l[0].xcor() > 610 or l[0].xcor()<-610:\n gameover()\n if l[0].ycor() > 610 or l[0].ycor() < -610:\n gameover()\n p = 3\n for i in range(p-1, 0, -1):\n screen.update()\n x = l[i - 1].xcor()\n y = l[i - 1].ycor()\n l[i].penup()\n l[i].goto(x, y)\n\n l[0].penup()\n\n l[0].forward(10)\n\ndef snakemoveup():\n if not l[0].heading() == 270:\n l[0].setheading(90)\n movedown = True\n while movedown:\n if l[0].distance(food) < 20:\n\n global score\n score += 1\n refresh()\n break\n if l[0].xcor() > 610 or l[0].xcor()<-610:\n gameover()\n if l[0].ycor() > 610 or l[0].ycor() < -610:\n gameover()\n p = 3\n for i in range(p-1, 0, -1):\n screen.update()\n x = l[i - 1].xcor()\n y = l[i - 1].ycor()\n l[i].penup()\n l[i].goto(x, y)\n\n l[0].penup()\n\n l[0].forward(10)\n\ndef snakemoveleft():\n if not l[0].heading() == 0:\n l[0].setheading(180)\n movedown = True\n while movedown:\n if l[0].distance(food) < 20:\n\n global score\n score += 1\n refresh()\n break\n if l[0].xcor() > 610 or l[0].xcor()<-610:\n gameover()\n if l[0].ycor() > 610 or l[0].ycor() < -610:\n gameover()\n p = 3\n for i in range(p-1, 0, -1):\n screen.update()\n x = l[i - 1].xcor()\n y = l[i - 1].ycor()\n l[i].penup()\n l[i].goto(x, y)\n\n l[0].penup()\n\n l[0].forward(10)\n\n\n\n\n\n\n\nfood = Turtle()\nfood.penup()\nxf = random.randint(-200, 200)\nyf = random.randint(-200, 200)\nfood.setpos((xf, yf))\nfood.shape(\"circle\")\nfood.color(\"blue\")\nfood.speed(\"fastest\")\nfood.shapesize(0.5,0.5)\n\n\n\n\n\nscreen.onkey(snakemoveup, \"Up\")\nscreen.onkey(snakemovedown, \"Down\")\nscreen.onkey(snakemoveright, \"Right\")\nscreen.onkey(snakemoveleft, \"Left\")\nscreen.listen()\n\n\nsd = Turtle()\nsd.hideturtle()\nsd.color(\"red\")\nsd.penup()\nsd.goto(0,250)\n\n\nsd.write(f\"scoreboard:{score}\",align=\"center\",font=(\"Aerial\",20,\"normal\"))\n\ndef refresh():\n\n\n xf = random.randint(-200, 200)\n yf = random.randint(-200, 200)\n food.setpos((xf, yf))\n sd.clear()\n sd.write(f\"scoreboard:{score}\", align=\"center\", font=(\"Aerial\", 20, \"normal\"))\ndef gameover():\n\n sdd = 
Turtle()\n    sdd.hideturtle()\n    sdd.color(\"red\")\n    sdd.penup()\n    sdd.write(f\"game over:{score}\", align=\"center\", font=(\"Aerial\", 20, \"normal\"))\n    sys.exit()\n\n\n\np = Screen()\np.exitonclick()\n","repo_name":"RajeevPrakashAD1/python_projects","sub_path":"snake game projet/food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"6861698709","text":"from collections import Counter\nn = int(input())\nncard = [int(x) for x in input().split()]\nm = int(input())\nmcard = [int(z) for z in input().split()]\ncnt = 1\nsol = []\nncard = Counter(ncard)\nfor i in mcard:\n    sol.append(ncard[i])\nfor l in sol:\n    print(l,end=' ')","repo_name":"parkikbum/Jump-to-python","sub_path":"백준/python/10816.py","file_name":"10816.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"27110264733","text":"import bl\ndef user_input_check(message):\n    if not message.isdigit():\n        print('Please enter a number!')\n        return False\n    return True\n\ndef show_message(message):\n    print ('--------------------------------------------------------------')\n    print(message)\n    print ('--------------------------------------------------------------')\n\ndef get_value(message, end=\":\\\n\"):\n    flag = False\n    while flag == False:\n        out_message = input(message + end)\n        flag = user_input_check(out_message)\n    return int(out_message)\n\ndef show_all():\n    show_message(bl.get_all())\n\ndef choose_category():\n    number = int(get_value(\"Enter the number of the desired category.\"))\n    data = bl.choose_category(number)\n    show_message(data)\n\ndef add_to_cart():\n    code = get_value('Select product code')\n    total = get_value(\"How many do you want?\", end='\\n')\n    show_message(bl.add_to_cart(code, total))\n\ndef show_cart():\n    cart = bl.show_cart()\n    show_message(cart)\n    if cart != 'Your cart is empty!':\n        return True\n    return False\n    \ndef buy():\n    show_message(bl.buy())\n\ndef total_price():\n    show_message(bl.get_price_all())\n\ndef main_flow():\n    while True:\n        choosen_value = get_value(\" *****Welcome to the Internet-shop Exclusive!*****\\nChoose your action:\\n0. Exit.\\n1. Show all products\\n2. Choose category\\n3. Add item to cart\\n4. Show cart\\n\", end='')\n        if choosen_value == 0:\n            break\n        elif choosen_value == 1:\n            show_all()\n        elif choosen_value == 2:\n            choose_category()\n            while True:\n                choosen_value_on_category = get_value('Choose your action:\\n0. Back to menu;\\n1. See another category.\\n',end='')\n                if choosen_value_on_category == 0:\n                    break\n                elif choosen_value_on_category == 1:\n                    choose_category()\n                else:\n                    show_message('Please enter a valid value!')\n        elif choosen_value == 3:\n            add_to_cart()\n        elif choosen_value == 4:\n            while True:\n                if not show_cart():\n                    break\n                choosen_value_on_basket = get_value(\"Choose your action:\\n0. Back to menu;\\n1. Buy;\\n2. 
Show total cost;\n\", end='')\n                if choosen_value_on_basket == 0:\n                    break\n                elif choosen_value_on_basket == 1:\n                    buy()\n                elif choosen_value_on_basket == 2:\n                    total_price()\n                else:\n                    show_message('Please enter a valid value!')","repo_name":"MikitaTsiarentsyeu/M-PT1-45-22","sub_path":"Tasks/Anevich_Kristina/HW07/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"43908319373","text":"# Folder for Excel files to process\nEXCEL_FOLDER = 'Excel'\n# Name of the result file\nRESULTS_FILE = 'results.xlsx'\n# Column to sort for the primary and secondary tables\nSORT_BY_COLUMN = 'Warehouse'\n# Name of the primary worksheet\nPRIMARY_COLUMN = 'Primary'\n# Name of the secondary worksheet\nSECONDARY_COLUMN = 'Secondary'\n# Name of the conversion worksheet\nCONVERSION_WORKSHEET = 'Warehouse Conversion'\n","repo_name":"7ard1grad3/InventoryComperison","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"21110884164","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom operator import itemgetter\nfrom queue import PriorityQueue\nimport time\n\n\n\ncwd = os.getcwd() \n\nprint(\"enter size of map 100 200 500 1000 2000\")\nsize = int(input())\nprint(\"enter version of map 1 2 3 4\")\nver = int(input())\nname = 'graphs\\\\graph'+str(size)+'_0.'+str(ver)\n\nprint(\"enter the start point\")\nstart = int(input())\nprint(\"enter target point\")\nend = int(input())\n\n\n\n\n\n'''name = 'graphs\\\\graph100_0.1'''\ncwd = os.path.join(cwd, name )\nE = os.path.join(cwd, \"e.txt\")\nV = os.path.join(cwd, \"v.txt\")\nedge = pd.read_csv(E)\nvetx = pd.read_csv(V)\n\nprint (E)\n\nprint (V)\n\n\n\ndef LCP(str,end,road,hpq):\n    next=hpq.pop()\n    print(next)\n    road.append(next)\n\n    if(str==end):\n        return road\n    if(path[str]==[]):\n        back=road.pop(-1);\n        return LCP(back,end,road,hpq)\n    tmp = len(path[str])\n    for i in range (0,tmp):\n        addcost = path[str][i]\n        addcost.cost += next.cost\n        hpq.append (addcost)\n    print(hpq)\n    hpq.sort(key=getKey)\n    next=hpq.pop()\n    print(next)\n    road.append(next)\n    return LCP(next.next,end,road,hpq)\n\ndef dijkstra (start,end):\n    dist=[None]*size\n    perv=[None] * size\n    for i in range (0,size):\n        dist[i]= size*14.1 \n    dist[start]=0\n    \n    Q={}\n    for i in range (0,size):\n        Q[i]=(i,dist[i])\n    vised = []\n    while (len(Q)>0):\n        tmp = list(Q.values())\n        tmp.sort(key= getdist)\n        temp = tmp.pop(0)\n        u = Q.pop(temp[0])\n        neigbers = path[u[0]]\n        vised.append(u[0])\n        if u[0]==end:\n            \n            return dist[end]\n\n        for i in range (len(neigbers)):\n            \n\n            if (neigbers[i].next) in vised:\n                continue\n            \n            alt=u[1]+neigbers[i].cost\n\n            neb = dist[neigbers[i].next]\n            if(alt<neb):\n                dist[neigbers[i].next]=alt\n                perv[neigbers[i].next]=u[0]\n\ndef DFS(start,goal):\n    frontier=[start]\n    explored=[]\n    sol=[]\n    cost=0\n    while(len(frontier)>0):\n        node = frontier.pop();\n        if (node==goal):\n            print(\"goal\")\n            return sol\n        explored.append(node)\n        for i in range (len(path[node.it])):\n            if(path[node.it][i].next not in explored):\n                child = path[node.it][i]\n                cost+=path[node.it][i].cost\n                frontier.append(child)\n        \ndef UCS( start, goal):\n    visited = set()\n    uqueue = PriorityQueue()\n    uqueue.put((0, start))\n    while uqueue:\n        cost, node = uqueue.get()\n        if node not in visited: \n            visited.add(node)\n            if node == goal:\n                return cost\n            if(node in path.keys()):\n                for i in range (len(path[node])):\n                    if path[node][i].next not in visited:\n                        total_cost = cost + path[node][i].cost\n                        uqueue.put((total_cost, path[node][i].next))\n    return None\n    
\n\n\n\n\ndef A8(start, goal):\n visited = set()\n queue = PriorityQueue()\n gh=(((int(nodeat[end])%10 - int(nodeat[start])%10)**2+ (int(nodeat[end])//10 - int(nodeat[start])//10)**2)**(0.5))*14.1\n queue.put((gh, start, 0))\n while queue:\n fh, node, cost = queue.get()\n if node not in visited:\n visited.add(node)\n if node == goal:\n return cost\n if(node in path.keys()):\n for i in range (len(path[node])):\n if path[node][i].next not in visited:\n total_cost = cost + path[node][i].cost \n gh=(((int(nodeat[end])%10 - int(nodeat[path[node][i].next])%10)**2+ (int(nodeat[end])//10 - int(nodeat[path[node][i].next])//10)**2)**0.5)*14.1\n queue.put((total_cost+gh, path[node][i].next,total_cost))\n return None\n \n\nclass vex:\n def __init__(self, it, next, cost):\n self.it = it\n self.next = next\n self.cost = cost\n self.path = []\n def __repr__(self):\n return '{}: {} {} {}'.format(self.__class__.__name__,\n self.it,\n self.next,\n self.cost\n )\n\ndef getKey(vex):\n return vex.cost \n\nnodeat = vetx.iloc[2:, 0:2].values\nconn = edge.iloc[:,:].values\npath = {}\nptr=0;\n\ni = 0\n\n\nwhile(ptr in range (0,len(edge))):\n\n neigber=[]\n while (conn[ptr][0]==i):\n tmp = vex(conn[ptr][0],conn[ptr][1],conn[ptr][2])\n neigber.append(tmp)\n '''print(tmp)'''\n ptr+=1\n if(ptr==len(edge)):\n break\n '''print(neigber)'''\n neigber.sort(key=getKey)\n '''print(neigber)'''\n path[i]=neigber\n i+=1\nwhile (i 0 else 0\n\n context = {\n 'progress_report': self.progress_report,\n 'pd': self.progress_report.programme_document,\n 'title': self.display_name,\n 'tables': self.create_tables_for_indicator_reports(self.progress_report.indicator_reports.all()),\n 'funds_received_to_date_percentage': funds_received_to_date_percentage,\n }\n\n return context\n\n def get_as_response(self, request):\n try:\n response = render_pdf_to_response(\n request,\n self.template_name,\n self.get_context(),\n )\n return response\n except Exception as exc:\n error_message = 'Error trying to render PDF'\n logger.exception(exc)\n return HttpResponse(error_message)\n\n\nclass ProgressReportListPDFExporter(ProgressReportDetailPDFExporter):\n\n template_name = 'progress_report_list_pdf_export'\n\n def __init__(self, progress_reports):\n self.progress_reports = progress_reports or []\n super().__init__(progress_reports.first())\n self.display_name = '[{:%a %-d %b %-H-%M-%S %Y}] {} Progress Reports Summary'.format(\n timezone.now(), progress_reports.count()\n )\n self.file_name = self.display_name + '.pdf'\n\n def get_context(self):\n section_list = []\n same_pd_across_all_reports = self.progress_reports.values_list('programme_document').distinct().count() == 1\n\n context = {\n 'same_pd_across_all_reports': same_pd_across_all_reports,\n }\n\n for progress_report in self.progress_reports:\n section_data = {\n 'progress_report': progress_report,\n 'tables': self.create_tables_for_indicator_reports(progress_report.indicator_reports.all())\n }\n\n section_list.append(section_data)\n\n context['sections'] = section_list\n\n return context\n","repo_name":"unicef/etools-partner-reporting-portal","sub_path":"django_api/etools_prp/apps/unicef/exports/progress_reports.py","file_name":"progress_reports.py","file_ext":"py","file_size_in_byte":7634,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"15518635030","text":"\"\"\"Expanded length of genres in Venue\n\nRevision ID: 2640c772e75b\nRevises: d5feb630f87a\nCreate Date: 2020-07-07 07:01:35.738439\n\n\"\"\"\nfrom alembic import op\nimport 
sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2640c772e75b'\ndown_revision = 'd5feb630f87a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('venues', sa.Column('upcoming_shows', sa.VARCHAR(length=120), nullable=True))\n    op.add_column('venues', sa.Column('past_shows', sa.VARCHAR(length=120), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('venues', 'upcoming_shows')\n    op.drop_column('venues', 'past_shows')\n    # ### end Alembic commands ###\n","repo_name":"marcoantonio224/fyyur","sub_path":"migrations/versions/2640c772e75b_expanded_length_of_genres_in_venue.py","file_name":"2640c772e75b_expanded_length_of_genres_in_venue.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"42318954546","text":"from artwork import logo\r\ntheBoard = {'7': ' ', '8': ' ', '9': ' ',\r\n            '4': ' ', '5': ' ', '6': ' ',\r\n            '1': ' ', '2': ' ', '3': ' '}\r\n\r\n\r\ndef print_board(board):\r\n    \"\"\"\r\n    Print the Tic Tac Toe board in the console\r\n\r\n    :param board: dictionary representing Tic Tac Toe board\r\n    :return: None\r\n    \"\"\"\r\n    print(board['7'] + '|' + board['8'] + '|' + board['9'])\r\n    print('-+-+-')\r\n    print(board['4'] + '|' + board['5'] + '|' + board['6'])\r\n    print('-+-+-')\r\n    print(board['1'] + '|' + board['2'] + '|' + board['3'])\r\n\r\n\r\ndef player_has_won(board):\r\n    \"\"\"\r\n    Check if player has won in any allowed combination of Tic Tac Toe\r\n\r\n    :param board: dictionary representing Tic Tac Toe board\r\n    :return: True if player has won and False if player did not managed to win\r\n    \"\"\"\r\n    if board['7'] == board['8'] == board['9'] != ' ':  # across the top\r\n        return True\r\n    elif board['4'] == board['5'] == board['6'] != ' ':  # across the middle\r\n        return True\r\n    elif board['1'] == board['2'] == board['3'] != ' ':  # across the bottom\r\n        return True\r\n    elif board['1'] == board['4'] == board['7'] != ' ':  # down the left side\r\n        return True\r\n    elif board['2'] == board['5'] == board['8'] != ' ':  # down the middle\r\n        return True\r\n    elif board['3'] == board['6'] == board['9'] != ' ':  # down the right side\r\n        return True\r\n    elif board['7'] == board['5'] == board['3'] != ' ':  # diagonal\r\n        return True\r\n    elif board['1'] == board['5'] == board['9'] != ' ':  # diagonal\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\n# Show logo and input rules\r\nprint(logo)\r\nprint(\"The field options follow the num keypad order 789 (top line), 456 (second line) and 123 (bottom line)\")\r\n\r\ngame_is_on = True\r\nwhile game_is_on:\r\n    # Print the board\r\n    print_board(theBoard)\r\n\r\n    # Get Player 1 input\r\n    player1_choice_complete = False\r\n    while not player1_choice_complete:\r\n        player_choice = input(\"Player 1 (X) Please select an empty field: \")\r\n        # Handle invalid input scenario\r\n        try:\r\n            # Evaluate if player input is an empty field\r\n            if theBoard[player_choice] == ' ':\r\n                theBoard[player_choice] = 'X'\r\n                player1_choice_complete = True\r\n            else:\r\n                print(\"Field already taken, please choose another when prompted!\")\r\n                player1_choice_complete = False\r\n        except KeyError:\r\n            print(\"Invalid input, allowed inputs are: 789 (top line), 456 (second line) and 123 (bottom line)\")\r\n            player1_choice_complete = False\r\n\r\n    # Print the board\r\n    
print_board(theBoard)\r\n\r\n    # Check if Player 1 won\r\n    if player_has_won(theBoard):\r\n        print(\"\\nGame Over.\\n\")\r\n        print(f\"Player 1 (X) won.\")\r\n        game_is_on = False\r\n        break\r\n\r\n    # Check if all fields are completed if so it is a tie.\r\n    if ' ' not in theBoard.values():\r\n        game_is_on = False\r\n        print(\"\\nGame Over.\\n\")\r\n        print(\"It is a tie\")\r\n        break\r\n\r\n    # Get Player 2 input\r\n    player2_choice_complete = False\r\n    while not player2_choice_complete:\r\n        player_choice = input(\"Player 2 (O) Please select an empty field: \")\r\n        # Handle invalid input scenario\r\n        try:\r\n            # Evaluate if player input is an empty field\r\n            if theBoard[player_choice] == ' ':\r\n                theBoard[player_choice] = 'O'\r\n                player2_choice_complete = True\r\n            else:\r\n                print(\"Field already taken, please choose another when prompted!\")\r\n                player2_choice_complete = False\r\n        except KeyError:\r\n            print(\"Invalid input, allowed inputs are: 789 (top line), 456 (second line) and 123 (bottom line)\")\r\n            player2_choice_complete = False\r\n\r\n    # Check if Player 2 won\r\n    if player_has_won(theBoard):\r\n        print(\"\\nGame Over.\\n\")\r\n        print(f\"Player 2 (O) won.\")\r\n        game_is_on = False\r\n        break\r\n","repo_name":"DanielPinha/Python_100_Challenge_Code","sub_path":"Day 083 Project Tic Tac Toe/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"6981753316","text":"import PetriNets.SimulationEntity\r\nimport PetriNets.Petri\r\n\r\n\r\nclass Combiner(PetriNets.SimulationEntity.SimulationEntity):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.factoryRef = None\r\n\r\n\r\n\r\n    def Initialize(self):\r\n        ''' Common data '''\r\n        eventGuards = {\"t0\": \"null\", \"t1\": \"null\"}\r\n        #state0 = {\"P0\": 0, \"P1\": 0, \"P2\": 1, \"P3\": 0, \"P4\":0}\r\n        eventPriority = {\"t0\": PetriNets.Petri.Transition(1), \"t1\": PetriNets.Petri.Transition(1)}\r\n        transitionMatrix0 = [[-5, -4, -1, 1, 0],\r\n                             [ 0, 0, 1,-1, 1]]\r\n        transitionMatrix1 = [[0, 0, 0, 0, 0],\r\n                             [0, 0, 0, 0, 0]]\r\n        ''' Machine-0 private data '''\r\n        self.petri = PetriNets.Petri.PetriNet()\r\n        self.petri.SetGuards(eventGuards)\r\n        self.petri.SetTimeGrantFunctions(eventGuards)\r\n        self.petri.SetExitFunctions(eventGuards)\r\n        self.petri.SetTransitionMatrix(transitionMatrix0, transitionMatrix1)\r\n        self.petri.SetEventPriority(eventPriority)\r\n        self.petri.SetOwner(self)\r\n        self.factoryRef = globals()['Combiner']()\r\n\r\n","repo_name":"hocaoglumf/Modeling-Simulation","sub_path":"JobShopSim/Combiner.py","file_name":"Combiner.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"25703069242","text":"import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom calidhayte import Coefficients\n\n\n@pytest.fixture\ndef full_data():\n    \"\"\"\n    Dataset composed of random values. 
y df constructed from x_df scaled by\n 4 random coeffs\n \"\"\"\n np.random.seed(72)\n x_df = pd.DataFrame()\n x_df['Values'] = pd.Series(np.random.rand(300))\n x_df['a'] = pd.Series(np.random.rand(300))\n x_df['b'] = pd.Series(np.random.rand(300))\n x_df['c'] = pd.Series(np.random.rand(300))\n coeffs = np.random.randn(4)\n\n y_df = pd.DataFrame()\n modded = x_df * coeffs\n\n y_df['Values'] = modded.sum(axis=1)\n\n return {\n 'x': x_df,\n 'y': y_df\n }\n\n\n@pytest.mark.parametrize(\"split\", [(0.5), (0.1), (0.9), (0), (1)])\n@pytest.mark.coeff\ndef test_data_split(full_data, split):\n \"\"\"\n Tests whether data is split properly\n \"\"\"\n tests = dict()\n coeff_inst = Coefficients(\n x_data=full_data['x'],\n y_data=full_data['y'],\n test_size=split\n )\n split_coeffs = coeff_inst.return_measurements()\n test = split_coeffs['Test']\n train = split_coeffs['Train']\n if split < 0.5 and split != 0:\n tests['Test smaller than Train'] = test.shape[0] < train.shape[0]\n elif split > 0.5 and split != 1:\n tests['Test bigger than Train'] = test.shape[0] > train.shape[0]\n elif split == 0.5 or split in [0, 1]:\n tests['Test equals train'] = test.shape[0] == train.shape[0]\n else:\n tests['If this happens there is an error in the test logic'] = False\n\n for test, result in tests.items():\n print(f\"{test}: {result}\")\n\n assert all(tests.values())\n\n\n@pytest.mark.coeff\ndef test_skl_formatting(full_data):\n \"\"\"\n Tests whether skl coeffs are properly formatted\n \"\"\"\n tests = dict()\n coeff_inst = Coefficients(\n x_data=full_data['x'],\n y_data=full_data['y']\n )\n\n x_df, y_df, sc_x, com_s = coeff_inst.format_skl(\n coeff_inst.x_train.columns[1:]\n )\n tests['x same length as y'] = len(x_df) == y_df.shape[0]\n tests['Combo keys are correct'] = all(\n [key in [\"x\", \"a\", \"b\", \"c\"] for key in com_s]\n )\n tests['x mean is 0'] = (\n pd.DataFrame(x_df).mean().astype(int) == 0\n ).all()\n tests['x std is 1'] = (\n pd.DataFrame(x_df).std().astype(int) == 1\n ).all()\n\n tests['x scale roughly 0.28'] = (sc_x.scale_.round(2) == 0.28).all()\n tests['x scaled mean roughly 0.5'] = (sc_x.mean_.round(1) == 0.5).all()\n\n for test, result in tests.items():\n print(f\"{test}: {result}\")\n\n assert all(tests.values())\n\n\n@pytest.mark.coeff\ndef test_pymc_formatting(full_data):\n \"\"\"\n Tests whether pymc coeffs are properly formatted\n \"\"\"\n tests = dict()\n\n coeff_inst = Coefficients(\n x_data=full_data['x'],\n y_data=full_data['y']\n )\n\n df, bam_s, key_s = coeff_inst.format_pymc(\n coeff_inst.x_train.columns[1:]\n )\n\n tests['df length correct'] = df.shape[0] == 180\n tests['df number of columns correct'] = df.shape[1] == 5\n tests['Bambi keys correct'] = all(\n [key in [\"x\", \"oao\", \"obo\", \"oco\"] for key in bam_s]\n )\n tests['Combo strings correct'] = all(\n [key in [\"x\", \"a\", \"b\", \"c\"] for key in key_s]\n )\n\n assert all(tests.values())\n\n\n@pytest.mark.parametrize(\"reg_func\",\n [\n Coefficients.ols,\n Coefficients.ridge,\n Coefficients.lasso,\n Coefficients.elastic_net,\n Coefficients.lars,\n Coefficients.lasso_lars,\n Coefficients.ransac,\n Coefficients.theil_sen\n ]\n )\n@pytest.mark.parametrize(\"mv_keys\",\n [\n ([[]]),\n ([[\"a\"]]),\n ([[\"b\"]]),\n ([[\"c\"]]),\n ([[\"a\", \"b\"]]),\n ([[\"a\", \"c\"]]),\n ([[\"b\", \"c\"]]),\n ([[\"a\", \"b\", \"c\"]]),\n ([\n [],\n [\"a\"],\n [\"b\"],\n [\"c\"],\n [\"a\", \"b\"],\n [\"a\", \"c\"],\n [\"b\", \"c\"],\n [\"a\", \"b\", \"c\"]\n ])\n ]\n )\n@pytest.mark.coeff\ndef test_skl_single_cals_ex_omp(full_data, 
reg_func, mv_keys):\n \"\"\"\n Combines all possible multivariate key combos with each skl calibration\n method except omp which needs at least 1 mv key\n \"\"\"\n tests = dict()\n\n coeff_inst = Coefficients(\n x_data=full_data['x'],\n y_data=full_data['y']\n )\n for keys in mv_keys:\n reg_func(coeff_inst, keys)\n coeffs = coeff_inst.return_coefficients()\n for technique, df in coeffs.items():\n tests[f'Correct number of rows ({technique})'] = (\n df.shape[0] == len(mv_keys)\n )\n tests[f'Correct number of columns ({technique})'] = (\n df.shape[1] == len(mv_keys[-1]) + 2\n )\n for test, result in tests.items():\n print(f\"{test}: {result}\")\n assert all(tests.values())\n\n\n@pytest.mark.parametrize(\"mv_keys\",\n [\n ([[\"a\"]]),\n ([[\"b\"]]),\n ([[\"c\"]]),\n ([[\"a\", \"b\"]]),\n ([[\"a\", \"c\"]]),\n ([[\"b\", \"c\"]]),\n ([[\"a\", \"b\", \"c\"]]),\n ([\n [\"a\"],\n [\"b\"],\n [\"c\"],\n [\"a\", \"b\"],\n [\"a\", \"c\"],\n [\"b\", \"c\"],\n [\"a\", \"b\", \"c\"]\n ])\n ]\n )\n@pytest.mark.coeff\ndef test_skl_omp(full_data, mv_keys):\n \"\"\"\n Combines all possible multivariate key combos with omp calibration\n method\n \"\"\"\n tests = dict()\n\n coeff_inst = Coefficients(\n x_data=full_data['x'],\n y_data=full_data['y']\n )\n for keys in mv_keys:\n coeff_inst.orthogonal_matching_pursuit(keys)\n coeffs = coeff_inst.return_coefficients()\n for technique, df in coeffs.items():\n tests[f'Correct number of rows ({technique})'] = (\n df.shape[0] == len(mv_keys)\n )\n tests[f'Correct number of columns ({technique})'] = (\n df.shape[1] == len(mv_keys[-1]) + 2\n )\n for test, result in tests.items():\n print(f\"{test}: {result}\")\n assert all(tests.values())\n\n\n@pytest.mark.parametrize(\"mv_keys\",\n [\n ([\n [\"a\"],\n [\"a\", \"b\"],\n [\"a\", \"b\", \"c\"]\n ])\n ]\n )\n@pytest.mark.parametrize(\"ols\", [None, Coefficients.ols])\n@pytest.mark.parametrize(\"ridge\", [None, Coefficients.ridge])\n@pytest.mark.parametrize(\"lasso\", [None, Coefficients.lasso])\n@pytest.mark.parametrize(\"enet\", [None, Coefficients.elastic_net])\n@pytest.mark.parametrize(\"lars\", [None, Coefficients.lars])\n@pytest.mark.parametrize(\"laslars\", [None, Coefficients.lasso_lars])\n@pytest.mark.parametrize(\"ransac\", [None, Coefficients.ransac])\n@pytest.mark.parametrize(\"theilsen\", [None, Coefficients.theil_sen])\n@pytest.mark.coeff\ndef test_combo_cal_skl(\n full_data,\n mv_keys,\n ols,\n ridge,\n lasso,\n enet,\n lars,\n laslars,\n ransac,\n theilsen,\n ):\n \"\"\"\n Tests all combos of skl calibration techniques against several combos of\n multivariate keys\n \"\"\"\n cals_ex_student = [\n ols,\n ridge,\n lasso,\n enet,\n lars,\n laslars,\n ransac,\n theilsen\n ]\n tests = dict()\n coeff_inst = Coefficients(\n x_data=full_data['x'],\n y_data=full_data['y']\n )\n\n for cal in cals_ex_student:\n if cal is not None:\n for key in mv_keys:\n cal(coeff_inst, key)\n\n coeffs = coeff_inst.return_coefficients()\n\n expected_keys = np.array(cals_ex_student, dtype=bool).sum()\n tests['Correct number of keys in coeffs dict'] = (\n len(coeffs.keys()) == expected_keys\n )\n\n for technique, df in coeffs.items():\n tests[f'Correct number of rows ({technique})'] = (\n df.shape[0] == len(mv_keys)\n )\n tests[f'Correct number of columns ({technique})'] = (\n df.shape[1] == len(mv_keys[-1]) + 2\n )\n for test, result in tests.items():\n print(f\"{test}: {result}\")\n assert all(tests.values())\n\n\n@pytest.mark.parametrize(\"mv_keys\",\n [\n ([\n [\"a\"],\n [\"a\", \"b\"],\n [\"a\", \"b\", \"c\"]\n ])\n ]\n 
)\n@pytest.mark.parametrize(\"family\", [\"Gaussian\", \"Student T\"])\n@pytest.mark.coeff\ndef test_bayesian(full_data, mv_keys, family):\n    tests = dict()\n    coeff_inst = Coefficients(\n        x_data=full_data['x'],\n        y_data=full_data['y']\n    )\n    for key in mv_keys:\n        coeff_inst.bayesian(key, family)\n\n    coeffs = coeff_inst.return_coefficients()\n    tests['Correct family in key'] = (\n        list(coeffs.keys())[0] == f\"Bayesian ({family})\"\n    )\n\n    for technique, df in coeffs.items():\n        tests[f'Correct number of rows ({technique})'] = (\n            df.shape[0] == len(mv_keys)\n        )\n        tests[f'Correct number of columns ({technique})'] = (\n            df.shape[1] == (len(mv_keys[-1]) + 2) * 2\n        )\n    for test, result in tests.items():\n        print(f\"{test}: {result}\")\n    assert all(tests.values())\n","repo_name":"CaderIdris/calidhayte","sub_path":"tests/test_coefficients.py","file_name":"test_coefficients.py","file_ext":"py","file_size_in_byte":10762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"27797325636","text":"from src.Unit.OutsideUnit import SVA\nfrom src.Unit.OutsideUnit import BreakPoint\nfrom src.Unit.UnitGroup import UnitSet\n\n\nclass Joint:\n    \"\"\"\n    Insulated joint\n    \"\"\"\n\n    def __init__(self, l_par=None, r_par=None):\n        # structure\n        self.l_par = l_par\n        self.r_par = r_par\n\n        # parameters\n        self.length = None\n        self._rlt_pos = None\n        self._bas_name = None\n\n        # generated\n        self._name = str()\n        self.units = UnitSet()\n\n    @property\n    def parent(self):\n        if self.l_par:\n            return self.l_par\n        elif self.r_par:\n            return self.r_par\n        else:\n            return None\n\n    @property\n    def bas_name(self):\n        from src.TrackCircuitConcept.Section import Section\n\n        if isinstance(self.parent, Section):\n            if self == self.parent.l_joint:\n                return 'Left insulated joint'\n            elif self == self.parent.r_joint:\n                return 'Right insulated joint'\n        if self._bas_name is None:\n            return ''\n        else:\n            return self._bas_name\n\n    @property\n    def name(self):\n        if self.parent is None:\n            return self.bas_name\n        else:\n            name = self.parent.name + '_' + self.bas_name\n            self._name = name\n            return name\n\n    @property\n    def rlt_pos(self):\n        from src.TrackCircuitConcept.Section import Section\n\n        if isinstance(self.parent, Section):\n            if self == self.parent.l_joint:\n                return 0\n            elif self == self.parent.r_joint:\n                return self.parent.length\n        if self._rlt_pos is None:\n            return 0\n        else:\n            return self._rlt_pos\n\n    @property\n    def abs_pos(self):\n        if self.parent is None:\n            return self.rlt_pos\n        else:\n            pos = self.parent.abs_pos + self.rlt_pos\n            return pos\n\n    @property\n    def j_type(self):\n        from src.TrackCircuitConcept.Section import ZPW2000A_STyp\n        if self.length:\n            if self.parent.sec_type == ZPW2000A_STyp:\n                return Electric_2000A_JTyp\n            else:\n                print(\"Warning: section type: '%s'; section length: '%s'; unable to set joint type\"\n                      % (self.parent.sec_type, self.length))\n                return\n        else:\n            return Mechanical_JTyp\n\n    def set_l_par(self, l_par):\n        self.l_par = l_par\n\n    def set_r_par(self, r_par):\n        self.r_par = r_par\n\n    def load_kwargs(self, **kwargs):\n        if 'length' in kwargs:\n            self.length = kwargs['length']\n\n    def init_unit(self):\n        self.j_type.init_unit(joint=self)\n\n    def get_all_units(self):\n        all_units = UnitSet()\n        all_units.update(self.units)\n        return all_units\n\n\nclass Joint_Type:\n    \"\"\"\n    Insulated joint type\n    \"\"\"\n\n    def __init__(self, parent: Joint):\n        self.parent = parent\n\n\nclass Mechanical_JTyp(Joint_Type):\n    \"\"\"\n    Mechanical insulated joint\n    \"\"\"\n\n    @classmethod\n    def init_unit(cls, joint: Joint):\n        unit = BreakPoint(parent=joint, bas_name='Break point')\n        unit.load_kwargs(rlt_pos=0)\n        
joint.units.clear()\n        joint.units.add(unit)\n\n\nclass Electric_2000A_JTyp(Joint_Type):\n    \"\"\"\n    2000A electrical insulated joint\n    \"\"\"\n\n    @classmethod\n    def init_unit(cls, joint: Joint):\n        unit = SVA(parent=joint, bas_name='SVA')\n        unit.load_kwargs(rlt_pos=0)\n        joint.units.clear()\n        joint.units.add(unit)\n\n\nclass Belarus_Electric__JTyp(Joint_Type):\n    \"\"\"\n    Belarus electrical insulated joint\n    \"\"\"\n\n    @classmethod\n    def init_unit(cls, joint: Joint):\n        pass\n","repo_name":"delonxd/Calculate2.0","sub_path":"src/TrackCircuitConcept/Joint.py","file_name":"Joint.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"27119203603","text":"from typing import Any, Dict, List, Sequence\n\nfrom ayx_python_sdk.core.constants import NULL_VALUE_PLACEHOLDER\nfrom ayx_python_sdk.core.field import FieldType\nfrom ayx_python_sdk.core.metadata import Metadata\nfrom ayx_python_sdk.providers.amp_provider.builders.packers import (\n    _BlobPacker,\n    _BoolFalsePacker,\n    _BoolPacker,\n    _BoolTruePacker,\n    _BytePacker,\n    _DatePacker,\n    _DatetimePacker,\n    _DoublePacker,\n    _EmptyStringPacker,\n    _FloatPacker,\n    _IndirectBlobPacker,\n    _IndirectStringPacker,\n    _Int0Packer,\n    _Int16Packer,\n    _IntegerPacker,\n    _LongIntegerPacker,\n    _NullPacker,\n    _Packer,\n    _StringPacker,\n)\nfrom ayx_python_sdk.providers.amp_provider.resources.generated.record_packet_pb2 import (\n    AMPFieldType,\n    Record as ProtobufRecord,\n)\n\n\nclass RecordBuilder:\n    \"\"\"Utilities for converting records between protobuf and core format objects.\"\"\"\n\n    _field_type_to_packer: Dict[Any, _Packer] = {\n        AMPFieldType.STRING: _StringPacker(),\n        AMPFieldType.BLOB: _BlobPacker(),\n        AMPFieldType.SPATIALOBJ: _BlobPacker(),\n        AMPFieldType.BCD: _BlobPacker(),\n        AMPFieldType.BOOL: _BoolPacker(),\n        AMPFieldType.INT0: _Int0Packer(),\n        AMPFieldType.UINT8: _BytePacker(),\n        AMPFieldType.INT16: _Int16Packer(),\n        AMPFieldType.INT32: _IntegerPacker(),\n        AMPFieldType.INT64: _LongIntegerPacker(),\n        AMPFieldType.FLOAT: _FloatPacker(),\n        AMPFieldType.DOUBLE: _DoublePacker(),\n        AMPFieldType.DATE: _DatePacker(),\n        AMPFieldType.TIME: _IntegerPacker(),\n        AMPFieldType.DATETIME: _DatetimePacker(),\n        AMPFieldType.INDIRECTSTRING: _IndirectStringPacker(),\n        AMPFieldType.INDIRECTBLOB: _IndirectBlobPacker(),\n        AMPFieldType.INDIRECTSPATIAL: _IndirectBlobPacker(),\n        AMPFieldType.BOOLFALSE: _BoolFalsePacker(),\n        AMPFieldType.BOOLTRUE: _BoolTruePacker(),\n        AMPFieldType.EMPTYSTRING: _EmptyStringPacker(),\n        AMPFieldType.NULL: _NullPacker(),\n    }\n\n    _blob_cosmetic_types_to_amp_type = {\n        FieldType.blob: AMPFieldType.BLOB,\n        FieldType.spatialobj: AMPFieldType.SPATIALOBJ,\n        FieldType.fixeddecimal: AMPFieldType.BCD,\n    }\n\n    _datetime_cosmetic_types_to_amp_type = {\n        FieldType.date: AMPFieldType.DATE,\n        FieldType.datetime: AMPFieldType.DATETIME,\n        FieldType.time: AMPFieldType.TIME,\n    }\n\n    _float_cosmetic_types_to_amp_type = {\n        FieldType.float: AMPFieldType.FLOAT,\n        FieldType.double: AMPFieldType.DOUBLE,\n    }\n\n    _int_cosmetic_types_to_amp_type = {\n        FieldType.byte: AMPFieldType.UINT8,\n        FieldType.int16: AMPFieldType.INT16,\n        FieldType.int32: AMPFieldType.INT32,\n        FieldType.int64: AMPFieldType.INT64,\n    }\n\n    _null_values = {NULL_VALUE_PLACEHOLDER, None}\n\n    @classmethod\n    def from_protobuf(cls, record: ProtobufRecord) -> List:\n        \"\"\"\n        Convert a protobuf to a list of values.\n\n        Parameters\n        ----------\n        record\n            Protobuf representation of a record.\n\n        Returns\n        -------\n        List\n            All elements of the record as a list.\n        \"\"\"\n        
start_byte_idx = 0\n parsed_record = []\n\n for amp_type in record.types:\n try:\n packer = cls._field_type_to_packer[amp_type]\n except KeyError:\n raise ValueError(f\"Packer not found for type: {amp_type}\")\n\n element, stride = packer.unpack(record.data, start_byte_idx)\n\n parsed_record.append(element)\n start_byte_idx += stride\n\n return parsed_record\n\n @classmethod\n def to_protobuf(cls, record: Sequence, metadata: Metadata) -> ProtobufRecord:\n \"\"\"\n Convert a sequence of values to a protobuf.\n\n Parameters\n ----------\n record\n A sequence of values.\n metadata\n Metadata associated with the values.\n\n Returns\n -------\n ProtobufRecord\n The protobuf representation of the passed in record.\n \"\"\"\n assert len(record) == len(metadata)\n\n raw_data = bytes()\n amp_types = []\n for field, element in zip(metadata, record):\n try:\n amp_type = cls._get_amp_type(field.type, element)\n packer = cls._field_type_to_packer[amp_type]\n except KeyError:\n raise ValueError(f\"Packer not found for type: {field.type}\")\n\n raw_data += packer.pack(element)\n amp_types.append(amp_type)\n\n return ProtobufRecord(data=raw_data, types=amp_types)\n\n @classmethod\n def _get_amp_type(cls, cosmetic_type: FieldType, element: Any) -> AMPFieldType:\n if element in cls._null_values:\n return AMPFieldType.NULL\n\n if cosmetic_type in [\n FieldType.string,\n FieldType.v_string,\n FieldType.v_wstring,\n FieldType.wstring,\n ]:\n if element == \"\":\n return AMPFieldType.EMPTYSTRING\n\n return AMPFieldType.STRING\n\n if cosmetic_type in cls._blob_cosmetic_types_to_amp_type:\n return cls._blob_cosmetic_types_to_amp_type[cosmetic_type]\n\n if cosmetic_type == FieldType.bool:\n if element:\n return AMPFieldType.BOOLTRUE\n\n return AMPFieldType.BOOLFALSE\n\n if element == 0:\n return AMPFieldType.INT0\n\n if cosmetic_type in cls._float_cosmetic_types_to_amp_type:\n return cls._float_cosmetic_types_to_amp_type[cosmetic_type]\n\n if cosmetic_type in cls._datetime_cosmetic_types_to_amp_type:\n return cls._datetime_cosmetic_types_to_amp_type[cosmetic_type]\n\n if cosmetic_type in cls._int_cosmetic_types_to_amp_type:\n return cls._int_cosmetic_types_to_amp_type[cosmetic_type]\n\n raise ValueError(\n f\"AMP field type not found for cosmetic type {cosmetic_type} and value {element}.\"\n )\n","repo_name":"beesechuuuuurger/gptayx","sub_path":".ayx_cli.cache/dist/ayx_python_sdk/providers/amp_provider/builders/record_builder.py","file_name":"record_builder.py","file_ext":"py","file_size_in_byte":5995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18470794363","text":"# Link:- https://www.codechef.com/problems/NUM239\n\nfor _ in range(int(input())):\n count = 0\n x, y = map(int, input().split())\n\n pretty_nums = ['2', '3', '9']\n for i in range(x, y+1):\n i = str(i)\n if i[-1] in pretty_nums:\n count += 1\n\n print(count)\n","repo_name":"sVinit108/Solved_DSA_Questions","sub_path":"NUM239.PY","file_name":"NUM239.PY","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24644465474","text":"import re\nfrom os import path\nfrom json import dumps as dict_to_str, loads as str_to_dict\nfrom cafe.engine.models.base import AutoMarshallingModel\nfrom cloudcafe.cloudkeep.barbican.secrets.models.secret import Secret\n\n\nclass Order(AutoMarshallingModel):\n\n def __init__(self, secret, secret_href=None, status=None, order_ref=None):\n super(Order, self).__init__()\n 
self.secret = secret\n        self.secret_href = secret_href\n        self.status = status\n        self.order_ref = order_ref\n\n    def get_id_from_ref(self, ref):\n        \"\"\"Returns id from reference.\"\"\"\n        ref_id = None\n        if ref is not None and len(ref) > 0:\n            ref_id = path.split(ref)[1]\n        return ref_id\n\n    def get_id(self):\n        \"\"\"Returns order id.\"\"\"\n        return self.get_id_from_ref(ref=self.order_ref)\n\n    def get_secret_id(self):\n        \"\"\"Returns id of secret created by order.\"\"\"\n        return self.get_id_from_ref(ref=self.secret_href)\n\n    def _obj_to_json(self):\n        secret_dict = self.secret._obj_to_dict()\n        return dict_to_str({'secret': secret_dict})\n\n    @classmethod\n    def _json_to_obj(cls, serialized_str):\n        json_dict = str_to_dict(serialized_str)\n        return cls._dict_to_obj(json_dict)\n\n    @classmethod\n    def _dict_to_obj(cls, json_dict):\n        args = {\n            'order_ref': json_dict.get('order_ref'),\n            'status': json_dict.get('status'),\n            'secret_href': json_dict.get('secret_ref'),\n            'secret': Secret._dict_to_obj(json_dict.get('secret'))\n        }\n        return Order(**args)\n\n\nclass OrderRef(AutoMarshallingModel):\n\n    def __init__(self, reference=None):\n        super(OrderRef, self).__init__()\n        self.reference = reference\n\n    @classmethod\n    def _json_to_obj(cls, serialized_str):\n        json_dict = str_to_dict(serialized_str)\n        return cls._dict_to_obj(json_dict)\n\n    @classmethod\n    def _dict_to_obj(cls, json_dict):\n        return OrderRef(reference=json_dict.get('order_ref'))\n\n\nclass OrderGroup(AutoMarshallingModel):\n\n    def __init__(self, orders, next_list=None, previous_list=None):\n        super(OrderGroup, self).__init__()\n\n        self.orders = orders\n        self.next = next_list\n        self.previous = previous_list\n\n    def get_ids(self):\n        return [order.get_id() for order in self.orders]\n\n    def get_next_query_data(self):\n        matches = re.search('.*\\\?(.*?)\\\=(\\\d*)&(.*?)\\\=(\\\d*)', self.next)\n        return {\n            'limit': matches.group(2),\n            'offset': matches.group(4)\n        }\n\n    @classmethod\n    def _json_to_obj(cls, serialized_str):\n        json_dict = str_to_dict(serialized_str)\n        return cls._dict_to_obj(json_dict)\n\n    @classmethod\n    def _dict_to_obj(cls, json_dict):\n        orders, next_list, prev_list = [], None, None\n\n        for order_dict in json_dict.get('orders'):\n            orders.append(Order._dict_to_obj(order_dict))\n\n        if 'next' in json_dict:\n            next_list = json_dict.get('next')\n        if 'previous' in json_dict:\n            prev_list = json_dict.get('previous')\n        return OrderGroup(orders, next_list, prev_list)\n","repo_name":"jcourtois/rpc9_cloudcafe","sub_path":"cloudcafe/cloudkeep/barbican/orders/models/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"8425823421","text":"from tkinter import Tk, Entry, Button\r\n\r\n# Function to calculate the result\r\ndef calcular():\r\n    try:\r\n        expressao = entrada.get()\r\n        resultado = eval(expressao)\r\n        entrada.delete(0, \"end\")\r\n        entrada.insert(\"end\", str(resultado))\r\n    except Exception:\r\n        entrada.delete(0, \"end\")\r\n        entrada.insert(\"end\", \"Error\")\r\n\r\n# Function to clear the input\r\ndef limpar():\r\n    entrada.delete(0, \"end\")\r\n\r\n# Main window setup\r\njanela = Tk()\r\njanela.title(\"Calculator\")\r\n\r\n# Text entry setup\r\nentrada = Entry(janela, width=25, justify=\"right\")\r\nentrada.grid(row=0, column=0, columnspan=4, padx=10, pady=10)\r\n\r\n# Button setup\r\nbotoes = [\r\n    (\"7\", 1, 0), (\"8\", 1, 1), (\"9\", 1, 2), (\"/\", 1, 3),\r\n    (\"4\", 2, 0), 
(\"5\", 2, 1), (\"6\", 2, 2), (\"*\", 2, 3),\r\n (\"1\", 3, 0), (\"2\", 3, 1), (\"3\", 3, 2), (\"-\", 3, 3),\r\n (\"0\", 4, 0), (\".\", 4, 1), (\"C\", 4, 2), (\"+\", 4, 3)\r\n]\r\n\r\nfor texto, linha, coluna in botoes:\r\n if texto == \"C\":\r\n botao = Button(janela, text=texto, width=5, height=2, command=limpar)\r\n else:\r\n botao = Button(janela, text=texto, width=5, height=2, command=lambda texto=texto: entrada.insert(\"end\", texto))\r\n botao.grid(row=linha, column=coluna)\r\n\r\nbotao_calcular = Button(janela, text=\"=\", width=25, height=2, command=calcular)\r\nbotao_calcular.grid(row=5, column=0, columnspan=4, padx=10, pady=10)\r\n\r\n# Iniciar o loop principal da aplicação\r\njanela.mainloop()\r\n","repo_name":"felipeioavasso/Calculator_with_python","sub_path":"calculadora/calculator_gi.py","file_name":"calculator_gi.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9611806599","text":"\"\"\"\nImplementation of various sequence (ids) to vector models.\n\"\"\"\nimport os\nfrom turtle import forward\nimport numpy as np\nimport math\nimport sys\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom transformers import PreTrainedModel, PretrainedConfig\nfrom transformers.models.bert import BertModel, BertConfig\n\nfrom .const import ACT_MAPPING\n\nclass SeqEncoder(nn.Module):\n \"\"\"\n Base class of sequence (ids or vectors) to sequence encoder models.\n \"\"\"\n def __init__(self):\n super().__init__()\n \n def get_output_dim(self):\n raise NotImplementedError()\n \n\nclass SeqEncoder_RNN(SeqEncoder):\n def __init__(self,\n rnn_type: str,\n input_size: int,\n hidden_size: int,\n num_layers: int,\n dropout: float = 0.,\n bidirectional: bool = True\n ):\n super().__init__()\n\n rnn_cls = nn.LSTM if rnn_type.lower() == 'lstm' else \\\n nn.GRU if rnn_type.lower() == 'gru' else None\n if not rnn_cls:\n raise ValueError(f'Unknown rnn_type: {rnn_type}')\n \n self.rnn = rnn_cls(input_size, hidden_size, num_layers, batch_first = True,\n bidirectional = bidirectional, dropout = dropout\n )\n def forward(self, x):\n output, _ = self.rnn(x)\n return output\n \n def get_output_dim(self):\n return self.rnn.hidden_size*2 if self.rnn.bidirectional else self.rnn.hidden_size\n\nclass SeqEncoder_CNNRNN(SeqEncoder):\n \"\"\"\n CNN + RNN for seq2seq encoder\n \"\"\"\n def __init__(self,\n rnn_type: str,\n input_size: int,\n hidden_size: int,\n num_layers: int,\n cnn_out_size: Optional[int] = None,\n kernel_size: int = 3,\n dropout: float = 0.,\n bidirectional: bool = True,\n activation: str = 'tanh'\n ):\n super().__init__()\n\n if cnn_out_size is None:\n cnn_out_size = hidden_size\n self.cnn = nn.Conv1d(\n input_size,\n cnn_out_size,\n kernel_size = kernel_size\n )\n self.cnn_act = ACT_MAPPING[activation]()\n\n self.rnn = SeqEncoder_RNN(rnn_type, cnn_out_size, hidden_size, num_layers,\n dropout=dropout, bidirectional=bidirectional\n )\n def forward(self, x):\n cnn_out = self.cnn(torch.transpose(x, 2, 1))\n # (batch_size, hid_dim, seq_len)\n cnn_out = torch.transpose(cnn_out, 2, 1)\n cnn_out = self.cnn_act(cnn_out)\n\n rnn_out = self.rnn(cnn_out)\n return rnn_out\n \n def get_output_dim(self):\n return 
self.rnn.get_output_dim()","repo_name":"srhthu/deepnlp","sub_path":"deepnlp/nn_modules/seq_encoder.py","file_name":"seq_encoder.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"3330134740","text":"import unittest\n\nimport geopandas as gpd\nimport numpy as np\nfrom libpysal.examples import load_example\nfrom segregation.local import LocalRelativeCentralization\n\n\nclass Local_Relative_Centralization_Tester(unittest.TestCase):\n    def test_Local_Relative_Centralization(self):\n        s_map = gpd.read_file(load_example(\"Sacramento1\").get_path(\"sacramentot2.shp\"))\n        df = s_map[[\"geometry\", \"BLACK\", \"TOT_POP\"]]\n        index = LocalRelativeCentralization(df, \"BLACK\", \"TOT_POP\")\n        np.testing.assert_almost_equal(\n            index.statistics[0:10],\n            np.array(\n                [\n                    0.03443055,\n                    -0.29063264,\n                    -0.19110976,\n                    0.24978919,\n                    0.01252249,\n                    0.61152941,\n                    0.78917647,\n                    0.53129412,\n                    0.04436346,\n                    -0.20216325,\n                ]\n            ),\n        )\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"pysal/segregation","sub_path":"segregation/tests/test_local_relative_centralization.py","file_name":"test_local_relative_centralization.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"37"} {"seq_id":"12427779300","text":"from __future__ import annotations\n\nfrom src.bounding_box import BoundingBox\nfrom src.point import Point\nfrom src.rectangle import Rectangle\n\n\nclass BoundingBoxB(Rectangle):\n    def __init__(self, left_lower: Point, right_upper: Point) -> None:\n        self.left_lower = left_lower\n        self.right_upper = right_upper\n        width = self.right_upper.x - self.left_lower.x\n        height = self.right_upper.y - self.left_lower.y\n        super().__init__(width=width, height=height)\n        self.__left_upper = None\n        self.__right_lower = None\n        self.__centre = None\n\n    def __str__(self) -> str:\n        return \"BoundingBoxB: left_lower=\" + str(self.left_lower) +\\\n               \", right_upper=\" + str(self.right_upper)\n\n    def convert(self) -> BoundingBox:\n        return BoundingBox(width=self.width, height=self.height, left_upper=self.left_lower)\n\n    @property\n    def left_upper(self) -> Point:\n        self.__left_upper = Point(p=(self.left_lower.x, self.right_upper.y))\n        return self.__left_upper\n\n    @property\n    def right_lower(self) -> Point:\n        self.__right_lower = Point(p=(self.right_upper.x, self.left_lower.y))\n        return self.__right_lower\n\n    @property\n    def centre(self) -> Point:\n        shift = Point(p=(self.width / 2, self.height / 2))\n        self.__centre = self.left_lower + shift\n        return self.__centre\n\n    def get_union(self, other: BoundingBoxB) -> BoundingBoxB:\n        union_left_lower = Point(p=(min(self.left_lower.x, other.left_lower.x),\n                                    min(self.left_lower.y, other.left_lower.y))\n                                 )\n        union_right_upper = Point(p=(max(self.right_upper.x, other.right_upper.x),\n                                     max(self.right_upper.y, other.right_upper.y))\n                                  )\n        union = BoundingBoxB(left_lower=union_left_lower, right_upper=union_right_upper)\n        return union\n\n    def get_intersection(self, other: BoundingBoxB) -> BoundingBoxB:\n        intersection_left_lower = Point(p=(max(self.left_lower.x, other.left_lower.x),\n                                           max(self.left_lower.y, other.left_lower.y))\n                                        )\n        intersection_right_upper = Point(p=(min(self.right_upper.x, other.right_upper.x),\n                                            min(self.right_upper.y, other.right_upper.y))\n                                         )\n        intersection_width = intersection_right_upper.x-intersection_left_lower.x\n        intersection_height = 
intersection_right_upper.y-intersection_left_lower.y\n\n        intersection = None\n        if intersection_height > 0 and intersection_width > 0:\n            intersection = BoundingBoxB(left_lower=intersection_left_lower,\n                                        right_upper=intersection_right_upper)\n\n        return intersection\n\n\ndef main() -> None:\n    bounding_box_1 = BoundingBoxB(left_lower=Point(p=(2.1, 2)), right_upper=Point(p=(3, 3)))\n    print(bounding_box_1)\n    bounding_box_2 = BoundingBoxB(left_lower=Point(p=(2, 2)), right_upper=Point(p=(4, 4)))\n    print(\"A different representation of the bounding box: \", bounding_box_1.convert())\n    print(\"The union of the two bounding boxes: \", bounding_box_1.get_union(bounding_box_2))\n    print(\"The intersection of the two bounding boxes: \", bounding_box_1.get_intersection(bounding_box_2))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Epidemic-Models/math-prog-python","sub_path":"src/bounding_box_b.py","file_name":"bounding_box_b.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"30912404888","text":"import os\nimport unittest\nfrom HTMLTestRunner_cn import HTMLTestRunner\nfrom time import sleep\n\nfrom ddt import data, unpack, ddt\n\nfrom framework.UtilsDate import UtilsDate\nfrom framework.UtilsFile import UtilsFile\nfrom framework.UtilsRandom import UtilsRandom\nfrom testCase.crm.BaseCase import BaseCase\n\n\n@ddt\nclass TestCRMCreateMutiCustomers(BaseCase):\n    # Create a customer\n    def creatCustomer(self, customerName):\n        # Click \"New customer\"\n        self.driver.click(\"xpath=//a[contains(@href,'/index.php?m=customer&a=add')]\")\n        # Enter the name\n        self.driver.send_keys(\"id=name\", customerName)\n        print('current time',UtilsDate.getCurrentDateAndTime())\n        self.driver.send_keys(\"id=nextstep_time\", UtilsDate.getCurrentDateAndTime())\n        self.driver.js(\n            \"document.getElementById('add_body').children[0].children[0].children[0].children[0].scrollTop=10000;\")\n        # Select from the dropdown list, method 1\n        self.driver.click(\"xpath=//option[contains(@value,'电话营销')]\")\n        # Primary contact name\n        self.driver.send_keys(\"id=con_contacts[name]\", customerName)\n        # Select the role\n        self.driver.find_element(\"id=con_contacts[role]\").send_keys(\"普通人\")\n        # Select the honorific, raises an error\n        self.driver.click(\"xpath=//label[contains(text(),'女士')]\")\n        # Enter the mobile number\n        self.driver.send_keys(\"id=con_contacts[telephone]\", UtilsRandom.getMobilePhone())\n        # Enter the email address\n        self.driver.send_keys(\"id=con_contacts[email]\", \"820061154@qq.com\")\n        self.driver.click(\"id=save_submit\")\n        success = self.driver.find_element(\"xpath=//span[contains(text(),'\" + customerName + \"')]\")\n        self.assertTrue(success.is_displayed())\n\n    # @data(*UtilsFile.get_csv_data(filePath='account.csv'))\n    # # @data(['19926451606','ujm159yhn753'],['13126321822','admin1234'])\n    # @unpack\n    # def test_crmBusinessFlow(self, username, password):\n    #     \"\"\"Create multiple customers in a loop with multiple accounts\"\"\"\n    #     self.login(username, password)\n    #     # Open customer management\n    #     self.driver.click(\"xpath=//span[contains(text(),'客户管理')]\")\n    #     # Click \"customers\" in the customer management list\n    #     self.driver.click(\"xpath=//a[contains(@href,'/index.php?m=customer&a=index&by=me')]\")\n    #     for num in range(5):\n    #         customerName = UtilsRandom.getChineseName()\n    #         print('new created customer name is:', customerName)\n    #         self.creatCustomer(customerName)\n    #         sleep(3)\n\nif __name__ == \"__main__\":\n    report_path = os.path.dirname(__file__) + \"/report/\" + \"TestCRMBusinessFlow.html\"\n    suite = unittest.TestLoader().loadTestsFromTestCase(TestCRMCreateMutiCustomers)\n    runer = HTMLTestRunner(title=\"JianxinCRM Test Report\",\n                           description=\"Create customer -> Create business opportunity -> Create contract\",\n                           
stream=open(report_path, \"wb\"),\n                           verbosity=2, retry=0, save_last_try=True)\n    runer.run(suite)\n\n","repo_name":"yuxichen2019/AotuTestStudy","sub_path":"python_workspace/seleniumStudy/testCase/crm/qTestCRMCreateMutiCustomers.py","file_name":"qTestCRMCreateMutiCustomers.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"37261931925","text":"import itertools\nimport cProfile\nimport timeit\n\nfrom common import prompt_single_numeric_input, process_and_print_test_case_results\n\n\ndef get_string_number(character_count: int):\n    combinations = itertools.product('0123456789', repeat=character_count)\n    strings = list(map(lambda x: ''.join(x), combinations))\n    filtered_strings = [string for string in strings if \"13\" not in string]\n\n    return len(filtered_strings)\n\n\ndef get_string_number2(character_count: int):\n    if character_count == 1:\n        return 10\n\n    combinations = list(itertools.product('0123456789', repeat=character_count))\n    count = 0\n    combination_length = len(combinations[0])\n    for combination in combinations:\n        for i in range(combination_length - 1):\n            if combination[i] == '1' and combination[i + 1] == '3':\n                break\n        else:\n            count = count + 1\n\n    return count\n\n\ndef test_case1(_: int):\n    character_count = prompt_single_numeric_input(0, 1000000009)\n\n    return get_string_number(character_count)\n\n\ndef test_case2(_: int):\n    character_count = prompt_single_numeric_input(0, 1000000009)\n\n    return get_string_number2(character_count)\n\n\ndef run():\n    if __name__ == \"__main__\":\n        test_case_count = prompt_single_numeric_input(1, 100000)\n        process_and_print_test_case_results(test_case_count, test_case2)\n\n\ndef run_repeat1():\n    get_string_number(3)\n\n\ndef run_repeat2():\n    get_string_number2(3)\n\n\nprint(timeit.repeat(\"run_repeat1()\", number=10000, globals=globals()))\nprint(timeit.repeat(\"run_repeat2()\", number=10000, globals=globals()))\n","repo_name":"Lemao81/python_code_monkey","sub_path":"unlucky_13.py","file_name":"unlucky_13.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"14785975934","text":"import os\nimport json\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom module_train.basic_model.model_architecture_dl.sub_layer.cnn_feature_extract import CNNFeatureExtract1D, CNNFeatureExtract2D\nfrom module_train.basic_model.model_architecture_dl.sub_layer.layer_high_way import Highway\n\n\ndef xavier_uniform_init(m):\n    \"\"\"\n    Xavier initializer to be used with model.apply\n    \"\"\"\n    if type(m) == nn.Linear:\n        nn.init.xavier_uniform_(m.weight.data)\n\n\nclass LSTMCNNWordCharBase(nn.Module):\n    def __init__(self, cf, vocabs):\n\n        super(LSTMCNNWordCharBase, self).__init__()\n        vocab_word, vocab_char, vocab_label = vocabs\n        self.vocabs = vocabs\n        self.cf = cf\n\n        self.output_size = len(vocab_label)\n        self.word_embedding_dim = cf['word_embedding_dim']\n        self.hidden_size_word = cf['hidden_size_word']\n\n        self.char_embedding_dim = cf['char_embedding_dim']\n        self.hidden_size_char_lstm = cf['hidden_size_char_lstm']\n\n        self.word_embedding_layer = nn.Embedding(len(vocab_word), self.word_embedding_dim)\n        self.char_embedding_layer = None\n\n        self.use_highway_char = cf['use_highway_char']\n        self.use_last_as_ft = cf['use_last_as_ft']\n        self.dropout_rate = cf['dropout_rate']\n\n        self.use_char_cnn = cf['use_char_cnn']\n        if self.use_char_cnn:\n            
self.char_cnn_filter_num = cf['char_cnn_filter_num']\n self.char_window_size = cf['char_window_size']\n self.dropout_cnn = cf['dropout_cnn']\n\n self.hidden_size_char = 0\n if vocab_char is not None and self.char_embedding_dim > 0:\n self.char_embedding_layer = nn.Embedding(len(vocab_char), self.char_embedding_dim)\n\n if not self.use_char_cnn:\n self.lstm_char = nn.LSTM(self.char_embedding_dim,\n self.hidden_size_char_lstm,\n num_layers=1,\n batch_first=True,\n bidirectional=False,\n dropout=self.dropout_rate)\n self.hidden_size_char = self.hidden_size_char_lstm\n else:\n if cf['D_cnn'] == '1_D':\n self.layer_char_cnn = CNNFeatureExtract1D(self.char_embedding_dim,\n self.char_cnn_filter_num,\n self.char_window_size,\n self.dropout_cnn)\n else:\n self.layer_char_cnn = CNNFeatureExtract2D(self.char_embedding_dim,\n self.char_cnn_filter_num,\n self.char_window_size,\n self.dropout_cnn)\n\n self.hidden_size_char = self.char_cnn_filter_num * len(self.char_window_size)\n\n if self.use_highway_char:\n self.highway_char = Highway(self.hidden_size_char, num_layers=1, f=torch.relu)\n\n self.embedding_word_lstm = self.word_embedding_dim + self.hidden_size_char\n if vocab_word.vectors is not None:\n if self.word_embedding_dim != vocab_word.vectors.shape[1]:\n raise ValueError(\"expect embedding word: {} but got {}\".format(self.word_embedding_dim,\n vocab_word.vectors.shape[1]))\n\n self.word_embedding_layer.weight.data.copy_(vocab_word.vectors)\n self.word_embedding_layer.requires_grad = False\n\n self.lstm_word = nn.LSTM(self.embedding_word_lstm,\n self.hidden_size_word,\n num_layers=1,\n batch_first=True,\n bidirectional=True,\n dropout=self.dropout_rate)\n\n if self.use_last_as_ft:\n self.hidden_final = self.hidden_size_word * 4\n else:\n self.hidden_final = self.hidden_size_word * 2\n\n self.dropout = nn.Dropout(self.dropout_rate)\n self.label = nn.Linear(self.hidden_final, self.output_size)\n\n # reference this repo\n # https://github.com/wabyking/TextClassificationBenchmark/blob/master/models/LSTMwithAttention.py\n # https://www.aclweb.org/anthology/P16-2034\n def attention_net(self, lstm_output, lstm_final_state):\n attn_weights = torch.bmm(lstm_output, lstm_final_state.unsqueeze(2)).squeeze(2)\n soft_attn_weights = F.softmax(attn_weights, 1).unsqueeze(2)\n new_hidden_state = torch.bmm(lstm_output.transpose(1, 2), soft_attn_weights).squeeze(2)\n\n return new_hidden_state\n\n def compute(self, batch):\n # input_word_emb = [batch_size, seq_sent, word_emb_dim]\n inputs_word_emb = self.word_embedding_layer(batch.inputs_word)\n inputs_word_emb = self.dropout(inputs_word_emb)\n\n if self.char_embedding_layer is not None:\n\n # batch.char = [batch_size, seq_len, max_len_word]\n # input_char_emb = [batch x seq_len, max_len_word, char_emb_dim]\n inputs_char_emb = self.char_embedding_layer(batch.inputs_char.view(-1, batch.inputs_char.shape[-1]))\n inputs_char_emb = self.dropout(inputs_char_emb)\n\n if not self.use_char_cnn:\n seq_len = inputs_word_emb.shape[1]\n\n # final_hidden_state_char = [1, batch x seq_len, hidden_size_char]\n _, (final_hidden_state_char, _) = self.lstm_char(inputs_char_emb)\n\n # input_char_emb = [batch, seq_len, hidden_size_char]\n output_char_layer = final_hidden_state_char.view(-1, seq_len, self.hidden_size_char)\n else:\n output_char_conv_layer = self.layer_char_cnn(inputs_char_emb)\n output_char_layer = output_char_conv_layer.view(batch.inputs_char.shape[0],\n batch.inputs_char.shape[1],\n output_char_conv_layer.shape[1])\n\n if self.use_highway_char:\n output_char_layer = 
self.highway_char(output_char_layer)\n\n inputs_word_emb = torch.cat([inputs_word_emb, output_char_layer], -1)\n\n # output_hidden_word = [batch, seq_len, hidden_size_word]\n # final_hidden_state = [num_layer * directions, batch, hidden_size_word]\n # instead use concat two begin and end offset of each direction.\n # we use last output of hidden sequence concat with start of hidden sequence (backward).\n # because we have problem with attention (can't concat end and start offset) and require attention base sequence\n # we can change attention to self-attention mechanism.\n output_hidden_word, (_, _) = self.lstm_word(inputs_word_emb)\n final_hidden_state_word = output_hidden_word[:, -1, :]\n attn_output = self.attention_net(output_hidden_word, final_hidden_state_word)\n\n final_output = attn_output\n if self.use_last_as_ft:\n final_output = torch.cat((final_output, final_hidden_state_word), dim=-1)\n final_output = self.dropout(final_output)\n\n return final_output\n\n def forward(self, batch):\n with torch.no_grad():\n final_output = self.compute(batch)\n logits = self.label(final_output)\n return logits\n\n def loss(self, batch):\n target = batch.labels\n final_output = self.compute(batch)\n\n logits = self.label(final_output)\n # class_weights = torch.FloatTensor([0.2, 0.42, 0.38]).cuda()\n loss = F.cross_entropy(logits, target)\n\n predict_value = torch.max(logits, 1)[1]\n list_predict = predict_value.cpu().numpy().tolist()\n list_target = target.cpu().numpy().tolist()\n\n return loss, list_predict, list_target\n\n @classmethod\n def create(cls, path_folder_model, cf, vocabs, device_set=\"cuda:0\"):\n model = cls(cf, vocabs)\n if cf['use_xavier_weight_init']:\n model.apply(xavier_uniform_init)\n\n if torch.cuda.is_available():\n device = torch.device(device_set)\n model = model.to(device)\n\n path_vocab_file = os.path.join(path_folder_model, \"vocabs.pt\")\n torch.save(vocabs, path_vocab_file)\n\n path_config_file = os.path.join(path_folder_model, \"model_cf.json\")\n with open(path_config_file, \"w\") as w_config:\n json.dump(cf, w_config)\n\n return model\n\n @classmethod\n def load(cls, path_folder_model, path_model_checkpoint):\n path_vocab_file = os.path.join(path_folder_model, 'vocabs.pt')\n path_config_file = os.path.join(path_folder_model, 'model_cf.json')\n\n if not os.path.exists(path_vocab_file) or \\\n not os.path.exists(path_config_file) or \\\n not os.path.exists(path_model_checkpoint):\n raise OSError(\" 1 of 3 file does not exist\")\n\n vocabs = torch.load(path_vocab_file)\n with open(path_config_file, \"r\") as r_config:\n cf = json.load(r_config)\n\n model = cls(cf, vocabs)\n if torch.cuda.is_available():\n model = model.cuda()\n model.load_state_dict(torch.load(path_model_checkpoint))\n else:\n model.load_state_dict(torch.load(path_model_checkpoint, map_location=lambda storage, loc: storage))\n return model\n\n def save(self, path_save_model, name_model):\n checkpoint_path = os.path.join(path_save_model, name_model)\n torch.save(self.state_dict(), checkpoint_path)\n","repo_name":"trangtv57/QAZalo","sub_path":"module_train/basic_model/model_architecture_dl/sub_layer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9798,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"37"} +{"seq_id":"9156190696","text":"from django.test import testcases\nimport pandas\nfrom pandas._testing import assert_frame_equal\n\nfrom product.test_helpers import create_test_product\n\nfrom 
claim_ai.evaluation.input_models.stored_input_model import ClaimBundleEvaluationAiInputModel\nfrom claim_ai.rest_api.claim_evaluation.claim_bundle_evaluation_manager import ClaimBundleEvaluationManager\nfrom core import datetime\nfrom core.forms import User\nfrom core.services import create_or_update_interactive_user, create_or_update_core_user\n\nfrom insuree.test_helpers import create_test_insuree\nfrom location.models import HealthFacility\nfrom medical.test_helpers import create_test_item, create_test_service\n\nfrom api_fhir_r4.tests import GenericTestMixin, LocationTestMixin\nfrom api_fhir_r4.utils import TimeUtils, DbManagerUtils\nfrom claim.models import Claim, ClaimItem, ClaimService\nfrom medical.models import Diagnosis\nfrom claim.test_helpers import create_test_claim_admin\n\n\nclass TestAiInputConverter(testcases.TestCase):\n _TEST_CODE = 'codeTest'\n _TEST_STATUS = Claim.STATUS_ENTERED\n _TEST_STATUS_DISPLAY = \"entered\"\n _TEST_OUTCOME = \"queued\"\n _TEST_ADJUSTMENT = \"adjustment\"\n _TEST_DATE_PROCESSED = \"2010-11-16T00:00:00\"\n _TEST_APPROVED = 1000.00\n _TEST_REJECTION_REASON = 0\n _TEST_VISIT_TYPE = \"O\"\n\n # claim item data\n _TEST_ITEM_CODE = \"iCode\"\n _TEST_ITEM_UUID = \"AAAA76E2-DC28-4B48-8E29-3AC4ABEC0000\"\n _TEST_ITEM_STATUS = Claim.STATUS_ENTERED\n _TEST_ITEM_QUANTITY = 20\n _TEST_ITEM_PRICE = 10.0\n _TEST_ITEM_REJECTED_REASON = 0\n\n # claim service data\n _TEST_SERVICE_CODE = \"sCode\"\n _TEST_SERVICE_UUID = \"AAAA29BA-3F4E-4E6F-B55C-23A488A10000\"\n _TEST_SERVICE_STATUS = Claim.STATUS_ENTERED\n _TEST_SERVICE_QUANTITY = 1\n _TEST_SERVICE_PRICE = 800\n _TEST_SERVICE_REJECTED_REASON = 0\n\n _TEST_ID = 9999\n _TEST_HISTORICAL_UUID = \"AE580700-0277-4C98-ADAB-D98C0F700000\"\n _TEST_HISTORICAL_ID = 9998\n _PRICE_ASKED_ITEM = 1000.0\n _PRICE_ASKED_SERVICE = 820.0\n _PRICE_APPROVED = 1000\n _ADMIN_AUDIT_USER_ID = -1\n\n _TEST_UUID = \"AE580700-0277-4C98-ADAB-D98C0F7E681B\"\n _TEST_ITEM_AVAILABILITY = True\n\n _TEST_ITEM_TYPE = 'D'\n _TEST_SERVICE_TYPE = 'D'\n\n # insuree and claim admin data\n _TEST_PATIENT_UUID = \"76ACA309-F8CF-4890-8F2E-B416D78DE00B\"\n _TEST_PATIENT_ID = 9283\n _TEST_CLAIM_ADMIN_UUID = \"044C33D1-DBF3-4D6A-9924-3797B461E535\"\n _TEST_CLAIM_ADMIN_ID = 9282\n\n _PRICE_VALUATED = 1000.0\n # hf test data\n _TEST_HF_ID = 10000\n _TEST_HF_UUID = \"6D0EEA8C-62EB-11EA-94D6-C36229A16C2F\"\n _TEST_HF_CODE = \"12345678\"\n _TEST_HF_NAME = \"TEST_NAME\"\n _TEST_HF_LEVEL = \"H\"\n _TEST_HF_LEGAL_FORM = \"G\"\n _TEST_ADDRESS = \"TEST_ADDRESS\"\n _TEST_PHONE = \"133-996-476\"\n _TEST_FAX = \"1-408-999 8888\"\n _TEST_EMAIL = \"TEST@TEST.com\"\n\n _TEST_USER_NAME = \"TestUserTest2\"\n _TEST_USER_PASSWORD = \"TestPasswordTest2\"\n _TEST_DATA_USER = {\n \"username\": _TEST_USER_NAME,\n \"last_name\": _TEST_USER_NAME,\n \"password\": _TEST_USER_PASSWORD,\n \"other_names\": _TEST_USER_NAME,\n \"user_types\": \"INTERACTIVE\",\n \"language\": \"en\",\n \"roles\": [9],\n }\n\n _TEST_PRODUCT_CODE = \"Test0004\"\n\n def setUp(self):\n self._TEST_USER = self.get_or_create_user_api()\n self.item = create_test_item(\n self._TEST_ITEM_TYPE,\n custom_props={\"code\": self._TEST_ITEM_CODE, 'price': self._TEST_ITEM_PRICE}\n )\n self.item.uuid = self._TEST_ITEM_UUID\n self.item.save()\n self.service = create_test_service(\n self._TEST_SERVICE_TYPE,\n custom_props={\"code\": self._TEST_SERVICE_CODE, 'price': self._TEST_SERVICE_PRICE}\n )\n self.service.uuid = self._TEST_SERVICE_UUID\n self.service.save()\n\n self._TEST_HF = self._create_test_health_facility()
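\n # (comment added in editing, not in the original test) the product created\n # below, together with the item, service and health facility above, forms\n # the shared fixture set that every claim in these tests is attached to.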
\n self._TEST_PRODUCT = self._create_test_product()\n\n self._EXPECTED_DATAFRAME = self._create_expected_df()\n\n def get_or_create_user_api(self):\n user = DbManagerUtils.get_object_or_none(User, username=self._TEST_USER_NAME)\n if user is None:\n user = self.__create_user_interactive_core()\n return user\n\n def __create_user_interactive_core(self):\n i_user, i_user_created = create_or_update_interactive_user(\n user_id=None, data=self._TEST_DATA_USER, audit_user_id=999, connected=False\n )\n create_or_update_core_user(\n user_uuid=None, username=self._TEST_DATA_USER[\"username\"], i_user=i_user\n )\n return DbManagerUtils.get_object_or_none(User, username=self._TEST_USER_NAME)\n\n def test_dataframe_conversion(self):\n test_bundle_evaluation = self._create_test_idle_evaluation()\n actual_repr = ClaimBundleEvaluationAiInputModel(test_bundle_evaluation).to_representation()\n self._assert_df_repr(actual_repr)\n\n def _create_test_idle_evaluation(self):\n insuree = create_test_insuree()\n insuree.uuid = self._TEST_PATIENT_UUID\n insuree.id = self._TEST_PATIENT_ID\n insuree.save()\n historical_claim = self._create_test_claim(insuree, True)\n claim = self._create_test_claim(insuree)\n\n self._create_items_and_services(historical_claim, self._TEST_PRODUCT, self.item, self.service)\n item, service = self._create_items_and_services(claim, self._TEST_PRODUCT, self.item, self.service)\n claim_bundle_evaluation = ClaimBundleEvaluationManager(self._TEST_USER).create_idle_evaluation_bundle([claim])\n return claim_bundle_evaluation\n\n def _create_items_and_services(self, claim, imis_product, item, service):\n claim_item = self._create_test_claim_item(claim, item, imis_product)\n claim_service = self._create_test_claim_service(claim, service, imis_product)\n return claim_item, claim_service\n\n def _create_test_claim_item(self, claim, provided, product):\n item = ClaimItem()\n item.item = provided\n item.product = product\n item.claim = claim\n item.status = self._TEST_ITEM_STATUS\n item.qty_approved = self._TEST_ITEM_QUANTITY\n item.qty_provided = self._TEST_ITEM_QUANTITY\n item.rejection_reason = self._TEST_ITEM_REJECTED_REASON\n item.availability = self._TEST_ITEM_AVAILABILITY\n item.price_asked = self._PRICE_ASKED_ITEM\n item.price_approved = self._TEST_ITEM_PRICE\n item.audit_user_id = self._ADMIN_AUDIT_USER_ID\n item.price_valuated = self._PRICE_VALUATED\n item.save()\n return item\n\n def _create_test_claim_service(self, claim, provided, product):\n service = ClaimService()\n service.service = provided\n service.product = product\n service.claim = claim\n service.status = self._TEST_SERVICE_STATUS\n service.qty_approved = self._TEST_SERVICE_QUANTITY\n service.qty_provided = self._TEST_SERVICE_QUANTITY\n service.rejection_reason = self._TEST_SERVICE_REJECTED_REASON\n service.availability = self._TEST_ITEM_AVAILABILITY\n service.price_asked = self._PRICE_ASKED_SERVICE\n service.price_approved = self._TEST_SERVICE_PRICE\n service.audit_user_id = self._ADMIN_AUDIT_USER_ID\n service.price_valuated = self._PRICE_VALUATED\n service.save()\n return service\n\n def _create_test_claim(self, insuree, historical=False):\n imis_claim = Claim()\n if not historical:\n imis_claim.id = self._TEST_ID\n imis_claim.uuid = self._TEST_UUID\n else:\n imis_claim.id = self._TEST_HISTORICAL_ID\n imis_claim.uuid = self._TEST_HISTORICAL_UUID\n imis_claim.code = self._TEST_CODE\n imis_claim.status = 
self._TEST_STATUS\n imis_claim.adjustment = self._TEST_ADJUSTMENT\n imis_claim.date_processed = TimeUtils.str_to_date(self._TEST_DATE_PROCESSED)\n imis_claim.approved = self._TEST_APPROVED\n imis_claim.rejection_reason = self._TEST_REJECTION_REASON\n imis_claim.insuree = insuree\n imis_claim.health_facility = self._TEST_HF\n\n if not historical:\n imis_claim.icd = Diagnosis(code='ICD00I')\n imis_claim.icd.audit_user_id = self._ADMIN_AUDIT_USER_ID\n imis_claim.icd.save()\n else:\n imis_claim.icd = Diagnosis(code='ICD00V')\n imis_claim.icd.audit_user_id = self._ADMIN_AUDIT_USER_ID\n imis_claim.icd.save()\n imis_claim.audit_user_id = self._ADMIN_AUDIT_USER_ID\n imis_claim.icd.date_from = datetime.date(2018, 12, 12)\n imis_claim.date_from = datetime.date(2018, 12, 12)\n imis_claim.date_claimed = datetime.date(2018, 12, 14)\n imis_claim.visit_type = self._TEST_VISIT_TYPE\n claim_admin = create_test_claim_admin()\n claim_admin.uuid = self._TEST_CLAIM_ADMIN_UUID\n claim_admin.id = self._TEST_CLAIM_ADMIN_ID\n claim_admin.health_facility = self._TEST_HF\n claim_admin.save()\n imis_claim.admin = claim_admin\n imis_claim.save()\n return imis_claim\n\n def _create_test_health_facility(self):\n location = LocationTestMixin().create_test_imis_instance()\n location.save()\n hf = HealthFacility()\n hf.id = self._TEST_HF_ID\n hf.uuid = self._TEST_HF_UUID\n hf.code = self._TEST_HF_CODE\n hf.name = self._TEST_HF_NAME\n hf.level = self._TEST_HF_LEVEL\n hf.legal_form_id = self._TEST_HF_LEGAL_FORM\n hf.address = self._TEST_ADDRESS\n hf.phone = self._TEST_PHONE\n hf.fax = self._TEST_FAX\n hf.email = self._TEST_EMAIL\n hf.location_id = location.id\n hf.offline = False\n hf.audit_user_id = -1\n hf.save()\n return hf\n\n def _assert_df_repr(self, actual_repr):\n claim_provisions = len(set(actual_repr.pop('ProvisionID')))\n self.assertEqual(claim_provisions, 4, \"2 relevant and 2 historical unique items/services should be provided\")\n assert_frame_equal(actual_repr, self._EXPECTED_DATAFRAME)\n\n def _create_expected_df(self):\n return pandas.DataFrame([\n {\n 'ProvisionType': 'Medication',\n 'ItemUUID': self.item.uuid,\n 'HFUUID': \"6D0EEA8C-62EB-11EA-94D6-C36229A16C2F\",\n 'LocationId': self._TEST_HF.location.id, # Is this HF location or insuree location?\n 'ICDCode': 'ICD00I',\n 'ICD1Code': None,\n 'ProdID': self._TEST_PRODUCT.id,\n 'DOB': datetime.date(1970, 1, 1),\n 'Gender': 'M', # Should it be code or \"Gender Object\" used in ORM?\n 'Poverty': None,\n 'QuantityProvided': self._TEST_ITEM_QUANTITY,\n 'ItemPrice': self._TEST_ITEM_PRICE,\n 'PriceAsked': self._PRICE_ASKED_ITEM,\n 'DateFrom': datetime.date(2018, 12, 12),\n 'DateTo': datetime.date(2018, 12, 12),\n 'DateClaimed': datetime.date(2018, 12, 14),\n 'ItemFrequency': None,\n 'ItemPatCat': 15,\n 'ItemLevel': 'M',\n 'HFLevel': 'H',\n 'HFCareType': ' ',\n 'VisitType': 'O',\n 'RejectionReason': 0,\n 'PriceValuated': self._PRICE_VALUATED,\n 'HfUUID': self._TEST_HF.uuid,\n 'ClaimAdminUUID': self._TEST_CLAIM_ADMIN_UUID,\n 'InsureeUUID': self._TEST_PATIENT_UUID,\n 'ClaimUUID': self._TEST_UUID,\n 'New': 'new'\n }, {\n 'ProvisionType': 'ActivityDefinition',\n 'ItemUUID': self.service.uuid,\n 'HFUUID': \"6D0EEA8C-62EB-11EA-94D6-C36229A16C2F\",\n 'LocationId': self._TEST_HF.location.id,\n 'ICDCode': 'ICD00I',\n 'ICD1Code': None,\n 'ProdID': self._TEST_PRODUCT.id,\n 'DOB': datetime.date(1970, 1, 1),\n 'Gender': 'M',\n 'Poverty': None,\n 'QuantityProvided': self._TEST_SERVICE_QUANTITY,\n 'ItemPrice': self._TEST_SERVICE_PRICE,\n 'PriceAsked': self._PRICE_ASKED_SERVICE,\n 
'DateFrom': datetime.date(2018, 12, 12),\n 'DateTo': datetime.date(2018, 12, 12),\n 'DateClaimed': datetime.date(2018, 12, 14),\n 'ItemFrequency': None,\n 'ItemPatCat': 15,\n 'ItemLevel': '1',\n 'HFLevel': 'H',\n 'HFCareType': ' ',\n 'VisitType': 'O',\n 'RejectionReason': 0,\n 'PriceValuated': self._PRICE_VALUATED,\n 'HfUUID': self._TEST_HF.uuid,\n 'ClaimAdminUUID': self._TEST_CLAIM_ADMIN_UUID,\n 'InsureeUUID': self._TEST_PATIENT_UUID,\n 'ClaimUUID': self._TEST_UUID,\n 'New': 'new'\n }, {\n 'ProvisionType': 'Medication',\n 'ItemUUID': self.item.uuid,\n 'HFUUID': \"6D0EEA8C-62EB-11EA-94D6-C36229A16C2F\",\n 'LocationId': self._TEST_HF.location.id,\n 'ICDCode': 'ICD00V',\n 'ICD1Code': None,\n 'ProdID': self._TEST_PRODUCT.id,\n 'DOB': datetime.date(1970, 1, 1),\n 'Gender': 'M',\n 'Poverty': None,\n 'QuantityProvided': self._TEST_ITEM_QUANTITY,\n 'ItemPrice': self._TEST_ITEM_PRICE,\n 'PriceAsked': self._PRICE_ASKED_ITEM,\n 'DateFrom': datetime.date(2018, 12, 12),\n 'DateTo': datetime.date(2018, 12, 12),\n 'DateClaimed': datetime.date(2018, 12, 14),\n 'ItemFrequency': None,\n 'ItemPatCat': 15,\n 'ItemLevel': 'M',\n 'HFLevel': 'H',\n 'HFCareType': ' ',\n 'VisitType': 'O',\n 'RejectionReason': 0,\n 'PriceValuated': self._PRICE_VALUATED,\n 'HfUUID': self._TEST_HF.uuid,\n 'ClaimAdminUUID': self._TEST_CLAIM_ADMIN_UUID,\n 'InsureeUUID': self._TEST_PATIENT_UUID,\n 'ClaimUUID': self._TEST_HISTORICAL_UUID,\n 'New': 'old'\n }, {\n 'ProvisionType': 'ActivityDefinition',\n 'ItemUUID': self.service.uuid,\n 'HFUUID': \"6D0EEA8C-62EB-11EA-94D6-C36229A16C2F\",\n 'LocationId': self._TEST_HF.location.id,\n 'ICDCode': 'ICD00V',\n 'ICD1Code': None,\n 'ProdID': self._TEST_PRODUCT.id,\n 'DOB': datetime.date(1970, 1, 1),\n 'Gender': 'M',\n 'Poverty': None,\n 'QuantityProvided': self._TEST_SERVICE_QUANTITY,\n 'ItemPrice': self._TEST_SERVICE_PRICE,\n 'PriceAsked': self._PRICE_ASKED_SERVICE,\n 'DateFrom': datetime.date(2018, 12, 12),\n 'DateTo': datetime.date(2018, 12, 12),\n 'DateClaimed': datetime.date(2018, 12, 14),\n 'ItemFrequency': None,\n 'ItemPatCat': 15,\n 'ItemLevel': '1',\n 'HFLevel': 'H',\n 'HFCareType': ' ',\n 'VisitType': 'O',\n 'RejectionReason': 0,\n 'PriceValuated': self._PRICE_VALUATED,\n 'HfUUID': self._TEST_HF.uuid,\n 'ClaimAdminUUID': self._TEST_CLAIM_ADMIN_UUID,\n 'InsureeUUID': self._TEST_PATIENT_UUID,\n 'ClaimUUID': self._TEST_HISTORICAL_UUID,\n 'New': 'old'\n }\n ])\n\n def _create_test_product(self):\n imis_product = create_test_product(self._TEST_PRODUCT_CODE, valid=True, custom_props=None)\n imis_product.save()\n return imis_product\n","repo_name":"openimis/openimis-be-claim_ai_py","sub_path":"claim_ai/tests/r4_fhir_resources/test_ai_input_converter.py","file_name":"test_ai_input_converter.py","file_ext":"py","file_size_in_byte":15976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22197453108","text":"'''\nA utility file which abstracts out the ResultLogger\n(used for logging) and also the iter_data function\n(uses the tqdm module to return minibatches of data)\n'''\n\nimport os\nimport sys\nimport json\nimport time\nfrom functools import partial\nimport numpy as np\nfrom tqdm import tqdm\n\n# creates dirs if they don't exist\ndef make_path(f):\n d = os.path.dirname(f)\n if d and not os.path.exists(d):\n os.makedirs(d)\n return f\n\n# iterates through the batch and gives out minibatches\n# of the specified data\n# also indicates progress\ndef iter_data(*datas, n_batch=128, truncate=False, \n verbose=False, 
max_batches=float(\"inf\")):\n    n = len(datas[0])\n    if truncate:\n        n = (n//n_batch)*n_batch\n    n = min(n, max_batches*n_batch)\n    n_batches = 0\n    if verbose:\n        f = sys.stderr\n    else:\n        f = open(os.devnull, 'w')\n    # the tqdm module is used for this, indicates progress as well\n    for i in tqdm(range(0, n, n_batch), total=n//n_batch, \n        file=f, ncols=80, leave=False):\n        if n_batches >= max_batches: return  # PEP 479: raising StopIteration inside a generator is an error\n        if len(datas) == 1:\n            yield datas[0][i:i+n_batch]\n        else:\n            yield (d[i:i+n_batch] for d in datas)\n        n_batches += 1\n\n\nclass ResultLogger(object):\n    # originally used to log data after every x updates\n    # now just stores the initial model\n    def __init__(self, path, *args, **kwargs):\n        if 'time' not in kwargs:\n            kwargs['time'] = time.time()\n        self.f_log = open(make_path(path), 'w')\n        self.f_log.write(json.dumps(kwargs)+'\\n')\n\n    def log(self, **kwargs):\n        if 'time' not in kwargs:\n            kwargs['time'] = time.time()\n        self.f_log.write(json.dumps(kwargs)+'\\n')\n        self.f_log.flush()\n\n    def close(self):\n        self.f_log.close()\n","repo_name":"Ras-al-Ghul/code2vec_transformer","sub_path":"Transformer_Code_Attention/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70550505708","text":"import heapq\nfrom itertools import product\nfrom pathlib import Path\nimport sys\n\nfilename = \"15.txt\"\npath = Path(__file__).parent.joinpath(filename)\n\n\nclass Point:\n    def __init__(self, row: int, col: int, risk: int):\n        self.position = (row, col)\n        self.risk = risk % 9 or 9\n        self.distance: int = sys.maxsize\n        self.visited: bool = False\n\n    def __repr__(self):\n        return f\"{self.risk} ({self.distance})\"\n\n    def __eq__(self, other) -> bool:\n        return self.position == other.position\n\n    def __lt__(self, other) -> bool:\n        return (\n            self.distance < other.distance\n            if self.distance != other.distance\n            else self.position < other.position\n        )\n\n\nGrid = list[list[Point]]\n\n\ndef __get_grid(lines: list[str], multiplier: int):\n    def get_inner_grid(add_amount: int):\n        return [[int(val) + add_amount for val in line] for line in lines]\n\n    grids: list[list[list[int]]] = [[] for _ in range(multiplier)]\n    for vertical_count in range(multiplier):\n        for horizontal_count in range(multiplier):\n            grids[vertical_count].append(\n                get_inner_grid(vertical_count + horizontal_count)\n            )\n\n    line_count = len(lines)\n    value_grid: list[list[int]] = [[] for _ in range(len(lines) * multiplier)]\n\n    for (grid_row_index, grid_row) in enumerate(grids):\n        for inner_grid in grid_row:\n            for (r, row) in enumerate(inner_grid):\n                value_grid[r + (line_count * grid_row_index)].extend(row)\n\n    grid = [\n        [Point(r, c, value) for (c, value) in enumerate(row)]\n        for (r, row) in enumerate(value_grid)\n    ]\n    grid[0][0].distance = 0\n\n    return grid\n\n\ndef __visit(grid: Grid, position: tuple[int, int], allow_diagonal: bool):\n    (row, col) = position\n    current_point = grid[row][col]\n    if current_point.visited:\n        return []\n\n    neighbors = [\n        grid[row + r][col + c]\n        for (r, c) in product(range(-1, 2), range(-1, 2))\n        if (r, c) != (0, 0)\n        and (allow_diagonal or abs(r) + abs(c) < 2)\n        and row + r > -1\n        and col + c > -1\n        and row + r < len(grid)\n        and col + c < len(grid[r])\n        and (not grid[row + r][col + c].visited)\n    ]\n\n    for neighbor in neighbors:\n        neighbor.distance = min(\n            neighbor.distance, current_point.distance + neighbor.risk\n        )\n\n    current_point.visited = True\n    return neighbors\n\n\n# Thank you Dijkstra\n# https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
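\n# (annotation added in editing; not in the original source) heapq keeps the\n# frontier ordered through Point.__lt__ defined above, so each heappop below\n# yields the unvisited point with the smallest tentative distance -- the\n# invariant Dijkstra's algorithm relies on.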
\ndef __get_shortest_path(grid: Grid):\n    to_visit = []\n    start = grid[0][0]\n    heapq.heappush(to_visit, start)\n\n    reached_end = False\n    while (not reached_end) and len(to_visit) > 0:\n        point = heapq.heappop(to_visit)\n        neighbors = __visit(grid, point.position, False)\n        for neighbor in neighbors:\n            if neighbor == grid[-1][-1]:\n                reached_end = True\n                break\n            heapq.heappush(to_visit, neighbor)\n\n    return grid[-1][-1].distance\n\n\ndef __get_part_one(lines: list[str]):\n    return __get_shortest_path(__get_grid(lines, 1))\n\n\ndef __get_part_two(lines: list[str]):\n    return __get_shortest_path(__get_grid(lines, 5))\n\n\ndef run():\n    with open(path) as file:\n        lines = [line.strip() for line in file.readlines() if line]\n\n    part_one = __get_part_one(lines)\n    part_two = __get_part_two(lines)\n\n    print(f\"Part one: {part_one}\")\n    print(f\"Part two: {part_two}\")\n\n\nif __name__ == \"__main__\":\n    run()\n","repo_name":"HolyMeekrob/Advent-of-Code","sub_path":"2021/15/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22029785476","text":"import pygame, math, time, sys\nimport joystick, solver, pose, vector3\nfrom operator import itemgetter\nfrom vector3 import col as col\nimport numpy as np\n\nclass Simulation:\n\tdef __init__(self, win_width = 640, win_height = 480):\n\t\tpygame.init()\n\t\tself.screen = pygame.display.set_mode((win_width, win_height))\n\t\tpygame.display.set_caption(\"JoySix Cube Demo\")\n\t\tself.clock = pygame.time.Clock()\n\t\tself.vertices = [\n\t\t\t[-1,1,-1],\n\t\t\t[1,1,-1],\n\t\t\t[1,-1,-1],\n\t\t\t[-1,-1,-1],\n\t\t\t[-1,1,1],\n\t\t\t[1,1,1],\n\t\t\t[1,-1,1],\n\t\t\t[-1,-1,1]\n\t\t]\n\t\t# Define the vertices that compose each of the 6 faces. 
These numbers are\n\t\t# indices to the vertices list defined above.\n\t\tself.faces = [(0,1,2,3),(1,5,6,2),(5,4,7,6),(4,0,3,7),(0,4,5,1),(3,2,6,7)]\n\t\t# Define colors for each face\n\t\tself.colors = [(255,0,255),(255,0,0),(0,255,0),(0,0,255),(0,255,255),(255,255,0)]\n\t\tself.angle = 0\n\t\tself.joystick = joystick.Joystick()\n\n\t\n\tdef run(self):\n\n\t\twhile 1:\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tpygame.quit()\n\t\t\t\t\tsys.exit()\n\n\t\t\t#40 fps max\n\t\t\ttime_passed = self.clock.tick(40)\n\n\t\t\tself.screen.fill((0,32,0))\n\t\t\n\t\t\t#events from joystick\n\t\t\tP = pose.exp(col([-math.pi / 2,0,0,0,0,0])) * self.joystick.getPose()\n\n\t\t\t# It will hold transformed vertices.\n\t\t\tt = []\n\t\t\t\n\t\t\tfor v in self.vertices:\n\t\t\t\tv_c = col(v)\n\t\t\t\tp = 0.01 * (P * (100 * v_c))\t\t\t\t\n\t\t\t\t# Transform the point from 3D to 2D\n\t\t\t\tfactor = 256 / (4 + p[2,0])\n\t\t\t\tx = p[0,0] * factor + self.screen.get_width() / 2\n\t\t\t\ty = -p[1,0] * factor + self.screen.get_height() / 2\n\t\t\t\tz = p[2,0]\n\t\t\t\t# Put the point in the list of transformed vertices\n\t\t\t\tt.append([x,y,z])\n\n\t\t\t# Calculate the average Z values of each face.\n\t\t\tavg_z = []\n\t\t\ti = 0\n\t\t\tfor f in self.faces:\n\t\t\t\tz = (t[f[0]][2] + t[f[1]][2] + t[f[2]][2] + t[f[3]][2]) / 4.0\n\t\t\t\tavg_z.append([i,z])\n\t\t\t\ti = i + 1\n\n\t\t\t# Draw the faces using the Painter's algorithm:\n\t\t\t# Distant faces are drawn before the closer ones.\n\t\t\tfor tmp in sorted(avg_z,key=itemgetter(1),reverse=True):\n\t\t\t\tface_index = tmp[0]\n\t\t\t\tf = self.faces[face_index]\n\t\t\t\tpointlist = [(t[f[0]][0], t[f[0]][1]), (t[f[1]][0], t[f[1]][1]),\n\t\t\t\t\t\t\t (t[f[1]][0], t[f[1]][1]), (t[f[2]][0], t[f[2]][1]),\n\t\t\t\t\t\t\t (t[f[2]][0], t[f[2]][1]), (t[f[3]][0], t[f[3]][1]),\n\t\t\t\t\t\t\t (t[f[3]][0], t[f[3]][1]), (t[f[0]][0], t[f[0]][1])]\n\t\t\t\tpygame.draw.polygon(self.screen,self.colors[face_index],pointlist)\n\n\t\t\t\t\n\t\t\tself.angle += 1\n\t\t\t\n\t\t\tpygame.display.flip()\n\n\nif __name__ == '__main__':\n\ts = Simulation()\n\ts.run()\n","repo_name":"niberger/joysix","sub_path":"driver/demo_3d.py","file_name":"demo_3d.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"37"} +{"seq_id":"25364328399","text":"\"\"\"\n\n1. I will compare the OPTICS, k-mean, DBSCAN and Hierarchical agglomerative clustering algorithms\n2. I will use the optimal parameters for each algorithm:\nkmean (k=15; this is the optimal value according to the silhouette score and the elbow curve)\nDBSCAN (min_samples=31, eps=0.25)\nOPTICS (min_samples=31, as for DBSCAN)\nHierarchical agglomerative (hc = AgglomerativeClustering(n_clusters = 5, affinity = 'euclidean', linkage ='ward'), according to the dendrogram)\n\n3. my performance measure is the dissimilarity measure:\nI need to compute the labels, then the centroid for each class, then compute the dissimilarity and similarity measures\n\"\"\"
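\n\n# --- Illustrative sketch added in editing (NOT part of the original script;\n# all names and values are hypothetical): the dissimilarity measure described\n# above on a tiny hand-made case, where each observation is compared against\n# every centroid except the one it belongs to.\ndef _demo_dissimilarity():\n    import numpy as np\n    observations = np.array([[0.0, 0.0], [10.0, 10.0]])\n    labels = [0, 1]  # cluster label of each observation\n    centroids = np.array([[0.0, 0.0], [10.0, 10.0]])  # one centroid per cluster\n    for x, own in zip(observations, labels):\n        # sum the distances to every \"other\" centroid only\n        d = sum(np.linalg.norm(c - x) for k, c in enumerate(centroids) if k != own)\n        print(d)  # ~14.142 for both points, i.e. sqrt(10**2 + 10**2)\n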
\n# #### output\n# ------------------ Hierarchical Agglomerative -----------------------------------\n# The av of dissimilarity for samples using Hierarchical is 92.49760881627664\n# ------------------ Optics -----------------------------------\n# The av of dissimilarity for samples using OPTICS is 505.0906716983576\n# ------------------ DBSCAN -----------------------------------\n# The av of dissimilarity for samples using DBSCAN is 300.215624263335\n# ------------------ k=15 -----------------------------------\n# The av of dissimilarity for samples using K-mean is 303.22792885004594\n\n\nfrom sklearn.cluster import KMeans\nfrom sklearn import preprocessing #we will normalize our data\nimport pdb\nimport pandas as pd\nimport numpy as np\nimport re\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport scipy.linalg as sp\nfrom sklearn.model_selection import train_test_split\nimport seaborn as sns\nfrom DisSimilarity import dissimilarities\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.metrics import silhouette_score, silhouette_samples #find the optimal k using silhouette score\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.cluster import OPTICS\n\n#compute the dissimilarities between each observation and all the centroides except the centroide of that observation\ndef dis_similarity(observations, centroides):\n    dissimilarities=[]\n    for observation in observations:\n        dist=0\n        for c in centroides:\n            if observation[-1]==c[-1]: #same centroide: do not compute how far the observation is from its own centroide\n                pass\n            else:\n                dist +=np.linalg.norm(c[0:-1] - observation[0:-1])\n        dissimilarities.append(dist)\n    return (dissimilarities)\n\nurl='https://raw.githubusercontent.com/becodeorg/BXL-Bouman-2.22/master/content/04.machine_learning/3.Clustering/assets/chipotle_stores.csv?token=AHYQ2CVQ7AR2USNRZD42OUS74XIIY'\ndf=pd.read_csv(url)\n\n\n\"\"\"\nHierarchical agglomerative\n\"\"\"\ndf_hier=df[['latitude','longitude']].copy()\nhc = AgglomerativeClustering(n_clusters = 5, affinity = 'euclidean', linkage ='ward').fit(df_hier.values)\ndf_hier['label_hier']=hc.labels_\n\n#compute the centroide\nlat=df_hier.groupby('label_hier')['latitude'].mean().reset_index(name='centroide_latitude')\nlon=df_hier.groupby('label_hier')['longitude'].mean().reset_index(name='centroide_longitude')\n#store the centroides in a df\ncentroide_hier=pd.DataFrame({\"cen_latitude\": lat[\"centroide_latitude\"],\"cen_longitude\": lon[\"centroide_longitude\"], \"label_hier\": lat[\"label_hier\"],})\n#create a matrix\ncentroide_hier=centroide_hier.values\nobservation_hier=df_hier.values\n#get the dissimilarity measure\ndf_hier['Dissimilarities']=dis_similarity(observation_hier, centroide_hier)\n\nprint(\"------------------ Hierarchical Agglomerative -----------------------------------\")\n#get the average dissimilarity for each class\nav_dis=df_hier['Dissimilarities'].mean()\nprint(f\"The av of dissimilarity for samples using Hierarchical is {av_dis}\")\n\n\n\n\"\"\"\nOPTICS\n\"\"\"\ndf_optics=df[['latitude','longitude']].copy()\noptics=OPTICS(min_samples=31).fit(df_optics.values)\ndf_optics['label_optics']=optics.labels_\n\n#compute the 
centroide\nlat=df_optics.groupby('label_optics')['latitude'].mean().reset_index(name='centroide_latitude')\nlon=df_optics.groupby('label_optics')['longitude'].mean().reset_index(name='centroide_longitude')\n#store the centroides in a df\ncentroide_optics=pd.DataFrame({\"cen_latitude\": lat[\"centroide_latitude\"],\"cen_longitude\": lon[\"centroide_longitude\"], \"label_optics\": lat[\"label_optics\"],})\n#create a matrix\ncentroide_optics=centroide_optics.values\nobservation_dbscan=df_optics.values\n#get the dissimilarity measure\ndf_optics['Dissimilarities']=dis_similarity(observation_dbscan, centroide_optics)\n\nprint(\"------------------ Optics -----------------------------------\")\n#get the average dissimilarity for each class\nav_dis=df_optics['Dissimilarities'].mean()\nprint(f\"The av of dissimilarity for samples using OPTICS is {av_dis}\")\n\n\n\"\"\"\nDBSCAN\n\"\"\"\ndf_dbscan=df[['latitude','longitude']].copy()\ndb=DBSCAN(eps=0.25, min_samples=31).fit(df_dbscan.values)\nlabels_dbscan=db.labels_\ndf_dbscan['label_dbscan']=labels_dbscan\n\n#compute the centroide\nlat=df_dbscan.groupby('label_dbscan')['latitude'].mean().reset_index(name='centroide_latitude')\nlon=df_dbscan.groupby('label_dbscan')['longitude'].mean().reset_index(name='centroide_longitude')\n#store the centroides in a df\ncentroide_dbscan=pd.DataFrame({\"cen_latitude\": lat[\"centroide_latitude\"],\"cen_longitude\": lon[\"centroide_longitude\"], \"label_dbscan\": lat[\"label_dbscan\"],})\n#create a matrix\ncentroide_dbscan=centroide_dbscan.values\nobservation_dbscan=df_dbscan.values\n#get the dissimilarity measure\ndf_dbscan['Dissimilarities']=dis_similarity(observation_dbscan, centroide_dbscan)\n\nprint(\"------------------ DBSCAN -----------------------------------\")\n#get the average dissimilarity for each class\nav_dis=df_dbscan['Dissimilarities'].mean()\nprint(f\"The av of dissimilarity for samples using DBSCAN is {av_dis}\")\n\n\n\n\"\"\"\nk=15, according to elbow curve, the best k=15\nthis is the optimal value according to elbow curve\n\"\"\"\ndf_kmean_15=df[['latitude','longitude']].copy()\n\n# number of pokemon clusters\nstate_size_optimal = 15#'k-means++' is to initiate the centrodic in a smart way, to get faster convergence\nkmeans_15=KMeans(n_clusters=state_size_optimal, init='k-means++').fit(df_kmean_15.values)\n# add cluster index to dataframe, this will e your observation\ndf_kmean_15['cluster']=kmeans_15.labels_\n#compute the dissimilarities using Euclidean distance and create a new column in the df\nall_observation=df_kmean_15.values\n# clusters is an attribute of the object\ncluster_centers = kmeans_15.cluster_centers_\n#compute the dissimilarities\ndiss=dissimilarities(all_observation, cluster_centers, distance_type='Eculidean')\n\n#add the dis-similarity\ndf_kmean_15[\"Dissimilarities\"]=diss\n\n\"\"\"\nNote: I will compare the average similarity (distortion) and average dissimilarity to see if there is an improvement\n\"\"\"\nprint(\"------------------ k=15 -----------------------------------\")\n#get the average dissimilarity for each class\nav_dis=df_kmean_15[\"Dissimilarities\"].mean()\nprint(f\"The av of dissimilarity for samples using K-mean is 
{av_dis}\")\n","repo_name":"SabaYahyaa/Task3-Chipotle_clustering_challeng_individuale","sub_path":"compare_kmean_optics_dbscan_agglomerativeHierarchical.py","file_name":"compare_kmean_optics_dbscan_agglomerativeHierarchical.py","file_ext":"py","file_size_in_byte":6846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23439130365","text":"import dart_fss as dart\n\napi_key='998bebe5acc23a9e8321f35d52181529440718e8'\ndart.set_api_key(api_key=api_key)\n\n# Load the list of companies disclosed on DART\ncorp_list = dart.get_corp_list()\n\n# Search for Samsung Electronics\nfilename = '삼성전자'\nsamsung = corp_list.find_by_corp_name(filename, exactly=True)[0]\n\n# Load the annual & quarterly consolidated financial statements from 2012 onward\nfs = samsung.extract_fs(bgn_de='20120101', report_tp='quarter')\n\n# Save the financial statements in bulk (default: <run folder>/fsdata/{corp_code}_{report_tp}.xlsx)\n#fs.save()\n\n# Save the financial statements in bulk\npath = '/User/klee30810/Documents/Career/Finance/halto/fsdata'\nfs.save(filename=filename, path=path)\n","repo_name":"klee30810/FinanceHub","sub_path":"fs_crawling.py","file_name":"fs_crawling.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28894175833","text":"from django.shortcuts import render\nfrom rango.models import Category\nfrom rango.models import Page\nfrom rango.forms import CategoryForm\n\n\n# Create your views here.\n\nfrom django.http import HttpResponse\n\n#def index(request):\n#\treturn HttpResponse(\"Rango says hey there partner, like, pet!
About\")\n\t\ndef about(request):\n\tcontext_dict = {'boldaboutmessage': \"this is the boldaboutmessage context message\"}\n\treturn render(request, 'rango/about.html', context=context_dict)\n\t#return HttpResponse(\"Rango says here is the About page
Index \")\n\n#def index(request):\n\t# Construct a dictionary to pass to the template engine as its context.\n\t# Note the key boldmessage is the same as {{ boldmessage }} in the template!\n\t#context_dict = {'boldmessage': \"Crunchy, creamy, cookie, candy, cupcake!\"}\n\t# Return a rendered response to send to the client.\n\t# We make use of the shortcut function to make our lives easier.\n\t# Note that the first parameter is the template we wish to use.\n\t#return render(request, 'rango/index.html', context=context_dict)\n\ndef index(request):\n\t# Query the database for a list of ALL categories currently stored.\n\t# Order the categories by no. likes in descending order.\n\t# Retrieve the top 5 only - or all if less than 5.\n\t# Place the list in our context_dict dictionary\n\t# that will be passed to the template engine.\n\tcategory_list = Category.objects.order_by('-likes')[:5]\n\tpage_list = Page.objects.order_by('-views')[:5]\n\tcontext_dict = {'categories': category_list, 'pages': page_list}\t\n\t# Render the response and send it back!\n\treturn render(request, 'rango/index.html', context_dict)\n\ndef show_category(request, category_name_slug):\n\tcontext_dict = {}\n\ttry:\n\t\tcategory = Category.objects.get(slug=category_name_slug)\n\t\tpages = Page.objects.filter(category=category)\n\t\tcontext_dict['pages'] = pages\n\t\tcontext_dict['category'] = category\n\texcept Category.DoesNotExist:\n\t\tcontext_dict['category'] = None\n\t\tcontext_dict['pages'] = None\n\treturn render(request, 'rango/category.html', context_dict)\n\t\ndef add_category(request):\n\tform = CategoryForm()\n\n\t# A HTTP POST?\n\tif request.method == 'POST':\n\t\tform = CategoryForm(request.POST)\n\n\t\t# Have we been provided with a valid form?\n\t\tif form.is_valid():\n\t\t\t# Save the new category to the database.\n\t\t\tcat=form.save(commit=True)\n\t\t\tprint(cat, cat.slug)\n\t\t\t# Now that the category is saved\n\t\t\t# We could give a confirmation message\n\t\t\t# But since the most recent category added is on the index page\n\t\t\t# Then we can direct the user back to the index page.\n\t\t\treturn index(request)\n\t\telse:\n\t\t\t# The supplied form contained errors -\n\t\t\t# just print them to the terminal.\n\t\t\tprint(form.errors)\n\t\t# Will handle the bad form, new form, or no form supplied cases.\n\t\t# Render the form with error messages (if any).\n\treturn render(request, 'rango/add_category.html', {'form': form})\n","repo_name":"shobberlyridge/rango.2","sub_path":"tango_with_django_project/rango/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17880003109","text":"nx=100\nx=np.linspace(0,5, nx)\noption = [\"blue\", \"bluegreen\", \"green\", \"gold\", \"brown\", \"rose\", \"purple\"]\n\nfig, ax = plt.subplots(nrows=4, ncols=2, figsize=(8,10))\np = 0\nny = 9\ny=np.zeros((ny,nx), dtype=int)\ncolor, linestyle = cb.Colorplots().huescale(ny)\nfor i in range(ny):\n y[i]=x+i\n ax[0,0].plot(x,y[i],color=color[i], ls=\"-\")\nny = 3\ny=np.zeros((ny,nx), dtype=int)\ncolor, linestyle = cb.Colorplots().huescale(ny, option[0])\nfor i in range(ny):\n y[i]=x+i\n ax[0,1].plot(x,y[i],color=color[i], ls=\"-\")\nax[0,0].set_title(\"ocherscale\", fontsize=10, loc=\"right\", font=\"monospace\", color=\"k\")\nax[0,1].set_title(f\"option = {option[0]}\", fontsize=10, loc=\"right\", font=\"monospace\", color=\"k\")\nax[0,0].set_axis_off()\nax[0,1].set_axis_off()\nfor p, (axi,axj) in 
zip(range(1,8,2),ax[1:,:]):\n ny = 3\n y=np.zeros((ny,nx), dtype=int)\n color, linestyle = cb.Colorplots().huescale(ny, option[p])\n for i in range(ny):\n y[i]=x+i\n axi.plot(x,y[i], color=color[i])\n color, linestyle = cb.Colorplots().huescale(ny, option[p+1])\n for i in range(ny):\n y[i]=x+i\n axj.plot(x,y[i], color=color[i])\n axi.set_title(f\"option = {option[p]}\", fontsize=10, loc=\"right\", font=\"monospace\", color=\"k\")\n axj.set_title(f\"option = {option[p+1]}\", fontsize=10, loc=\"right\", font=\"monospace\", color=\"k\")\n axi.set_axis_off()\n axj.set_axis_off()\nfig.tight_layout()\nplt.savefig(\"huescale.png\", dpi=100, facecolor='lightgray')\n","repo_name":"volodia99/cblind","sub_path":"imgs/huescale.py","file_name":"huescale.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"10368936851","text":"from tensorflow.keras.models import Model\n\nfrom core.models.vgg16 import vgg16\nfrom core.models.resnet import resnet, resnet_unet_101, resnet_101_flat, resnet_50_flat, \\\n\tresnet_unet_101_bos, resnet_unet_50, resnet_unet_50_bos\nfrom core.models.unet import unet\n\n\ndef backbone_factory(\n\tbackbone: str, embdim: int = 256, use_bos: bool = False, transfer: bool = True\n) -> Model:\n\tif use_bos:\n\t\tif backbone in (\"resnet_unet\", \"resnet_unet_101\"):\n\t\t\treturn resnet_unet_101_bos(embdim, transfer)\n\t\telif backbone == \"resnet_unet_50\":\n\t\t\treturn resnet_unet_50_bos(embdim, transfer)\n\telse:\n\t\tif backbone == \"vgg16\":\n\t\t\treturn vgg16(transfer)\n\t\telif backbone == \"resnet\":\n\t\t\treturn resnet(embdim)\n\t\telif backbone in (\"resnet_unet\", \"resnet_unet_101\"):\n\t\t\treturn resnet_unet_101(embdim, transfer)\n\t\telif backbone == \"resnet_unet_50\":\n\t\t\treturn resnet_unet_50(embdim, transfer)\n\t\telif backbone == \"unet\":\n\t\t\treturn unet(embdim)\n\t\telif backbone == \"resnet_flat_101\":\n\t\t\treturn resnet_101_flat(embdim, transfer)\n\t\telif backbone == \"resnet_flat_50\":\n\t\t\treturn resnet_50_flat(embdim, transfer)\n\traise ValueError(f\"Uknown backbone kind {backbone}\")\n","repo_name":"mskl/relative-layout-matching","sub_path":"src/transfer/core/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22477890985","text":"\"\"\"\nGiven two strings s and t, determine if they are isomorphic.\n\nTwo strings s and t are isomorphic if the characters in s can be replaced to get t.\n\nAll occurrences of a character must be replaced with another character while preserving the order of characters. 
No two characters may map to the same character, but a character may map to itself.\n\n \n\nExample 1:\n\nInput: s = \"egg\", t = \"add\"\nOutput: true\n\nExample 2:\n\nInput: s = \"foo\", t = \"bar\"\nOutput: false\n\nExample 3:\n\nInput: s = \"paper\", t = \"title\"\nOutput: true\n \n\nConstraints:\n\n1 <= s.length <= 5 * 104\nt.length == s.length\ns and t consist of any valid ascii character.\n\"\"\"\n\nclass Solution:\n \n def transformString(self, s: str) -> str:\n index_mapping = {}\n new_str = []\n \n for i, c in enumerate(s):\n if c not in index_mapping:\n index_mapping[c] = i\n new_str.append(str(index_mapping[c]))\n \n return \" \".join(new_str)\n \n def isIsomorphic(self, s: str, t: str) -> bool:\n return self.transformString(s) == self.transformString(t)\n","repo_name":"akashaw/Leetcode","sub_path":"205 Isomorphic Strings.py","file_name":"205 Isomorphic Strings.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31034436734","text":"from flask import request\nfrom flask_restplus import Namespace, Resource, fields, reqparse\nfrom pymysql.cursors import DictCursor\n\nfrom db_rest_service import app\nfrom db_rest_service.db_config import mysql_nagios\n\napi = Namespace('messages', description='Message related operations')\n\n\nclass AcknowledgeableMessage(fields.Raw):\n def format(self, value):\n print(\"XXXX\", value)\n return True if value is not None else False\n\nMessageModel = api.model('MessageModel',\n {'date_inserted': fields.DateTime(),\n 'message_text': fields.String(),\n 'can_acknowledge': AcknowledgeableMessage(default=False),\n 'id': fields.Integer()})\nMessageListModel = api.model('MessageListModel ', {\n 'alerts': fields.List(fields.Nested(MessageModel))\n})\n\nparser = reqparse.RequestParser()\nparser.add_argument('type', type=str, choices=['UNSENT', 'ALL'], default='UNSENT', help='types of alerts to return')\nparser.add_argument('limit', type=int, default=5, help='number of alerts to return')\n\n# insert message parser\ninsert_parser = reqparse.RequestParser()\ninsert_parser.add_argument('message_text', type=str, required=True, help='message text to send')\n\n\n@api.route('/')\nclass MessagesResource(Resource):\n @api.marshal_with(MessageListModel)\n @api.expect(parser)\n def get(self):\n args = parser.parse_args()\n if args['type'] == 'ALL':\n alerts_filter = '1=1'\n else:\n alerts_filter = \"status='{}'\".format(args['type'])\n\n limit_filter = 'LIMIT {}'.format(args['limit'])\n\n query = \"SELECT * FROM telegram_bot.nagios_alerts WHERE {} ORDER BY id ASC {}\".format(alerts_filter,\n limit_filter)\n app.logger.info(\"Query to execute is {}\".format(query))\n\n conn = mysql_nagios.get_db()\n cursor = conn.cursor(cursor=DictCursor)\n cursor.execute(query)\n return {'alerts': list(cursor)}\n\n @api.expect(insert_parser, validate=True)\n def post(self):\n \"\"\"\n Allows the addition of new messages\n :return:\n \"\"\"\n alert_data = insert_parser.parse_args()\n\n conn = mysql_nagios.get_db()\n cursor = conn.cursor()\n query = \"INSERT INTO `nagios_alerts`( `message_text`) VALUES (%s)\"\n cursor.execute(query, (alert_data['message_text'], ))\n conn.commit()\n app.logger.info(\"Inserted message successfully\")\n\n@api.route('/')\nclass MessageUpdate(Resource):\n @api.doc(params={'status': 'The new status of the message'})\n def post(self, message_id):\n \"\"\"\n Handles updating an alerts status\n :return: 200 if successful\n \"\"\"\n new_status = 
request.args['status'].upper()\n\n        query = \"UPDATE nagios_alerts SET status=%s, date_sent=NOW() where id=%s\"\n\n        conn = mysql_nagios.get_db()\n        cursor = conn.cursor()\n        cursor.execute(query, (new_status, message_id))\n        conn.commit()","repo_name":"avinash-oza/db-rest-service","sub_path":"db_rest_service/apis/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27361021093","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 22 09:41:54 2021\r\n\r\n@author: ACER\r\n\"\"\"\r\n\r\ngraf = {'A':set(['Aisya','C','E']),\r\n 'B':set(['E','D']),\r\n 'C':set(['A','B','D']),\r\n 'D':set(['B','F','K','I']),\r\n 'E':set(['A','B']),\r\n 'F':set(['D','G']),\r\n 'G':set(['H','F','I']),\r\n 'H':set(['G']),\r\n 'I':set(['G','J']),\r\n 'J':set(['I']),\r\n 'K':set(['D','L','Risa'])}\r\n \r\n\r\ndef bfs(graf, mulai, tujuan):\r\n    queue = [[mulai]]\r\n    visited = set()\r\n\r\n    while queue: \r\n        jalur = queue.pop(0)\r\n        state = jalur[-1]\r\n        if state == tujuan:\r\n            return jalur\r\n        elif state not in visited:\r\n            for cabang in graf.get(state, []): \r\n                jalur_baru = list(jalur) \r\n                jalur_baru.append(cabang) \r\n                queue.append(jalur_baru) \r\n\r\n            visited.add(state)\r\n\r\n    isi = len(queue)\r\n    if isi == 0:\r\n        print(\"Not found\")\r\n\r\ndef dfs(graf, mulai,tujuan):\r\n    stack = [[mulai]]\r\n    visited = set()\r\n\r\n    while stack: \r\n        jalur = stack.pop(-1)\r\n        state = jalur[-1]\r\n        if state == tujuan:\r\n            return jalur\r\n        elif state not in visited:\r\n            for cabang in graf.get(state, []): \r\n                jalur_baru = list(jalur) \r\n                jalur_baru.append(cabang) \r\n                stack.append(jalur_baru) \r\n\r\n            visited.add(state)\r\n\r\n    isi = len(stack)\r\n    if isi == 0:\r\n        print(\"Not found\")\r\n","repo_name":"MurniaLestari/Sistem-Pakar","sub_path":"Tugas2/sistempakar.py","file_name":"sistempakar.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72097345708","text":"import matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\n\nfrom l_pipeline import pipeline_normalizer_kmeans, timer\nimport time\nfrom h_readsqlite import SqliteCorpusReader\nfrom i_vectorizer import TextNormalizer, GensimVectorizer\nfrom k_clustering import KMeansClusters\nfrom sklearn.pipeline import Pipeline\nimport pandas as pd\n\nimport gensim\nimport os\nimport csv\n\nWIDTH = 800\nHEIGHT = 400\ndef wordcloud(word_matrix):\n    wordcloud = WordCloud(background_color=\"white\", max_words=50, width=WIDTH, \\\n        height=HEIGHT)\n    wc = wordcloud.generate_from_frequencies(word_matrix)\n    plt.figure(figsize=(8,6))\n    plt.axis(\"off\")\n    plt.imshow(wc, interpolation=\"bilinear\")\n    plt.tight_layout(pad=0)\n    plt.show()\n\ndef wordcloud_with_cluster(cluster_list, word_matrix, year):\n    cluster_set = sorted(set(cluster_list))\n    for i in cluster_set:\n        index_ = [a for a, x in enumerate(cluster_list) if x == i]\n        temp_wordmatrix = word_matrix.iloc[index_] * 100\n        temp_wordmatrix = temp_wordmatrix.sum(axis=0)/len(temp_wordmatrix)\n        wc = WordCloud(background_color=\"white\", max_words=50, width=800, \\\n            height=400).generate_from_frequencies(temp_wordmatrix)\n        plt.figure(figsize=(8,6))\n        plt.axis(\"off\")\n        plt.imshow(wc, interpolation=\"bilinear\")\n        plt.tight_layout(pad=0)\n        wc.to_file(\"other/{}_{}_cluster.png\".format(year, i))\n        # plt.show()\n    return\n
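\n# --- Illustrative usage added in editing (hypothetical, not in the original\n# file): generate_from_frequencies accepts a plain mapping of token -> weight,\n# which is what the cluster-averaged rows above provide.\ndef _demo_generate_from_frequencies():\n    freqs = {\"python\": 0.9, \"pandas\": 0.6, \"gensim\": 0.4}\n    return WordCloud(width=WIDTH, height=HEIGHT).generate_from_frequencies(freqs)\n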
\ndef pipeline_normalizer(path:str, lexicon_path:str, year:int) -> pd.DataFrame:\n    \"\"\" Perform Lemmatization and Vectorization.\n\n    Args:\n        path (str): SQL path\n        lexicon_path (str): Gensim Lexicon path\n        year (int): year \n\n    Returns:\n        pd.DataFrame: tfidf dataframe with document id as index\n    \"\"\"\n    corpus_reader = SqliteCorpusReader(path=path)\n    docs = corpus_reader.docs(year)\n    model = Pipeline([\n        (\"norm\", TextNormalizer()),\n        (\"vect\", GensimVectorizer(lexicon_path, False, True))\n    ])\n    tfidf = model.fit_transform(docs)\n    ids = corpus_reader.ids(year)\n    return pd.DataFrame(tfidf, index=ids)\n\ndef convert_to_list_of_list(list_:list):\n    \"\"\"Convert a list to list of list.\n    Input = [1, 2, 3]\n    Output = [[1], [2], [3]]\n    \"\"\"\n    output_list = []\n    for i in list_:\n        output_list.append([i])\n    return output_list\n\ndef write_to_csv(year, data:list):\n    file = open('other/cluster_{}.csv'.format(year), 'w+', newline ='')\n    # writing the data into the file\n    converted_data = convert_to_list_of_list(data)\n    with file: \n        write = csv.writer(file)\n        write.writerows(converted_data)\n    return\n\ndef pipeline_normalizer_wordcloud(path, lexicon_path, year):\n    # Delete existing lexicon to keep the matrix as small as possible\n    if os.path.exists(lexicon_path):\n        os.remove(lexicon_path)\n        print(\"Existing lexicon deleted...\")\n    else:\n        print(\"No existing lexicon.\")\n    tfidf_matrix = pipeline_normalizer(path, lexicon_path, year)\n    lexicon = gensim.corpora.Dictionary.load(lexicon_path)\n    tfidf_matrix.columns = list(lexicon.token2id.keys())\n    clusterer = KMeansClusters()\n    cluster_list = clusterer.transform(tfidf_matrix)\n    write_to_csv(year, cluster_list)\n    wordcloud_with_cluster(cluster_list, tfidf_matrix, year)\n    print(\"Finished executing pipeline_normalizer_wordcloud\")\n    return\n    \nif __name__ == \"__main__\":\n    start_time = time.time()\n    PATH = \"DB/StackOverflow.sqlite\"\n    LEXICON_PATH = \"other/lexicon.pkl\"\n    YEAR = 2021\n    pipeline_normalizer_wordcloud(PATH, LEXICON_PATH, YEAR)\n    timer(start_time, time.time())\n    ","repo_name":"gunardilin/NLP_StackOverflow","sub_path":"m_wordcloud.py","file_name":"m_wordcloud.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25488474429","text":"import cv2\nimport sys\nimport argparse\nimport requests\nimport numpy as np\nimport imutils\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n\n\nurl = \"http://192.168.1.7:8080/shot.jpg\"\n\nARUCO_DICT = {\n\t\"DICT_4X4_50\": cv2.aruco.DICT_4X4_50,\n\t\"DICT_4X4_100\": cv2.aruco.DICT_4X4_100,\n\t\"DICT_4X4_250\": cv2.aruco.DICT_4X4_250,\n\t\"DICT_4X4_1000\": cv2.aruco.DICT_4X4_1000,\n\t\"DICT_5X5_50\": cv2.aruco.DICT_5X5_50,\n\t\"DICT_5X5_100\": cv2.aruco.DICT_5X5_100,\n\t\"DICT_5X5_250\": cv2.aruco.DICT_5X5_250,\n\t\"DICT_5X5_1000\": cv2.aruco.DICT_5X5_1000,\n\t\"DICT_6X6_50\": cv2.aruco.DICT_6X6_50,\n\t\"DICT_6X6_100\": cv2.aruco.DICT_6X6_100,\n\t\"DICT_6X6_250\": cv2.aruco.DICT_6X6_250,\n\t\"DICT_6X6_1000\": cv2.aruco.DICT_6X6_1000,\n\t\"DICT_7X7_50\": cv2.aruco.DICT_7X7_50,\n\t\"DICT_7X7_100\": cv2.aruco.DICT_7X7_100,\n\t\"DICT_7X7_250\": cv2.aruco.DICT_7X7_250,\n\t\"DICT_7X7_1000\": cv2.aruco.DICT_7X7_1000,\n\t\"DICT_ARUCO_ORIGINAL\": cv2.aruco.DICT_ARUCO_ORIGINAL,\n\t\"DICT_APRILTAG_16h5\": cv2.aruco.DICT_APRILTAG_16h5,\n\t\"DICT_APRILTAG_25h9\": cv2.aruco.DICT_APRILTAG_25h9,\n\t\"DICT_APRILTAG_36h10\": cv2.aruco.DICT_APRILTAG_36h10,\n\t\"DICT_APRILTAG_36h11\": cv2.aruco.DICT_APRILTAG_36h11\n}\n\n#url = 
\"http://192.168.1.7:8080/shot.jpg\"\n#url = \"http://25.83.15.127:8080/shot.jpg\"\nurl = \"http://192.168.137.214:8080/shot.jpg\"\n\n\n\nprint(\"[INFO] starting video stream...\")\n\n# While loop to continuously fetching data from the Url\nwhile True:\n img_resp = requests.get(url)\n print(\"hello\")\n print(type(img_resp))\n img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8 )\n img = cv2.imdecode(img_arr, -1)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #h,l=gray.shape[:2]\n #print(\"h: \",h,\" l: \",l)\n #high, length = gray.shape[:2]\n #gray=cv2.threshold(gray,25,255,cv2.THRESH_BINARY)\n #print(\"high: \", high, \" length: \", length)\n #img = imutils.resize(gray, width=1000, height=1800)\n #cv2.imshow(\"Android_cam\", img)\n arucoDict = cv2.aruco.Dictionary_get(ARUCO_DICT[\"DICT_7X7_250\"])\n arucoParams = cv2.aruco.DetectorParameters_create()\n corners, ids, rejected = cv2.aruco.detectMarkers(img,arucoDict, parameters=arucoParams)\n\n\n frame_markers = cv2.aruco.drawDetectedMarkers(gray, corners, ids)\n print(\"ids: \",ids)\n print(type(corners))\n print(\"----------------------\")\n print(corners)\n #print(frame_markers)\n\n \"\"\" plt.figure()\n plt.imshow(frame_markers)\n for i in range(len(ids)):\n\t c = corners[i][0]\n\t plt.plot([c[:, 0].mean()], [c[:, 1].mean()], \"o\", label=\"id={0}\".format(ids[i]))\n plt.legend()\n plt.show() \"\"\"\n if len(corners) > 0:\n \t# flatten the ArUco IDs list\n ids = ids.flatten()\n \t# loop over the detected ArUCo corners\n for (markerCorner, markerID) in zip(corners, ids):\n \t# extract the marker corners (which are always returned\n \t# in top-left, top-right, bottom-right, and bottom-left\n \t# order)\n corners = markerCorner.reshape((4, 2))\n (topLeft, topRight, bottomRight, bottomLeft) = corners\n \t# convert each of the (x, y)-coordinate pairs to integers\n topRight = (int(topRight[0]), int(topRight[1]))\n bottomRight = (int(bottomRight[0]), int(bottomRight[1]))\n bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))\n topLeft = (int(topLeft[0]), int(topLeft[1]))\n \t# draw the bounding box of the ArUCo detection\n cv2.line(img, topLeft, topRight, (0, 255, 0), 2)\n cv2.line(img, topRight, bottomRight, (0, 255, 0), 2)\n cv2.line(img, bottomRight, bottomLeft, (0, 255, 0), 2)\n cv2.line(img, bottomLeft, topLeft, (0, 255, 0), 2)\n \t# compute and draw the center (x, y)-coordinates of the\n \t# ArUco marker\n cX = int((topLeft[0] + bottomRight[0]) / 2.0)\n cY = int((topLeft[1] + bottomRight[1]) / 2.0)\n cv2.circle(img, (cX, cY), 4, (0, 0, 255), -1)\n print(\"===========================calculated======\")\n \t# draw the ArUco marker ID on the img\n cv2.putText(img, str(markerID),\n (topLeft[0], topLeft[1] - 15),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 255, 0), 2)\n \t# show the output img\n cv2.imshow(\"img\", img)\n key = cv2.waitKey(1) & 0xFF\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\n#cv2.destroyAllWindows()\n\n","repo_name":"hariharan382/IK","sub_path":"ws/auruco_detect.py","file_name":"auruco_detect.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19378101837","text":"# -*- coding: utf-8 -*-\n\nimport requests,datetime\nfrom lxml.html import fromstring\nfrom scrapy.selector import Selector\nimport os\nfrom amazon.api import AmazonAPI\nimport pickle\n\n\n\nagent = 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 
Safari/537.36'\nheaders = {\n 'User-Agent': agent,\n 'Host': \"www.amazon.com\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Encoding\": \"gzip, deflate, sdch, br\",\n \"Accept-Language\": \"zh-CN,zh;q=0.8,zh-TW;q=0.6\",\n \"Connection\": \"keep-alive\"\n}\n\nurl_start='https://www.amazon.com/'\nkeyword='usb car charger'\ns = requests.Session()\ncookies = pickle.load(open(os.path.join(os.path.split(os.path.split(os.path.abspath(__file__))[0])[0],'cookies_am.pkl'), \"rb\"))\nfor cookie in cookies:\n s.cookies.set(cookie['name'], cookie['value'])\npage = s.get(url_start+'s?field-keywords='+keyword+'&page=3',headers=headers)\ntree = fromstring(page.content)\nseller=(Selector(text=page.content).xpath(\".//*[@id='result_3']/@data-asin\")).extract()[0]\nprint(seller)\n\npage = s.get(url_start+(Selector(text=page.content).xpath(\".//*[@id='pagnNextLink']/@href\")).extract()[0],headers=headers)\nnext=(Selector(text=page.content).xpath(\".//*[@id='pagnNextLink']/@href\")).extract()[0]\nprint(next)","repo_name":"newer027/amazon_crawler","sub_path":"test/cookie_sele20170409.py","file_name":"cookie_sele20170409.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15039026924","text":"# *-* coding: utf-8 *-*\nfrom flask import Blueprint, jsonify\nfrom psutil import users \nfrom ..auths import *\n\nuser = Blueprint('user', __name__, \n template_folder='')\n\n@user.route('/user/who/')\n@user.route('/user/who', methods=['GET'])\n@auth.login_required\ndef who(username=None):\n lUsers=users()\n jwho=[]\n\n if username is not None:\n for k,v in enumerate(lUsers):\n if v.name == username:\n jwho.append(lUsers[k])\n else:\n jwho.append(lUsers)\n\n return jsonify(user=jwho)\n\n","repo_name":"wesleyleite/core","sub_path":"core/users/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13024534279","text":"import logging as log\n\nimport numpy as np\n\nfrom mo.front.tf.extractors.utils import tf_dtype_extractor\n\n\ndef tf_fused_bn_infer(node):\n output_shape = np.array(node.in_node(0).shape)\n for port, out_node in node.out_nodes().items():\n out_node.shape = output_shape\n\n\ndef tf_fused_bn_extractor(pb):\n is_training = pb.attr['is_training'].b\n if is_training:\n log.warning('FusedBatchNorm doesn\\'t support is_training=True')\n\n return {\n 'data_format': pb.attr[\"data_format\"].s,\n 'data_type': tf_dtype_extractor(pb.attr[\"T\"].type),\n 'eps': pb.attr['epsilon'].f,\n 'infer': tf_fused_bn_infer,\n 'is_training': is_training\n }\n","repo_name":"Namptiter/OpenVINO-Darknet-YOLOv3","sub_path":"model_optimizer/mo/front/tf/extractors/fused_bn.py","file_name":"fused_bn.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"38816520650","text":"from serviceaccount import ServiceAccount\nfrom tokenauth import ServiceAccountAuth\n\nfrom json import load\nfrom os import chdir\nfrom os.path import split\nfrom requests import get\nfrom traceback import print_exc\n\n#I use this for testing environment where all the files are in one directory\n#I make sure the current directory is the file directory\nchdir(split(__file__)[0]) if split(__file__)[0] else None\n\nupload_url = 'https://www.googleapis.com/upload/drive/v3/'\napi_url 
= 'https://www.googleapis.com/drive/v3/'\nscopes = ['https://www.googleapis.com/auth/drive']\n\nSA = ServiceAccount(load(open('SA.json')))\nSA.setScopes(scopes)\nSA.readAT('AT_SA.pkl')\n\nauth = ServiceAccountAuth(SA)\n\ntry:\n \n params = {\n 'fields': 'storageQuota/limit, storageQuota/usage'\n }\n resp = get(api_url + 'about', params=params, auth=auth)\n print(resp.status_code)\n storage = resp.json()['storageQuota']\n usage = int ((int(storage['usage']) / int(storage['limit'])) * 40)\n print('[' + '|' * usage + ' ' * (40 - usage) + ']' + '{}/{}'.format(storage['usage'], storage['limit']))\nexcept :\n print_exc()\nfinally:\n SA.saveAT('AT_SA.pkl')\n","repo_name":"mohamedali-zorgati1997/Google-Service-Account","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39881012771","text":"import sqlite3\nimport re\nimport datetime\n\n\nclass SQLite():\n\n def __init__(self, db_name):\n # Error Checks\n if not isinstance(db_name, str):\n raise TypeError(\"The name of the database should be a string\")\n if len(db_name) == 0:\n raise ValueError(\"The database name should not be empty\")\n\n # Create the connection and the cursor\n self.conn = sqlite3.connect(db_name)\n self.c = self.conn.cursor()\n\n self.tables = {}\n\n def __valid_input(self, string):\n '''Private function to validate input'''\n for l in string:\n if l in [';', '(', ')', ]:\n raise ValueError(\"Forbidden character found on the string '%s'\" % string)\n return True\n\n def __fields_not_primary_key(self, fields):\n ''' Private function to determine if the table have a Primary Key'''\n new_fields = []\n for f in fields:\n if not re.match('.*primary key.*', f.lower()):\n new_fields.append(f.split(\" \")[0])\n return new_fields\n\n def __get_primary_key(self, fields):\n ''' Private function to determine the Primary Key field'''\n for f in fields:\n if re.match('.*primary key.*', f.lower()):\n return f.split(\" \")[0]\n\n raise RuntimeError(\"No primary key on table\")\n\n def create_table(self, table_name, fields):\n \"\"\" Create table\"\"\"\n # Error Checks\n if not isinstance(table_name, str):\n raise TypeError(\"The name of the table on database should be a string\")\n if type(fields) is not tuple:\n raise TypeError(\"The fields should be a tuple ('field_name data_type OPTION', ...)\")\n if len(table_name) == 0:\n raise ValueError(\"The table name should not be empty\")\n if len(fields) == 0:\n raise ValueError(\"You need at least one field to create a table\")\n for f in fields:\n self.__valid_input(f)\n\n try:\n self.tables[table_name] = fields\n sql = '''CREATE TABLE IF NOT EXISTS {tbl} ({flds})'''.format(tbl=table_name,\n flds=\",\".join(f for f in fields))\n self.c.execute(sql)\n\n except Exception as e:\n raise e\n\n def insert(self, table_name, values):\n \"\"\" Insert data into table \"\"\"\n # Error Checks\n if not isinstance(table_name, str):\n raise TypeError(\"The name of the table on database should be a string\")\n if type(values) is not tuple:\n raise TypeError(\"The values should be a tuple containing the values to insert\")\n if len(table_name) == 0:\n raise ValueError(\"The table name should not be empty\")\n if len(values) == 0:\n raise ValueError(\"You need at least one value to insert on the table\")\n for v in values:\n if isinstance(v, str):\n self.__valid_input(v)\n\n try:\n fields = self.__fields_not_primary_key(self.tables[table_name])\n sql = '''INSERT INTO 
{tbl}({flds}) VALUES({vals})'''.format(tbl=table_name,\n flds=\",\".join(f for f in fields),\n vals=\",\".join(\"?\" for i in range(len(values))))\n self.c.execute(sql, values)\n self.conn.commit()\n\n except Exception as e:\n raise e\n\n def get_last_n(self, table_name, n=1):\n \"\"\" Get the last n values on table \"\"\"\n # Error Checks\n if not isinstance(table_name, str):\n raise TypeError(\"The name of the table on database should be a string\")\n if not isinstance(n, int):\n raise TypeError(\"The number of records (n) asked should be an integer\")\n if len(table_name) == 0:\n raise ValueError(\"The table name should not be empty\")\n if n <= 0:\n raise ValueError(\"The number of records should be greater than or equal to 1\")\n\n try:\n sql = '''SELECT * FROM {tbl} ORDER by {pk} DESC LIMIT {num}'''.format(tbl=table_name,\n pk=self.__get_primary_key(\n self.tables[table_name]),\n num=n)\n self.c.execute(sql)\n return self.c.fetchall()\n except Exception as e:\n raise e\n\n def query(self, query, values=None):\n \"\"\" Query \"\"\"\n # Error Checks\n if not isinstance(query, str):\n raise TypeError(\"The query should be a string\")\n if len(query) == 0:\n raise ValueError(\"The query can't be empty\")\n if values is not None:\n if type(values) is not tuple:\n raise ValueError(\"Values should be a tuple\")\n\n try:\n if values is None:\n self.c.execute(query)\n return self.c.fetchall()\n else:\n self.c.execute(query, values)\n return self.c.fetchall()\n\n except Exception as e:\n raise e\n\n def get_columns_from_table(self, table_name):\n \"\"\" Get columns from table \"\"\"\n # Error Checks\n if not isinstance(table_name, str):\n raise TypeError(\"The table name should be a string\")\n if len(table_name) == 0:\n raise ValueError(\"The table name can't be empty\")\n for v in table_name:\n if isinstance(v, str):\n self.__valid_input(v)\n\n try:\n sql = \"SELECT * from {tbl}\".format(tbl=table_name)\n self.c.execute(sql)\n return list(map(lambda x: x[0], self.c.description))\n\n except Exception as e:\n raise e\n\n def close(self):\n \"\"\" Close connection\"\"\"\n try:\n self.conn.close()\n except Exception as e:\n raise e","repo_name":"mercolino/monitoring","sub_path":"lib/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":6095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20214366283","text":"\"\"\"\nGiven a non-negative integer num represented as a string, remove k digits from the number so that the new number is the smallest possible.\n\nNote:\nThe length of num is less than 10002 and will be ≥ k.\nThe given num does not contain any leading zero.\n\nExample 1:\nInput: num = \"1432219\", k = 3\nOutput: \"1219\"\nExplanation: Remove the three digits 4, 3, and 2 to form the new number 1219 which is the smallest.\n\nExample 2:\nInput: num = \"10200\", k = 1\nOutput: \"200\"\nExplanation: Remove the leading 1 and the number is 200. 
Note that the output must not contain leading zeroes.\n\nExample 3:\nInput: num = \"10\", k = 2\nOutput: \"0\"\nExplanation: Remove all the digits from the number and it is left with nothing which is 0.\n\"\"\"\nclass Solution(object):\n def removeKdigits(self, num, k):\n \"\"\"\n :type num: str\n :type k: int\n :rtype: str\n \"\"\"\n # Convert to Ascending order\n # If already in ascending order, remove from back\n # Remove trailing zeros\n if k == 0:\n return num\n \n if len(num) == 0:\n return num\n \n if len(num) == k:\n return '0'\n \n count = 0\n remove_index = []\n num = list(num)\n \n for i in range(len(num)):\n if count >= k:\n break\n if i + 1 >= len(num):\n break\n if num[i] > num[i+1]:\n remove_index.append(i)\n count += 1\n \n print('remove_index: ', remove_index)\n \n for i in reversed(remove_index):\n print('DELETING num[{}]: {}'.format(i, num[i]))\n del num[i]\n \n for i in range(len(num) - 1, 0, -1):\n if count >= k:\n break\n if i - 1 < 0:\n break\n if num[i] < num[i-1]:\n del num[i-1]\n count += 1\n \n if count < k:\n for i in range(k - count):\n del num[-1]\n count += 1\n \n while (num and num[0] == '0'):\n del num[0]\n \n if not num:\n return '0'\n \n num = ''.join(num)\n return num\n \n","repo_name":"theseanathan/leetcode","sub_path":"medium/remove_k_digits_WIP.py","file_name":"remove_k_digits_WIP.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17651216080","text":"from pyteal import *\n\nfrom auction import Auction\n\nimport os\n\nif __name__ == \"__main__\":\n\n cwd = os.path.dirname(__file__)\n\n approval_program = Auction().approval_program()\n clear_program = Auction().clear_program()\n\n # Mode.Application specifies that this is a smart contract\n compiled_approval = compileTeal(\n approval_program, Mode.Application, version=6)\n print(compiled_approval)\n\n file_name = os.path.join(cwd, \"auction_approval.teal\")\n with open(file_name, \"w\") as teal:\n teal.write(compiled_approval)\n teal.close()\n\n # Mode.Application specifies that this is a smart contract\n compiled_clear = compileTeal(clear_program, Mode.Application, version=6)\n print(compiled_clear)\n file_name = os.path.join(cwd, \"auction_clear.teal\")\n with open(file_name, \"w\") as teal:\n teal.write(compiled_clear)\n teal.close()\n","repo_name":"ogSuccess/Algo-Aunction","sub_path":"src/contracts/compile_auction.py","file_name":"compile_auction.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36856207460","text":"'''list_vms.py - list the virtual machines in a subscription'''\nimport json\nimport sys\n\nimport azurerm\n\n\ndef main():\n '''Main routine.'''\n # Load Azure app defaults\n try:\n with open('azurermconfig.json') as config_file:\n config_data = json.load(config_file)\n except FileNotFoundError:\n sys.exit('Error: Expecting azurermconfig.json in current folder')\n\n tenant_id = config_data['tenantId']\n app_id = config_data['appId']\n app_secret = config_data['appSecret']\n subscription_id = config_data['subscriptionId']\n\n access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)\n\n vmlist = azurerm.list_vms_sub(access_token, subscription_id)\n print(json.dumps(vmlist, sort_keys=False, indent=2, separators=(',', ': ')))\n '''\n for vm in vmlist['value']:\n count += 1\n name = vm['name']\n location = vm['location']\n offer = 
vm['properties']['storageProfile']['imageReference']['offer']\n sku = vm['properties']['storageProfile']['imageReference']['sku']\n print(''.join([str(count), ': ', name,\n # ', RG: ', rgname,\n ', location: ', location,\n ', OS: ', offer, ' ', sku]))\n '''\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gbowerman/azurerm","sub_path":"examples/list_vms.py","file_name":"list_vms.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"37"} +{"seq_id":"29377671119","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom api.models import Chapter\nfrom api.models import Manga\nfrom api.serializers.manga import MangaSerializer\nimport json\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import JsonResponse\nfrom rest_framework.response import Response\n\n@api_view([\"GET\"])\ndef get_manga(request, manga_id):\n #user = request.user.id\n manga = Manga.objects.get(id=manga_id)\n serializer = MangaSerializer(manga)\n return JsonResponse(serializer.data, safe=False, status=status.HTTP_200_OK)\n\n@api_view([\"GET\"])\ndef index_manga(request):\n #user = request.user.id\n mangas = Manga.objects#.filter(added_by=user)\n serializer = MangaSerializer(mangas, many=True)\n return JsonResponse(serializer.data, safe=False, status=status.HTTP_200_OK)\n\n@api_view([\"POST\"])\ndef create_manga(request):\n # user = request.user\n payload = json.loads(request.body)\n try:\n manga = Manga.objects.create(\n author=payload[\"author\"],\n title=payload[\"title\"],\n description=payload[\"description\"],\n release_date=payload[\"release_date\"],\n image=payload[\"image\"],\n #added_by=user,\n )\n\n for chapter in payload[\"chapters\"]:\n Chapter.objects.create(\n manga=manga,\n number=chapter[\"number\"],\n title=chapter[\"title\"],\n )\n\n\n for genre in payload[\"genres\"]:\n manga.genres.add(genre)\n\n serializer = MangaSerializer(manga)\n return JsonResponse(serializer.data, safe=False, status=status.HTTP_201_CREATED)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception:\n return JsonResponse({'error': 'Algo deu errado'}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n@api_view([\"PUT\"])\ndef update_manga(request, manga_id):\n # user = request.user.id\n payload = json.loads(request.body)\n try:\n manga_item = Manga.objects.filter(id=manga_id)\n # returns 1 or 0\n manga_item.update(**payload)\n manga = Manga.objects.get(id=manga_id)\n serializer = MangaSerializer(manga)\n return JsonResponse(serializer.data, safe=False, status=status.HTTP_200_OK)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception:\n return JsonResponse({'error': 'Algo deu errado'}, safe=False, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n@api_view([\"DELETE\"])\ndef delete_manga(request, manga_id):\n #user = request.user.id\n try:\n manga = Manga.objects.get(id=manga_id)\n manga.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n except ObjectDoesNotExist as e:\n return JsonResponse({'error': str(e)}, safe=False, status=status.HTTP_404_NOT_FOUND)\n except Exception:\n return JsonResponse({'error': 'Algo deu errado'}, safe=False, 
status=status.HTTP_500_INTERNAL_SERVER_ERROR)","repo_name":"nathaliaop/myanimecopy-back","sub_path":"api/views/manga.py","file_name":"manga.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69931500267","text":"import copy\n\nimport tsp.genetic_tsp as gtsp\n\nimport pandas as pd\nimport time\n\n\ndef get(path_or_url):\n return pd.read_csv(path_or_url)\n\n\ndef put(dataframe, filename):\n dataframe.to_csv(filename, index=True)\n\n\npath = 'benchmarks/tsp/data/'\nfiles = ['a280.csv', 'att48.csv', 'ch150.csv', 'fl417.csv']\nresult = pd.DataFrame(columns=['benchmark', 'answer', 'time'])\nrepeats = 5\n\nresult_row = 0\nfor file in files:\n best_route = None\n total_time = 0.0\n print(file)\n for iteration in range(0, repeats, 1):\n dataframe = get(path + file)\n dataframe.columns = ['i', 'x', 'y']\n\n cities = []\n for index, row in dataframe.iterrows():\n cities.append(gtsp.City(row['x'], row['y']))\n\n start = time.time()\n\n best_route_i = gtsp.run(cities,\n population_size=50,\n selection_size=10,\n mutation_rate=0.02,\n iterations=20,\n name=file[:-4])\n end = time.time()\n total_time += end - start\n if best_route is None or best_route_i.get_distance() < best_route.get_distance():\n best_route = copy.deepcopy(best_route_i)\n\n average_time = total_time / repeats\n result.loc[result_row] = [file[:-4], best_route.get_distance(), average_time]\n result_row += 1\n\n\nput(result, \"benchmarks/tsp/result.csv\")","repo_name":"pazamelin/ORA_labs","sub_path":"lab3/benchmark_tsp.py","file_name":"benchmark_tsp.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34571932089","text":"import torch as torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision\nimport pandas as pd\nfrom IPython.core.display import display\nfrom collections import OrderedDict\nfrom collections import namedtuple\nfrom itertools import product\nimport time\nfrom IPython.display import clear_output\nimport json\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import datasets, transforms\n\n\ntorch.set_printoptions(linewidth=120)\n\n\ndata_dir = '/home/rxb5452/Desktop/Deep Learning/Medical Imaging/processed_data/'\n\ntrain_set = datasets.ImageFolder(data_dir, transform=transforms.Compose([\n transforms.Grayscale(num_output_channels=1),\n transforms.ToTensor(),\n transforms.Resize((120,120))\n ]))\n\nimage, label = train_set[0]\n\nprint(image.shape)\n\nloader = torch.utils.data.DataLoader(train_set, batch_size=62, num_workers=1)\ndata = next(iter(loader))\nmean = data[0].mean()\nstd = data[0].std()\n\ntrain_set_normal = datasets.ImageFolder(\n data_dir\n ,transform=transforms.Compose([\n transforms.Grayscale(num_output_channels=1)\n ,transforms.ToTensor()\n ,transforms.Normalize(mean, std)\n ,transforms.Resize((120,120))\n ])\n)\n\n\nclass Network(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=12, kernel_size=5)\n self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=5)\n self.conv3 = nn.Conv2d(in_channels=12, out_channels=6, kernel_size=5)\n\n self.fc1 = nn.Linear(in_features=6 * 11 * 11 + 100, out_features=112)\n self.fc2 = nn.Linear(in_features=112, out_features=56)\n self.fc3 = nn.Linear(in_features=56, out_features=12)\n self.out = nn.Linear(in_features=12, 
out_features=2)\n\n def forward(self, t):\n # (1) input layer\n t = t\n batch_size = t.size()[0]\n colorhist = [None] * batch_size\n for i in range(batch_size):\n colorhist[i] = torch.histc(t[i] * 255, bins=100, min=0, max=255).tolist()\n\n # (2) hidden conv layer\n t = self.conv1(t)\n t = F.relu(t)\n t = F.max_pool2d(t, kernel_size=2, stride=2)\n\n t = self.conv2(t)\n t = F.relu(t)\n t = F.max_pool2d(t, kernel_size=2, stride=2)\n\n t = self.conv3(t)\n t = F.relu(t)\n t = F.max_pool2d(t, kernel_size=2, stride=2)\n\n # (4) hidden linear layer\n # print(t.shape)\n t = t.reshape(-1, 6 * 11 * 11)\n tt = torch.empty(batch_size, t.size()[1] + 100).to('cuda')\n for i in range(batch_size):\n tt[i] = torch.cat((t[i], torch.Tensor(colorhist[i]).to('cuda')), 0)\n tt = self.fc1(tt)\n tt = F.relu(tt)\n\n # (5) hidden linear layer\n tt = self.fc2(tt)\n tt = F.relu(tt)\n\n # (5) hidden linear layer\n tt = self.fc3(tt)\n tt = F.relu(tt)\n\n # (6) output layer\n tt = self.out(tt)\n # t = F.softmax(t, dim=1)\n\n return tt\n\ntorch.set_grad_enabled(True)\n\nclass RunBuilder():\n @staticmethod\n def get_runs(params):\n\n Run = namedtuple('Run', params.keys())\n\n runs = []\n for v in product(*params.values()):\n runs.append(Run(*v))\n\n return runs\n\n\nclass Epoch():\n # This Epoch class is useless at the moment\n def __init__(self):\n self.count = 0\n self.loss = 0\n self.num_correct = 0\n self.start_time = None\n\n\nclass RunManager():\n def __init__(self):\n\n self.epoch_count = 0\n self.epoch_loss = 0\n self.epoch_num_correct = 0\n self.epoch_start_time = None\n\n self.run_params = None\n self.run_count = 0\n self.run_data = []\n self.run_start_time = None\n\n self.network = None\n self.loader = None\n self.tb = None\n\n def begin_run(self, run, network, loader):\n\n self.run_start_time = time.time()\n\n self.run_params = run\n self.run_count += 1\n\n self.network = network\n self.loader = loader\n self.tb = SummaryWriter(comment=f'-{run}')\n\n images, labels = next(iter(self.loader))\n grid = torchvision.utils.make_grid(images)\n\n self.tb.add_image('images', grid)\n self.tb.add_graph(self.network, images.to(getattr(run, 'device', 'cpu')))\n\n def end_run(self):\n self.tb.close()\n self.epoch_count = 0\n\n def begin_epoch(self):\n self.epoch_start_time = time.time()\n\n self.epoch_count += 1\n self.epoch_loss = 0\n self.epoch_num_correct = 0\n\n def end_epoch(self):\n\n epoch_duration = time.time() - self.epoch_start_time\n run_duration = time.time() - self.run_start_time\n\n loss = self.epoch_loss / len(self.loader.dataset)\n accuracy = self.epoch_num_correct / len(self.loader.dataset)\n\n self.tb.add_scalar('Loss', loss, self.epoch_count)\n self.tb.add_scalar('Accuracy', accuracy, self.epoch_count)\n\n for name, param in self.network.named_parameters():\n self.tb.add_histogram(name, param, self.epoch_count)\n self.tb.add_histogram(f'{name}.grad', param.grad, self.epoch_count)\n\n results = OrderedDict()\n results[\"run\"] = self.run_count\n results[\"epoch\"] = self.epoch_count\n results['loss'] = loss\n results[\"accuracy\"] = accuracy\n results['epoch duration'] = epoch_duration\n results['run duration'] = run_duration\n for k, v in self.run_params._asdict().items(): results[k] = v\n self.run_data.append(results)\n\n df = pd.DataFrame.from_dict(self.run_data, orient='columns')\n\n # Next two lines are only for Jupyter Notebook\n clear_output(wait=True)\n display(df)\n\n def track_loss(self, loss):\n self.epoch_loss += loss.item() * batch[0].shape[0]\n\n def track_num_correct(self, preds, labels):\n 
self.epoch_num_correct += self._get_num_correct(preds, labels)\n\n @torch.no_grad()\n def _get_num_correct(self, preds, labels):\n return preds.argmax(dim=1).eq(labels).sum().item()\n\n def save(self, fileName):\n\n pd.DataFrame.from_dict(\n self.run_data, orient='columns'\n ).to_csv(f'{fileName}.csv')\n\n with open(f'{fileName}.json', 'w', encoding='utf-8') as f:\n json.dump(self.run_data, f, ensure_ascii=False, indent=4)\n\n'''torch.backends.cuda.matmul.allow_tf32 = True\ntorch.backends.cudnn.benchmark = False\ntorch.backends.cudnn.deterministic = False\ntorch.backends.cudnn.allow_tf32 = True\ndata = torch.randn([1000, 6, 12, 12], dtype=torch.float, device='cuda', requires_grad=True)\nnet = torch.nn.Conv2d(6, 12, kernel_size=[5, 5], padding=[0, 0], stride=[1, 1], dilation=[1, 1], groups=1)\nnet = net.cuda().float()\nout = net(data)\nout.backward(torch.randn_like(out))\ntorch.cuda.synchronize()'''\nprint(len(train_set[0][0][0]))\n\ntorch.backends.cudnn.enabled = True\ntrainsets = {\n 'not_normal': train_set\n , 'normal': train_set_normal\n}\n\nparams = OrderedDict(\n lr=[0.001]\n , batch_size=[40]\n , shuffle=[True]\n , num_workers=[1]\n , device=['cuda']\n , trainset=['normal']\n)\n\nm = RunManager()\n\nfor run in RunBuilder.get_runs(params):\n\n device = torch.device(run.device)\n\n network = Network().to(device)\n\n # Training process given the set of parameters\n # num_workers preloads batches of data\n loader = torch.utils.data.DataLoader(\n trainsets[run.trainset]\n , batch_size=run.batch_size\n , shuffle=run.shuffle\n , num_workers=run.num_workers\n )\n\n optimizer = optim.Adam(\n network.parameters(), lr=run.lr\n )\n\n m.begin_run(run, network, loader)\n\n for epoch in range(50):\n m.begin_epoch()\n for batch in loader:\n images = batch[0].to(device) # Get Batch\n labels = batch[1].to(device)\n preds = network(images) # Pass Batch\n loss = F.cross_entropy(preds, labels) # Calculate Loss\n optimizer.zero_grad() # Zero Gradients\n loss.backward() # Calculate Gradients\n optimizer.step() # Update Weights\n\n m.track_loss(loss)\n m.track_num_correct(preds, labels)\n\n m.end_epoch()\n m.end_run()\nm.save('results')\n\ntest_data_dir = '/home/rxb5452/Desktop/Deep Learning/Medical Imaging/processed_test_data/'\n\ntest_set = datasets.ImageFolder(test_data_dir, transform=transforms.Compose([\n transforms.Grayscale(num_output_channels=1),\n transforms.ToTensor(),\n transforms.Resize((120,120))\n ]))\n\nimage, label = train_set[0]\n\nprint(image.shape)\n\nloader = torch.utils.data.DataLoader(test_set, batch_size=62, num_workers=1)\ndata = next(iter(loader))\nmean = data[0].mean()\nstd = data[0].std()\n\ntest_set_normal = datasets.ImageFolder(\n test_data_dir\n ,transform=transforms.Compose([\n transforms.Grayscale(num_output_channels=1)\n ,transforms.ToTensor()\n ,transforms.Normalize(mean, std)\n ,transforms.Resize((120,120))\n ])\n)\nloader = torch.utils.data.DataLoader(test_set_normal, batch_size=62, num_workers=1)\n\n@torch.no_grad()\ndef get_all_preds(model, loader):\n model = model.to('cuda')\n all_preds = torch.tensor([]).to('cuda')\n labels_total = []\n count = 0\n count_num_correct = 0\n for batch in loader:\n images = batch[0].to('cuda') # Get Batch\n labels = batch[1].to('cuda')\n\n preds = model(images)\n all_preds = torch.cat(\n (all_preds.to('cuda'), preds.to('cuda'))\n ,dim=0\n )\n labels_total.append(labels)\n count += len(loader)\n count_num_correct += preds.argmax(dim=1).eq(labels).sum().item()\n test_accuracy = count_num_correct / len(loader.dataset)\n return [all_preds, 
labels_total, test_accuracy]\n\ntest1, test_labels, test_accuracy = get_all_preds(network, loader)\nprint(test_accuracy)\n\nprint(test1.argmax(dim=1))","repo_name":"rogerbalcells/MedicalImaging","sub_path":"Throat/src/main/Network/PredictionScript.py","file_name":"PredictionScript.py","file_ext":"py","file_size_in_byte":9887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22109778102","text":"\"\"\"\n FIND LARGEST SQUARE SUBMATRIX (50CIQ 7: SQUARE SUBMATRIX,\n leetcode.com/problems/maximal-square)\n\n Given an MxN matrix of values and a value k, write a function to find and return the size of the largest square\n submatrix of all ks.\n\n Example:\n Input = [[1, 1, 1, 0],[1, 1, 1, 1],[1, 1, 0, 0]], 1\n Output = 2\n\n Variations:\n - Find largest non-ragged (rectangular) submatrix.\n\"\"\"\n\n\n# Questions you should ask the interviewer (if not explicitly stated):\n# - What time/space complexity are you looking for?\n# - Can I modify the given matrix?\n# - What are the possible matrix dimensions?\n# - Will the matrix consist of ragged or jagged lists?\n\n\n# APPROACH: Naive Brute Force\n#\n# Iterate over all of the values in the matrix, when a value is found that matches k. Once a match is found, starting\n# at one, check for sum-matrices consisting of k values, terminating immediately if a non-k value is found. Return the\n# maximum size sub-matrix found.\n#\n# Time Complexity: O((r * c)**2), where r and c are the number of rows and columns in the matrix.\n# Space Complexity: O(1)\ndef find_largest_square_submatrix_naive(m, k):\n\n def is_submatrix_homogeneous(m, row, col, size, k):\n if 0 <= row <= (row + size - 1) < len(m) and 0 <= col <= (col + size - 1) < len(m[row]):\n for r in range(row, row + size):\n for c in range(col, col + size):\n if m[r][c] != k:\n return False\n return True\n return False\n\n if m is not None and k is not None:\n max_size = 0\n start_r = start_c = None\n for r in range(len(m)):\n for c in range(len(m[r])):\n if m[r][c] == k:\n size = 1\n while is_submatrix_homogeneous(m, r, c, size, k):\n if size > max_size:\n max_size = size\n start_r, start_c = r, c\n size += 1\n return max_size # Could also return start row and column: return max_size, start_r, start_c\n\n\n# APPROACH: Dynamic Programming\n#\n# This approach uses an additional matrix to track the size where the count is zero if the corresponding cell in the\n# provided matrix doesn't equal the value, or the running (current) size submatrix to which it belongs. This is\n# accomplished a single iteration (from matrix[0][0] to matrix[-1][-1]) over all of the values in the provided matrix,\n# and checking a maximum of four cells (three from the new matrix) to identify the maximum size. 
Then, return the\n# highest value from the tabulation matrix as the largest square submatrix.\n#\n# Essentially, each cell in the tabulation matrix:\n# tab[r][c] = min(tab[r-1][c-1], tab[r][c-1], tab[r-1][c]) + 1 if matrix[r][c] == k else 0\n#\n# Using the example matrix:\n# [[1, 1, 1, 0],\n# [1, 1, 1, 1],\n# [1, 1, 0, 0]]\n#\n# With a k value of 1, then the tabulation matrix would be:\n# [[1, 1, 1, 0],\n# [1, 2, 2, 1],\n# [1, 2, 0, 0]]\n#\n# Time Complexity: O(rc), where r and c are the number of rows and columns in the matrix.\n# Space Complexity: O(rc), where r and c are the number of rows and columns in the matrix.\ndef find_largest_square_submatrix_dp(m, k):\n if m is not None and k is not None:\n tab = [[0 for _ in r] for r in m]\n max_size = 0\n start_r = start_c = None\n for r in range(len(m)):\n for c in range(len(m[r])):\n if m[r][c] == k:\n tab[r][c] = 1 if r == 0 or c == 0 else min(tab[r-1][c-1], tab[r][c-1], tab[r-1][c]) + 1\n if tab[r][c] > max_size:\n max_size = tab[r][c]\n start_r, start_c = r - max_size, c - max_size\n return max_size # Could also return start row and column: return max_size, start_r, start_c\n\n\ndef format_matrix(m):\n try:\n w = max([len(str(e)) for r in m for e in r]) + 1\n except (ValueError, TypeError):\n return f\"\\n{None}\"\n return m if not m else \"\\n\" + '\\n'.join([''.join([f'{e!r:{w}}' for e in r if len(r) > 0]) for r in m if len(m) > 0])\n\n\nmatrices = [[[-2, -2, -2, -2],\n [-2, 2, 2, -2],\n [-2, 2, 2, 1]],\n [[ 2, -2, -2, -2],\n [-2, 2, 2, -2],\n [-2, 2, 2, 1]],\n [[True, True, True],\n [True, True, True],\n [True, True, False]],\n [[1, 1, 1]],\n [[1, 1],\n [1, 1]],\n [[0, 0, 0, 0],\n [1, 0, 0, 0],\n [1, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[\"b\", \"a\", \"a\", \"b\"],\n [\"a\", \"a\", \"a\", \"b\"],\n [\"c\", \"a\", \"a\", \"b\"],\n [\"a\", \"a\", \"a\", \"b\"],\n [\"c\", \"b\", \"b\", \"a\"]]]\nfns = [find_largest_square_submatrix_naive,\n find_largest_square_submatrix_dp]\n\nfor i, m in enumerate(matrices):\n print(f\"matrices[{i}]:{format_matrix(m)}\\n\")\n for fn in fns:\n print(f\"{fn.__name__}(matrices[{i}], {m[0][0]}):{fn(m, m[0][0])}\")\n print()\n\n\n","repo_name":"mpettersson/PythonReview","sub_path":"questions/list_and_recursion/find_largest_square_submatrix.py","file_name":"find_largest_square_submatrix.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70340238506","text":"from app import db\nfrom app.models import Groups, Challenges, ChallengeGroup, Users\nimport logging\n\n#check correct challenges setup\nfrom source import challenges\n\n\ndef truncate(x, digits):\n if isinstance(x, int):\n return x\n return int(10 ** digits * x) / 10 ** digits\n\n\ndef ceil_digit(x, digits):\n x = truncate(x, digits)\n return x + 10**(-digits)\n\n\n#check intro section\n\nintro = challenges.my_challenges[0]\nif len(intro) == 0:\n raise Exception('Intro section is missing. If not wanted, set the visible flag to false')\nfor check in ('id', 'title', 'text', 'allowed_functions', 'required_modules', 'visible'):\n if check not in intro:\n raise Exception(f'The {check} attribute is missing from the intro')\nif not isinstance(intro['id'], int):\n raise Exception('The intro id must be an integer')\nif intro['id'] != 0:\n raise Exception('The intro id must be equal to 0')\nif not isinstance(intro['allowed_functions'], list):\n raise Exception('The intro allowed_functions must be a list of strings. 
It is not a list')\nif not isinstance(intro['required_modules'], list):\n raise Exception('The intro required_modules must be a list of strings. It is not a list')\nif not isinstance(intro['title'], str):\n raise Exception('The intro title must be a string')\nif not isinstance(intro['text'], str):\n raise Exception('The intro text must be a string')\nfor item in intro['allowed_functions']:\n if not isinstance(item, str):\n raise Exception('Intro allowed_functions must be a list of string. It does not contain only strings')\nfor item in intro['required_modules']:\n if not isinstance(item, str):\n raise Exception('Intro required_modules must be a list of string. It does not contain only strings')\n\n\n#check challenges section\n\nchecks = {'id': 'The challenge id is missing, in /source/challenges.py, challenge %d',\n 'title': '','text': '','tips': '','allowed_functions': '','required_modules': '', 'func_name': '', 'max_score':''}\ndefault_error = 'The challenge %s is missing. In /source/challenges.py, challenge %d'\nerror_location = '. In /source/challenges.py, challenge %d'\n\nc = challenges.my_challenges[1:]\n\nfor i in range(len(c)):\n for check in checks:\n if check not in c[i]:\n if len(checks[check]) != 0:\n error = checks[check] % (i+1)\n else:\n error = default_error % (check, i+1)\n raise Exception(error)\n\n if not isinstance(c[i]['id'], int):\n raise Exception('Challenge id must be an integer. Error at %s' % str(c[i]['id']) + error_location % (i+1))\n if c[i]['id'] == 0:\n raise Exception('Challenge id must be different from 0, which is reserved for the intro. Error at %s' % str(c[i]['id']) + error_location % (i+1))\n if not isinstance(c[i]['tips'], list):\n raise Exception('Challenge tips must be a list of strings. It is not a list. Error at %s' % str(c[i]['tips']) + error_location % (i+1))\n if not isinstance(c[i]['allowed_functions'], list):\n raise Exception('Challenge allowed_functions must be a list of strings. It is not a list. Error at %s' % str(c[i]['allowed_functions']) + error_location % (i+1))\n if not isinstance(c[i]['required_modules'], list):\n raise Exception('Challenge required_modules must be a list of strings. It is not a list. Error at %s' % str(c[i]['required_modules']) + error_location % (i+1))\n if not isinstance(c[i]['title'], str):\n raise Exception('Challenge title must be a string. Error at %s' % str(c[i]['title']) + error_location % (i+1))\n if not isinstance(c[i]['text'], str):\n raise Exception('Challenge text must be a string. Error at %s' % str(c[i]['text']) + error_location % (i+1))\n if not isinstance(c[i]['func_name'], str):\n raise Exception('Challenge func_name must be a string. Error at %s' % str(c[i]['func_name']) + error_location % (i+1))\n if not isinstance(c[i]['max_score'], (float, int)):\n raise Exception('Challenge max_score must be a number. Error at %s' % str(c[i]['max_score']) + error_location % (i + 1))\n c[i]['max_score'] = truncate(c[i]['max_score'], 2)\n if 'is_simulation' not in c[i]:\n c[i]['is_simulation'] = False\n if not isinstance(c[i]['is_simulation'], bool):\n raise Exception('Challenge is_simulation must be a boolean. Error at %s' % str(c[i]['func_name']) + error_location % (i + 1))\n\n for tip in c[i]['tips']:\n if not isinstance(tip, str):\n raise Exception('Challenge tips must be a list of string. It does not contain only strings. 
Error at %s' % str(tip) + error_location % (i+1))\n for item in c[i]['allowed_functions']:\n if not isinstance(item, str):\n raise Exception('Challenge allowed_functions must be a list of string. It does not contain only strings. Error at %s' % str(item) + error_location % (i+1))\n for item in c[i]['required_modules']:\n if not isinstance(item, str):\n raise Exception('Challenge required_modules must be a list of string. It does not contain only strings. Error at %s' % str(item) + error_location % (i+1))\n\n if i != 0:\n if c[i]['id'] == c[i-1]['id']:\n raise Exception('Challenge ids must be unique' + error_location % (i+1))\n\n#check correct solutions setup\nfrom source import solution\n\ns = solution.my_solutions\nerror_location = '. In /source/solution.py, solution %d'\n\nif not len(s) == len(c):\n raise Exception('Mismatch between the number of challenges in /source/challenges.py and the number of solutions in /source/solution.py')\n\nfor i in range(len(c)):\n c_id = c[i]['id']\n if c_id not in s:\n raise Exception(f'There are no corresponding solutions for the challenge with id {c_id}' + error_location % (c_id))\n\n cases = s[c_id]\n # compute weights\n c[i]['weight'] = ceil_digit(c[i]['max_score'] / len(cases), 4)\n\n for i in range(len(cases)):\n if not len(cases[i]) == 2:\n raise Exception('Each test case must be a tuple containing two elements: an input and an output. Error at %s' % str(cases[i]) + error_location % (c_id))\n if not isinstance(cases[i][0], tuple):\n # raise Exception('Each test case input must be contained within a tuple. The tuple can contain multiple inputs. Error at %s' % cases[i][0] + error_location % (c_id))\n cases[i][0] = (cases[i][0],)\n if i != 0:\n if len(cases[i][0]) != len(cases[i-1][0]):\n raise Exception('For a given challenge, each test case must have the same input size. Error at %s' % str(cases[i][0]) + error_location % (c_id))\n if type(cases[i][1]) != type(cases[i-1][1]):\n raise Exception('For a given challenge, each test case must have the same output type. Error at %s' % str(cases[i][1]) + error_location % (c_id))\n\n# check imports and allowed functions work\nerror_location = '. In /source/challenges.py, challenge %d'\n\n\ndef check_funcs(c, i):\n modules = c['required_modules']\n if len(modules) != 0:\n imports = 'import ' + \"\\nimport \".join(modules)\n exec(imports) # here safe to use because this is platform manager generated content\n for f in c['allowed_functions']:\n try:\n eval(f)\n except:\n print(\"Make sure that the function includes a 'module.function' declaration. 
Error at %s\" % str(f) + error_location % (c_id))\n raise\n\nfor i in range(len(c)):\n check_funcs(c[i], i)\n\ncheck_funcs(intro, 0)\n\n#check if # challenges on Db match # challenges on local version, if not log a warning\n#rationale: can't delete them because of (possible) foreign key constraints, yet\n#if have more challenges on server will display a cached copy of an old one\n\nif Challenges.query.count() > len(c)+1:\n logging.warning('Found more challenges on Db than on current version, potentially unwanted challenges will be displayed!')\n\n#add challenges and solutions to db\n\nif len(intro['allowed_functions']) > 0:\n logging.warning('Adding functions defined in the intro section to all challenges.')\n\nfor i in range(len(c)):\n #pre-process required_modules\n modules = c[i]['required_modules']\n modules.extend(intro['required_modules'])\n c[i]['allowed_functions'].extend(intro['allowed_functions'])\n imports = ''\n if len(modules) != 0:\n imports = 'import ' + \"\\nimport \".join(modules)\n\n record = Challenges.query.filter_by(challenge_id=int(c[i]['id'])).first()\n if record is None:\n new_chall = Challenges(challenge_id=c[i]['id'], specification=str(c[i]), allowed_functions=str(c[i]['allowed_functions']),\n required_modules=imports, solutions=str(s[c[i]['id']]), func_name=c[i]['func_name'].strip(),\n max_score = c[i]['max_score'], weight = c[i]['weight'], title=c[i]['title'],\n is_simulation = c[i]['is_simulation'])\n db.session.add(new_chall)\n else:\n record.specification = str(c[i])\n record.allowed_functions = str(c[i]['allowed_functions'])\n record.required_modules = imports\n record.solutions = str(s[c[i]['id']])\n record.func_name = c[i]['func_name'].strip()\n record.max_score = c[i]['max_score']\n record.weight = c[i]['weight']\n record.is_simulation = c[i]['is_simulation']\n record.title = c[i]['title']\n\n# handling intro\n\nrecord = Challenges.query.filter_by(challenge_id=-1).first()\nif record is None:\n new_chall = Challenges(challenge_id=intro['id']-1, specification=str(intro), allowed_functions='',\n required_modules= '', solutions= '', func_name= '', weight= 0, is_simulation= False,\n max_score= 0, title='')\n db.session.add(new_chall)\nelse:\n record.specification = str(intro)\n\ndb.session.commit()\n","repo_name":"guglielmogattiglio/py_submit_plat","sub_path":"setup_db.py","file_name":"setup_db.py","file_ext":"py","file_size_in_byte":9829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3126910780","text":"from pathlib import Path\nimport re\nimport pandas as pd\nclass LexicalAnalyzer:\n def __init__(self):\n self.keywords=[\n \"integer\",\"real\",\"var\",\"true\",\"false\", #types\n \"program\",\"begin\",\"end\",\"while\", \"if\",\"else\",\"then\",\"do\",\"until\",#blocklevel\n \"repeat\", \"readln\", \"write\", \"writeln\", \"writeln\",#functions\n \"or\",\"div\",\"mod\",\"and\",\"not\",\"trunc\",#operations\n ]\n self.symbols=[\n \";\",\":\",\":=\",\"(\",\")\",\"+\",\"-\",\"/\",\"*\"\n ]\n self.operators=[\n \"<\",\">\",\"<>\",\">=\",\"<=\",\n ]\n def read_file(self,filename):\n self.string = Path(filename).read_text()\ndef checkwordIdentifier(word):\n pattern = \"[_a-zA-Z][_a-zA-Z0-9]{0,30}\"\n return re.match(pattern, word, )\ndef checkFloat(word):\n pattern = \"([-+]*\\d+\\.\\d+|[-+]*\\d+)\"\n return re.match(pattern, word, )\ndef checkInt(word):\n pattern = \"[0-9]+\"\n return re.fullmatch(pattern, word, )\ndef update(tokentype,lexeme,lineNumber,position):\n return {\"Token 
Type\":tokentype,\"Lexeme\":lexeme,\"Line Number\":lineNumber,\"Position\":position}\ndef symbolUpdate(id,type):\n return {\"Identifier\":id,\"Type\":type}\ndef stringToList(la):\n word=\"\"\n lexemes=pd.DataFrame(columns=[\"Token Type\", \"Lexeme\", \"Line Number\", \"Position\"])\n symbolTable=pd.DataFrame(columns=[\"Identifier\", \"Type\"])\n line=1\n pos=-1\n identifers={}\n lastkeyword=\"\"\n lastidentifer=\"\"\n str=la.string\n i=0\n off=True\n try:\n while i=2:\n num[n] = num[n-1]*n\n while not num[n]%10:\n num[n] = num[n]//10\n cnt +=1\n ans[n] = ans[n-1]+cnt\n else:\n num[n] = 1\nN = int(input())\nnum = [0] * (501)\nnum[1] = 1\nans = [0] * (501)\n\nif N>2:\n for i in range(2,N+1):\n fact(i)\n print(ans[N])\nelse:\n print(0)\n\n","repo_name":"JiIJu/algorithm_algorithm","sub_path":"학사 지이주/2023/5월/0523/1676.py","file_name":"1676.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71491954668","text":"from typing import List, Union\nfrom urllib.parse import urlparse\n\nfrom slack_bolt import App\n\nfrom settings import PlatformSetting\n\nANONYMOUS_BOARD_CHANNEL = \"C050C70HFHN\"\nBOT_OWN_CHANNEL = \"C04QK0W6072\"\n\n\nclass Slack(PlatformSetting):\n def __init__(self, tokens: List[str]):\n \"\"\"\n If you need to add some token or secret_key for [slack or bolt_python] setup, add it here.\n\n If there is a better way, please suggest to @IT-HONGREART.\n \"\"\"\n super().__init__()\n for token_name in tokens:\n setattr(self, f\"slack_{token_name}\", self.get_slack_detail_key(f\"{token_name}\"))\n\n @staticmethod\n def check_direct_link(url):\n try:\n result = urlparse(url)\n return all([result.scheme, result.netloc])\n except ValueError:\n return False\n\n def remote_function_button(self, bot_kind=Union[dict[str, str]]):\n dict_to_list = [\n {\n \"type\": \"button\",\n \"text\": {\"type\": \"plain_text\", \"text\": description, \"emoji\": True},\n \"url\": some_value,\n }\n if self.check_direct_link(some_value)\n else {\n \"type\": \"button\",\n \"text\": {\"type\": \"plain_text\", \"text\": description, \"emoji\": True},\n \"action_id\": some_value,\n }\n for some_value, description in bot_kind.items()\n ]\n return dict_to_list\n\n\nslack_setting = Slack(\n tokens=[\n \"bot_token\",\n \"app_token\",\n \"signing_secret\",\n ]\n)\nbolt_app = App(\n token=slack_setting.slack_bot_token,\n signing_secret=slack_setting.slack_signing_secret,\n)\n","repo_name":"IT-HONGREAT/slack_bot","sub_path":"bolt_python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"72349761066","text":"#####################################################################\r\n# SmartSecurity Core Python Program #\r\n# Team: SmartSecurity #\r\n# Tyler Harclerode, Brandon Lin, Zachary Davoli, Jonathan Griffin #\r\n# CET/CSC Senior Project 2017/2018 #\r\n#####################################################################\r\n\r\n# !/usr/bin/python # Script in Python interpreter\r\nimport RPi.GPIO as GPIO # For Raspberry Pi GPIO utilization\r\nimport time\r\n\r\n# Import project specific files #\r\nimport lcdDisplay # Printing to LCD\r\nimport stepperMotor # Controlling the stepper motor\r\nimport matrixKeypad # Handing the keypad IO\r\nimport DHT11 # Temperature and humidity sensor\r\nimport PIR # Pyroelectric Infrared Motion Detector\r\nimport LEDs # LEDs for status and 
alert\r\n\r\n#####################################################################\r\n# Class Instantiations #\r\nlcd = lcdDisplay.AdafruitCharLCD() # LCD\r\nkp = matrixKeypad.Keypad() # Keypad\r\nstpr = stepperMotor.StepperMotor() # Stepper Motor\r\npir = PIR.PIR() # Motion Detector\r\ndht = DHT11.DHT11() # Temperature and humidity sensor\r\nled = LEDs.LED() # Status/Alert LEDs\r\n\r\n\r\ndef __dht_test__():\r\n print('DHT11 TEST:')\r\n\r\n dht.__init__()\r\n\r\n dht.__read_sensor__()\r\n\r\n\r\ndef __pir_test__():\r\n pir.__init__()\r\n\r\n pir.pir_test()\r\n\r\n\r\n# Cleanup resources #\r\ndef __destroy__():\r\n GPIO.cleanup()\r\n\r\n\r\ndef __main__():\r\n __dht_test__()\r\n\r\n __destroy__()\r\n\r\n __pir_test_()\r\n\r\n\r\n\r\n__main__()\r\n","repo_name":"lin3247/sp_calu_2017_2018","sub_path":"smartsectest1.py","file_name":"smartsectest1.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22118566326","text":"import torch\nimport numpy as np\nimport cv2\nfrom time import time\nfrom ALPR import ocr\nimport easyocr\nfrom ALPR import freq\n\n\ndef get_video_capture(filename):\n \"\"\"\n Creates a new video streaming object to extract video frame by frame to make prediction on.\n :return: opencv2 video capture object, with lowest quality frame available for video.\n \"\"\"\n\n return cv2.VideoCapture(filename)\n\n\ndef load_model(model_name):\n \"\"\"\n Loads Yolo5 model from pytorch hub.\n :return: Trained Pytorch model.\n \"\"\"\n if model_name:\n model = torch.hub.load('ultralytics/yolov5', 'custom', path=model_name, force_reload=True)\n else:\n model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)\n return model\n\n\ndef score_frame(frame, model, device):\n \"\"\"\n Takes a single frame as input, and scores the frame using yolo5 model.\n :param frame: input frame in numpy/list/tuple format.\n :return: Labels and Coordinates of objects detected by model in the frame.\n \"\"\"\n model.to(device)\n frame = [frame]\n results = model(frame)\n labels, cord = results.xyxyn[0][:, -1], results.xyxyn[0][:, :-1]\n return labels, cord\n\n\ndef class_to_label(x, classes):\n \"\"\"\n For a given label value, return corresponding string label.\n :param x: numeric label\n :return: corresponding string label\n \"\"\"\n return classes[int(x)]\n\n\ndef createPadding(src, top, bottom, left, right):\n '''\n will alter each frame to add padding to the left so there is constant space for plate ocr\n used to avoid flickering\n will also pad enough so that all sizes of plates can appear\n in case no plate is detected or the confidence is very low it will remain black\n '''\n padded = cv2.copyMakeBorder(src=src,\n top=top,\n bottom=bottom,\n left=left,\n right=right,\n borderType=cv2.BORDER_CONSTANT,\n value=[0, 0, 0]\n )\n return padded\n\n\ndef plot_boxes(results, frame, c, reader):\n \"\"\"\n Takes a frame and its results as input, and plots the bounding boxes and label on to the frame.\n :param results: contains labels and coordinates predicted by model on the given frame.\n :param frame: Frame which has been scored.\n :return: Frame with bounding boxes and labels ploted on it.\n \"\"\"\n labels, cord = results\n n = len(labels)\n x_shape, y_shape = frame.shape[1], frame.shape[0]\n if n > 0:\n # for every license plate identified by the model\n for i in range(n):\n row = cord[i]\n # if we are sure it is a license plate\n if row[4] >= 0.3:\n # get the coordinates + a bit of padding\n 
x1, y1, x2, y2 = int(row[0] * x_shape - 5), int(row[1] * y_shape - 10), int(row[2] * x_shape + 5), int(\n row[3] * y_shape + 10)\n bgr = (0, 0, 255)\n\n if x1 < 0:\n x1 = 0\n if y1 < 0:\n y1 = 0\n\n # crop the original picture and keep only the license plate\n cropped = frame[y1:y2, x1:x2]\n # run the license plate image through the OCR and get the black and white mask/results back\n ocr_results = ocr.main(cropped, c)\n ocr_results1 = reader.readtext(ocr_results)\n ocr.writeResults(ocr_results1, 'ALPR\\Results\\Recognized.txt')\n\n cv2.rectangle(frame, (x1, y1), (x2, y2), bgr, 1)\n try:\n text = ocr_results1[i][1] if ocr_results1 else ''\n except:\n text = ''\n cv2.putText(frame, text, (x1, y1), cv2.FONT_HERSHEY_TRIPLEX, 0.9, bgr, 2)\n\n c += 1\n\n return frame, c\n\n\ndef licencePlateDetection(\n model_name='C:\\\\Users\\\\MyrsiniasS\\\\OneDrive - Titan Cement Company SA\\\\Desktop\\\\pythonProject\\\\Traffic_monitor\\\\ALPR\\\\best.pt',\n filename=\"C:\\\\Users\\\\MyrsiniasS\\\\OneDrive - Titan Cement Company SA\\\\Desktop\\\\pythonProject\\\\Traffic_monitor\\\\ALPR\\\\IMG_8716.mp4\"):\n \"\"\"\n This function is called when class is executed, it runs the loop to read the video frame by frame,\n and write the output into a new file.\n :return: void\n \"\"\"\n\n model = load_model(model_name)\n video = get_video_capture(filename)\n classes = model.names\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n print(\"Using Device: \", device)\n\n cap = video\n\n ocr.cleandir()\n reader = easyocr.Reader(['en'])\n\n c = 0\n i = 0\n gif = []\n while True:\n i += 1\n try:\n (success, frame) = cap.read()\n assert success\n\n except:\n print('video ended')\n break\n\n start_time = time()\n results = score_frame(frame, model, device)\n\n frame, c = plot_boxes(results, frame, c, reader)\n\n end_time = time()\n fps = 1 / np.round(end_time - start_time, 2)\n\n cv2.putText(frame, f'FPS: {int(fps)}', (20, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0, 0, 255), 1)\n cv2.putText(frame, f'Frame: {int(i)}', (100, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0, 0, 255), 1)\n\n success, jpeg = cv2.imencode('.jpeg', frame)\n im_encoded = jpeg.tobytes()\n im_encoded = b\"--frame\\r\\n\" b\"Content-Type: image/jpeg\\r\\n\\r\\n\" + im_encoded + b\"\\r\\n\\r\\n\"\n yield im_encoded\n\n if cv2.waitKey(20) & 0xFF == ord('\\x1b'):\n print('video stopped')\n break\n\n cap.release()\n cv2.destroyAllWindows()\n freq.find_frequency()\n\n\nif __name__ == '__main__':\n licencePlateDetection()\n","repo_name":"SteveMyrsinias/CapstoneProject","sub_path":"ALPR/tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42975301007","text":"from django.shortcuts import render, redirect\nfrom .forms import CustomUserCreationForm, CustomAuthenticationForm, ProfileForm, MessageForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, JsonResponse\nfrom .models import Messages, Profile, UserLogs\nfrom django.views import View\nfrom django.contrib.auth.models import User\nfrom datetime import datetime\n\n\n# Create your views here.\n\n\"\"\"\n Если у тебя будет много вьюшек лучше создать отдельную папку views\n и раскидать их по файлам.\n\n По стилю кода +- нормально. 
Чекни ссылку:\n Чекни https://peps.python.org/pep-0008/\n\"\"\"\n\n\ndef register(request):\n form = CustomUserCreationForm() # <- A?\n if request.user.is_authenticated:\n return redirect('chat')\n else:\n if request.method == 'POST':\n form = CustomUserCreationForm(request.POST) # <- A?\n if form.is_valid():\n user = form.save()\n Profile.objects.create(\n user=user,\n )\n user_name = form.cleaned_data.get('username')\n messages.success(request, 'Аккаунт создан для ' + user_name)\n user = authenticate(\n username=form.cleaned_data['username'],\n password=form.cleaned_data['password1'],\n )\n login(request, user)\n\n # Когда создаёшь объект через objects.create его не обязательно сохранять и присваивать переменной\n # если ты не планируешь с ним дальше работать\n UserLogs.objects.create(user=user, dateLoged=datetime.now())\n return redirect('settings')\n\n context = {'form': form}\n return render(request, 'main/register.html', context)\n\n\ndef login_user(request):\n form = CustomAuthenticationForm()\n context = {'form': form}\n if request.user.is_authenticated:\n return redirect('chat')\n else:\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n # Когд�� создаёшь объект через objects.create его не обязательно сохранять и присваивать переменной\n # если ты не планируешь с ним дальше работать\n UserLogs.objects.create(user=user, dateLoged=datetime.now())\n return redirect('chat')\n else:\n messages.info(request, 'Неверный логин или пароль')\n return render(request, 'main/login.html', context)\n\n return render(request, 'main/login.html', context)\n\n\ndef logout_user(request):\n logout(request)\n return redirect('login')\n\n\n@login_required(login_url='login')\ndef index(request):\n if request.method == 'POST':\n form = MessageForm(request.POST) # <- А?\n if form.is_valid():\n stock = form.save(commit=False)\n stock.user = request.user\n form.save()\n return redirect('chat')\n form = MessageForm() # <- А?\n context = {'form': form}\n return render(request, 'main/main.html', context)\n\n\n@login_required(login_url='login')\ndef settings(request):\n profile = request.user.profile\n if request.method == 'POST':\n form = ProfileForm(request.POST, request.FILES, instance=profile) # <- А?\n if form.is_valid():\n form.save()\n return redirect('chat')\n form = ProfileForm() # <- А?\n logs = UserLogs.objects.filter(user=request.user)\n context = {'form': form, 'logs': logs}\n return render(request, 'main/settings.html', context)\n\n\n@login_required(login_url='login')\ndef sendmessage(request):\n text = request.POST['text']\n user = request.user\n # Когда создаёшь объект через objects.create его не обязательно сохранять и присваивать переменной\n # если ты не планируешь с ним дальше работать\n Messages.objects.create(user=user, text=text)\n # <- бесполезная строка\n return HttpResponse('message sent')\n\n\nclass getmessage(View):\n def get(self, request):\n messages_all = Messages.objects.all() # ?? Get all book objects from the database ??\n\n messages_serialized_data = [] # ?? 
to store serialized data ??\n for m in messages_all:\n user_id = User.objects.get(id=m.user_id) # Но это же не user_id а user или нет??)\n username = user_id.username\n profile = Profile.objects.get(user=m.user_id) # Но это же не profile_id а profile или нет??)\n profile_pic_url = profile.profilePic.url # А это ссылка на картинку) Правильней было бы назвать profile_pic_url\n messages_serialized_data.append({\n 'id': m.id,\n 'user_id': m.user_id,\n 'text': m.text,\n 'username': username,\n 'user_profile': profile_pic_url,\n 'date_created': m.dateCreated.strftime('%Y-%m-%d %H:%M'),\n 'changed': m.changed,\n 'dateChanged': m.dateChanged.strftime('%Y-%m-%d %H:%M'),\n 'textChanged': m.textChanged,\n })\n\n data = {\n 'messagesList': messages_serialized_data\n }\n return JsonResponse(data)\n\n\n@login_required(login_url='login')\ndef deletemessage(request):\n message_id = request.POST['id']\n user = request.user.id\n # А может просто Messages.objects.filter(...).delete() ?\n Messages.objects.filter(id=message_id, user=user).delete()\n return HttpResponse('message sent')\n\n\n@login_required(login_url='login')\ndef updatemessage(request):\n text = request.POST['text']\n message_id = request.POST['id']\n user = request.user\n # А может просто Messages.objects.get(...).update(...) ?\n Messages.objects.filter(id=message_id, user=user).update(\n textChanged=text,\n changed=True,\n dateChanged=datetime.now(),\n )\n return HttpResponse('message saved')\n\n","repo_name":"Zhmurikon/testChatMaskom","sub_path":"chatProject/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72235263468","text":"import weakref\n\nfrom boto.ec2.zone import Zone\nfrom boto.ec2.elb.listelement import ListElement\nfrom boto.resultset import ResultSet\nfrom boto.ec2.autoscale.trigger import Trigger\nfrom boto.ec2.autoscale.request import Request\n\nclass Instance(object):\n def __init__(self, connection=None):\n self.connection = connection\n self.instance_id = ''\n\n def __repr__(self):\n return 'Instance:%s' % self.instance_id\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'InstanceId':\n self.instance_id = value\n else:\n setattr(self, name, value)\n\n\nclass AutoScalingGroup(object):\n def __init__(self, connection=None, group_name=None,\n availability_zone=None, launch_config=None,\n availability_zones=None,\n load_balancers=None, cooldown=0,\n min_size=None, max_size=None):\n \"\"\"\n Creates a new AutoScalingGroup with the specified name.\n\n You must not have already used up your entire quota of\n AutoScalingGroups in order for this call to be successful. Once the\n creation request is completed, the AutoScalingGroup is ready to be\n used in other calls.\n\n :type name: str\n :param name: Name of autoscaling group.\n\n :type availability_zone: str\n :param availability_zone: An availability zone. 
DEPRECATED - use the\n availability_zones parameter, which expects\n a list of availability zone\n strings\n\n :type availability_zone: list\n :param availability_zone: List of availability zones.\n\n :type launch_config: str\n :param launch_config: Name of launch configuration name.\n\n :type load_balancers: list\n :param load_balancers: List of load balancers.\n\n :type minsize: int\n :param minsize: Minimum size of group\n\n :type maxsize: int\n :param maxsize: Maximum size of group\n\n :type cooldown: int\n :param cooldown: Amount of time after a Scaling Activity completes\n before any further scaling activities can start.\n\n :rtype: tuple\n :return: Updated healthcheck for the instances.\n \"\"\"\n self.name = group_name\n self.connection = connection\n self.min_size = min_size\n self.max_size = max_size\n self.created_time = None\n self.cooldown = cooldown\n self.launch_config = launch_config\n if self.launch_config:\n self.launch_config_name = self.launch_config.name\n else:\n self.launch_config_name = None\n self.desired_capacity = None\n lbs = load_balancers or []\n self.load_balancers = ListElement(lbs)\n zones = availability_zones or []\n self.availability_zone = availability_zone\n self.availability_zones = ListElement(zones)\n self.instances = None\n\n def __repr__(self):\n return 'AutoScalingGroup:%s' % self.name\n\n def startElement(self, name, attrs, connection):\n if name == 'Instances':\n self.instances = ResultSet([('member', Instance)])\n return self.instances\n elif name == 'LoadBalancerNames':\n return self.load_balancers\n elif name == 'AvailabilityZones':\n return self.availability_zones\n else:\n return\n\n def endElement(self, name, value, connection):\n if name == 'MinSize':\n self.min_size = value\n elif name == 'CreatedTime':\n self.created_time = value\n elif name == 'Cooldown':\n self.cooldown = value\n elif name == 'LaunchConfigurationName':\n self.launch_config_name = value\n elif name == 'DesiredCapacity':\n self.desired_capacity = value\n elif name == 'MaxSize':\n self.max_size = value\n elif name == 'AutoScalingGroupName':\n self.name = value\n else:\n setattr(self, name, value)\n\n def set_capacity(self, capacity):\n \"\"\" Set the desired capacity for the group. \"\"\"\n params = {\n 'AutoScalingGroupName' : self.name,\n 'DesiredCapacity' : capacity,\n }\n req = self.connection.get_object('SetDesiredCapacity', params,\n Request)\n self.connection.last_request = req\n return req\n\n def update(self):\n \"\"\" Sync local changes with AutoScaling group. \"\"\"\n return self.connection._update_group('UpdateAutoScalingGroup', self)\n\n def shutdown_instances(self):\n \"\"\" Convenience method which shuts down all instances associated with\n this group.\n \"\"\"\n self.min_size = 0\n self.max_size = 0\n self.update()\n\n def get_all_triggers(self):\n \"\"\" Get all triggers for this auto scaling group. \"\"\"\n params = {'AutoScalingGroupName' : self.name}\n triggers = self.connection.get_list('DescribeTriggers', params,\n [('member', Trigger)])\n\n # allow triggers to be able to access the autoscale group\n for tr in triggers:\n tr.autoscale_group = weakref.proxy(self)\n\n return triggers\n\n def delete(self):\n \"\"\" Delete this auto-scaling group. 
\"\"\"\n params = {'AutoScalingGroupName' : self.name}\n return self.connection.get_object('DeleteAutoScalingGroup', params,\n Request)\n\n def get_activities(self, activity_ids=None, max_records=100):\n \"\"\"\n Get all activies for this group.\n \"\"\"\n return self.connection.get_all_activities(self, activity_ids, max_records)\n\n","repo_name":"LinkedInAttic/indextank-service","sub_path":"api/boto/ec2/autoscale/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","stars":382,"dataset":"github-code","pt":"37"} +{"seq_id":"42087496092","text":"n = int(input())\ndata = [int(input()) for _ in range(n)]\nvalue = max(data)\n\nstack = []\nresult = []\ntmp = []\nnum = 1\nfor i in data: # 4 3 6 8 7 5 2 1\n while num <= i:\n stack.append(num)\n result.append(\"+\")\n num += 1\n if len(stack) > 0:\n if i == stack[-1]:\n tmp.append(stack.pop())\n result.append(\"-\")\nif tmp == data:\n for i in result:\n print(i)\nelse:\n print(\"NO\")","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/solution9.py","file_name":"solution9.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"7222502938","text":"\"\"\"\nDesign a data structure that supports adding new words and finding if a string matches any previously added string.\n\nImplement the WordDictionary class:\n\nWordDictionary() Initializes the object.\nvoid addWord(word) Adds word to the data structure, it can be matched later.\nbool search(word) Returns true if there is any string in the data structure that matches word or false otherwise. word may contain dots '.' where dots can be matched with any letter.\n \n\nExample:\n\nInput\n[\"WordDictionary\",\"addWord\",\"addWord\",\"addWord\",\"search\",\"search\",\"search\",\"search\"]\n[[],[\"bad\"],[\"dad\"],[\"mad\"],[\"pad\"],[\"bad\"],[\".ad\"],[\"b..\"]]\nOutput\n[null,null,null,null,false,true,true,true]\n\nExplanation\nWordDictionary wordDictionary = new WordDictionary();\nwordDictionary.addWord(\"bad\");\nwordDictionary.addWord(\"dad\");\nwordDictionary.addWord(\"mad\");\nwordDictionary.search(\"pad\"); // return False\nwordDictionary.search(\"bad\"); // return True\nwordDictionary.search(\".ad\"); // return True\nwordDictionary.search(\"b..\"); // return True\n \n\nConstraints:\n\n1 <= word.length <= 25\nword in addWord consists of lowercase English letters.\nword in search consist of '.' 
or lowercase English letters.\nThere will be at most 3 dots in word for search queries.\nAt most 104 calls will be made to addWord and search.\n\n\"\"\"\n\nclass TrieNode:\n def __init__(self):\n self.children = {}\n self.is_word = False\n\n\n\nclass WordDictionary(object):\n\n\n def __init__(self):\n self.root = TrieNode()\n\n\n def addWord(self, word):\n node = self.root\n for char in word:\n if char not in node.children:\n node.children[char] = TrieNode()\n node = node.children[char]\n node.is_word = True\n\n def search(self, word):\n def search_helper(node, i):\n if i == len(word):\n return node.is_word\n if word[i] == '.':\n for child in node.children.values():\n if search_helper(child, i+1):\n return True\n elif word[i] in node.children:\n return search_helper(node.children[word[i]], i+1)\n return False\n\n return search_helper(self.root, 0)\n \n\n\n# Your WordDictionary object will be instantiated and called as such:\n# obj = WordDictionary()\n# obj.addWord(word)\n# param_2 = obj.search(word)","repo_name":"jnnganga/Daily-Code","sub_path":"leetcode/wordAddSearch.py","file_name":"wordAddSearch.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36667678719","text":"import pytest\n\nfrom web3 import (\n Web3,\n)\nfrom web3.middleware import (\n construct_error_generator_middleware,\n construct_fixture_middleware,\n construct_result_generator_middleware,\n)\nfrom web3.providers.base import (\n BaseProvider,\n)\n\n\nclass DummyProvider(BaseProvider):\n def make_request(self, method, params):\n raise NotImplementedError(f\"Cannot make request for {method}:{params}\")\n\n\n@pytest.fixture\ndef w3():\n return Web3(provider=DummyProvider(), middlewares=[])\n\n\n@pytest.mark.parametrize(\n \"method,expected\",\n (\n (\"test_endpoint\", \"value-a\"),\n (\"not_implemented\", NotImplementedError),\n ),\n)\ndef test_fixture_middleware(w3, method, expected):\n w3.middleware_onion.add(construct_fixture_middleware({\"test_endpoint\": \"value-a\"}))\n\n if isinstance(expected, type) and issubclass(expected, Exception):\n with pytest.raises(expected):\n w3.manager.request_blocking(method, [])\n else:\n actual = w3.manager.request_blocking(method, [])\n assert actual == expected\n\n\n@pytest.mark.parametrize(\n \"method,expected\",\n (\n (\"test_endpoint\", \"value-a\"),\n (\"not_implemented\", NotImplementedError),\n ),\n)\ndef test_result_middleware(w3, method, expected):\n def _callback(method, params):\n return params[0]\n\n w3.middleware_onion.add(\n construct_result_generator_middleware(\n {\n \"test_endpoint\": _callback,\n }\n )\n )\n\n if isinstance(expected, type) and issubclass(expected, Exception):\n with pytest.raises(expected):\n w3.manager.request_blocking(method, [expected])\n else:\n actual = w3.manager.request_blocking(method, [expected])\n assert actual == expected\n\n\n@pytest.mark.parametrize(\n \"method,expected\",\n (\n (\"test_endpoint\", \"value-a\"),\n (\"not_implemented\", NotImplementedError),\n ),\n)\ndef test_error_middleware(w3, method, expected):\n def _callback(method, params):\n return params[0]\n\n w3.middleware_onion.add(\n construct_error_generator_middleware(\n {\n \"test_endpoint\": _callback,\n }\n )\n )\n\n if isinstance(expected, type) and issubclass(expected, Exception):\n with pytest.raises(expected):\n w3.manager.request_blocking(method, [expected])\n else:\n with pytest.raises(ValueError) as err:\n w3.manager.request_blocking(method, [expected])\n assert 
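The `WordDictionary` record above implements wildcard search over a trie. A compact, runnable restatement of the same idea, exercised with the calls from the problem statement (`add_word` is a renamed stand-in for `addWord`):

```python
class TrieNode:
    def __init__(self):
        self.children = {}
        self.is_word = False

class WordDictionary:
    def __init__(self):
        self.root = TrieNode()

    def add_word(self, word):
        node = self.root
        for ch in word:
            node = node.children.setdefault(ch, TrieNode())
        node.is_word = True

    def search(self, word):
        def helper(node, i):
            if i == len(word):
                return node.is_word
            if word[i] == ".":  # wildcard: try every child branch
                return any(helper(child, i + 1) for child in node.children.values())
            nxt = node.children.get(word[i])
            return helper(nxt, i + 1) if nxt else False
        return helper(self.root, 0)

wd = WordDictionary()
for w in ("bad", "dad", "mad"):
    wd.add_word(w)
assert not wd.search("pad")
assert wd.search("bad") and wd.search(".ad") and wd.search("b..")
```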
expected in str(err)\n","repo_name":"ethereum/web3.py","sub_path":"tests/core/middleware/test_fixture_middleware.py","file_name":"test_fixture_middleware.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":4510,"dataset":"github-code","pt":"37"} +{"seq_id":"29971104041","text":"from django.core.management.base import BaseCommand\nimport json\n\nfrom core.services.deck_utils import DumpDeckListSerializer\n\n\nclass Command(BaseCommand):\n help = 'Loads deck data from JSON file'\n\n def add_arguments(self, parser):\n parser.add_argument('tempfile', nargs='?', default=None, help='Use temporary dump file for tests')\n\n def handle(self, *args, **options):\n temp = options['tempfile']\n path = temp if temp else 'core/management/commands/decks.json'\n try:\n data = load_deck_data(path)\n except FileNotFoundError:\n self.stdout.write('(!) Error: Cannot find decks.json')\n return\n serializer = DumpDeckListSerializer(data=data, many=True)\n if serializer.is_valid():\n serializer.save()\n else:\n self.stdout.write(f'Invalid JSON dump\\n{serializer.errors}')\n\n\ndef load_deck_data(path: str):\n with open(path, 'r', encoding='utf-8') as f:\n data = json.load(f)\n\n return data\n","repo_name":"ysaron/NeuraHS","sub_path":"neura_hs/core/management/commands/loaddecks.py","file_name":"loaddecks.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6745703221","text":"import argparse\nimport utils\n\nparser = argparse.ArgumentParser(description='Creates a video from a set of static frames')\nparser.add_argument('--sequence', required=True, help='Path to the folder containing the frames')\nparser.add_argument('--reference-frame', type=int, default=0, help='Index of the reference frame')\nparser.add_argument('--alpha', type=int, default=50, help='Transparency level')\nparser.add_argument('--frame-step', type=int, default=1, help='Space between consecutive frame')\nparser.add_argument('--dest_folder', type=str, default='.', help='Path where the resulting video should be saved')\nparser.add_argument('--filename', type=str, help='Force a name for the output file')\nargs = parser.parse_args()\n\n# Create a FramesToVideo object\noverlap_frames = utils.OverlapFrames(args.reference_frame, args.alpha, args.frame_step)\n\n# Add the sequence to the object\noverlap_frames.add_sequence_from_path(args.sequence)\n\n# Save the video\noverlap_frames.save(args.dest_folder, args.filename)","repo_name":"AlomdaElmasry/master_thesis","sub_path":"scripts/frames_overlap.py","file_name":"frames_overlap.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42209670512","text":"import heapq\nfrom sys import stdin\ninput = stdin.readline\nINF = float(\"inf\")\n\n\ndef dijkstra(start, end):\n dist = [INF for _ in range(N+1)]\n dist[start] = 0\n\n q = []\n heapq.heappush(q, (0, start))\n\n while q:\n time, village = heapq.heappop(q)\n\n for ntime, nviliage in graph[village]:\n ntime += time\n if dist[nviliage] > ntime:\n dist[nviliage] = ntime\n heapq.heappush(q, (ntime, nviliage))\n\n return dist\n\nN, M, X = map(int, input().split(\" \"))\ngraph = [[] for _ in range(N+1)]\nfor i in range(M):\n v, u, t = map(int, input().split(\" \"))\n graph[v].append((t, u))\n\n# 파티 장소로 가기\nmax_time = -1\ntransfer_times = [[0] for _ in range(N+1)]\nfor i in range(1, N+1):\n if i == X:\n 
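The web3.py middleware tests above reuse one parametrized test for both success values and expected exception classes, branching on `issubclass(expected, Exception)`. A minimal sketch of that pattern outside web3 (the function and test names are invented for illustration):

```python
import pytest

def parse_positive(x):
    value = int(x)          # may raise ValueError
    if value <= 0:
        raise ValueError(x)
    return value

@pytest.mark.parametrize("raw,expected", [
    ("3", 3),
    ("-1", ValueError),     # an exception *class* doubles as the expectation
    ("abc", ValueError),
])
def test_parse_positive(raw, expected):
    # Same dispatch trick as the middleware tests above: if the expected
    # value is an Exception subclass, assert that it is raised.
    if isinstance(expected, type) and issubclass(expected, Exception):
        with pytest.raises(expected):
            parse_positive(raw)
    else:
        assert parse_positive(raw) == expected
```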
continue\n transfer_times[i] = dijkstra(i, X)[X]\n\ncomeback_times = dijkstra(X, None)\n# 집으로 돌아가기\nfor i in range(1, N+1):\n if i == X:\n continue\n transfer_times[i] += comeback_times[i]\n max_time = max(transfer_times[i], max_time)\n\nprint(max_time)","repo_name":"ssoso27/Smoothie2","sub_path":"pythAlgo/baekjoon/party2.py","file_name":"party2.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17260603870","text":"import json\n\n\ndef load_data():\n data = []\n for fname in [\n \"results_variants_1D.json\",\n \"results_variants_2D.json\",\n \"results_variants_3D.json\",\n ]:\n with open(fname, \"r\") as src:\n data.append(json.load(src))\n return data\n\n\ndef transpose_data(data):\n # exec times per dataset per backend\n backend_ds_res = {}\n\n for ds, res in data.items():\n if \"impl\" in ds:\n continue\n dsname = \"_\".join(ds.split(\"_\")[:-3])\n for backend, perfs in res.items():\n if \"Vertices\" in backend:\n continue\n\n if \"para\" in perfs:\n val = perfs[\"para\"][\"pers\"]\n else:\n val = perfs[\"timeout\"]\n backend_ds_res.setdefault(backend, {}).update({dsname: val})\n\n return backend_ds_res\n\n\ndef main():\n data = load_data()\n\n res = [{}, {}, {}]\n\n for dim in range(3):\n backend_ds_res = transpose_data(data[dim])\n for bk, perf in backend_ds_res.items():\n mean = 0.0\n for val in perf.values():\n mean += val\n if bk == \"DiscreteMorseSandwich\":\n print(perf)\n mean /= len(perf)\n res[dim][bk] = mean\n\n for i, dim_res in enumerate(res):\n print(f\"{i + 1}D:\")\n for k, v in sorted(dim_res.items()):\n print(f\" {k}: {v} s\")\n\n mean_pc_speedup = [{}, {}, {}]\n for dim in range(3):\n for ds, bk in data[dim].items():\n if \"impl\" in ds:\n continue\n dms_val = bk[\"DiscreteMorseSandwich\"][\"para\"][\"pers\"]\n if \"para\" in bk[\"PairCells\"]:\n pc_val = bk[\"PairCells\"][\"para\"][\"pers\"]\n mean_pc_speedup[dim][ds] = pc_val / dms_val\n\n for i, dim_res in enumerate(mean_pc_speedup):\n print(f\"{i + 1}D: ({len(dim_res)} datasets without PairCells timeout)\")\n mean = sum(dim_res.values()) / len(dim_res.values())\n print(f\" Mean DiscreteMorseSandwich speedup over PairCells: {mean}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pierre-guillou/pdiags_bench","sub_path":"plots/variants_efficiency.py","file_name":"variants_efficiency.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"30747265780","text":"\"\"\"add Description column to Book table\n\nRevision ID: 4fab784f33c8\nRevises: 7b557266895e\nCreate Date: 2021-05-29 10:47:07.023811\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4fab784f33c8'\ndown_revision = '7b557266895e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('book', sa.Column('description', sa.String(), nullable=True))\n op.create_index(op.f('ix_book_description'), 'book', ['description'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
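The `party2.py` record above runs heap-based Dijkstra once per village; the core is worth isolating. A self-contained sketch with a dictionary adjacency list (the graph shape and weights are illustrative):

```python
import heapq

def dijkstra(graph, start):
    """graph: adjacency list {node: [(weight, neighbour), ...]}."""
    dist = {node: float("inf") for node in graph}
    dist[start] = 0
    queue = [(0, start)]
    while queue:
        d, node = heapq.heappop(queue)
        if d > dist[node]:          # stale heap entry, already settled cheaper
            continue
        for w, nxt in graph[node]:
            nd = d + w
            if nd < dist[nxt]:
                dist[nxt] = nd
                heapq.heappush(queue, (nd, nxt))
    return dist

graph = {1: [(4, 2)], 2: [(1, 3)], 3: [], 4: [(2, 2)]}
print(dijkstra(graph, 1))  # {1: 0, 2: 4, 3: 5, 4: inf}
```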
###\n    op.drop_index(op.f('ix_book_description'), table_name='book')\n    op.drop_column('book', 'description')\n    # ### end Alembic commands ###\n","repo_name":"TalalMahmoodChaudhry/FastAPI-Postgres-Kubernetes","sub_path":"alembic/versions/4fab784f33c8_add_description_column_to_book_table.py","file_name":"4fab784f33c8_add_description_column_to_book_table.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"72777135466","text":"# With a 'for' loop, both the body and the else clause are executed (when there is no break).\nkhanna = ['roti','sabzi','chawal']\n# for item in khanna:\n#     print(item)\n#\n# else:\n#     print('items are printed.')\n\n\n\n# for item in khanna:\n#     print(item)\n#     break\n#\n# else:\n#     print('items are printed.')\n\n\nfor item in khanna:\n    if item == 'roti':\n        break\n\nelse:\n    print('items are printed.')\n\n# In general, only one of the branches is executed.\n\n# if khanna[0] == 'roti':\n#     print('present')\n# else:\n#     print('not present ')","repo_name":"KojoAning/PYHTON_PRACTICE","sub_path":"FOR ke sath ELSE.py","file_name":"FOR ke sath ELSE.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"14913960846","text":"import numpy as np\nimport pandas as pd\nimport operator\nimport json\nfrom pprint import pprint\nimport xml.etree.ElementTree as ET\nimport xmljson\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pprint import pprint\n\n\ntoken = 'AQAAAABXjzIIAAdSVYTRaTUWbUtzlhqF3lZc5uM'\nheaders = {'Authorization': 'OAuth ' + token}\nparams = {'metrics': 'ym:s:visits,ym:s:pageviews',\n          'dimensions': 'ym:s:referer',\n          'date1': '2019-06-01',\n          'date2': '2019-06-10',\n          'ids': 30177909}\n\nresponse = requests.get('https://api-metrika.yandex.net/stat/v1/data', params=params, headers=headers)\n# print(response.status_code)\n\nmetrika_data = response.json()\n# pprint(metrika_data)\n\n#print(metrika_data.keys())\n#print(metrika_data['total_rows'])\n#pprint(metrika_data['query'])\n#pprint(metrika_data['data'])\n\nprint('-------------------------------------------------------------------------------------------------------')\n\n# Create an empty list to collect the dictionaries in the new format\nresult = []\n# Start iterating over the items of the old report (data section only)\nfor data_item in metrika_data['data']:\n    # Create a dictionary to store the current data in the new format\n    new_dict = {}\n    # Refer to the report section that contains the metadata\n    # Iterate over the grouping names used in the report\n    for i, dimension in enumerate(data_item['dimensions']):\n        # Create a key in the new dictionary for each grouping\n        # and set the value for each key\n        new_dict[metrika_data['query']['dimensions'][i]] = dimension['name']\n    # Perform the same steps for the metrics\n    for i, metric in enumerate(data_item['metrics']):\n        new_dict[metrika_data['query']['metrics'][i]] = metric\n    # Append the created dictionary to the final list result\n    result.append(new_dict)\n#pprint(result)\n\nprint('-------------------------------------------------------------------------------------------------------')\n\nparams = {'metrics': 'ym:s:users',\n          'dimensions': 'ym:s:date',\n          'date1': '2019-05-01',\n          'date2': '2019-05-31',\n          'ids': 30177909}\n\nresponse = requests.get('https://api-metrika.yandex.net/stat/v1/data', params=params, headers=headers)\n# print(response.status_code)\n\nmetrika_data = response.json()\nlst = 
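The short record above demonstrates Python's for/else: the else block runs only when the loop completes without hitting a break. A runnable sketch of both cases (the list contents are illustrative):

```python
fruits = ["apple", "banana", "cherry"]

# Without a break, the else block always runs after the loop finishes.
for item in fruits:
    print(item)
else:
    print("loop completed without break")

# With a break, the else block is skipped.
for item in fruits:
    if item == "apple":
        break
else:
    print("never reached: the loop broke out early")
```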
metrika_data['data']\n#pprint(lst)\n\nres_lst = list(map(lambda x: x['metrics'][0], lst))\n#pprint(res_lst)\n\n# res = round(np.mean(res_lst), 2)\nres = sum(res_lst)/31.0\n#print(res)\n\nprint('-------------------------------------------------------------------------------------------------------')\n\nparams = {'metrics': 'ym:s:visits',\n 'dimensions': 'ym:s:lastSearchPhrase',\n 'date1': '2019-05-01',\n 'date2': '2019-05-31',\n 'ids': 30177909}\n\nresponse = requests.get('https://api-metrika.yandex.net/stat/v1/data', params=params, headers=headers)\n# print(response.status_code)\n\nmetrika_data = response.json()\nlst = metrika_data['data']\npprint(lst)\n\nres_lst = list(filter(lambda x: 'Python' in x['dimensions'][0]['name'], lst))\npprint(res_lst)\n\n# res_lst = list(map(lambda x: x['dimensions'][0]['name'], lst))\n# res_lst2 = list(map(lambda x: x.lower(), res_lst))\n# res_lst3 = list(filter(lambda x: 'python' in x, res_lst2))\n\n\n# res = round(np.mean(res_lst), 2)\n# res = sum(res_lst)/31.0\n# print(res)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"inbruk/Data_Processing_Tutorials_And_Examples","sub_path":"yandex_metrics.py","file_name":"yandex_metrics.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71811984428","text":"#! /usr/bin/python\n\nfrom scapy.all import *\nimport sys\nimport os\nfrom colorama import Fore\nfrom datetime import datetime\n\n\n\ntry:\n interface = input(\"[?] Interface: \")\n print(\"[!] Listening...\")\nexcept KeyboardInterrupt:\n print(\"[!] User Requested Shutdown...\")\n print(\"\\n[!] Exiting...\")\n sys.exit(1)\n\ndef querysniff(pkt):\n patterns = {\n \"tiktok\": \"[i] User opened tiktok\",\n \"instagram\": \"[i] User opened instagram\",\n \"twitter\": \"[i] User opened twitter\",\n \"contacts.icloud.com\": \"[i] User opened the contacts app\",\n \"guzzoni\": \"[i] User opened/closed their phone\"\n }\n\n if IP in pkt:\n ip_src = pkt[IP].src\n ip_dst = pkt[IP].dst\n last = \"\"\n if pkt.haslayer(DNS) and pkt.getlayer(DNS).qr == 0 and str(pkt.getlayer(DNS).qd.qname) != last:\n now = datetime.now()\n time_str = now.strftime(\"%H:%M:%S\")\n\n qname = str(pkt.getlayer(DNS).qd.qname)\n for webSite, action in patterns.items():\n if webSite in qname:\n print(\"[\", time_str, \"]\", Fore.GREEN, action)\n break # Exit the loop after the first match\n print(\"[\",time_str,\"]\", str(ip_src) , \" -> \" , str(ip_dst) , \" : \" , \"(\" + str(pkt.getlayer(DNS).qd.qname) , \")\")\n last = str(pkt.getlayer(DNS).qd.qname)\n\nsniff(iface = interface,filter = \"port 53\", prn = querysniff, store = 0)\nprint(\"\\n[!] 
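The Yandex Metrika record above reshapes the stat API response by pairing each row's dimensions and metrics with the names recorded in the query block. A sketch of that transformation against a hard-coded sample payload (the payload shape follows the record; the values are invented, so no network call or token is needed):

```python
sample = {
    "query": {"dimensions": ["ym:s:referer"],
              "metrics": ["ym:s:visits", "ym:s:pageviews"]},
    "data": [
        {"dimensions": [{"name": "https://example.com"}], "metrics": [12.0, 30.0]},
        {"dimensions": [{"name": "direct"}], "metrics": [7.0, 9.0]},
    ],
}

def flatten(report):
    rows = []
    for item in report["data"]:
        row = {}
        # Pair every dimension/metric value with its name from the query block.
        for name, dim in zip(report["query"]["dimensions"], item["dimensions"]):
            row[name] = dim["name"]
        for name, value in zip(report["query"]["metrics"], item["metrics"]):
            row[name] = value
        rows.append(row)
    return rows

print(flatten(sample))
```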
Shutting Down...\")\n","repo_name":"iojaiustin/dns_spy","sub_path":"dns_sniffer/dns_sniff.py","file_name":"dns_sniff.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39393977575","text":"from tests.util import BusManager, MockAdapter, get_attrib\n\nimport re\nfrom typing import Collection\nfrom unittest import IsolatedAsyncioTestCase\n\nfrom bluez_peripheral.util import get_message_bus\nfrom bluez_peripheral.gatt.service import Service, ServiceCollection\n\n\nclass TestService1(Service):\n def __init__(self, includes: Collection[Service]):\n super().__init__(\"180A\", primary=False, includes=includes)\n\n\nclass TestService2(Service):\n def __init__(self):\n super().__init__(\"180B\")\n\n\nclass TestService3(Service):\n def __init__(self):\n super().__init__(\"180C\")\n\n\nclass TestService(IsolatedAsyncioTestCase):\n async def asyncSetUp(self):\n self._client_bus = await get_message_bus()\n self._bus_manager = BusManager()\n self._path = \"/com/spacecheese/bluez_peripheral/test_service\"\n\n async def asyncTearDown(self):\n self._client_bus.disconnect()\n self._bus_manager.close()\n\n async def test_structure(self):\n async def inspector(path):\n introspection = await self._client_bus.introspect(\n self._bus_manager.name, path\n )\n\n child_names = [node.name for node in introspection.nodes]\n child_names = sorted(child_names)\n\n i = 0\n for name in child_names:\n assert re.match(r\"^service0?\" + str(i) + \"$\", name)\n i += 1\n\n service1 = TestService1([])\n service2 = TestService2()\n service3 = TestService3()\n collection = ServiceCollection([service1, service2, service3])\n\n adapter = MockAdapter(inspector)\n\n await collection.register(self._bus_manager.bus, self._path, adapter)\n await collection.unregister()\n\n async def test_include_modify(self):\n service3 = TestService3()\n service2 = TestService2()\n service1 = TestService1([service2, service3])\n collection = ServiceCollection([service1, service2])\n\n expect_service3 = False\n\n async def inspector(path):\n service1 = await get_attrib(\n self._client_bus, self._bus_manager.name, path, \"180A\"\n )\n service = service1.get_interface(\"org.bluez.GattService1\")\n includes = await service.get_includes()\n\n service2 = await get_attrib(\n self._client_bus, self._bus_manager.name, path, \"180B\"\n )\n # Services must include themselves.\n assert service1.path in includes\n assert service2.path in includes\n\n if expect_service3:\n service3 = await get_attrib(\n self._client_bus, self._bus_manager.name, path, \"180C\"\n )\n assert service3.path in includes\n\n adapter = MockAdapter(inspector)\n await collection.register(self._bus_manager.bus, self._path, adapter=adapter)\n await collection.unregister()\n\n collection.add_service(service3)\n expect_service3 = True\n await collection.register(self._bus_manager.bus, self._path, adapter=adapter)\n await collection.unregister()\n\n collection.remove_service(service3)\n expect_service3 = False\n await collection.register(self._bus_manager.bus, self._path, adapter=adapter)\n\n","repo_name":"spacecheese/bluez_peripheral","sub_path":"tests/gatt/test_service.py","file_name":"test_service.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"37"} +{"seq_id":"14874267045","text":"import matplotlib.pyplot as plt\nimport re\nimport datetime\nfrom pyspark import SparkConf, SparkContext\nimport os\nfrom 
pyspark.sql import SparkSession, functions, types\nimport json\nimport sys\nassert sys.version_info >= (3, 5) # make sure we have Python 3.5+\n\n\ndef main(input, output):\n ffolder = os.path.split(os.path.abspath(__file__))[0]\n\n Amazon_Product_DF = spark.read.parquet(input)\n\n Amazon_Product_DF = Amazon_Product_DF.withColumn(\n 'Product_Main_Category', functions.regexp_replace(\"Product_Main_Category\", '&', '&'))\n\n Amazon_Product_DF = Amazon_Product_DF.select(\n \"Product_Asin\",\n \"Product_Main_Category\",\n \"Product_Price\",\n \"Reviewer_ID\",\n \"Review_Post_Date\",\n \"Rate\",\n \"Product_Purchased\").where((functions.year(Amazon_Product_DF.Review_Post_Date) >= 2010) & (Amazon_Product_DF[\"Product_Purchased\"] == 1)).repartition(64).cache()\n\n # Repurchase Analysis\n\n repurchase_count_df = Amazon_Product_DF.select(\n \"*\").where(Amazon_Product_DF[\"Product_Purchased\"] == 1).groupBy(\"Product_Main_Category\",\n \"Product_Asin\",\n \"Reviewer_ID\").agg(functions.count(\"Review_Post_Date\").alias(\"purchase_count\"),\n functions.avg(\"Rate\").alias(\"avg_rate\")).select(\"*\").where(functions.col(\"purchase_count\") > 1).repartition(64).cache()\n\n repurchasers = repurchase_count_df.groupBy(\"Reviewer_ID\").agg(\n functions.count(functions.col(\"avg_rate\")).alias(\"repurchase_count\")).orderBy(functions.col(\"repurchase_count\").desc()).filter(functions.col(\"repurchase_count\") >= 2).cache()\n\n repurchase_category = repurchase_count_df.groupBy(\"Product_Main_Category\").agg(\n functions.sum(repurchase_count_df.purchase_count).alias(\"total_repurchase_count\"))\n # .groupBy(Amazon_Product_DF.Product_Main_Category)\n\n purchase_count_category_df = Amazon_Product_DF.groupBy(\n \"Product_Main_Category\").agg(functions.count(Amazon_Product_DF.Review_Post_Date).alias(\"total_sales\")).select(\"Product_Main_Category\", \"total_sales\")\n\n compare_repurchase = purchase_count_category_df.join(\n repurchase_category, purchase_count_category_df.Product_Main_Category == repurchase_category.Product_Main_Category, \"inner\").select(purchase_count_category_df.Product_Main_Category,\n purchase_count_category_df.total_sales,\n repurchase_category.total_repurchase_count)\n compare_repurchase = compare_repurchase.withColumn(\"repurchase_ratio\", functions.col(\n \"total_repurchase_count\") / functions.col(\"total_sales\") * 100)\n '''\n +---------------------+-----------+----------------------+-------------------+\n |Product_Main_Category|total_sales|total_repurchase_count| repurchase_ratio|\n +---------------------+-----------+----------------------+-------------------+\n | Computers| 127504| 755| 0.5921382858576986|\n | All Electronics| 87689| 1985| 2.2636818757198736|\n | GPS & Navigation| 507| 6| 1.183431952662722|\n | Home Audio & Theater| 75618| 1328| 1.756195614800709|\n | Pet Supplies| 2034| 12| 0.5899705014749262|\n | Toys & Games| 3817| 121| 3.170028818443804|\n | Sports & Outdoors| 4840| 54| 1.115702479338843|\n | Grocery| 799| 68| 8.51063829787234|\n | Automotive| 111696| 575| 0.5147901446784129|\n | Amazon Home| 248145| 5043| 2.032279513993834|\n | Industrial & Scie...| 4267| 217| 5.085540192172487|\n | Health & Personal...| 581| 24| 4.130808950086059|\n | Cell Phones & Acc...| 5433| 10|0.18406037180195106|\n | Arts, Crafts & Se...| 2041| 38| 1.8618324350808426|\n | Amazon Devices| 718| 5| 0.6963788300835655|\n | Car Electronics| 6511| 58| 0.8908001843034865|\n | Tools & Home Impr...| 123431| 2930| 2.3737958859605772|\n | Office Products| 49769| 1444| 2.9014044887379695|\n | 
Appliances| 533| 6| 1.125703564727955|\n | Camera & Photo| 57401| 592| 1.03134091740562|\n | Musical Instruments| 15171| 281| 1.8522180475907983|\n | Portable Audio & ...| 950| 6| 0.631578947368421|\n +---------------------+-----------+----------------------+-------------------+\n '''\n\n compare_repurchase_pd = compare_repurchase.toPandas()\n fig = compare_repurchase_pd.plot.bar(\n x='Product_Main_Category', y='repurchase_ratio', linewidth=50, rot=70, figsize=(20, 10), title=\"Percentage of Repurchases in Each Category\").get_figure()\n # fig.savefig(output + \"repurchase_compare.png\")\n plt.subplots_adjust(bottom=0.3)\n plt.savefig(output + \"repurchase_compare.png\")\n\n\nif __name__ == '__main__':\n sc = SparkContext()\n spark = SparkSession.builder.config(\"spark.driver.memory\", \"2g\").config(\n \"spark.executor.memory\", \"6g\").appName(\"Review ETL\").getOrCreate()\n assert spark.version >= '3.0' # make sure we have Spark 3.0+\n spark.sparkContext.setLogLevel('WARN')\n\n inputs = sys.argv[1]\n output = sys.argv[2]\n main(inputs, output)\n","repo_name":"qiushuo222/AmazonReviewProductAnalysis","sub_path":"RePurchase_Analysis.py","file_name":"RePurchase_Analysis.py","file_ext":"py","file_size_in_byte":5989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14274989006","text":"# model settings\nmodel = dict(\n type='Recognizer2D',\n backbone=dict(\n type='ResNetTSM',\n pretrained='torchvision://resnet50',\n depth=50,\n norm_eval=False,\n shift_div=8),\n cls_head=dict(\n type='TSMHead',\n num_classes=400,\n in_channels=2048,\n spatial_type='avg',\n consensus=dict(type='AvgConsensus', dim=1),\n dropout_ratio=0.5,\n init_std=0.001,\n is_shift=True),\n # model training and testing settings\n train_cfg=None,\n test_cfg=dict(average_clips='prob'))\n","repo_name":"wang-xinyu/tensorrtx","sub_path":"tsm/mmaction2_tsm_r50_config.py","file_name":"mmaction2_tsm_r50_config.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":6132,"dataset":"github-code","pt":"37"} +{"seq_id":"21921924641","text":"import datetime\nimport os\n\nimport pandas as pd\nimport numpy as np\n\nimport xgboost as xgb\n\n\nfrom sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error\n\npd.set_option(\"mode.chained_assignment\", None)\n\n\ndef process_schedule_data(schedule_data: pd.DataFrame):\n \"\"\"Process schedule reliability data\n\n Args:\n schedule_data (pd.DataFrame): raw data read from reliability schedule\n\n Returns:\n pd.DataFrame: processed dataframe excluding rows with null labels,\n creating new columns 'Date' and 'Avg_TurnoverDays'\n \"\"\"\n # process schedule data\n # exclude rows with null reliability values\n rel_df_nona = schedule_data[~schedule_data[\"OnTime_Reliability\"].isna()]\n\n # add date column\n # convert 3-letter month abbrev to integer equivalent\n rel_df_nona[\"Month(int)\"] = rel_df_nona[\"Month\"].apply(\n lambda x: datetime.datetime.strptime(x, \"%b\").month\n )\n # add date\n rel_df_nona[\"Date\"] = rel_df_nona.apply(\n lambda x: datetime.datetime(x[\"Calendary_Year\"], x[\"Month(int)\"], 1), axis=1\n )\n\n # change target field data type to float\n rel_df_nona.loc[:, \"OnTime_Reliability\"] = rel_df_nona[\"OnTime_Reliability\"].apply(\n lambda x: float(x[:-1])\n )\n\n # create new variable\n # Avg_TurnoverDays = Avg_TTDays + Avg_WaitTime_POD_Days\n rel_df_nona.loc[:, \"Avg_TurnoverDays\"] = (\n rel_df_nona[\"Avg_TTDays\"] + 
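The PySpark record above computes a per-category repurchase ratio: purchases are counted per (category, product, reviewer), counts above one are summed per category, then divided by total sales. A pandas stand-in for the same aggregation on a toy frame (the data is invented; this is not the original Spark job):

```python
import pandas as pd

purchases = pd.DataFrame({
    "Product_Main_Category": ["Computers", "Computers", "Computers", "Grocery"],
    "Product_Asin": ["A1", "A1", "A2", "B1"],
    "Reviewer_ID": ["u1", "u1", "u2", "u3"],
})

# Count purchases per (category, product, reviewer); >1 means a repurchase.
counts = (purchases
          .groupby(["Product_Main_Category", "Product_Asin", "Reviewer_ID"])
          .size()
          .rename("purchase_count")
          .reset_index())

total = purchases.groupby("Product_Main_Category").size().rename("total_sales")
repurchase = (counts[counts["purchase_count"] > 1]
              .groupby("Product_Main_Category")["purchase_count"].sum()
              .rename("total_repurchase_count"))

summary = pd.concat([total, repurchase], axis=1).fillna(0)
summary["repurchase_ratio"] = 100 * summary["total_repurchase_count"] / summary["total_sales"]
print(summary)
```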
rel_df_nona[\"Avg_WaitTime_POD_Days\"]\n )\n\n return rel_df_nona\n\n\ndef restrict_by_coverage(rel_df_nona: pd.DataFrame, min_no_months=9):\n \"\"\"Restrict to carrier service routes with given no. of months covered\n\n Args:\n rel_df_nona (pd.DataFrame): shipping schedule dataframe\n min_no_months (int, optional): months threshold. Defaults to 9.\n\n Returns:\n pd.DataFrame: dataframe with routes having at least nine months' worth of data\n \"\"\"\n rel_df_nona_cvg = rel_df_nona.groupby([\"POL\", \"POD\", \"Carrier\", \"Service\"]).apply(\n lambda x: len(x[\"Month\"].unique())\n )\n\n rel_df_nona_full_cvg = rel_df_nona_cvg[rel_df_nona_cvg == min_no_months]\n\n rel_df_nona_full_cvg_indices = rel_df_nona_full_cvg.index\n\n base_features = zip(\n rel_df_nona[\"POL\"],\n rel_df_nona[\"POD\"],\n rel_df_nona[\"Carrier\"],\n rel_df_nona[\"Service\"],\n )\n\n new_indices = []\n for idx, base_feature in enumerate(base_features):\n if base_feature in rel_df_nona_full_cvg_indices:\n new_indices.append(idx)\n\n return rel_df_nona.iloc[new_indices, :]\n\n\ndef split_data(\n rel_df_nona: pd.DataFrame, datetime_split: datetime.datetime, max_month=9\n):\n \"\"\"Return train, val split for baseline model\n\n Args:\n rel_df_nona (pd.DataFrame): shipping schedule data\n datetime_split (datetime.datetime): time horizon\n max_month (int, optional): no. of months to cover. Defaults to 9.\n\n Returns:\n tuple: (unique 'POD, POL, Carrier, Service' rows with weighted avg and std,\n filtered validation data set with common 'POD, POL, Carrier, Service'\n values from both train and val)\n \"\"\"\n\n month_thresh = datetime.datetime(2022, max_month, 1)\n # train\n train = rel_df_nona[rel_df_nona[\"Date\"] < datetime_split]\n\n # val\n val = rel_df_nona[\n (rel_df_nona[\"Date\"] >= datetime_split) & (rel_df_nona[\"Date\"] <= month_thresh)\n ]\n\n # let's get multi-index pairs from train\n train_indices = list(\n train[[\"Carrier\", \"Service\", \"POD\", \"POL\"]]\n .groupby([\"Carrier\", \"Service\", \"POD\", \"POL\"])\n .count()\n .index\n )\n\n # now find the intersection between train and val\n indices_inter = []\n for ind, row in val.iterrows():\n ind_pair = (row[\"Carrier\"], row[\"Service\"], row[\"POD\"], row[\"POL\"])\n if ind_pair in train_indices:\n indices_inter.append(ind)\n\n # now restrict to the indices in the intersection\n val_res = val.loc[indices_inter, :]\n\n return train, val_res\n\n\ndef align_port_call(\n port_data,\n rel_df_nona,\n agg_cols=[\"seaport_code\", \"Month\", \"Year\"],\n target_cols=[\"Total_Calls\", \"Port_Hours\", \"Anchorage_Hours\"],\n):\n \"\"\"Compute aggregate by port, month, year, and merge with schedule data\n\n Args:\n port_data (pd.DataFrame): port call data frame\n rel_df_nona (pd.DataFrame): reliability schedule data frame\n agg_cols (list, optional): aggregate column group. Defaults to [\"seaport_code\", \"Month\", \"Year\"].\n target_cols (list, optional): target colums. 
Defaults to [\"Total_Calls\", \"Port_Hours\", \"Anchorage_Hours\"].\n\n Returns:\n pd.DataFrame: port call aggregate data merged with schedule\n \"\"\"\n\n # seaport code dict (schedule -> port call)\n seaport_code_map = {\"CNSHG\": \"CNSHA\", \"CNTNJ\": \"CNTXG\", \"CNQIN\": \"CNTAO\"}\n # add seaport_code column to port data\n port_call_df = port_data\n port_call_df.loc[:, \"seaport_code\"] = port_call_df[\"UNLOCODE\"].apply(\n lambda x: seaport_code_map[x] if x in seaport_code_map else x\n )\n # exclude rows with port code USORF from rel_df since it's missing\n rel_df_no_orf = rel_df_nona[~rel_df_nona.POD.isin([\"USORF\"])]\n # add seaport code column\n rel_df_no_orf.loc[:, \"seaport_code\"] = rel_df_no_orf[\"POD\"]\n # compute average hours per call\n # sum up calls, port/anchorage hours\n # and aggregate by port, month, and year\n port_hours_avg = (\n port_call_df[target_cols + agg_cols].groupby(agg_cols).sum().reset_index()\n )\n # average port hours by port, month\n port_hours_avg.loc[:, \"Avg_Port_Hours(by_call)\"] = (\n port_hours_avg[\"Port_Hours\"] / port_hours_avg[\"Total_Calls\"]\n )\n # average anchorage hours by port, month\n port_hours_avg.loc[:, \"Avg_Anchorage_Hours(by_call)\"] = (\n port_hours_avg[\"Anchorage_Hours\"] / port_hours_avg[\"Total_Calls\"]\n )\n # merge avg hours\n rel_df_no_orf_pt_hrs = rel_df_no_orf.merge(\n port_hours_avg,\n left_on=[\"Calendary_Year\", \"Month(int)\", \"seaport_code\"],\n right_on=[\"Year\", \"Month\", \"seaport_code\"],\n )\n\n return rel_df_no_orf_pt_hrs\n\n\ndef process_sales(sales_data, rel_df_nona):\n \"\"\"Add features to sales data\n\n Args:\n sales_data (pd.DataFrame): retail sales data frame\n rel_df_nona (pd.DataFrame): reliability schedule data frame\n \"\"\"\n\n # reliability POL mapping -> retail_sales country/region\n rel_port_map = {\n \"AEAUH\": \"Agg Middle East & Africa\",\n \"AEJEA\": \"Agg Middle East & Africa\",\n \"BEANR\": \"Belgium\",\n \"BRRIG\": \"Brazil\",\n \"CNNGB\": \"China\",\n \"CNSHA\": \"China\",\n \"CNSHK\": \"China\",\n \"CNTAO\": \"China\",\n \"CNYTN\": \"China\",\n \"COCTG\": \"Colombia\",\n \"DEHAM\": \"Denmark\",\n \"ESBCN\": \"Spain\",\n \"ESVLC\": \"Spain\",\n \"GBLGP\": \"U.K.\",\n \"GRPIR\": \"Greece\",\n \"HKHKG\": \"Hong Kong\",\n \"JPUKB\": \"Japan\",\n \"KRPUS\": \"South Korea\",\n \"LKCMB\": \"Agg Asia Pacific\",\n \"MAPTM\": \"Agg Middle East & Africa\",\n \"MXZLO\": \"Mexico\",\n \"MYPKG\": \"Agg Asia Pacific\",\n \"MYTPP\": \"Agg Asia Pacific\",\n \"NLRTM\": \"Netherlands\",\n \"NZAKL\": \"Agg Asia Pacific\",\n \"PAMIT\": \"Agg Latin America\",\n \"SAJED\": \"Agg Middle East & Africa\",\n \"SAJUB\": \"Agg Middle East & Africa\",\n \"SGSIN\": \"Singapore\",\n \"THLCH\": \"Thailand\",\n \"TWKHH\": \"Taiwan\",\n \"USBAL\": \"U.S.\",\n \"USCHS\": \"U.S.\",\n \"USHOU\": \"U.S.\",\n \"USILM\": \"U.S.\",\n \"USLAX\": \"U.S.\",\n \"USLGB\": \"U.S.\",\n \"USMOB\": \"U.S.\",\n \"USMSY\": \"U.S.\",\n \"USNYC\": \"U.S.\",\n \"USORF\": \"U.S.\",\n \"USSAV\": \"U.S.\",\n \"USTIW\": \"U.S.\",\n }\n # create region column\n rel_df_nona.loc[:, \"region\"] = rel_df_nona[\"POL\"].apply(lambda x: rel_port_map[x])\n # process retail sales data\n new_cols = [col.strip() for col in sales_data.columns]\n sales_data.columns = new_cols\n # add month column\n sales_data.loc[:, \"month\"] = sales_data[\"MonthYear\"].apply(\n lambda x: int(x.split(\"/\")[0])\n )\n # add year column\n sales_data.loc[:, \"year\"] = sales_data[\"MonthYear\"].apply(\n lambda x: int(x.split(\"/\")[1])\n )\n # add date column\n 
sales_data.loc[:, \"date\"] = sales_data[\"MonthYear\"].apply(\n lambda x: datetime.datetime.strptime(x, \"%m/%Y\")\n )\n # TODO: add support for moving average\n sales_data.loc[:, \"date(offset)\"] = sales_data[\"date\"]\n\n\ndef align_sales(sales_data, rel_df_nona, max_date):\n \"\"\"Add retail sales column to schedule data\n\n Args:\n sales_data (pd.DataFrame): retail sales data frame\n rel_df_nona (pd.DataFrame): reliability schedule data frame\n max_date (datetime.datetime): max data threshold\n \"\"\"\n\n # create a retail sales map given date and country/region\n # date, country/region -> retail sales index\n regions = [\n \"Agg North America\",\n \"U.S.\",\n \"Canada\",\n \"Mexico\",\n \"Agg Western Europe\",\n \"Austria\",\n \"Belgium\",\n \"Cyprus\",\n \"Denmark\",\n \"Euro Area\",\n \"Finland\",\n \"France\",\n \"Germany\",\n \"Greece\",\n \"Iceland\",\n \"Ireland\",\n \"Italy\",\n \"Luxembourg\",\n \"Netherlands\",\n \"Norway\",\n \"Portugal\",\n \"Spain\",\n \"Sweden\",\n \"Switzerland\",\n \"U.K.\",\n \"Agg Asia Pacific\",\n \"Australia\",\n \"China\",\n \"Hong Kong\",\n \"Indonesia\",\n \"Japan\",\n \"Kazakhstan\",\n \"Macau\",\n \"Singapore\",\n \"South Korea\",\n \"Taiwan\",\n \"Thailand\",\n \"Vietnam\",\n \"Agg Eastern Europe\",\n \"Bulgaria\",\n \"Croatia\",\n \"Czech Republic\",\n \"Estonia\",\n \"Hungary\",\n \"Latvia\",\n \"Lithuania\",\n \"Poland\",\n \"Romania\",\n \"Russia\",\n \"Serbia\",\n \"Slovenia\",\n \"Turkey\",\n \"Agg Latin America\",\n \"Argentina\",\n \"Brazil\",\n \"Chile\",\n \"Colombia\",\n \"Agg Middle East & Africa\",\n \"Israel\",\n \"South Africa\",\n ]\n date_region_sales = {}\n for region in regions:\n region_dict = dict(zip(sales_data[\"date(offset)\"], sales_data[region]))\n date_region_sales[region] = region_dict\n\n # finally, create new columns\n # iterate over rows\n rel_df_nona.loc[:, \"retail_sales\"] = rel_df_nona.apply(\n lambda x: date_region_sales[x[\"region\"]][x[\"Date\"]]\n if x[\"Date\"] <= max_date\n else None,\n axis=1,\n )\n\n\ndef process_cpi(cpi_df):\n \"\"\"Add date column to cpi data frame\n\n Args:\n cpi_df (pd.DataFrame): consumer price index data frame\n \"\"\"\n\n cpi_df.columns = [col.strip() for col in cpi_df.columns]\n\n cpi_df.columns = [\n \"MonthYear\",\n \"Agg North America\",\n \"U.S.\",\n \"Canada\",\n \"Mexico\",\n \"Agg Western Europe\",\n \"Austria\",\n \"Belgium\",\n \"Cyprus\",\n \"Denmark\",\n \"Euro Area\",\n \"Finland\",\n \"France\",\n \"Germany\",\n \"Greece\",\n \"Iceland\",\n \"Ireland\",\n \"Italy\",\n \"Luxembourg\",\n \"Malta\",\n \"Netherlands\",\n \"Norway\",\n \"Portugal\",\n \"Spain\",\n \"Sweden\",\n \"Switzerland\",\n \"U.K.\",\n \"Agg Asia Pacific\",\n \"Australia\",\n \"China\",\n \"India*\",\n \"Indonesia\",\n \"Japan\",\n \"Philippines\",\n \"Singapore\",\n \"South Korea\",\n \"Taiwan\",\n \"Thailand\",\n \"Agg Latin America\",\n \"Argentina\",\n \"Brazil\",\n \"Chile\",\n \"Colombia\",\n \"Peru\",\n \"Agg Eastern Europe\",\n \"Bulgaria\",\n \"Croatia\",\n \"Czech Republic\",\n \"Estonia\",\n \"Hungary\",\n \"Latvia\",\n \"Lithuania\",\n \"Poland\",\n \"Romania\",\n \"Russia\",\n \"Serbia\",\n \"Slovakia\",\n \"Slovenia\",\n \"Turkey\",\n \"Agg Middle East & Africa\",\n \"Egypt\",\n \"Iraq\",\n \"Israel\",\n \"South Africa\",\n ]\n\n cpi_df.loc[:, \"date\"] = cpi_df[\"MonthYear\"].apply(\n lambda x: datetime.datetime.strptime(x, \"%m/%Y\")\n )\n\n cpi_df.loc[:, \"date(offset)\"] = cpi_df[\"date\"]\n\n\ndef align_cpi(cpi_df, rel_df_nona):\n \"\"\"Align cpi data with schedule data\n\n 
Args:\n cpi_df (pd.DataFrame): consumer price index data frame\n rel_df_nona (pd.DataFrame): reliability schdule data frame\n \"\"\"\n\n regions_cpi = [\n \"Agg North America\",\n \"U.S.\",\n \"Canada\",\n \"Mexico\",\n \"Agg Western Europe\",\n \"Austria\",\n \"Belgium\",\n \"Cyprus\",\n \"Denmark\",\n \"Euro Area\",\n \"Finland\",\n \"France\",\n \"Germany\",\n \"Greece\",\n \"Iceland\",\n \"Ireland\",\n \"Italy\",\n \"Luxembourg\",\n \"Malta\",\n \"Netherlands\",\n \"Norway\",\n \"Portugal\",\n \"Spain\",\n \"Sweden\",\n \"Switzerland\",\n \"U.K.\",\n \"Agg Asia Pacific\",\n \"Australia\",\n \"China\",\n \"India*\",\n \"Indonesia\",\n \"Japan\",\n \"Philippines\",\n \"Singapore\",\n \"South Korea\",\n \"Taiwan\",\n \"Thailand\",\n \"Agg Latin America\",\n \"Argentina\",\n \"Brazil\",\n \"Chile\",\n \"Colombia\",\n \"Peru\",\n \"Agg Eastern Europe\",\n \"Bulgaria\",\n \"Croatia\",\n \"Czech Republic\",\n \"Estonia\",\n \"Hungary\",\n \"Latvia\",\n \"Lithuania\",\n \"Poland\",\n \"Romania\",\n \"Russia\",\n \"Serbia\",\n \"Slovakia\",\n \"Slovenia\",\n \"Turkey\",\n \"Agg Middle East & Africa\",\n \"Egypt\",\n \"Iraq\",\n \"Israel\",\n \"South Africa\",\n ]\n\n date_region_cpi = {}\n for region in regions_cpi:\n region_dict = dict(zip(cpi_df[\"date(offset)\"], cpi_df[region]))\n\n date_region_cpi[region] = region_dict\n\n # calculate max date to avoid index error\n max_date = cpi_df[\"date(offset)\"].max()\n\n rel_df_nona.loc[:, \"cpi\"] = rel_df_nona.apply(\n lambda x: date_region_cpi[x[\"region\"]][x[\"Date\"]]\n if x[\"Date\"] <= max_date\n else None,\n axis=1,\n )\n\n\ndef load_excel_data(config: dict, data_name: str):\n \"\"\"Load excel data corresp. to data name\n\n Args:\n config (dict): config dict consisting of data and eval params\n data_name (str): string representing data name (e.g. port call or retail sales)\n\n Returns:\n pd.DataFrame: dataframe corresponding to data_name\n \"\"\"\n\n filename = config[data_name][\"filename\"]\n sheetname = config[data_name][\"sheet\"]\n\n data_dir = config[\"data_path\"]\n\n path = os.path.join(data_dir, filename)\n data = pd.read_excel(path, sheet_name=sheetname)\n\n return data\n\n\ndef weighted_average_ser(ser: pd.Series):\n \"\"\"Return weighted average of series\n\n Args:\n ser (pd.Series): pandas Series with float values\n\n Returns:\n float: weighted average of series\n \"\"\"\n\n wts = pd.Series([1 / val if val != 0 else 0 for val in ser])\n\n if wts.sum() == 0:\n return 0 # avoid division by zero\n\n return (ser * wts).sum() / wts.sum()\n\n\ndef get_carr_serv_mask(df: pd.DataFrame, carrier: str, service: str):\n \"\"\"Return pandas series mask given specific carrier and service\n\n Args:\n df (pd.DataFrame): Dataframe containing columns 'Carrier' and 'Service'\n carrier (str): string corresp. to a specific carrier\n service (str): string corresp. to a specific service\n\n Returns:\n pd.Series: series mask corresponding to carrier and service\n \"\"\"\n return (df[\"Carrier\"] == carrier) & (df[\"Service\"] == service)\n\n\ndef get_reg_train_test(\n df: pd.DataFrame,\n datetime_split: datetime.datetime,\n label=\"Avg_TTDays\",\n use_retail=False,\n):\n \"\"\"Return train val data with train weighted mean features\n\n Args:\n df (pd.DataFrame): shipping + features train data\n datetime_split (datetime.datetime): time horizon to split from\n label (str, optional): column label. Defaults to 'Avg_TTDays'.\n use_retail (bool, optional): whether retail features are included. 
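`weighted_average_ser` above weights each observation by its reciprocal, so smaller values pull the average down harder, and it guards against an all-zero weight sum. A worked sketch of the same idea (the `reset_index` is added here to keep the element-wise product aligned on a default index):

```python
import pandas as pd

def weighted_average(ser):
    # Inverse-magnitude weights: smaller observations get larger weights;
    # zero observations get weight 0, mirroring weighted_average_ser.
    wts = pd.Series([1 / v if v != 0 else 0 for v in ser])
    if wts.sum() == 0:
        return 0  # avoid division by zero
    return (ser.reset_index(drop=True) * wts).sum() / wts.sum()

s = pd.Series([2.0, 4.0, 8.0])
# weights 0.5, 0.25, 0.125 -> (1 + 1 + 1) / 0.875 ~= 3.43
print(weighted_average(s))
```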
Defaults to False.\n\n Returns:\n tuple: train, val split\n \"\"\"\n\n date_column = \"Date\"\n # train\n train = df[df[date_column] < datetime_split]\n\n train_wt_mean = get_train_wt_avg(train, datetime_split, label=label)\n\n train_wt_mean.columns = [\n \"Carrier\",\n \"Service\",\n \"POD\",\n \"POL\",\n f\"{label}_train\",\n f\"{label}(std)_train\",\n ]\n\n train_min = get_train_wt_avg(train, datetime_split, label=label, agg_fun=np.min)\n train_min.columns = [\n \"Carrier\",\n \"Service\",\n \"POD\",\n \"POL\",\n f\"{label}_min_train\",\n f\"{label}(std)_min_train\",\n ]\n\n train_max = get_train_wt_avg(train, datetime_split, label=label, agg_fun=np.max)\n train_max.columns = [\n \"Carrier\",\n \"Service\",\n \"POD\",\n \"POL\",\n f\"{label}_max_train\",\n f\"{label}(std)_max_train\",\n ]\n\n train = train_wt_mean.merge(train, on=[\"Carrier\", \"Service\", \"POD\", \"POL\"])\n\n train = train_min.merge(train, on=[\"Carrier\", \"Service\", \"POD\", \"POL\"])\n\n train = train_max.merge(train, on=[\"Carrier\", \"Service\", \"POD\", \"POL\"])\n\n # val\n val = df[df[date_column] >= datetime_split]\n\n val = train_wt_mean.merge(val, on=[\"Carrier\", \"Service\", \"POD\", \"POL\"])\n\n val = train_min.merge(val, on=[\"Carrier\", \"Service\", \"POD\", \"POL\"])\n\n val = train_max.merge(val, on=[\"Carrier\", \"Service\", \"POD\", \"POL\"])\n\n # predictors\n if not use_retail:\n predictors = [\n \"POL\",\n \"POD\",\n \"Carrier\",\n \"Service\",\n \"Trade\",\n \"Avg_Port_Hours(by_call)\",\n \"Avg_Anchorage_Hours(by_call)\",\n f\"{label}_train\",\n f\"{label}(std)_train\",\n f\"{label}_min_train\",\n f\"{label}_max_train\",\n ]\n else:\n\n train.rename(columns={\"retail_sales_x\": \"retail_sales\"}, inplace=True)\n val.rename(columns={\"retail_sales_x\": \"retail_sales\"}, inplace=True)\n\n # drop retail sales na values\n train = train[~train[\"retail_sales\"].isna()]\n val = val[~val[\"retail_sales\"].isna()]\n\n predictors = [\n \"POL\",\n \"POD\",\n \"Carrier\",\n \"Service\",\n \"Trade\",\n \"retail_sales\",\n \"cpi\",\n f\"{label}_train\",\n f\"{label}(std)_train\",\n f\"{label}_min_train\",\n f\"{label}_max_train\",\n ]\n\n train_X, train_y = train[predictors], train[label]\n val_X, val_y = val[predictors], val[label]\n\n return train_X, train_y, val_X, val_y\n\n\ndef get_train_wt_avg(\n rel_df_nona: pd.DataFrame,\n datetime_split: datetime.datetime,\n label=\"Avg_TTDays\",\n agg_fun=weighted_average_ser,\n):\n \"\"\"Return data frame weighted aggregated indexed by Carrier, Service, POD, and POL columns\n\n Args:\n rel_df_nona (pd.DataFrame): shipping schedule data + features\n datetime_split (datetime.datetime): time horizon\n label (str, optional): target label. Defaults to \"Avg_TTDays\".\n agg_fun (Callable, optional): aggregate function to weigh values by. 
Defaults to weighted_average_ser.\n\n Returns:\n pd.DataFrame: dataframe with unique \"Carrier\", \"Service\", \"POL\", \"POD\" rows\n and label weighted by aggregate function\n \"\"\"\n\n train = rel_df_nona[rel_df_nona[\"Date\"] < datetime_split]\n\n # weighted average\n train_on_time_rel_by_carr_ser = (\n train[\n [\n \"Carrier\",\n \"Service\",\n \"POD\",\n \"POL\",\n label,\n ]\n ]\n .groupby([\"Carrier\", \"Service\", \"POD\", \"POL\"])\n .apply(lambda x: (agg_fun(x[label].values), x[label].values.std()))\n .reset_index()\n )\n\n train_on_time_rel_by_carr_ser.loc[:, f\"{label}\"] = train_on_time_rel_by_carr_ser[\n 0\n ].apply(lambda x: x[0])\n train_on_time_rel_by_carr_ser.loc[\n :, f\"{label}(std)\"\n ] = train_on_time_rel_by_carr_ser[0].apply(lambda x: x[1])\n\n train_on_time_rel_by_carr_ser.drop(0, axis=1, inplace=True)\n\n train_df = train_on_time_rel_by_carr_ser.copy()\n\n return train_df\n\n\ndef gen_lag(\n df: pd.DataFrame,\n lag=1,\n lag_col=\"Month(int)\",\n target_cols=[\"OnTime_Reliability\"],\n common_cols=[\"Carrier\", \"Service\", \"POL\", \"POD\", \"Trade\", \"Month(int)\"],\n):\n \"\"\"Generate month lag on a target column by some number of months.\n\n Args:\n df (pd.DataFrame): train data\n lag (int, optional): no. of months to lag by. Defaults to 1.\n lag_col (str, optional): lag column name. Defaults to \"Month(int)\".\n target_cols (list, optional): target column list. Defaults to [\"OnTime_Reliability\"].\n common_cols (list, optional): columns to merge on.\n Defaults to [\"Carrier\", \"Service\", \"POL\", \"POD\", \"Trade\", \"Month(int)\"].\n\n Returns:\n pd.DataFrame: dataframe with new lag column appended\n \"\"\"\n\n # make a copy of dataframe\n # to apply lag\n df_lag = df.copy()\n\n # apply lag\n df_lag.loc[:, lag_col] += 1\n\n # rename the target columns\n\n for target_col in target_cols:\n new_col_name = f\"{target_col}_lag_{lag}\"\n df_lag.loc[:, new_col_name] = df_lag[target_col]\n df_lag.drop(target_col, inplace=True, axis=1)\n\n # now merge lagged feature onto original df\n df_with_lag_feature = df.merge(df_lag, on=common_cols)\n df_with_lag_feature.rename(\n columns={\n \"Date_x\": \"Date\",\n \"Avg_TTDays_x\": \"Avg_TTDays\",\n \"Avg_WaitTime_POD_Days_x\": \"Avg_WaitTime_POD_Days\",\n \"Avg_Port_Hours(by_call)_x\": \"Avg_Port_Hours(by_call)\",\n \"Avg_Anchorage_Hours(by_call)_x\": \"Avg_Anchorage_Hours(by_call)\",\n },\n inplace=True,\n )\n\n return df_with_lag_feature\n\n\ndef filter_nonzero_values(data_X, data_y, preds, label):\n \"\"\"Filter feature and target data with nonzero labels\n\n Args:\n data_X (pd.DataFrame): feature data\n data_y (pd.Series): label data\n preds (list): model predictions\n label (str): target label\n\n Returns:\n tuple: filtered feature, labels, and predictions\n \"\"\"\n preds_array = np.array(preds)\n\n # create mask to use for filtering\n nonzero_mask = data_y != 0\n nonzero_mask = nonzero_mask.reset_index()[label]\n\n # filtering zero values\n if sum(nonzero_mask) != 0:\n\n preds = pd.Series(preds)[nonzero_mask]\n\n data_y = data_y.reset_index()[label]\n data_y = data_y[nonzero_mask]\n\n data_X = data_X.reset_index().drop(\"index\", axis=1)\n data_X = data_X[nonzero_mask]\n\n preds_array = np.array(preds)\n\n data_gt = data_y.values\n\n return data_X, data_gt, preds_array\n\n else:\n raise Exception(\"All target values are zero!\")\n\n\ndef compute_eval_metrics(\n model,\n train_X: pd.DataFrame,\n val_X: pd.DataFrame,\n train_y: pd.Series,\n val_y: pd.Series,\n include_overestimates=False,\n 
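`gen_lag` above builds a lag feature by copying the frame, shifting the month key forward, renaming the target column, and merging back on the route keys. The same move on a toy frame (column names are illustrative):

```python
import pandas as pd

df = pd.DataFrame({
    "route": ["A", "A", "A"],
    "month": [1, 2, 3],
    "reliability": [80.0, 70.0, 90.0],
})

# Shift the month forward by one so that, after the merge, each row sees
# last month's value alongside the current one (same idea as gen_lag).
lagged = df.rename(columns={"reliability": "reliability_lag_1"}).copy()
lagged["month"] += 1

with_lag = df.merge(lagged, on=["route", "month"])
print(with_lag)
#   route  month  reliability  reliability_lag_1
#       A      2         70.0               80.0
#       A      3         90.0               70.0
```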
label=\"Avg_TTDays\",\n):\n \"\"\"Return train/validation MAE and MAPE metrics given scikit learn model\n\n Args:\n model (sklearn.base.BaseEstimator): scikit estimator object\n train_X (pd.DataFrame): feature data\n val_X (pd.DataFrame): val data\n train_y (pd.Series): train ground truth\n val_y (pd.Series): val ground truth\n include_overestimates (bool, optional): whether to calculate MAPE. Defaults to False.\n label (str, optional): target label. Defaults to \"Avg_TTDays\".\n\n Returns:\n tuple: MAE and MAPE values for validation\n \"\"\"\n\n train_X_rg, val_X_rg = compute_common_cols(train_X, val_X)\n\n # fit model\n model.fit(train_X_rg, train_y)\n\n train_preds = model.predict(train_X_rg)\n val_preds = model.predict(val_X_rg)\n\n # need to make sure reliability predictions are capped at 100 and 0\n if label == \"Avg_TTDays\":\n train_preds = list(\n map(lambda x: 100 if x >= 100 else x, model.predict(train_X_rg))\n )\n val_preds = list(map(lambda x: 100 if x >= 100 else x, model.predict(val_X_rg)))\n\n train_preds = list(map(lambda x: 0 if x <= 0 else x, train_preds))\n val_preds = list(map(lambda x: 0 if x <= 0 else x, val_preds))\n\n # val metrics\n val_X, val_gt, preds_array = filter_nonzero_values(val_X, val_y, val_preds, label)\n val_mae = mean_absolute_error(val_gt, preds_array)\n val_mape = mean_absolute_percentage_error(val_gt, preds_array)\n\n # train metrics\n train_X, train_gt, train_preds_array = filter_nonzero_values(\n train_X, train_y, train_preds, label\n )\n train_mae = mean_absolute_error(train_gt, train_preds_array)\n train_mape = mean_absolute_percentage_error(train_gt, train_preds_array)\n\n # create error result dataframe\n result_df = val_X.copy()\n result_df.loc[:, \"actual\"] = val_y\n result_df.loc[:, \"pred\"] = preds_array\n result_df.loc[:, \"error\"] = preds_array - val_y\n result_df.loc[:, \"perc_error\"] = (preds_array - val_y) / val_y\n\n result_df = result_df[\n [\"Carrier\", \"Service\", \"POD\", \"POL\", \"actual\", \"pred\", \"error\", \"perc_error\"]\n ]\n\n # overestimate mask\n diff = preds_array - val_y\n mask = diff > 0\n\n # mape\n if include_overestimates:\n\n if sum(mask) == 0:\n raise Exception(\"There are not overestimated preds!\")\n\n # compute mae (over)\n val_mae_over = diff[mask].mean()\n # series mask\n mask_ser = mask.reset_index()[label]\n # compute mape (over)\n val_preds_over = pd.Series(preds_array)[mask_ser]\n val_y_over = pd.Series(list(val_y))[mask_ser]\n val_mape_over = mean_absolute_percentage_error(val_y_over, val_preds_over)\n\n return (\n train_mae,\n train_mape,\n val_mae,\n val_mape,\n val_mae_over,\n val_mape_over,\n result_df,\n )\n\n return train_mae, train_mape, val_mae, val_mape, result_df\n\n\n# we need to restrict to common inputs\ndef compute_common_cols(train_X: pd.DataFrame, val_X: pd.Series):\n \"\"\"Return train and val restricted to common cols\n\n Args:\n train_X (pd.DataFrame): feature data\n val_X (pd.Series): test data\n\n Returns:\n tuple: feature and test data restricted to common columns (for regression fitting)\n \"\"\"\n\n # get dummies for categorical variables\n train_X_rg = pd.get_dummies(train_X)\n val_X_rg = pd.get_dummies(val_X)\n\n # restrict to common columns\n common_cols = list(set(train_X_rg.columns).intersection(set(val_X_rg.columns)))\n\n return train_X_rg[common_cols], 
val_X_rg[common_cols]\n","repo_name":"mosesckim/psa_ml","sub_path":"ontime/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":26841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8242618123","text":"class Settings():\n\n def __init__(self):\n # Screen settings\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (231, 231, 231)\n self.font_color = (80, 80, 80)\n\n # Nets settings\n self.net_speed = 1.5\n self.net_width = 200\n self.net_height = 2\n self.net_color = 60, 60, 60\n self.net_limit = 10 # 100 (one hundred)\n\n # Fish\n self.rows_drop = 0\n\n # 1 for right, -1 left\n self.rows_direction = 1\n\n self.game_active = True\n\n self.scores = 0\n\n self.url = 'https://api.myjson.com/bins/z3kww'\n self.filename = 'data.json'\n","repo_name":"skrzychuz/fishing-nets","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18193931652","text":"from typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom bofire.data_models.features.feature import Input, TTransform\n\n\nclass NumericalInput(Input):\n \"\"\"Abstract base class for all numerical (ordinal) input features.\"\"\"\n\n unit: Optional[str] = None\n\n @staticmethod\n def valid_transform_types() -> List:\n return []\n\n def to_unit_range(\n self, values: Union[pd.Series, np.ndarray], use_real_bounds: bool = False\n ) -> Union[pd.Series, np.ndarray]:\n \"\"\"Convert to the unit range between 0 and 1.\n\n Args:\n values (pd.Series): values to be transformed\n use_real_bounds (bool, optional): if True, use the bounds from the actual values else the bounds from the feature.\n Defaults to False.\n\n Raises:\n ValueError: If lower_bound == upper bound an error is raised\n\n Returns:\n pd.Series: transformed values.\n \"\"\"\n if use_real_bounds:\n lower, upper = self.get_bounds(transform_type=None, values=values)\n lower = lower[0]\n upper = upper[0]\n else:\n lower, upper = self.lower_bound, self.upper_bound # type: ignore\n if lower == upper:\n raise ValueError(\"Fixed feature cannot be transformed to unit range.\")\n valrange = upper - lower\n return (values - lower) / valrange\n\n def from_unit_range(\n self, values: Union[pd.Series, np.ndarray]\n ) -> Union[pd.Series, np.ndarray]:\n \"\"\"Convert from unit range.\n\n Args:\n values (pd.Series): values to transform from.\n\n Raises:\n ValueError: if the feature is fixed raise a value error.\n\n Returns:\n pd.Series: _description_\n \"\"\"\n if self.is_fixed():\n raise ValueError(\"Fixed feature cannot be transformed from unit range.\")\n valrange = self.upper_bound - self.lower_bound # type: ignore\n return (values * valrange) + self.lower_bound # type: ignore\n\n def is_fixed(self):\n \"\"\"Method to check if the feature is fixed\n\n Returns:\n Boolean: True when the feature is fixed, false otherwise.\n \"\"\"\n return self.lower_bound == self.upper_bound # type: ignore\n\n def fixed_value(\n self, transform_type: Optional[TTransform] = None\n ) -> Union[None, List[float]]:\n \"\"\"Method to get the value to which the feature is fixed\n\n Returns:\n Float: Return the feature value or None if the feature is not fixed.\n \"\"\"\n assert transform_type is None\n if self.is_fixed():\n return [self.lower_bound] # type: ignore\n else:\n return None\n\n def validate_experimental(self, values: pd.Series, 
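`compute_common_cols` above aligns train and validation one-hot encodings by intersecting their columns, since `pd.get_dummies` emits one column per category seen in each split. A sketch of the intersection step; note that intersecting silently drops categories unseen in either split, whereas a `reindex` on the column union with `fill_value=0` would keep them:

```python
import pandas as pd

train = pd.DataFrame({"carrier": ["X", "Y"], "days": [3, 5]})
val = pd.DataFrame({"carrier": ["Y", "Z"], "days": [4, 6]})

train_d = pd.get_dummies(train)
val_d = pd.get_dummies(val)

# One-hot columns differ between splits (no carrier_Z in train,
# no carrier_X in val), so restrict both frames to the intersection.
common = sorted(set(train_d.columns) & set(val_d.columns))
train_d, val_d = train_d[common], val_d[common]
print(common)  # ['carrier_Y', 'days']
```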
strict=False) -> pd.Series:\n \"\"\"Method to validate the experimental dataFrame\n\n Args:\n values (pd.Series): A dataFrame with experiments\n strict (bool, optional): Boolean to distinguish if the occurence of fixed features in the dataset should be considered or not.\n Defaults to False.\n\n Raises:\n ValueError: when a value is not numerical\n ValueError: when there is no variation in a feature provided by the experimental data\n\n Returns:\n pd.Series: A dataFrame with experiments\n \"\"\"\n try:\n values = pd.to_numeric(values, errors=\"raise\").astype(\"float64\")\n except ValueError:\n raise ValueError(\n f\"not all values of input feature `{self.key}` are numerical\"\n )\n values = values.astype(\"float64\")\n if strict:\n lower, upper = self.get_bounds(transform_type=None, values=values)\n if lower == upper:\n raise ValueError(\n f\"No variation present or planned for feature {self.key}. Remove it.\"\n )\n return values\n\n def validate_candidental(self, values: pd.Series) -> pd.Series:\n \"\"\"Validate the suggested candidates for the feature.\n\n Args:\n values (pd.Series): suggested candidates for the feature\n\n Raises:\n ValueError: Error is raised when one of the values is not numerical.\n\n Returns:\n pd.Series: the original provided candidates\n \"\"\"\n try:\n values = pd.to_numeric(values, errors=\"raise\").astype(\"float64\")\n except ValueError:\n raise ValueError(\n f\"not all values of input feature `{self.key}` are numerical\"\n )\n return values\n\n def get_bounds(\n self,\n transform_type: Optional[TTransform] = None,\n values: Optional[pd.Series] = None,\n ) -> Tuple[List[float], List[float]]:\n assert transform_type is None\n if values is None:\n return [self.lower_bound], [self.upper_bound] # type: ignore\n lower = min(self.lower_bound, values.min()) # type: ignore\n upper = max(self.upper_bound, values.max()) # type: ignore\n return [lower], [upper] # type: ignore\n","repo_name":"experimental-design/bofire","sub_path":"bofire/data_models/features/numerical.py","file_name":"numerical.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"37"} +{"seq_id":"31320149704","text":"#!/usr/bin/env python\n\n\"\"\"grpah_tests.py: Testing installation, models, environment etc.\n\n Arguments:\n Required:\n\n Optional:\n\n Usage: grpah_tests.py [-h] -d DATA -l LOG -k KEYPOINTS [-c CHANNELS] [-e EPOCHS] [-n NAME] [-b BATCH]\n Usage:\n Example:\n\"\"\"\n\n\n# Imports\nimport sys\nimport os\nimport numpy as np\n\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv\nfrom torch_geometric.data import Data\nfrom torch_geometric.datasets import Planetoid\nfrom torch_geometric.data import DataLoader\n\n'''\nThese test cases are borrowed from the Pytorch Geometric introduction found at the\nfollowing link: https://pytorch-geometric.readthedocs.io/en/latest/notes/introduction.html\n'''\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, num_node_features, num_classes):\n super(Net, self).__init__()\n self.conv1 = GCNConv(num_node_features, 16)\n self.conv2 = GCNConv(16, num_classes)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x = self.conv1(x, edge_index)\n x = F.relu(x)\n x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index)\n return F.log_softmax(x, dim=1)\n\n\ndef main():\n\n print('Testing Basic Graph...')\n nodes = torch.tensor([[-1], [0], [1]], dtype=torch.float)\n edge_index = torch.tensor([[0, 1, 1, 2], 
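The `NumericalInput` record above maps values to and from the unit interval using the feature bounds. A plain-function sketch of the round trip (the bounds are illustrative):

```python
import pandas as pd

def to_unit_range(values, lower, upper):
    # Mirrors NumericalInput.to_unit_range: reject degenerate bounds.
    if lower == upper:
        raise ValueError("fixed feature cannot be scaled")
    return (values - lower) / (upper - lower)

def from_unit_range(values, lower, upper):
    # Inverse transform back to the original scale.
    return values * (upper - lower) + lower

s = pd.Series([10.0, 15.0, 20.0])
scaled = to_unit_range(s, 10.0, 20.0)
assert from_unit_range(scaled, 10.0, 20.0).equals(s)
print(list(scaled))  # [0.0, 0.5, 1.0]
```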
[1, 0, 2, 1]], dtype=torch.long)\n graph_data = Data(x=nodes, edge_index=edge_index)\n print(f'Generated graph data: {graph_data}')\n\n print('Testing graphs to GPU...')\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n graph_data.to(device)\n print(f'Graph data was sent to {device}')\n\n print('Testing graph datasets...')\n dataset = Planetoid(root='/tmp/Cora', name='Cora')\n print(f'Enzyme dataset of size {len(dataset)} was loaded')\n print(f'Example graph: {dataset[0]}')\n\n print('Testing train loop...')\n loader = DataLoader(dataset, batch_size=32, shuffle=True)\n model = Net(dataset.num_node_features, dataset.num_classes).to(device)\n data = dataset[0].to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n\n for epoch in range(100):\n optimizer.zero_grad()\n out = model(data)\n loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])\n loss.backward()\n optimizer.step()\n\n model.eval()\n _, pred = model(data).max(dim=1)\n correct = int(pred[data.test_mask].eq(data.y[data.test_mask]).sum().item())\n acc = correct / int(data.test_mask.sum())\n print('Accuracy: {:.4f}'.format(acc))\n print('Done!')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"StafaH/graph-imitation-learning","sub_path":"src/graphs/graph_tests.py","file_name":"graph_tests.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17538523819","text":"import discord, os\nfrom discord.ext import commands, tasks\nfrom discord.utils import get\n\nfrom utils.VatsimData import VatsimData\nVD = VatsimData()\n\nfrom dotenv import load_dotenv\nload_dotenv()\n\nembed_colour = discord.Color.blue()\n\nclass testing(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n self.coordcategory = int(os.getenv('c_coordcategory'))\n self.channel_types = {\n 'DEL': 'Visual Room',\n 'GND': 'Visual Room',\n 'TWR': 'Visual Room',\n 'APP': 'Radar Room',\n 'DEP': 'Radar Room',\n 'CTR': 'Enroute',\n 'FSS': 'Enroute',\n }\n self.guild_id = os.getenv('guild_id')\n \n @commands.command(name=\"ch\")\n async def ch_command(self, ctx):\n pass\n \n\n\n\ndef setup(client):\n client.add_cog(testing(client))\n","repo_name":"vaccfr/Discord-Bot","sub_path":"cogs/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72118563307","text":"# CGI\n# WSGI\n# WERKZEUG\n# JINJA2\n\nfrom flask import Flask, render_template, request, url_for, redirect\nimport sqlite3\n\napp = Flask(__name__) # создается объект приложения\n\n\n# connection = sqlite3.connect('my_database.db')\n# connection.close()\n# cursor.execute('''\n# CREATE TABLE IF NOT EXISTS Users (\n# name TEXT NOT NULL,\n# password TEXT NOT NULL\n# )\n# ''')\n\nconnection = sqlite3.connect('my_database.db')\ncursor = connection.cursor()\ncursor.execute('''\n CREATE TABLE IF NOT EXISTS Users (\n name TEXT NOT NULL UNIQUE,\n password TEXT NOT NULL\n )\n ''')\ncursor.execute('''\n CREATE TABLE IF NOT EXISTS News (\n name TEXT NOT NULL,\n content TEXT NOT NULL\n )\n ''')\ncursor.close()\nconnection.close()\n\n\n@app.route('/about')\n@app.route('/')\n@app.route('/home.html')\ndef hello_world(): # put application's code here\n connection = sqlite3.connect('my_database.db')\n cursor = connection.cursor()\n users = cursor.execute(\"SELECT * FROM users\").fetchall()\n 
print(users)\n # users = cursor.execute(\"SELECT * FROM users WHERE name=?\", (login,)).fetchone()\n connection.commit()\n cursor.close()\n connection.close()\n\n return render_template('home.html', authors=users)\n\n\n@app.route('/about')\ndef about_site():\n return render_template('news.html')\n\n\n@app.route('/users/')\ndef username_info(username):\n return f\"{username}\"\n\n\n@app.route('/posts/')\ndef post(id):\n return f\"{id}\"\n\n\n# можно явно задавать принимаемые запросы\n# @app.route('/login', methods=['POST', 'GET'])\n# def login_func():\n# if request.method == 'GET':\n# return render_template('auth.html')\n# elif request.method == 'POST':\n# return ' поздравляю, вы авторизировались ', 201\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n connection = sqlite3.connect('my_database.db')\n cursor = connection.cursor()\n user = cursor.execute(f\"SELECT * FROM Users WHERE name=?\", (request.form.get('login'), )).fetchone()\n print(user)\n cursor.close()\n connection.close()\n if user == None:\n return 'пользователя не существует'\n\n if user[1] == request.form.get('pass'):\n return 'форма заполнена успешно'\n return 'пароль не правильный' # обязательно что-нибудь возвращаем\n else:\n return render_template('auth.html') # форма авторизации\n\n\n@app.route('/registration', methods=['GET', 'POST'])\ndef registration():\n if request.method == 'POST':\n print(request.form.get('login')) # достаем логин, который ввел пользователь\n print(request.form.get('pass')) # после достаем пароль\n connection = sqlite3.connect('my_database.db')\n cursor = connection.cursor()\n cursor.execute('INSERT INTO Users (name, password) VALUES (?, ?)',\n (request.form.get('login'), request.form.get('pass')))\n connection.commit()\n cursor.close()\n connection.close()\n\n return 'спасибо за регистрацию'\n else:\n return render_template('registration.html') # форма авторизации\n\n\n@app.route(\"/all-news\")\n@app.route(\"/all-news/\")\ndef all_news_func(login=None):\n connection = sqlite3.connect('my_database.db')\n cursor = connection.cursor()\n print(cursor.execute(\"SELECT * FROM users WHERE name=?\", (login, )).fetchone())\n news = []\n\n if login == None:\n login = ''\n news = cursor.execute(\"SELECT * FROM news\").fetchall()\n else:\n news = cursor.execute(\"SELECT * FROM news WHERE name=?\", (login, )).fetchall()\n\n return render_template('news.html', author=login, news=news)\n\n\n@app.route(\"/all-author\")\ndef all_author_func():\n connection = sqlite3.connect('my_database.db')\n cursor = connection.cursor()\n news = cursor.execute(\"SELECT * FROM users\").fetchall()\n connection.commit()\n cursor.close()\n connection.close()\n print(news)\n news_block = \"\"\n for i in news:\n news_block += f'

Автор: {i[0]}\\n

'\n html = f\"\"\"\n \n \n \n Title\n \n \n \n
\n
\n\n
\n

ALL AUTHOR

\n
\n
\n
\n \n {news_block}\n\n \n \"\"\"\n\n return html\n\n\n@app.route('/add-news', methods=['POST', 'GET'])\ndef add_news_func():\n if request.method == 'POST':\n connection = sqlite3.connect('my_database.db')\n cursor = connection.cursor()\n user = cursor.execute(f\"SELECT * FROM Users WHERE name=?\", (request.form.get('login'), )).fetchone()\n connection.commit()\n cursor.close()\n connection.close()\n if user == None:\n return 'пользователя не существует'\n\n print(request.form.get('login')) # достаем логин, который ввел пользователь\n print(request.form.get('content')) # после достаем пароль\n\n connection = sqlite3.connect('my_database.db')\n cursor = connection.cursor()\n cursor.execute('INSERT INTO news (name, content) VALUES (?, ?)',\n (request.form.get('login'), request.form.get('content')))\n\n connection.commit()\n cursor.close()\n connection.close()\n\n return redirect(f\"/all-news/{request.form.get('login')}\")\n else:\n return render_template('add_news.html')\n\n\n@app.route('/shop-list')\ndef shop_list_func():\n items = ['товар 1', 'товар 2', 'товар 3']\n return render_template('shop_list.html', items=items)\n\n\n@app.route('/admin', methods=['GET', 'POST'])\ndef admin_func():\n connection = sqlite3.connect('my_database.db')\n cursor = connection.cursor()\n admins = cursor.execute(\"SELECT * FROM users\").fetchall()\n # users = cursor.execute(\"SELECT * FROM users WHERE name=?\", (login,)).fetchone()\n connection.commit()\n cursor.close()\n connection.close()\n\n\n if request.method == 'POST':\n connection = sqlite3.connect('my_database.db')\n cursor = connection.cursor()\n cursor.execute(\"UPDATE Users SET description=? WHERE name=?\", (request.form.get('content'), request.form.get('login')))\n connection.commit()\n cursor.close()\n connection.close()\n print(request.form.get('login')) # достаем логин, который ввел пользователь\n print(request.form.get('content')) # после достаем пароль\n\n\n return redirect(f\"/\")\n else:\n return render_template('admin.html', admins=admins)\n\n\n\n\n\n\nif __name__ == '__main__':\n # app.run(debug=True)\n app.run(host=\"127.0.0.1\", port=80)\n # Устанавливаем соединение с базой данных\n","repo_name":"DevMosh/FlaskProjectLearning","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36000351522","text":"from kivymd.uix.boxlayout import MDBoxLayout\n\nfrom mvc.model import Dice, Wound\n\n\nclass DicePanel(MDBoxLayout):\n \"\"\"Widget for displaying dice.\n\n Methods:\n remove()\n \"\"\"\n def remove(self) -> bool:\n \"\"\"Delete itself.\n\n :return: True\n \"\"\"\n Container().remove_dice_panel(self)\n self.parent.remove_widget(self)\n\n return True\n\n\nclass Container(MDBoxLayout):\n \"\"\"The base container for the entire application.\n\n Methods:\n view_throws()\n remove_dice_panel(dice_panel)\n add_dice_panel()\n \"\"\"\n _dice_panels = list()\n\n def __init__(self):\n super().__init__()\n self.add_dice_panel()\n self.wound = None\n\n def view_throws(self) -> str:\n \"\"\"Show the result of the roll of the selected dice.\n\n :return: the result displayed on the screen\n \"\"\"\n self.wound = Wound()\n dices_result: list = []\n\n for dice_panel in self._dice_panels:\n sides: str = dice_panel.ids.sides_input.text\n count: str = dice_panel.ids.count_input.text\n if sides not in ['', '0'] and count not in ['', '0']:\n dice: Dice = Dice(int(sides))\n [dices_result.append(dice.throw()) for _ in 
range(int(count))]\n\n self.wound.calculate_wounds(dices_result)\n self.result.text = f'{self.wound}'\n return f'{self.result.text}'\n\n def remove_dice_panel(self, dice_panel: DicePanel) -> bool:\n \"\"\"Delete the received object from itself.\n\n :return: True\n \"\"\"\n self._dice_panels.remove(dice_panel)\n\n return True\n\n def add_dice_panel(self) -> Dice:\n \"\"\"Add a new dice panel to itself.\n\n :return: added dice panel\n \"\"\"\n new_dice_panel = DicePanel()\n self._dice_panels.append(new_dice_panel)\n self.add_widget(new_dice_panel, index=3)\n\n return new_dice_panel\n","repo_name":"Kapusta-fairy/App-DiceManager","sub_path":"mvc/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22069823372","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom tauv_msgs.msg import FeatureDetections, FeatureDetection\nfrom tauv_msgs.srv import MapFind, MapFindClosest\nfrom visualization_msgs.msg import MarkerArray, Marker\nfrom tauv_util.transforms import rpy_to_quat\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import Point\nimport numpy as np\nimport cv2\nfrom cv_bridge import CvBridge, CvBridgeError\n\ndef getColor(tag, trans=False):\n color = [0,0,0,1]\n\n if(tag==\"phone\"):\n color = [1,0,0,1]\n\n if(tag==\"badge\"):\n color = [0,1,0,1]\n\n if(tag==\"notebook\"):\n color = [0,0,1,1]\n\n if(tag==\"gate\"):\n color = [1,1,0,1]\n\n if (tag == \"circle\"):\n color = [0, 1, 1, 1]\n\n if (tag == \"chevron\"):\n color = [0, 0.5, 1, 1]\n\n if(not trans):\n color[3] = 0.75\n\n return color\n\ndef makeMarker(tag, id, detection, color, scale = 0.5, shape = Marker.SPHERE, time = rospy.Duration(1.0)):\n marker = Marker()\n marker.header.frame_id = \"kf/odom\" # FIX THIS\n marker.header.stamp = rospy.Time()\n marker.ns = tag\n marker.id = id\n marker.type = shape\n marker.action = Marker.ADD\n marker.pose.position.x = detection.position.x\n marker.pose.position.y = detection.position.y\n marker.pose.position.z = detection.position.z\n marker.pose.orientation.x = 0.0\n marker.pose.orientation.y = 0.0\n marker.pose.orientation.z = 0.0\n marker.pose.orientation.w = 1.0\n marker.scale.x = scale\n marker.scale.y = scale\n marker.scale.z = scale\n marker.color.a = color[3]\n marker.color.r = color[0]\n marker.color.g = color[1]\n marker.color.b = color[2]\n marker.lifetime = time\n \n return marker\n\ndef makeCircleMarker(id, detection):\n marker = Marker()\n marker.header.frame_id = \"kf/odom\" # FIX THIS\n marker.header.stamp = rospy.Time()\n marker.ns = 'circle'\n marker.id = id\n marker.type = Marker.ARROW\n marker.pose.position.x = detection.position.x\n marker.pose.position.y = detection.position.y\n marker.pose.position.z = detection.position.z\n marker.pose.orientation = rpy_to_quat(np.array([\n detection.orientation.x,\n detection.orientation.y,\n detection.orientation.z,\n ]))\n marker.scale.x = 1\n marker.scale.y = 0.1\n marker.scale.z = 0.1\n marker.color.r = 1\n marker.color.g = 1\n marker.color.b = 0\n marker.color.a = 1\n marker.lifetime = rospy.Duration(1.0)\n\n return marker\n\n\nclass Logger():\n def __init__(self):\n rospy.init_node('logger')\n\n self.ind = 10000\n self.viz = rospy.Publisher(\"global_map/visualization_marker_array\", MarkerArray, queue_size=100)\n self.find = rospy.ServiceProxy(\"global_map/find\", MapFind)\n self.bridge = CvBridge()\n\n rospy.Subscriber(\"global_map/feature_detections\", FeatureDetections,\n 
self.publish)\n\n # ???\n # rospy.Subscriber(\"global_map/find\", FeatureDetections,\n # self.visualize)\n\n rospy.wait_for_service(\"global_map/find\")\n\n #rospy.Subscriber(\"/oakd/oakd_front/depth_map\", Image, self.depth)\n #rospy.Subscriber(\"/oakd/oakd_front/color_image\", Image, self.color)\n\n rospy.Timer(rospy.Duration(0.5), self.visualize)\n\n def color(self, data):\n print(\"COLOR\")\n cv_image = self.bridge.imgmsg_to_cv2(data, desired_encoding='bgr8')\n print(data.header)\n print(cv_image)\n\n def depth(self, data):\n print(\"BW\")\n cv_image = self.bridge.imgmsg_to_cv2(data, desired_encoding='mono16')\n print(data.header)\n print(cv_image)\n\n def visualize(self, time):\n #buckets = bucketList.bucket_list\n buckets1 = self.find(\"badge\").detections\n buckets2 = self.find(\"phone\").detections\n buckets3 = self.find(\"notebook\").detections\n buckets4 = self.find(\"gate\").detections\n buckets5 = self.find(\"circle\").detections\n buckets6 = self.find(\"chevron\").detections\n\n markers = []\n for ind in range(len(buckets1)):\n det = buckets1[ind]\n markers.append(makeMarker(\"badge\", ind, det, getColor(det.tag), 1, Marker.CUBE, rospy.Duration(5.0)))\n\n for ind in range(len(buckets2)):\n det = buckets2[ind]\n markers.append(makeMarker(\"phone\", ind+len(buckets1), det, getColor(det.tag), 1, Marker.CUBE, rospy.Duration(5.0)))\n\n for ind in range(len(buckets3)):\n det = buckets3[ind]\n markers.append(makeMarker(\"notebook\", ind+len(buckets1)+len(buckets2), det, getColor(det.tag), 1, Marker.CUBE, rospy.Duration(5.0)))\n\n for ind in range(len(buckets4)):\n det = buckets4[ind]\n markers.append(makeMarker(\"gate\", ind+len(buckets1)+len(buckets2)+len(buckets3), det, getColor(det.tag), 1, Marker.CUBE, rospy.Duration(5.0)))\n\n for ind in range(len(buckets5)):\n det = buckets5[ind]\n markers.append(makeCircleMarker(ind+len(buckets1)+len(buckets2)+len(buckets3)+len(buckets4), det))\n\n for ind in range(len(buckets6)):\n det = buckets6[ind]\n markers.append(makeCircleMarker(ind+len(buckets1)+len(buckets2)+len(buckets3)+len(buckets4)+len(buckets5), det))\n\n OBJ = MarkerArray()\n OBJ.markers = markers\n\n self.viz.publish(OBJ)\n\n def publish(self, objects):\n detections = objects.detections\n markers = []\n for detection in detections:\n markers.append(makeMarker(detection.tag, self.ind, detection, getColor(detection.tag, True), scale=0.05))\n self.ind += 1\n\n markersPub = MarkerArray()\n markersPub.markers = markers\n self.viz.publish(markersPub)\n\ndef main():\n s = Logger()\n rospy.spin()","repo_name":"Tartan-AUV/TAUV-ROS-Packages","sub_path":"src/packages/tauv_common/src/vision/detectors/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"8801508721","text":"import torch\nimport torch.nn as nn\n\nclass WideBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels, stride=1, downsample=False, dropout=0):\n super(WideBlock, self).__init__()\n self.block1 = nn.Sequential(\n nn.BatchNorm2d(in_channels),\n nn.ReLU(inplace=True)\n )\n\n self.block2 = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3,\n stride=stride, padding=1),\n #nn.Dropout2d(dropout),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, kernel_size=3,\n stride=1, padding=1),\n )\n\n if downsample:\n self.downsample = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=1,\n stride=stride),\n )\n 
else:\n self.downsample = None\n\n def forward(self, x):\n out = self.block1(x)\n\n if self.downsample is not None:\n res = self.downsample(out)\n else:\n res = x\n\n out = self.block2(out)\n \n return out + res\n\nclass WideResNet(nn.Module):\n\n def __init__(self):\n super(WideResNet, self).__init__()\n\n self.k = 8\n\n self.init = nn.Sequential(\n nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(16),\n nn.ReLU(inplace=True),\n )\n\n self.layer1 = nn.Sequential(\n WideBlock(16, 16*self.k, 2, True),\n WideBlock(16*self.k, 16*self.k, 1, False, .3),\n WideBlock(16*self.k, 16*self.k, 1, False, .3)\n )\n\n self.layer2 = nn.Sequential(\n WideBlock(16*self.k, 32*self.k, 2, True),\n WideBlock(32*self.k, 32*self.k, 1, False, .3),\n WideBlock(32*self.k, 32*self.k, 1, False, .3)\n )\n\n self.layer3 = nn.Sequential(\n WideBlock(32*self.k, 64*self.k, 2, True),\n WideBlock(64*self.k, 64*self.k, 1, False, .3),\n WideBlock(64*self.k, 64*self.k, 1, False, .3)\n )\n\n self.end = nn.Sequential(\n nn.BatchNorm2d(64*self.k),\n nn.ReLU(inplace=True),\n nn.AvgPool2d(6, stride=1)\n )\n\n self.fc = nn.Sequential(\n nn.Linear(64*self.k, 7),\n nn.LogSoftmax(dim=1)\n )\n\n def forward(self, x):\n x = self.init(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.end(x)\n x = x.view(-1, 64*self.k)\n x = self.fc(x)\n\n return x\n\nclass Ensemble(nn.Module):\n\n def __init__(self, model1, model2, model3, model4):\n super(Ensemble, self).__init__()\n\n self.model1 = model1\n self.model2 = model2\n self.model3 = model3\n self.model4 = model4\n\n def forward(self, x):\n\n return x\n","repo_name":"empennage98/ML2018SPRING","sub_path":"hw3/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73511616106","text":"#!/usr/bin/env python3\n# Submodule name: data_selection.py\n\n\"\"\"\nSubmodule to select data: \n- rows of feature tables of the whole corpus based on the metadata file for the reduced corpus\n- txt files from the whole corpus, based on the metadata file for the reduced corpus\n\n@author: Ulrike Henny-Krahmer\n\n\"\"\"\n\nimport pandas as pd\nfrom os.path import join\nfrom os.path import basename\nfrom shutil import copy\nimport glob\n\ndef select_entries(wdir, md_file, feat_file, outfile):\n\t\"\"\"\n\tSelect only those rows from the feature matrices whose IDs are in the metadata table.\n\t\n\tArguments:\n\twdir (str): path to the working directory\n\tmd_file (str): relative path to the metadata file\n\tfeat_file (str): relative path to the file containing the full feature matrix\n\toutfile (str): relative path to the output file for the reduced feature matrix\n\t\n\t\"\"\"\n\t\n\tmd = pd.read_csv(join(wdir, md_file), index_col=0)\n\t\n\tidnos = list(md.index)\n\t\n\tfeatures = pd.read_csv(join(wdir, feat_file), index_col=0)\n\tfeatures_reduced = features.loc[idnos]\n\t\n\tfeatures_reduced.to_csv(join(wdir, outfile), encoding=\"UTF-8\")\n\t\n\tprint(\"Done\")\n\t\n\ndef select_files(wdir, md_file, file_folder, outfolder):\n\t\"\"\"\n\tSelect only those files from a folder whose IDs are in the metadata table.\n\t\n\tArguments:\n\twdir (str): path to the working directory\n\tmd_file (str): relative path to the metadata file\n\tfile_folder (str): relative path to the folder containing all the files (with file name extension pattern, e.g. txt/*.txt)\n\toutfolder (str): relative path to the output folder for the reduced set of files. 
This directory has to exist.\n\t\"\"\"\n\t\n\tmd = pd.read_csv(join(wdir, md_file), index_col=0)\n\tidnos = list(md.index)\n\t\n\tnum_files = 0\n\t\n\tfor filepath in glob.glob(join(wdir, file_folder)):\n\t\tfilename = basename(filepath)\n\t\tfileidno = filename[:-4]\n\t\t\n\t\tif fileidno in idnos:\n\t\t\tprint(\"copying \" + fileidno + \"...\")\n\t\t\tcopy(filepath,join(wdir, outfolder))\n\t\t\tnum_files += 1\n\t\n\tprint(\"Done: copied \" + str(num_files) + \" files\")\n\t\n\t\n\t\ndef add_cluster_info(wdir, md_file, cluster_file, outfile):\n\t\"\"\"\n\tAdds information about clusters to a metadata file and stores this specific file\n\t\n\tArguments:\n\twdir (str): path to the working directory\n\tmd_file (str): relative path to the metadata file\n\tcluster_file (str): relative path to the file containing the information about the clusters\n\toutfile (str): relative path to the output file\n\t\"\"\"\n\tmd = pd.read_csv(join(wdir, md_file), index_col=0)\n\tclusters = pd.read_csv(join(wdir, cluster_file), index_col=0)\n\tcluster_list = list(clusters[\"cluster\"])\n\tmd[\"cluster\"] = cluster_list\n\tmd.to_csv(join(wdir, outfile))\n\tprint(\"Done\")\n\t\n\t\n\t\ndef copy_cluster_files(wdir, text_dir, md_file, stylo_folder, cluster_1, cluster_2):\n\t\"\"\"\n\tCopy specific files (of one cluster vs. rest; or one cluster vs. another cluster) to a folder\n\tfor an oppose analysis with stylo. The first group is stored in a \"primary_set\" folder, the \n\tsecond in a \"secondary_set\" folder.\n\t\n\tArguments:\n\twdir (str): path to the working directory\n\ttext_dir (str): relative path to the folder containing the full text files of the corpus\n\tmd_file (str): relative path to the metadata file\n\tstylo_folder (str): relative path to the stylo folder\n\tcluster_1 (int): first cluster to keep\n\tcluster_2 (int): second cluster to keep for comparison. 
If cluster_1 should be compared to all other clusters together, set this to None\n\t\"\"\"\n\tprint(\"Copy cluster files...\")\n\t\n\tmd = pd.read_csv(join(wdir, md_file), index_col=0)\n\t# get the CLiGS ids of the novels belonging to the first cluster\n\tidnos_cluster_1 = md[md.cluster == cluster_1].index\n\t# copy the cluster files of cluster 1 to the stylo folder (make sure that the target directories exist and are empty)\n\tfor idno in idnos_cluster_1:\n\t\tfilename = idno + \".txt\"\n\t\tinpath = join(wdir, text_dir, filename)\n\t\toutpath = join(wdir, stylo_folder, \"primary_set\")\n\t\tcopy(inpath,join(wdir, outpath))\n\t\n\tif cluster_2:\n\t\t# keep only two selected clusters:\n\t\tmd = md[md.cluster.isin([cluster_1,cluster_2])]\n\t\t# get the CLiGS ids of the novels belonging to the second cluster\n\t\tidnos_cluster_2 = md[md.cluster == cluster_2].index\n\t\t\n\t\t# copy the cluster files of cluster 2 to the stylo folder (make sure that the target directories exist and are empty)\n\t\tfor idno in idnos_cluster_2:\n\t\t\tfilename = idno + \".txt\"\n\t\t\tinpath = join(wdir, text_dir, filename)\n\t\t\toutpath = join(wdir, stylo_folder, \"secondary_set\")\n\t\t\tcopy(inpath,join(wdir, outpath))\n\t\n\telse:\n\t\t# merge clusters (by changing cluster values):\n\t\t# first: find out, what \"the rest\" is\n\t\tcluster_rest = set(list(md.cluster))\n\t\tcluster_rest.remove(cluster_1)\n\t\t# get the CLiGS ids of the novels belonging to the rest cluster\n\t\tidnos_cluster_rest = md[md.cluster.isin(cluster_rest)].index\n\t\t\n\t\t# copy the cluster files of the rest cluster to the stylo folder (make sure that the target directories exist and are empty)\n\t\tfor idno in idnos_cluster_rest:\n\t\t\tfilename = idno + \".txt\"\n\t\t\tinpath = join(wdir, text_dir, filename)\n\t\t\toutpath = join(wdir, stylo_folder, \"secondary_set\")\n\t\t\tcopy(inpath,join(wdir, outpath))\n\t\t\n\tprint(\"Done\")\n\t\n\t\n\t\nwdir = \"/home/ulrike/Git/papers/family_resemblance_dsrom19/\"\n#select_entries(wdir, \"corpus_metadata/metadata.csv\", \"features/mfw_1000_tfidf_full.csv\", \"features/mfw_1000_tfidf.csv\")\n#select_entries(wdir, \"corpus_metadata/metadata_SENT.csv\", \"features/mfw_1000_tfidf_full.csv\", \"features/mfw_1000_tfidf_SENT.csv\")\n#select_entries(wdir, \"corpus_metadata/metadata_HIST.csv\", \"features/mfw_1000_tfidf_full.csv\", \"features/mfw_1000_tfidf_HIST.csv\")\n\n#select_entries(wdir, \"corpus_metadata/metadata.csv\", \"features/avgtopicscores_by-idno_full.csv\", \"features/avgtopicscores_by-idno.csv\")\n#select_entries(wdir, \"corpus_metadata/metadata_SENT.csv\", \"features/avgtopicscores_by-idno_full.csv\", \"features/avgtopicscores_by-idno_SENT.csv\")\n#select_entries(wdir, \"corpus_metadata/metadata_HIST.csv\", \"features/avgtopicscores_by-idno_full.csv\", \"features/avgtopicscores_by-idno_HIST.csv\")\n\t\n#select_files(wdir, \"corpus_metadata/metadata.csv\", \"txt_full/*.txt\", \"txt/\")\n\n#add_cluster_info(wdir, \"corpus_metadata/metadata_HIST.csv\", \"analysis/clusters/clusters_3nn_cosine_mfw_1000_tfidf_HIST.csv\", \"corpus_metadata/metadata_HIST_mfw_1000_cl.csv\")\n#add_cluster_info(wdir, \"corpus_metadata/metadata_HIST.csv\", \"analysis/clusters/clusters_3nn_cosine_topics_100_HIST.csv\", \"corpus_metadata/metadata_HIST_topics_100_cl.csv\")\nadd_cluster_info(wdir, \"corpus_metadata/metadata_SENT.csv\", \"analysis/clusters/clusters_3nn_cosine_mfw_1000_tfidf_SENT.csv\", \"corpus_metadata/metadata_SENT_mfw_1000_tfidf_cl.csv\")\nadd_cluster_info(wdir, 
\"corpus_metadata/metadata_SENT.csv\", \"analysis/clusters/clusters_3nn_cosine_topics_100_SENT.csv\", \"corpus_metadata/metadata_SENT_topics_100_cl.csv\")\nadd_cluster_info(wdir, \"corpus_metadata/metadata.csv\", \"analysis/clusters/clusters_3nn_cosine_mfw_1000_tfidf.csv\", \"corpus_metadata/metadata_mfw_1000_tfidf_cl.csv\")\nadd_cluster_info(wdir, \"corpus_metadata/metadata.csv\", \"analysis/clusters/clusters_3nn_cosine_topics_100.csv\", \"corpus_metadata/metadata_topics_100_cl.csv\")\n\n#copy_cluster_files(wdir, \"texts/txt_full\", \"corpus_metadata/metadata_HIST_mfw_1000_cl.csv\", \"stylo\", 3, None)\n\n","repo_name":"cligs/scripts-nh","sub_path":"analysis/family_resemblance/data_selection.py","file_name":"data_selection.py","file_ext":"py","file_size_in_byte":7176,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71270293549","text":"from __future__ import print_function\nimport unittest\n\nimport sys, os\nmy_path = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, my_path + '/../')\n\nimport re\nimport requests\nimport teres\nimport teres.bkr_handlers\nimport tempfile\n\nENV = not bool(os.environ.get(\"BEAKER_RECIPE_ID\") and os.environ.get(\"BEAKER_LAB_CONTROLLER_URL\"))\n\nTS_REGEXP = r'[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}\\.[0-9]{6} '\ndef prefix_ts_regexp(line):\n if line:\n return TS_REGEXP + re.escape(line)\n return line\n\ndef ts_regexp(text):\n return \"\\n\".join([prefix_ts_regexp(line) for line in text.split('\\n')])\n\n@unittest.skipIf(ENV, \"Beaker environment variables are not set.\")\nclass BkrEnv(unittest.TestCase):\n def mySetUp(self, *args, **kwargs):\n self.reporter = teres.Reporter()\n\n self.handler = teres.bkr_handlers.ThinBkrHandler(*args, **kwargs)\n self.assertIsNotNone(self.handler.recipe_id)\n self.assertIsNotNone(self.handler.lab_controller_url)\n\n self.reporter.add_handler(self.handler)\n\n def assertMatchesLong(self, test, reference):\n\n t = test.splitlines()\n r = reference.splitlines()\n\n try:\n for i in range(max(len(t), len(r))):\n self.assertRegexpMatches(teres.make_text(t[i]), r[i])\n except IndexError:\n print()\n print()\n print(\"Test string:\\n{}\".format(test))\n print()\n print(\"Reference string:\\n{}\".format(reference))\n print()\n\n self.fail(\"Test string and reference string are of different length.\")\n\n\nclass BkrTest(BkrEnv):\n def test_simple_messages(self):\n \"\"\"\n Test posting of simple messages to beaker.\n \"\"\"\n test = \"test_simple_messages\"\n self.mySetUp(task_log_name=test)\n\n self.reporter.log_error(\"error msg\")\n self.reporter.log_fail(\"fail msg\")\n self.reporter.log_pass(\"pass msg\")\n self.reporter.log_info(\"info msg\")\n self.reporter.log_debug(\"debug msg\")\n\n self.reporter.test_end()\n\n # Check the results.\n ref = ts_regexp(\"\"\":: [ ERROR ] :: error msg\n:: [ FAIL ] :: fail msg\n:: [ PASS ] :: pass msg\n:: [ INFO ] :: info msg\n:: [ ERROR ] :: Test finished with the result: ERROR\n\"\"\")\n\n url = self.handler._get_task_url() + \"logs/\" + test\n content = requests.get(url).content\n\n self.assertMatchesLong(content, ref)\n\n def test_file_names(self):\n \"\"\"\n Test file naming of logs sent to beaker.\n \"\"\"\n test = \"test_file_names\"\n self.mySetUp(task_log_name=test)\n\n self.reporter.send_file('/proc/cmdline')\n self.reporter.send_file('/proc/cpuinfo', logname=\"custom_file_name\")\n\n f = open(\"/tmp/foo bar\", \"w+\")\n f.close()\n self.reporter.send_file('/tmp/foo bar')\n\n tmp = 
tempfile.TemporaryFile()\n tmp.write(\"I'm a temporary file.\".encode())\n self.reporter.send_file(tmp)\n self.reporter.send_file(tmp, logname=\"tmp_file\")\n\n self.reporter.test_end()\n\n # Check the results.\n ref = ts_regexp(\"\"\":: [ FILE ] :: Sending file \"/proc/cmdline\" as \"cmdline\".\n:: [ FILE ] :: Sending file \"/proc/cpuinfo\" as \"custom_file_name\".\n:: [ FILE ] :: Sending file \"/tmp/foo bar\" as \"foo_bar\".\n:: [ FILE ] :: Sending file \"tmp_file\".\n:: [ NONE ] :: Test finished with the result: NONE\n\"\"\")\n\n url = self.handler._get_task_url() + \"logs/\" + test\n content = requests.get(url).content\n\n self.assertMatchesLong(content, ref)\n\n def test_overall_result(self):\n self.mySetUp(task_log_name=\"test_overall_result\", report_overall=\"Overall result\")\n\n self.reporter.log_fail(\"This test has successfully failed.\")\n self.reporter.test_end()\n\n # Check the results.\n ref = ts_regexp(\"\"\":: [ FAIL ] :: This test has successfully failed.\n:: [ FAIL ] :: Test finished with the result: FAIL\n:: [ FAIL ] :: Overall result\"\"\")\n\n url = self.handler._get_task_url() + \"logs/test_overall_result\"\n content = requests.get(url).content\n\n self.assertMatchesLong(content, ref)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"rhinstaller/teres","sub_path":"tests/test_bkr_handlers.py","file_name":"test_bkr_handlers.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"28331438592","text":"from fastapi import FastAPI, File, UploadFile\r\nfrom sklearn.feature_extraction import image\r\nimport uvicorn\r\nimport numpy as np\r\nimport cv2\r\nfrom fastapi.middleware.cors import CORSMiddleware\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\nimport tensorflow as tf\r\nimport requests\r\n\r\napp = FastAPI()\r\n\r\norigins = [\r\n \"http://localhost\",\r\n \"http://localhost:3000\",\r\n]\r\napp.add_middleware(\r\n CORSMiddleware,\r\n allow_origins=origins,\r\n allow_credentials=True,\r\n allow_methods=[\"*\"],\r\n allow_headers=[\"*\"],\r\n)\r\n\r\n#endpoint = \"http://localhost:8605/v1/models/corn_model:predict\"\r\n\r\nMODEL = tf.keras.models.load_model(\"C:\\\\Users\\\\HP\\\\Documents\\\\Winter_sem_2021_2022\\\\code1\\\\Corn-disease-classification\\\\saved_models\\\\3\")\r\nclass_names = ['Blight', 'Common_Rust', 'Gray_Leaf_Spot', 'Healthy']\r\n@app.get(\"/ping\")\r\nasync def ping():\r\n return \"hello,i am alive\"\r\n\r\ndef read_file_as_image(data) -> np.ndarray:\r\n image = np.array(Image.open(BytesIO(data)))\r\n return image\r\n\r\n@app.post(\"/predict\")\r\nasync def predict(\r\n file: UploadFile = File(...)\r\n):\r\n image = read_file_as_image(await file.read())\r\n image_batch = np.expand_dims(image,0)\r\n #json_data = {\r\n # \"instances\":image_batch.tolist()\r\n # }\r\n\r\n #response = requests.post(endpoint,json=json_data)\r\n #prediction = response.json()[\"predictions\"][0]\r\n\r\n #predicted_class = class_names[np.argmax(prediction)]\r\n #confidence = np.max(prediction)\r\n \r\n #return{\r\n # \"class\":predicted_class,\r\n # \"confidence\":float(confidence)\r\n #}\r\n\r\n prediction = MODEL.predict(image_batch)\r\n predicted_class = class_names[np.argmax(prediction[0])]\r\n confidence = np.max(prediction[0])\r\n #dynamically route your traffic to different model\r\n return {\r\n 'class':predicted_class,\r\n 'confidence': float(confidence)\r\n }\r\n #if it takes two seconds to read the file instead of waiting\r\n #puts this 
function in suspend mode and second request can be served\r\n #need to convert these bytes to array\r\n\r\n \r\n\r\nif __name__ == \"__main__\":\r\n uvicorn.run(app, host = 'localhost', port=8000)","repo_name":"Kishy1224/Deep_Learning_based_Corn_disease_classification-End-to-End-","sub_path":"Code1/Corn-disease-classification/API/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25851551799","text":"import pandas as pd\n\n\ndef rank_subject_clones(df, subject, by, limit=20):\n df = df[df.identifier == subject]\n df = df.drop(['identifier'], axis=1)\n norm_df = df / df.sum()\n norm_df = norm_df.sort_values(by, ascending=False).reset_index()\n return norm_df[:limit]\n\n\nif __name__ == '__main__':\n df = pd.read_csv('sizes.tsv', delimiter='\\t', index_col='id')\n for subject in df.identifier.unique():\n for by in ('overall_unique_cnt', 'instance_cnt', 'overall_total_cnt'):\n pdf = rank_subject_clones(df, subject, by)\n pdf = pdf.to_csv('{}_ranks_by_{}.tsv'.format(subject, by),\n sep='\\t')\n","repo_name":"DrexelSystemsImmunologyLab/frontiers-clone-size-scripts","sub_path":"clone_size/clone_size.py","file_name":"clone_size.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21612819425","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path(\"\",views.index,name='homeShop'),\n path(\"aboutUs/\",views.about,name='about'),\n path(\"contactUs/\",views.contactUs,name='contactus'),\n path(\"products/\",views.productView,name='productview'),\n path(\"search/\",views.search,name='search'),\n path(\"tracker/\",views.tracker,name='tracker'),\n path(\"checkout/\",views.checkout,name='checkout'),\n path(\"handlerequest/\",views.handleRequest,name='handlerequest'),\n path(\"bat/\",views.bat,name='bat'),\n path(\"gloves/\",views.gloves,name='gloves'),\n path(\"helmets/\",views.helmets,name='helmets'),\n path(\"balls/\",views.balls,name='balls'),\n path(\"pads/\",views.pads,name='pads')\n \n]\n","repo_name":"Nishant127/justCricketThings","sub_path":"mydjangowebsit/shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"25738469640","text":"import sys\nimport os\n#To import from utils\nsys.path.append(os.path.join(os.path.dirname(sys.path[0])))\n\nfrom utils import get_demo_data, get_input_data\n\nlines = get_input_data()\n\n#Part 1:\ncalories = 0\nmax_calories = 0\nfor line in lines:\n try:\n calories += int(line.strip())\n except ValueError:\n if max_calories < calories:\n max_calories = calories \n calories = 0\n\nprint(max_calories)\n\n#Part 2\ncalories_list = []\ncalories=0\nfor line in lines:\n try:\n calories += int(line.strip())\n except ValueError:\n calories_list.append(calories)\n calories = 0\n\nprint(sum(sorted(calories_list,reverse=True)[:3]))\n","repo_name":"AboveTheHeavens/Advent-of-code","sub_path":"2022/1/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20196531742","text":"from collections import defaultdict\n\n\nclass Solution:\n def minWindow(self, s: str, t: str) -> str:\n t_len = len(t)\n s_len = len(s)\n if t_len > 
s_len:\n return \"\"\n ans = \"\"\n t_freq = defaultdict(int)\n for c in t:\n t_freq[c] += 1\n w_freq = defaultdict(int)\n for i in range(t_len):\n w_freq[s[i]] += 1\n l = 0\n r = t_len - 1\n # print(t_freq, w_freq)\n while l <= s_len - t_len + 1 and r < s_len:\n satisfy = True\n for k, v in t_freq.items():\n if w_freq[k] < v:\n satisfy = False\n break\n if satisfy:\n new = s[l:r + 1]\n ans = new if ans == \"\" or len(new) < len(ans) else ans\n w_freq[s[l]] -= 1\n l += 1\n else:\n r += 1\n if r < s_len:\n w_freq[s[r]] += 1\n return ans\n\n\nif __name__ == \"__main__\":\n print(Solution().minWindow(\"whatever\", \"eve\"))\n","repo_name":"miruts-xz/competitive-programming","sub_path":"daily-questions/minimum-window-substring.py","file_name":"minimum-window-substring.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18730592028","text":"import numpy as np\n\ndef calc_rms(x, scale):\n shape = (x.shape[0]//scale, scale)\n X = np.lib.stride_tricks.as_strided(x,shape=shape)\n scale_ax = np.arange(scale)\n rms = np.zeros(X.shape[0])\n for e, xcut in enumerate(X):\n coeff = np.polyfit(scale_ax, xcut, 1)\n xfit = np.polyval(coeff, scale_ax)\n rms[e] = np.sqrt(np.mean((xcut-xfit)**2))\n return rms\n\ndef dfa(data, scale_lim = [5,9], scale_dens = 0.25):\n data = np.frombuffer(data)\n cumSum = np.cumsum(data - np.mean(data))\n scales = (2**np.arange(scale_lim[0], scale_lim[1], scale_dens)).astype(np.int)\n fluct = np.zeros(len(scales))\n for index, item in enumerate(scales):\n fluct[index] = np.mean(np.sqrt(calc_rms(cumSum, item)**2))\n coeff = np.polyfit(np.log2(scales), np.log2(fluct), 1)\n return coeff[0]\n","repo_name":"Beokro/r1010","sub_path":"server/DFA.py","file_name":"DFA.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3774689679","text":"tCases = int(input())\nfor _ in range(tCases):\n n = int(input())\n arr = list(map(int, input().split()))\n player = 1\n flag = False\n for i in range(n):\n if arr[i] > 1:\n flag = True\n if i % 2 == 0:\n player = 1\n else:\n player = 2\n break\n if flag:\n if player == 1:\n print(\"First\")\n elif player == 2:\n print(\"Second\")\n else:\n if n % 2 == 0:\n print(\"Second\")\n else:\n print(\"First\")","repo_name":"amit-kr-debug/CP","sub_path":"codeForces/round 658 - div 2/B. Sequential Nim.py","file_name":"B. 
Sequential Nim.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"5376202536","text":"import pandas as pd\nimport numpy as np\nimport json\nfrom flask import Flask\nfrom flask import jsonify\nfrom flask import request\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n@app.route('/get_json')\ndef get_json():\n country=request.args['country']\n url='google'\n countries=pd.read_csv('tsv/products_countries.tsv','\\t')\n products=pd.read_csv('tsv/products.tsv','\\t',low_memory=False)[['code','name']]\n additives=pd.read_csv('tsv/products_additives.tsv','\\t')\n d=pd.merge(countries[countries['country']==country],pd.merge(products,additives,how='inner',left_on='code',right_on='code'),how='inner',left_on='code',right_on='code')[['code','name','additive']][:20000]\n data=[]\n groups=d.groupby('additive')\n for name,g in groups:\n additive=dict({})\n additive['name']=name\n additive['count']=g.shape[0]\n additive['key']=name\n pages=[]\n for i,p in g.iterrows():\n product=dict({})\n product['name']=p['name']\n product['key']=p['code']\n product['title']=p['name']\n product['url']=url+p['code']\n pages.append(product)\n additive['pages']=pages\n data.append(additive)\n return jsonify(data)\n@app.route('/get_products_info')\ndef get_products_info():\n country=request.args['country']\n countries=pd.read_csv('tsv/products_countries.tsv','\\t',low_memory=False)\n products_in_country=pd.DataFrame(countries[countries['country']==country]['code'])\n product=pd.read_csv('tsv/products.tsv','\\t',low_memory=False)[['code','energy_100g','n_additives','proteins_100g','fat_100g','carbohydrates_100g','sugars_100g','saturated-fat_100g','salt_100g','sodium_100g']]\n product=products_in_country.merge(product,left_on='code',right_on='code')\n data=dict({})\n means=product.mean()\n stds=product.std()\n for i in product.columns:\n if(i=='code'):\n continue\n l=np.array(product[i].dropna().tolist())\n data[i]=l[np.logical_and(l>means[i]-2*stds[i],(l10:\n\ttemp=segmentspecificfeature(masks)\n\tsegFeatures[i-1,:]=temp\t\t\nprint(segFeatures)\t\n","repo_name":"kumar1701/internship","sub_path":"segmentRegionFeature.py","file_name":"segmentRegionFeature.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36402202550","text":"from typing import Any, Dict, List, Optional\nfrom .typeOverrides import TypeOverride\n\n\nclass Session:\n def __init__(\n self,\n session_id: str,\n params: Dict[str, Any],\n type_overrides: Optional[List[TypeOverride]],\n language: str\n ):\n self.id: str = session_id\n self.params: Dict[str, Any] = params\n self.type_overrides = type_overrides\n self.language = language\n\n @classmethod\n def from_payload(cls, payload: Dict[str, Any]):\n return cls(\n session_id=payload['id'],\n params=payload.get('params', {}),\n type_overrides=[\n TypeOverride.from_payload(x) for x in payload['typeOverrides']\n ],\n language=payload['languageCode']\n )\n","repo_name":"gunyu1019/myschool","sub_path":"app/models/assistant/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"20125321899","text":"from flask import Flask\n\n\"\"\"Flask HTTP Handlers\"\"\"\n\napp = Flask(__name__)\n\nIO_CTL = None\n\n# Turn on\n@app.route(\"/on\")\ndef on():\n global IO_CTL\n 
IO_CTL.on()\n return \"on\"\n\n# Turn off\n@app.route(\"/off\")\ndef off():\n global IO_CTL\n IO_CTL.off()\n return \"off\"\n\n# Check status, returns str(1) or str(0)\n@app.route(\"/status\")\ndef status():\n global IO_CTL\n return str(1 if IO_CTL.relay_on else 0)\n\n\ndef run():\n global app\n app.run(host=\"localhost\", port=5001, debug=False)","repo_name":"monoKeith/Pi_HomebridgeServer","sub_path":"switchServer.py","file_name":"switchServer.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37561067994","text":"import numpy as np\r\nimport psana\r\nimport warnings\r\nimport time\r\nfrom Utils import ROIMetrics, GlobalCalibration, ShotToShotParameters\r\nimport Constants\r\n\r\ndef getCameraSaturationValue(evt):\r\n try:\r\n analysis_version = psana.Detector(Constants.ANALYSIS_VERSION)\r\n if analysis_version(evt) is not None:\r\n return (1<<12)-1\r\n except:\r\n pass\r\n\r\n return (1<<14)-1\r\n \r\n\r\ndef getGlobalXTCAVCalibration(evt):\r\n \"\"\"\r\n Obtain the global XTCAV calibration form the epicsStore\r\n Arguments:\r\n epicsStore\r\n Output:\r\n globalCalibration: struct with the parameters\r\n ok: if all the data was retrieved correctly\r\n \"\"\"\r\n def getCalibrationValues(possible_detector_names):\r\n for i in range(len(possible_detector_names)):\r\n try:\r\n det = psana.Detector(possible_detector_names[i])\r\n val = det(evt)\r\n if abs(val) < 1e-100:\r\n continue\r\n return val \r\n except KeyError:\r\n continue\r\n return None\r\n\r\n global_calibration = GlobalCalibration(\r\n umperpix=getCalibrationValues(Constants.UM_PER_PIX_names), \r\n strstrength=getCalibrationValues(Constants.STR_STRENGTH_names), \r\n rfampcalib=getCalibrationValues(Constants.RF_AMP_CALIB_names), \r\n rfphasecalib=getCalibrationValues(Constants.RF_PHASE_CALIB_names), \r\n dumpe=getCalibrationValues(Constants.DUMP_E_names), \r\n dumpdisp=getCalibrationValues(Constants.DUMP_DISP_names)\r\n )\r\n \r\n for k,v in global_calibration._asdict().iteritems():\r\n if not v:\r\n warnings.warn_explicit('No XTCAV Calibration for epics variable ' + k, UserWarning,'XTCAV',0)\r\n return None\r\n\r\n return global_calibration\r\n \r\n\r\ndef getXTCAVImageROI(evt):\r\n\r\n for i in range(len(Constants.ROI_SIZE_X_names)):\r\n try:\r\n roiXN=psana.Detector(Constants.ROI_SIZE_X_names[i])\r\n roiX=psana.Detector(Constants.ROI_START_X_names[i])\r\n roiYN=psana.Detector(Constants.ROI_SIZE_Y_names[i])\r\n roiY=psana.Detector(Constants.ROI_START_Y_names[i])\r\n\r\n xN = roiXN(evt) #Size of the image in X \r\n x0 = roiX(evt) #Position of the first pixel in x\r\n yN = roiYN(evt) #Size of the image in Y \r\n y0 = roiY(evt) #Position of the first pixel in y\r\n x = x0+np.arange(0, xN) \r\n y = y0+np.arange(0, yN) \r\n\r\n return ROIMetrics(xN, x0, yN, y0, x, y) \r\n\r\n except KeyError:\r\n continue\r\n \r\n warnings.warn_explicit('No XTCAV ROI info',UserWarning,'XTCAV',0)\r\n return None\r\n\r\n\r\ndef getShotToShotParameters(ebeam, gasdetector, evt_id):\r\n time = evt_id.time()\r\n sec = time[0]\r\n nsec = time[1]\r\n unixtime = int((sec<<32)|nsec)\r\n fiducial = evt_id.fiducials()\r\n\r\n energydetector = Constants.ENERGY_DETECTOR\r\n \r\n if ebeam: \r\n ebeamcharge=ebeam.ebeamCharge()\r\n xtcavrfamp=ebeam.ebeamXTCAVAmpl()\r\n xtcavrfphase=ebeam.ebeamXTCAVPhase()\r\n dumpecharge=ebeam.ebeamDumpCharge()*Constants.E_CHARGE #In C \r\n \r\n if gasdetector:\r\n energydetector=(gasdetector.f_11_ENRC()+gasdetector.f_12_ENRC())/2 
\r\n return ShotToShotParameters(ebeamcharge = ebeamcharge, \r\n xtcavrfphase = xtcavrfphase, xtcavrfamp = xtcavrfamp, \r\n dumpecharge = dumpecharge, xrayenergy = 1e-3*energydetector, \r\n unixtime = unixtime, fiducial = fiducial) \r\n else: \r\n warnings.warn_explicit('No gas detector info',UserWarning,'XTCAV',0)\r\n \r\n else: \r\n warnings.warn_explicit('No ebeamv info',UserWarning,'XTCAV',0)\r\n \r\n return ShotToShotParameters(unixtime = unixtime, fiducial = fiducial, valid = 0)\r\n \r\n\r\n\r\ndef divideImageTasks(first_image, last_image, rank, size):\r\n \"\"\"\r\n Split image numbers among cores based on number of cores and core ID\r\n The run will be segmented into chunks of 4 shots, with each core alternatingly assigned to each.\r\n e.g. Core 1 | Core 2 | Core 3 | Core 1 | Core 2 | Core 3 | ....\r\n \"\"\"\r\n num_shots = last_image - first_image\r\n if num_shots <= 0:\r\n return np.empty()\r\n tiling = np.arange(rank*4, rank*4+4,1) # returns [0, 1, 2, 3] if e.g. rank == 0 and size == 4:\r\n comb1 = np.tile(tiling, np.ceil(num_shots/(4.*size)).astype(int)) # returns [0, 1, 2, 3, 0, 1, 2, 3, ...] \r\n comb2 = np.repeat(np.arange(0, np.ceil(num_shots/(4.*size)), 1), 4) # returns [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, ...]\r\n # list of shot numbers assigned to this core\r\n main = comb2*4*size + comb1 + first_image # returns [ 0. 1. 2. 3. 16. 17. 18. 19. 32. 33. ... ]\r\n main = np.delete(main, np.where(main>=last_image) ) # remove element if greater or equal to maximum number of shots in run\r\n return main.astype(int)\r\n\r\n","repo_name":"xiaozhg/xtcav","sub_path":"xtcav/UtilsPsana.py","file_name":"UtilsPsana.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"19372017055","text":"import numpy as np\nimport nltk\nimport tokenize\nimport re\nimport random\n\nfrom nltk.corpus import reuters\nfrom nltk import word_tokenize\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.corpus import stopwords\n\nnltk.download('stopwords')\nnltk.download('punkt')\nnltk.download('reuters')\n# defined stop words\ncachedStopWords = stopwords.words(\"english\")\n\ndocuments = reuters.fileids()\ntrain_docs = list(filter(lambda doc: doc.startswith(\"train\"), documents))\ncategories = reuters.categories()\n\n\nclass AntColony:\n def __init__(train_docs, categories, num_ants, num_iterations, alpha, beta, rho, Q):\n train_docs = train_docs[0:50]\n categories = categories\n num_topics = len(categories)\n num_documents = len(train_docs)\n num_words = 0\n prop = np.array([])\n coverage = 0\n coherence = 0\n\n num_ants = 10\n num_iterations = 10\n alpha = 0.5\n beta = 0.5\n rho = 0.5\n Q = 1\n pheromone_matrix = np.array([])\n document_topic_matrix = np.array([])\n word_topic_matrix = np.array([]) #TODO, will be added again as self, to avoid matrix from returning it.\n topic_probabilities = np.zeros(num_topics)\n\n @staticmethod\n def tokenize(text):\n min_length = 3\n words = map(lambda word: word.lower(), word_tokenize(text))\n words = [word for word in words if word not in cachedStopWords]\n tokens = (list(map(lambda token: PorterStemmer().stem(token), words)))\n p = re.compile('[a-zA-Z]+')\n filtered_tokens = list(filter(lambda token: p.match(token) and len(token) >= min_length, tokens))\n return filtered_tokens\n\n def preprocess(train_docs):\n tokenized_documents = []\n documents_topics = []\n for doc in train_docs:\n # tokenize the document\n tokenized_doc = tokenize(reuters.raw(doc))\n if 
len(tokenized_doc) != 0:\n tokenized_documents.append(tokenized_doc)\n documents_topics.append([categories.index(topic) for topic in reuters.categories(doc)])\n unique_tokens = list(set(term for doc in tokenized_documents for term in doc))\n return documents_topics, tokenized_documents, unique_tokens\n \n def run(train_docs):\n documents_topics, tokenized_documents, unique_tokens = preprocess(train_docs)\n num_documents = len(tokenized_documents)\n num_words = len(unique_tokens)\n \n pheromone_matrix = np.ones(shape=(num_words, num_topics)) / num_topics\n document_topic_matrix = np.zeros((num_documents, num_topics))\n document_term_topic = np.zeros((num_topics, num_words, num_documents), dtype=int)\n\n for index, doc in enumerate(tokenized_documents):\n for term in doc:\n term_index = unique_tokens.index(term)\n for topic_index in documents_topics[0]:\n document_term_topic[topic_index, term_index, index] += 1\n\n # bow_document is the number of term in each document over all topics (sum over all topics)\n bow_documents = [[sum(topics) for topics in zip(*topic_document_set)] for topic_document_set in zip(*(document_term_topic.tolist()))]\n # transpose bow_documents\n bow_documents = np.array(bow_documents).T.tolist()\n # initialize word_topic_matrix\n word_topic_matrix = initialize_word_topic_matrix(bow_documents)\n # assign a random topic to each of the documents, size = (term_count, doc_count)\n ant_solutions = [initialize_ant_solution(bow_document) for bow_document in bow_documents]\n\n\n for iteration in range(num_iterations):\n documents_coverage = calculate_coverage(ant_solutions)\n objective_values = documents_coverage\n for idx, ant_solution in enumerate(ant_solutions):\n update_pheromone_matrix(ant_solution, documents_coverage[idx])\n\n best_ant_solution = ant_solutions[np.argmax(objective_values)]\n update_document_topic_matrix(best_ant_solution, bow_documents)\n update_word_topic_matrix(best_ant_solution, unique_tokens)\n calculate_topic_probabilities()\n ant_solutions = [construct_ant_solution(bow_document) for bow_document in bow_documents]\n\n return document_topic_matrix, word_topic_matrix\n\n def initialize_word_topic_matrix(bow_documents): # initialize word_topic_matrix\n \"\"\"\n INITIAL UNIFORM DISTRIBUTION\n Normalize the count of a given word assigned to a particular topic across all documents in the corpus.\n without normalization, longer documents would have a greater influence on the word-topic matrix,\n since they contain more words and therefore more counts of each word\n \"\"\"\n word_topic_matrix = np.zeros(shape=(num_words, num_topics))\n for doc in bow_documents:\n for j, word_count in enumerate(doc):\n for k in range(num_topics):\n word_topic_matrix[j, k] += word_count / len(bow_documents)\n\n # normalize word_topic_matrix\n word_topic_matrix /= np.sum(word_topic_matrix, axis=0)\n return word_topic_matrix\n\n def initialize_ant_solution(bow_document):\n ant_solution = [-1] * len(bow_document)\n for word_index in range(len(bow_document)):\n topic_probabilities = calculate_topic_probabilities(word_index)\n pheromone_probabilities = pheromone_matrix[word_index] ** alpha\n probabilities = (1 - beta) * pheromone_probabilities + beta * topic_probabilities\n probabilities /= sum(probabilities)\n ant_solution[word_index] = np.random.choice(np.arange(num_topics), p=probabilities)\n return ant_solution\n\n\n def calculate_topic_probabilities(word_index):\n \"\"\"\n Calculates the probability of each topic being assigned to a given word.\n\n Parameters:\n \n word_index 
(int): the index of the word in the vocabulary\n\n Returns:\n topic_probabilities (numpy.ndarray): a 1D array where each element represents the probability of a topic being assigned to the word\n \"\"\"\n # TODO needs word_topic_matrix\n topic_probabilities = np.zeros(num_topics)\n for j in range(num_topics):\n topic_probabilities[j] = word_topic_matrix[word_index, j] / np.sum(word_topic_matrix[:, j])\n return topic_probabilities\n\n # def convert_to_bow(document, vocabulary):\n # bow = [0] * num_words\n # for word in document:\n # if word in vocabulary:\n # bow[vocabulary.index(word)] += 1\n # return bow\n\n def calculate_proportion(document_term_topic):\n prop = np.zeros(shape=(num_topics, num_documents))\n for topic in range(num_topics):\n # Number of terms in topic k\n n_tk = np.count_nonzero(document_term_topic[topic, :, :])\n for document in range(num_documents):\n # Summation of the tf of terms in document under the topic (numerator)\n sum_tf_term = sum(document_term_topic[topic, :, document])\n # number of terms in the document under the topic\n num_term_in_doc = np.count_nonzero(document_term_topic[topic, :, document])\n prop[topic][document] += (sum_tf_term / (n_tk - num_term_in_doc + 1))\n\n def calculate_coverage(document_term_topic):\n # get prop from the constructor\n documents_coverage = np.zeros(shape=(num_documents))\n for doc in range(num_documents):\n under_root = 0 # from the formula, it is the total before getting root!\n p2 = 0 # second part of the formula(summation of multiplication of weights by prop)\n count_each_term_in_document = sum(document_term_topic[:, :, doc]) # count of each term in the document\n total_num_terms_in_document = sum(sum(document_term_topic[:, :, doc])) # total number of terms in the doc\n for term in range(count_each_term_in_document):\n for topic in range(num_topic):\n weight = document_term_topic[topic, term, doc] / document_term_topic[topic, :, doc]\n p2 += weight * prop[topic, doc]\n p1 = count_each_term_in_document[term] / total_num_terms_in_document\n under_root += (p1 - p2)**2\n documents_coverage[doc] = under_root ** 0.5\n return documents_coverage\n # np.sqrt(sum(np.power(documents_coverage, 2)) / num_documents)\n\n def calculate_coherence(document_term_topic):\n for topic in range(num_topics):\n pmi = 0\n terms_in_topic = np.sum(document_term_topic[topic, :, :], 1)\n total_num_term = sum(terms_in_topic)\n combinations = [(a, b) for idx, a in enumerate(terms_in_topic) for b in terms_in_topic[idx + 1:]]\n num_relevant_terms_co_occurrences = 0\n for a,b in combinations: # (a,b) a = first term's count under topic, b = second term's count\n if a and b: # a and b not zero\n p_a = a / total_num_term\n p_b = b / total_num_term\n p_ab = min(a, b) / total_num_term\n co_occurrence.append(np.log10(p_a) + np.log10(p_b)) / np.log10(p_ab)\n num_relevant_terms_co_occurrences += 1\n else:\n co_occurrence = -1\n pmi += co_occurrence\n pmi = pmi / num_relevant_terms_co_occurrences\n coherence = coherence + pow(1-pmi, 2)\n coherence = np.sqrt(coherence)\n\n def update_pheromone_matrix(ant_solution, coverage):\n delta_pheromone_matrix = np.zeros((num_topics, num_words))\n for i in range(len(ant_solution)):\n for j in range(len(ant_solution[i])):\n delta_pheromone_matrix[ant_solution[i][j], j] += coverage\n pheromone_matrix = (1 - rho) * pheromone_matrix + Q * delta_pheromone_matrix\n\n def update_document_topic_matrix(ant_solution, bow_documents):\n for i in range(len(ant_solution)):\n for j in range(len(ant_solution[i])):\n document_topic_matrix[i, 
ant_solution[i][j]] += bow_documents[i][j]\n\n def update_word_topic_matrix(ant_solution, vocabulary):\n for i in range(len(ant_solution)):\n for j in range(len(ant_solution[i])):\n word_topic_matrix[vocabulary.index(words[i][j]), ant_solution[i][j]] += 1\n\n def construct_ant_solution(bow_document):\n ant_solution = []\n for i in range(len(bow_document)):\n probabilities = calculate_probabilities(bow_document[i])\n topic = select_topic\n","repo_name":"alimoayedi/thesisProject","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":10700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70797660908","text":"from flask import Flask, request, render_template, redirect, flash, session\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom surveys import Question, Survey, satisfaction_survey, personality_quiz, surveys\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"survey123\"\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\ndebug = DebugToolbarExtension(app)\nRESPONSES_KEY = \"responses\"\n\n\n@app.route('/')\ndef home_page():\n satisfaction_survey_title = satisfaction_survey.title\n satisfaction_survey_instructions = satisfaction_survey.instructions\n return render_template('home.j2', survey_title=satisfaction_survey_title, survey_instructions=satisfaction_survey_instructions)\n\n\n@app.route('/questions/')\ndef do_form(number):\n question = satisfaction_survey.questions[number]\n responses = session.get(RESPONSES_KEY)\n response_len = len(responses)\n\n if responses is None:\n return redirect(\"/\")\n if response_len == len(satisfaction_survey.questions):\n return redirect(\"/thanks\")\n if response_len != number:\n return redirect(f\"/questions/{response_len}\")\n\n return render_template('questions.j2', question=question, number=number, responses=responses)\n\n\n@app.route(\"/answer\", methods=[\"POST\"])\ndef add_answer():\n answer = request.form['answer']\n responses = session[RESPONSES_KEY]\n responses.append(answer)\n session[RESPONSES_KEY] = responses\n response_len = len(responses)\n\n if (response_len == len(satisfaction_survey.questions)):\n return redirect(\"/thanks\")\n else:\n return redirect(f\"/questions/{response_len}\")\n\n\n@app.route(\"/thanks\")\ndef finish_survey():\n return render_template(\"thank-you.j2\")\n\n\n@app.route(\"/start\", methods=[\"POST\"])\ndef clear_session():\n session[RESPONSES_KEY] = []\n return redirect(\"/questions/0\")\n","repo_name":"StoneAndMud/FlaskSurvey","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41266376282","text":"from flask import Flask, render_template, request\nimport argparse\nimport requests\nimport cv2\nimport logging\nimport numpy as np\nimport os\nimport glob\nfrom pprint import pformat\nfrom random import randint\n\n# setup logging\nlogging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"DEBUG\"))\n\n# parse port and model endpoint args\nparser = argparse.ArgumentParser(description='MAX Object Detector (Lite)')\nparser.add_argument('--port', type=int, nargs='?', default=8090,\n help='port to run the web app on')\nparser.add_argument('--ml-endpoint', nargs='?', metavar='URL',\n default='http://localhost:5000', help='model api server')\nargs = parser.parse_args()\n\napp = Flask(__name__)\n\n\ndef image_resize(img_array):\n \"\"\"Resize the image before processing. 
This is required for consistency\n since the bounding box and label are drawn relative to the image size.\"\"\"\n height, width, _ = np.shape(img_array)\n img_resize = cv2.resize(img_array, (1024, int(1024 * height / width)))\n img_height, img_width, _ = np.shape(img_resize)\n return img_resize, img_height, img_width\n\n\ndef draw_label_box(prediction, image, img_width, img_height):\n \"\"\"Draw the given label and bounding box on the given image\"\"\"\n label = prediction['label']\n ymin, xmin, ymax, xmax = prediction['detection_box']\n (left, right, top, bottom) = (int(xmin * img_width),\n int(xmax * img_width),\n int(ymin * img_height),\n int(ymax * img_height))\n\n font = cv2.FONT_HERSHEY_DUPLEX\n text_width, text_height = cv2.getTextSize(label, font, 0.8, 1)[0]\n\n cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)\n cv2.rectangle(image, (left, top),\n (left + text_width, top + int(text_height * 1.4)),\n (0, 255, 0), -1)\n cv2.putText(image, label, (left, top + text_height),\n font, 0.8, (0, 0, 0), 1)\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef root():\n\n # removing all previous files in folder before start processing\n output_folder = 'static/img/temp/'\n for file in glob.glob(output_folder + '*'):\n os.remove(file)\n\n # on POST handle upload\n if request.method == 'POST':\n\n # get file details\n file_data = request.files.get('file')\n if file_data is None:\n err_msg = 'No input image was provided.'\n logging.error(err_msg)\n return render_template('index.html', error_msg=err_msg)\n\n # read image from string data\n file_request = file_data.read()\n # convert string data to numpy array\n np_inp_image = np.fromstring(file_request, np.uint8)\n # convert numpy array to image\n img = cv2.imdecode(np_inp_image, cv2.IMREAD_UNCHANGED)\n try:\n # resize image to consistent size\n (image_processed,\n img_processed_height,\n img_processed_width) = image_resize(img)\n except Exception as e:\n err_msg = 'Error processing image, try uploading a different image'\n logging.error(str(e))\n return render_template('index.html', error_msg=err_msg)\n\n # encode image\n _, image_encoded = cv2.imencode('.jpg', img)\n\n # TODO R1: review inference request payload\n # Required inference request parameter: image (JPG/PNG encoded)\n files = {\n 'image': image_encoded.tostring(),\n 'Content-Type': 'multipart/form-data',\n }\n\n # Optional inference parameter: threshold (default: 0.7, range [0,1])\n data = {'threshold': '0.5'}\n\n # TODO T1: replace model URL placeholder\n model_url = args.ml_endpoint.rstrip('/') + '**TODO**'\n\n # Send image file form to model endpoint for prediction\n try:\n results = requests.post(url=model_url, files=files, data=data)\n except Exception as e:\n err_msg_temp = 'Prediction request to {} failed: {}'\n err_msg = err_msg_temp.format(model_url, 'Check log for details.')\n logging.error(err_msg_temp.format(model_url, str(e)))\n return render_template(\"index.html\", error_msg=err_msg)\n\n # surface any prediction errors to user\n if results.status_code != 200:\n err_msg = ('Prediction request returned status code {} '\n + 'and message {}').format(results.status_code,\n results.text)\n logging.error(err_msg)\n return render_template('index.html', error_msg=err_msg)\n\n # extract prediction from json return\n output_data = results.json()\n\n # log output in debug\n logging.debug('\\n' + pformat(output_data))\n\n result = []\n # TODO T2: uncomment next line and replace placeholder\n # result = output_data['**TODO**']\n\n if len(result) == 0:\n msg = 'No 
objects detected, try uploading a new image'\n return render_template('index.html', error_msg=msg)\n else:\n # draw the labels and bounding boxes on the image\n for i in range(len(result)):\n draw_label_box(result[i], image_processed,\n img_processed_width, img_processed_height)\n\n # save the output image to return\n file_name = (str(randint(0, 999999)) + '.jpg')\n output_name = output_folder + '/' + file_name\n cv2.imwrite(output_name, image_processed)\n\n return render_template('index.html', image_name=output_name)\n\n else:\n # on GET return index.html\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=args.port)\n","repo_name":"IBM/max-tutorial-app-python","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5790,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"31627110980","text":"from typing import List\n\n\ndef partition(arr: List[int], front: int, end: int) -> int:\n pivot_index = end\n pivot = arr[pivot_index]\n i = front - 1\n for j in range(front, end):\n if arr[j] < pivot:\n i += 1\n arr[i], arr[j] = arr[j], arr[i]\n i += 1\n arr[i], arr[pivot_index] = arr[pivot_index], arr[i]\n return i\n\n\ndef quick_sort(arr: List[int], front: int, end: int) -> List[int]:\n if front < end:\n pivot_index = partition(arr, front, end)\n quick_sort(arr, front, pivot_index - 1)\n quick_sort(arr, pivot_index + 1, end)\n\n\nclass Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int],\n n: int) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n for i in range(n):\n nums1[m + i] = nums2[i]\n quick_sort(nums1, 0, len(nums1) - 1)\n\n\nif __name__ == '__main__':\n nums1 = [1, 2, 3, 0, 0, 0]\n nums2 = [2, 5, 6]\n Solution().merge(nums1, 3, nums2, 3)\n assert (nums1 == [1, 2, 2, 3, 5, 6])\n nums1 = [1]\n nums2 = []\n Solution().merge(nums1, 1, nums2, 0)\n assert (nums1 == [1])\n print('all test cases pass')\n","repo_name":"mikeyangyo/LeetCodeProblems","sub_path":"88. 
Merge Sorted Array/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26283716438","text":"class Solution(object):\n def maxPoints(self, points):\n \"\"\"\n :type points: List[List[int]]\n :rtype: int\n \"\"\"\n m = len(points)\n n = len(points[0])\n left = [0] * n\n right = [0] * n\n dp = points[0]\n\n for i in range(1, m):\n for j in range(n):\n if j == 0:\n left[j] = dp[j]\n else:\n left[j] = max(left[j - 1] - 1, dp[j])\n\n for x in range(n - 1, -1, -1):\n if x == n - 1:\n right[x] = dp[x]\n else:\n right[x] = max(right[x + 1] - 1, dp[x])\n\n for y in range(n):\n dp[y] = points[i][y] + max(left[y], right[y])\n\n return max(dp)\n","repo_name":"humanalgorithm/leetcode_solutions","sub_path":"maximum-number-of-points-with-cost/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72257242026","text":"'''\nLink: https://leetcode.com/problems/merge-intervals/\nTime Complexity: O(nlogn)\nSpace Complexity: O(n)\n'''\nfrom typing import List\n\nclass Solution:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n stack = []\n\n for start, end in sorted(intervals):\n if not stack or stack[-1][1] < start:\n stack.append([start, end])\n else:\n stack[-1][1] = max(end, stack[-1][1])\n\n return stack\n","repo_name":"suru003/DSA-Leetcode-Python3","sub_path":"Arrays/Merge Intervals/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15480009388","text":"# Goes with programming lessons on: https://soriki.com/pico/\n# Lensyl Urbano\n\nimport board\nimport neopixel\nimport time\nimport math\npixels = neopixel.NeoPixel(board.GP0, 12)\n\nfor i in range(10*5):\n n=math.floor(i/5)\n for j in range(10):\n if j>n:\n pixels[j]=(0,200,0)\n else:\n pixels[j]=(0,0,0)\n print (i,n)\n time.sleep (1)\n \nfor i in range(5*5):\n n=math.floor(i/5)\n for j in range(5):\n if j>n:\n pixels[j]=(200,200,0)\n else:\n pixels[j]=(0,0,0)\n print (i,n)\n time.sleep (1)\n \n \n\n\n\n","repo_name":"l4589/Timer","sub_path":"senior.py","file_name":"senior.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32878321443","text":"givenString = input('Enter string: ')\r\n\r\n\r\ndef add_dollar_sign(given_string):\r\n answer = \"\"\r\n first_letter = given_string[0]\r\n for idx, item in enumerate(given_string):\r\n if idx == 0 or item != first_letter:\r\n answer = answer + item\r\n else:\r\n answer = answer + \"$\"\r\n return answer\r\n\r\n\r\nprint(add_dollar_sign(givenString))\r\n\r\n\r\n","repo_name":"sidmaskey13/python_assignments","sub_path":"DT3.py","file_name":"DT3.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11590584053","text":"def dict_comparator(dict0,dict1,list_to_check=False):\n \"\"\"\n Compare two dictionaries of the same size with the same keys and \n return the list of keys for which the values of the dictionaries \n differ \n \n Arguments :\n ¯¯¯¯¯¯¯¯¯¯¯\n dict0 : type=dict\n First dictionary to compare\n \n dict1 : type=dict\n Second dictionary to compare\n 
\n list_to_check : type=list\n If provided, list of the keys to check in the dictionaries\n \n Returns :\n ¯¯¯¯¯¯¯¯¯\n list_different_keys : type=list\n List of keys for which the values of the two dictionaries \n differ\n \"\"\"\n list_different_keys=[]\n \n if list_to_check==False:\n domain=dict0\n else:\n domain=list_to_check\n \n for key in domain :\n if dict0[key]!=dict1[key]:\n list_different_keys.append(key)\n \n return list_different_keys","repo_name":"paulluneaug/PythonFirstProjects","sub_path":"Fonctions eventuellement pratiques/dict_comparator.py","file_name":"dict_comparator.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19115014387","text":"# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom deta.preparer import prepare\nfrom deta.searcher import init\nfrom deta.configManager import Config\nfrom deta.serializer import serialize\nfrom django.conf import settings\nfrom PIL import Image\nfrom cv2 import resize\nfrom deta.utils import read\n\nimport uuid\nimport os\n\n# Create your models here.\nclass Photo(models.Model):\n date_create = models.DateTimeField(\n auto_now_add=True,\n )\n\n photo = models.ImageField(\n upload_to=settings.PHOTO_DIRNAME\n )\n\n def __unicode__(self):\n return ' '.join([\n self.photo.name,\n ])\n\n def save(self, *args, **kwargs):\n super(Photo, self).save(*args, **kwargs)\n\n image = Image.open(self.photo)\n w = int(Config.get('THUMBS', 'w'))\n h = int(Config.get('THUMBS', 'h'))\n image = image.resize((w, h), Image.ANTIALIAS)\n image.save(self.photo.path)\n\n\nclass Company(models.Model):\n name = models.CharField(\n max_length=50,\n )\n\n date_create = models.DateTimeField(\n auto_now_add=True,\n )\n\n def __unicode__(self):\n return self.name\n\n\nclass Staff(models.Model):\n company = models.ForeignKey(Company)\n\n user = models.ForeignKey(settings.AUTH_USER_MODEL)\n\n def __unicode__(self):\n return ' '.join([\n self.company.name,\n '-',\n self.user.email,\n ])\n\n\nclass CompanyInvite(models.Model):\n company = models.ForeignKey(Company)\n\n creator = models.ForeignKey(settings.AUTH_USER_MODEL)\n\n html = models.TextField(\n null=True\n )\n\n date_create = models.DateTimeField(\n auto_now_add=True,\n )\n\n date_change = models.DateTimeField(\n auto_now=True,\n )\n\n def __unicode__(self):\n return self.company.name\n\n\nclass CompanyLogo(models.Model):\n company = models.ForeignKey(Company)\n\n creator = models.ForeignKey(settings.AUTH_USER_MODEL)\n\n photo = models.ImageField(\n upload_to=settings.LOGO_DIRNAME\n )\n\n date_create = models.DateTimeField(\n auto_now_add=True,\n )\n\n serial_kp_file = models.FileField(blank=True, null=True)\n serial_desc_file = models.FileField(blank=True, null=True)\n\n def __unicode__(self):\n return ' '.join([\n self.company.name,\n '-',\n self.photo.url,\n ])\n\n def save(self, *args, **kwargs):\n super(CompanyLogo, self).save(*args, **kwargs)\n detector, matcher = init()\n\n\n kp, desc = prepare(os.path.join(settings.MEDIA_ROOT, self.photo.name), detector, int(Config.get('LOGOS', 'w')),\n int(Config.get('LOGOS', 'h')))\n uid = uuid.uuid4()\n kp_filepath = os.path.join(settings.MEDIA_ROOT, settings.SERIAL_DIRNAME, 'kp_{}.pick'.format(uid))\n desc_filepath = os.path.join(settings.MEDIA_ROOT, settings.SERIAL_DIRNAME, 'desc_{}.pick'.format(uid))\n self.serial_kp_file = serialize(kp, kp_filepath)\n self.serial_desc_file = serialize(desc, desc_filepath)\n super(CompanyLogo, 
self).save(*args, **kwargs)\n\n\nclass LogoStatistic(models.Model):\n # photo = models.ForeignKey(Photo)\n\n logo = models.ForeignKey(CompanyLogo)\n\n position = models.IntegerField()\n\n go_to_company = models.BooleanField(default=False)\n\n date_create = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return ' '.join([\n self.logo.company.name,\n '-URL:',\n self.logo.photo.url,\n '-POS:',\n str(self.position),\n '-GO_TO_COMPANY:',\n str(self.go_to_company),\n '-DATE:',\n str(self.date_create)\n ])\n","repo_name":"abryazgin/detector","sub_path":"lightsite/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"11105886396","text":"from spark_parser.scanner import GenericScanner\nfrom py2_token import PythonToken\n\nimport re\nRESERVED_WORDS = re.split(\"\\s+\",\n\"\"\"and as assert break class continue def del eval exec else elif for from global\nif in import lambda or pass print return while with yield None\"\"\")\n\nBRACKET2NAME = {\n '(': 'LPAREN', ')': 'RPAREN',\n '{': 'LBRACE', '}': 'RBRACE',\n '[': 'LBRACKET', ']': 'RBRACKET',\n }\n\nSYMBOL2NAME = {\n '@': 'AT', '`': 'BACKTICK',\n ':': 'COLON', ',': 'COMMA',\n '.': 'DOT',\n }\n\nENDMARKER = r'\u0004' # ctrl-d\n\nclass Python2Scanner(GenericScanner):\n\n def error(self, s, pos):\n \"\"\"Show text and a carot under that. For example:\nx = 2y + z\n ^\n\"\"\"\n print(\"Lexical error:\")\n print(\"%s\" % s[:pos+10]) # + 10 for trailing context\n print(\"%s^\" % (\" \"*(pos-1)))\n for t in self.rv: print(t)\n raise SystemExit\n\n def __init__(self):\n self.is_newline = True\n self.indents = [0]\n self.lineno = 1\n self.column = 0\n GenericScanner.__init__(self)\n\n def tokenize(self, string):\n self.rv = []\n GenericScanner.tokenize(self, string)\n return self.rv\n\n def add_token(self, name, s, is_newline=False):\n self.column += len(s)\n t = PythonToken(name, s, self.lineno, self.column)\n if is_newline:\n self.lineno += 1\n self.column = 0\n if self.is_newline and name not in ['DEDENT', 'INDENT']:\n while 0 < self.indents[-1]:\n self.indents = self.indents[0:-1]\n self.rv.append(PythonToken('DEDENT', '', self.lineno, self.column))\n pass\n self.is_newline = is_newline\n self.rv.append(t)\n\n # The function names below begin with 't_'.\n # This indicates to GenericScanner that these routines\n # form the tokens. GenericScanner introspects on the\n # method names of this class and the docstrings to come\n # up with both the names of the tokens and the regular expressions\n # that make up those tokens\n\n def t_paren(self, s):\n r'[(){}[\\]]'\n self.add_token(BRACKET2NAME[s], s)\n\n def t_symbol(self, s):\n r'[@:,.`]'\n self.add_token(SYMBOL2NAME[s], s)\n\n def t_endmarker(self, s):\n \"\"\"\u0004\"\"\"\n self.add_token('ENDMARKER', s)\n\n # These can a appear as unary operators. 
Some are also binary operators\n UNOP2NAME = {'+': 'PLUS', '-': 'MINUS', '~': 'TILDE'}\n\n def t_op(self, s):\n r'\\+=|-=|\\*=|/=|%=|&=|\\|=|^=|<<=|>>=|\\*\\*=|//=|//|==|<=|>=|<<|>>|[<>%^&+/=~-]'\n\n # Operators need to be further classified since the grammar requires this\n if s in ('<', '>', '==', '>=', '<=', '<>', '!='):\n self.add_token('COMP_OP', s)\n elif s in ('+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=', '**=',\n '//='):\n self.add_token('AUGASSIGN', s)\n elif s in self.UNOP2NAME.keys():\n self.add_token(self.UNOP2NAME[s], s)\n elif s in ('|', '^', '&', '<<', '>>', '**', '/', '%', '//'):\n # These are *ONLY* binary operators. Operators which are exclusively or\n # can be unary operators were handled previously\n self.add_token('BINOP', s)\n elif s == '=':\n self.add_token('EQUAL', s)\n else:\n print(\"Internal error: Unknown operator %s\" % s)\n raise SystemExit\n\n def t_linesep(self, s):\n r';'\n self.add_token('SEMICOLON', s)\n\n def t_nl(self, s):\n r'\\n'\n self.add_token('NEWLINE', s, is_newline=True)\n\n def t_name(self, s):\n r'[A-Za-z_][A-Za-z_0-9]*'\n if s in RESERVED_WORDS:\n self.add_token(s.upper(), s)\n else:\n self.add_token('NAME', s)\n\n # A way to handle the problem of having to match two different\n # tokens with a single regular expression.\n # We can't have two separate defs because then it would be indeterminate\n # whether we get two single stars or one double star.\n def t_star_star(self, s):\n r'\\*\\*?'\n token_name = \"STARSTAR\" if len(s) == 2 else 'STAR'\n self.add_token(token_name, s)\n\n # CONSTANTS\n # ---------\n\n def t_string(self, s):\n r\"([\\\"]{3}(.|[\\n])*[\\\"]{3})|('{3}(.|[\\n])*'{3})|('[^']*')|(\\\"[^\\\"]*\\\")\"\n self.add_token('STRING', s)\n\n # numbers; int, float, and complex\n\n # Note we have to put longer matches earlier. Specifically radix notation and\n # fixed-point notation\n def t_number(self, s):\n r'(0x[0-9a-f]+|0b[01]+|0o[0-7]+|\\d+\\.\\d|\\d+)j?'\n self.add_token('NUMBER', s)\n\n # Ugh. 
Handle Python's indent/dedent mess.\n def handle_indent_dedent(self, s):\n indent = len(s)\n if indent > self.indents[-1]:\n self.add_token('INDENT', s)\n self.indents.append(indent)\n if indent == self.indents[-1]:\n self.is_newline = False\n pass\n else:\n # May need several levels of dedent\n while indent < self.indents[-1]:\n self.indents = self.indents[0:-1]\n self.add_token('DEDENT', s)\n pass\n pass\n return\n\n # Combine comment and whitespace because we want to\n # capture the space before a comment.\n def t_whitespace_or_comment(self, s):\n r'([ \\t]*[#].*[^\\x04][\\n]?)|([ \\t]+)'\n if '#' in s:\n # We have a comment\n matches = re.match('(\\s+)(.*[\\n]?)', s)\n if matches and self.is_newline:\n self.handle_indent_dedent(matches.group(1))\n s = matches.group(2)\n if s.endswith(\"\\n\"):\n self.add_token('COMMENT', s[:-1])\n self.add_token('NEWLINE', \"\\n\")\n else:\n self.add_token('COMMENT', s)\n elif self.is_newline:\n self.handle_indent_dedent(s)\n pass\n return\n\n\nif __name__ == \"__main__\":\n scan = Python2Scanner()\n\n def showit(expr):\n print(expr)\n tokens = scan.tokenize(expr + ENDMARKER)\n for t in tokens: print(t)\n print('-' * 30)\n return\n\n # showit(\"1 # hi\")\n showit(\"\"\"def foo():\n # comment\n return\n\"\"\")\n# showit(\"(10.5 + 2 / 30) // 3 >> 1\")\n# showit(\"1 + 2\")\n# showit(\"\"\"\n# () { } + - 'abc' \\\"abc\\\" 10 10j 0x10 # foo\n# # bar\n# \"\"\")\n# showit(\"\"\"\n# for i in range(x):\n# if True:\n# pass\n# pass\n# pass\"\"\")\n# showit(\"\"\"\n# for i in range(x):\n# while True:\n# break\n# \"\"\")\n","repo_name":"rocky/python-spark","sub_path":"example/python2/py2_scan.py","file_name":"py2_scan.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"37"} +{"seq_id":"15544615118","text":"import os\nimport unittest\n\nfrom peloton_client.pbgen.mesos.v1 import mesos_pb2 as mesos\nfrom peloton_client.pbgen.peloton.api.v0.task import task_pb2 as task\nfrom peloton_client.pbgen.peloton.api.v0.respool import respool_pb2 as respool\nfrom peloton_client.pbgen.peloton.api.v0 import peloton_pb2 as peloton\nfrom tools.vcluster.config_generator import (\n load_config,\n create_mesos_task_config,\n create_pool_config,\n)\n\n\nclass ConfigGeneratorTest(unittest.TestCase):\n def test_create_mesos_task_config(self):\n dynamic_env_master = {\"APP\": \"hostmgr\"}\n config_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"..\",\n \"config\",\n \"default.yaml\",\n )\n config = load_config(config_file)\n got = create_mesos_task_config(\n config,\n module=\"peloton\",\n dynamic_env=dynamic_env_master,\n image_path=\"myregistry/peloton:0.1.0\",\n )\n\n expected = task.TaskConfig(\n resource=task.ResourceConfig(\n cpuLimit=2.0, memLimitMb=4096, diskLimitMb=2048\n ),\n ports=[\n task.PortConfig(name=\"HTTP_PORT\", envName=\"HTTP_PORT\"),\n task.PortConfig(name=\"GRPC_PORT\", envName=\"GRPC_PORT\"),\n ],\n container=mesos.ContainerInfo(\n type=\"MESOS\",\n mesos=mesos.ContainerInfo.MesosInfo(\n image=mesos.Image(\n type=\"DOCKER\",\n docker=mesos.Image.Docker(\n name=\"myregistry/peloton:0.1.0\"\n ),\n )\n ),\n ),\n command=mesos.CommandInfo(\n uris=[\n mesos.CommandInfo.URI(\n value=\"https://gist.githubusercontent.com/scy0208/\"\n \"08a66afe3a7837e5e1c1528d16b47e6f/raw/\"\n \"2119f0fe20b7a1e827e4e43b288545799d6b4e5e/\"\n \"hostmgr_mesos_secret\",\n executable=False,\n cache=False,\n output_file=\"hostmgr_mesos_secret\",\n )\n ],\n shell=True,\n value=\"bash 
/bin/entrypoint.sh\",\n environment=mesos.Environment(\n variables=[\n mesos.Environment.Variable(\n name=\"CONFIG_DIR\", value=\"config\"\n ),\n mesos.Environment.Variable(\n name=\"AUTO_MIGRATE\", value=\"true\"\n ),\n mesos.Environment.Variable(\n name=\"MESOS_SECRET_FILE\",\n value=\"/mnt/mesos/sandbox/hostmgr_mesos_secret\",\n ),\n mesos.Environment.Variable(\n name=\"APP\", value=\"hostmgr\"\n ),\n ]\n ),\n ),\n )\n\n self.assertEqual(got, expected)\n\n def test_create_pool_config(self):\n expected = respool.ResourcePoolConfig(\n name=\"test_respool\",\n resources=[\n respool.ResourceConfig(\n kind=\"cpu\", reservation=1.0, limit=1.0, share=1\n ),\n respool.ResourceConfig(\n kind=\"memory\", reservation=1024, limit=1024, share=1\n ),\n respool.ResourceConfig(\n kind=\"disk\", reservation=1024, limit=1024, share=1\n ),\n ],\n parent=peloton.ResourcePoolID(value=\"root\"),\n )\n\n actual = create_pool_config(\n name=\"test_respool\", cpu=1.0, memory=1024, disk=1024\n )\n self.assertEqual(actual, expected)\n","repo_name":"uber/peloton","sub_path":"tools/vcluster/tests/test_config_generator.py","file_name":"test_config_generator.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":624,"dataset":"github-code","pt":"37"} +{"seq_id":"8417333681","text":"def binary_search(List, item, low, high):\r\n while low <= high:\r\n middle = (low + high) // 2\r\n if List[middle] == item:\r\n return middle\r\n\r\n elif List[middle] < item:\r\n low = middle + 1\r\n\r\n else:\r\n high = middle - 1\r\n return \"Item not found\"\r\n\r\n\r\nmyList = [1, 8, 7, 9, 44, 55, 86, 97, 174, 243, 588, 4488, 568789, 5466446]\r\nenter = int(input(\"Enter a number : \"))\r\nresult = binary_search(myList, enter, 0, len(myList) - 1)\r\nprint(result)\r\n","repo_name":"SorooshDaryabari/binary-search-algorithm","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13430815738","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 30 12:53:11 2019\n\n@author: mehmet.yilmaz\n\"\"\"\n\n#%% import library\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#%% read data\ndata = pd.read_csv(\"data.csv\")\ndata.drop([\"id\", \"Unnamed: 32\"], axis = 1, inplace = True)\n\n#%% select data and show anyone two features\nM = data[data.diagnosis == \"M\"]\nB = data[data.diagnosis == \"B\"]\n\nplt.scatter(M.radius_mean, M.texture_mean, color = \"red\", label = \"bad\")\nplt.scatter(B.radius_mean, B.texture_mean, color = \"green\", label = \"good\")\nplt.xlabel(\"radius_mean\")\nplt.ylabel(\"texture_mean\")\nplt.legend()\nplt.show()\n\n#%% convert string to int\ndata.diagnosis = [1 if each == \"M\" else 0 for each in data.diagnosis]\n\n#%% find x and y\ny = data.diagnosis.values\nx_data = data.drop([\"diagnosis\"], axis = 1)\n\n#%% normalization\nx = (x_data - np.min(x_data)) / ((np.max(x_data) - np.min(x_data)))\n\n#%% choose test and train data\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3, random_state = 1)\n\n#%% navie bayes algorithm\nfrom sklearn.naive_bayes import GaussianNB\nnavie = GaussianNB()\nnavie.fit(x_train, y_train)\n\n#%% test\nprint(\"print test score : \", navie.score(x_test, 
y_test))\n\n","repo_name":"mehmetyilmaz0/MachineLearningProjects","sub_path":"canserPrediction-master/navieBayesAlgorithm.py","file_name":"navieBayesAlgorithm.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28200193486","text":"#CS4349 - Advanced Algorithm Design\n#This program was created by Mathews Fazza as the programming project for CS4349\n#\n#there are three ways to run the program:\n#1 - No arguments: if the program is run without arguments the user will be prompted to enter a text to be justified\n#2 - 1 argument: The argument needs to be a valid filename. The file will be output justified.\n#3 - 2 arguments: In this mode the user can specify both the file and the width of each line. A negative width\n# will change the extra spaces into plus signs\n#\n#The algorithm is described in the report. Please, see the report for details on this implementation\n\nimport math\nimport sys\n\ndef Print_Neatly(words, n, M):\n extras = [[9999 for i in range(n + 1)] for j in range(n + 1)]\n lc = [[9999 for i in range(n + 1)] for j in range(n + 1)]\n c = [sys.maxsize] * (n + 1)\n p = [9999] * (n + 1)\n\n #find the values of extras\n for i in range(1, n+1):\n extras[i][i] = M - len(words[i])\n for j in range(i+1, n+1):\n extras[i][j] = extras[i][j-1] - len(words[j]) - 1\n\n #find the values of lc\n for i in range(1, n+1):\n for j in range(1, n+1):\n if (extras[i][j] < 0 and extras[i][j]):\n lc[i][j] = sys.maxsize\n elif j == n and extras[i][j] >= 0:\n lc[i][j] = 0\n else:\n lc[i][j] = math.pow((extras[i][j]), 3)\n\n #find c and p\n c[0] = 0\n for j in range(1, n+1):\n for i in range(1, j+1):\n if c[i-1] + lc[i][j] < c[j]:\n c[j] = c[i-1] + lc[i][j]\n p[j] = i\n\n return c, p\n\ndef Build_Line(text, j, P):\n\n i = P[j]\n line = 1\n if i != 1:\n line = Build_Line(text, i - 1, P) + 1\n\n #find the number of extra spaces needed for each line\n extra_spaces = M - ( sum(map(len, text[i:(j+1)])) + len(text[i:(j+1)])) +1\n\n #this loop will print justified and add spaces as necessary\n for x in range(i, j+1):\n\n #first word of line\n if(x == i):\n print(text[x], end=' ')\n #last word of line\n elif(x == j+1):\n print(text[x], end='')\n #all other words have a chance of having an extra space attached to them\n else:\n if(extra_spaces>0):\n if(S==-1):\n print(text[x], end='+ ')\n else:\n print(text[x], end=' ')\n extra_spaces += -1\n\n else:\n print(text[x], end=' ')\n\n print()\n return line\n\ndef main(argv):\n text = ''\n global M\n global S #switch to turn spaces into plus signs\n S = 0\n if(len(argv)<1):\n text = input(\"Please enter the text to be justified.\")\n M=80\n elif(len(argv)==1):\n try:\n textfile = open(sys.argv[1], 'r')\n text = textfile.read()\n M=80\n except FileNotFoundError:\n print(\"Please, try a valid file name next time.\")\n elif(len(argv)==2):\n try:\n textfile = open(sys.argv[1], 'r')\n text = textfile.read()\n except FileNotFoundError:\n print(\"Please, try a valid file name next time.\")\n try:\n M = int(sys.argv[2])\n if(M<0):\n M = abs(M)\n S = -1\n except ValueError:\n print(\"Please, enter a number as your second argument.\")\n\n paragraphs = text.split('\\n')\n paragraphs = list(filter(None, paragraphs))\n\n for words in paragraphs:\n words = ['BLANK'] + words.split(' ')\n n = len(words) - 1\n C, P = Print_Neatly(words, n, M)\n Build_Line(words, n, P)\n print()\n\n\nif __name__ == '__main__':\n 
main(sys.argv[1:])","repo_name":"mfazza/programSamples","sub_path":"DP2.py","file_name":"DP2.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35764801670","text":"import numpy as np\nimport scipy.signal\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.utils.annotations import DeveloperAPI\n\n\ndef discount(x, gamma):\n return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]\n\n\nclass Postprocessing:\n \"\"\"Constant definitions for postprocessing.\"\"\"\n\n ADVANTAGES = \"advantages\"\n VALUE_TARGETS = \"value_targets\"\n\n\n@DeveloperAPI\ndef compute_advantages(rollout,\n last_r,\n gamma=0.9,\n lambda_=1.0,\n use_gae=True,\n use_critic=True):\n \"\"\"\n Given a rollout, compute its value targets and the advantage.\n\n Args:\n rollout (SampleBatch): SampleBatch of a single trajectory\n last_r (float): Value estimation for last observation\n gamma (float): Discount factor.\n lambda_ (float): Parameter for GAE\n use_gae (bool): Using Generalized Advantage Estimation\n use_critic (bool): Whether to use critic (value estimates). Setting\n this to False will use 0 as baseline.\n\n Returns:\n SampleBatch (SampleBatch): Object with experience from rollout and\n processed rewards.\n \"\"\"\n\n traj = {}\n trajsize = len(rollout[SampleBatch.ACTIONS])\n for key in rollout:\n traj[key] = np.stack(rollout[key])\n\n assert SampleBatch.VF_PREDS in rollout or not use_critic, \\\n \"use_critic=True but values not found\"\n assert use_critic or not use_gae, \\\n \"Can't use gae without using a value function\"\n\n if use_gae:\n vpred_t = np.concatenate(\n [rollout[SampleBatch.VF_PREDS],\n np.array([last_r])])\n delta_t = (\n traj[SampleBatch.REWARDS] + gamma * vpred_t[1:] - vpred_t[:-1])\n # This formula for the advantage comes from:\n # \"Generalized Advantage Estimation\": https://arxiv.org/abs/1506.02438\n traj[Postprocessing.ADVANTAGES] = discount(delta_t, gamma * lambda_)\n traj[Postprocessing.VALUE_TARGETS] = (\n traj[Postprocessing.ADVANTAGES] +\n traj[SampleBatch.VF_PREDS]).copy().astype(np.float32)\n else:\n rewards_plus_v = np.concatenate(\n [rollout[SampleBatch.REWARDS],\n np.array([last_r])])\n discounted_returns = discount(rewards_plus_v,\n gamma)[:-1].copy().astype(np.float32)\n\n if use_critic:\n traj[Postprocessing.\n ADVANTAGES] = discounted_returns - rollout[SampleBatch.\n VF_PREDS]\n traj[Postprocessing.VALUE_TARGETS] = discounted_returns\n else:\n traj[Postprocessing.ADVANTAGES] = discounted_returns\n traj[Postprocessing.VALUE_TARGETS] = np.zeros_like(\n traj[Postprocessing.ADVANTAGES])\n\n traj[Postprocessing.ADVANTAGES] = traj[\n Postprocessing.ADVANTAGES].copy().astype(np.float32)\n\n assert all(val.shape[0] == trajsize for val in traj.values()), \\\n \"Rollout stacked incorrectly!\"\n return SampleBatch(traj)\n","repo_name":"HuantWang/SUPERSONIC","sub_path":"third_party/ray/rllib/evaluation/postprocessing.py","file_name":"postprocessing.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"37"} +{"seq_id":"29648435684","text":"from app.pyimagesearch.motion_detection import SingleMotionDetector\nfrom app.config import config\nfrom app.emailer_classes import EmailSender\nimport threading\nimport datetime\nimport imutils\nimport time\nimport cv2\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger = config.config_logger(logger)\n\noutputFrame = 
None\nlock = threading.Lock()\n\nemail_sender = EmailSender()\n\n\ndef detect_motion(frameCount, video_stream):\n global outputFrame, lock\n\n t = threading.currentThread()\n vs = video_stream\n\n # Initialize the motion detector and the total number of frames\n md = SingleMotionDetector(accumWeight=0.1)\n total = 0\n\n time.sleep(2) # Leave time for the webcam to warm up\n\n # Loop over frames from the video stream\n while getattr(t, \"do_run\", True):\n # Read the next frame from the video stream, resize it, convert to grayscale, and blur it\n frame = vs.read()\n frame = imutils.resize(frame, width=400)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (7, 7), 0)\n\n # Grab the current timestamp and draw it on the frame\n timestamp = datetime.datetime.now()\n cv2.putText(frame, timestamp.strftime(\n \"%A %d %B %Y %I:%M:%S%p\"), (10, frame.shape[0] - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)\n\n # If the total number of frames has reached a sufficient\n # number to construct a reasonable background model, then\n # continue to process the frame\n if total > frameCount:\n # detect motion in the image\n motion = md.detect(gray)\n\n # check to see if motion was found in the frame\n if motion is not None:\n # unpack the tuple and draw surrounding box\n (thresh, (minX, minY, maxX, maxY)) = motion\n cv2.rectangle(frame, (minX, minY), (maxX, maxY),\n (0, 0, 255), 2)\n\n # Update the background model and increment the total number of frames read thus far\n md.update(gray)\n total += 1\n\n # Acquire the lock, set the output frame, and release the lock\n with lock:\n outputFrame = frame.copy()\n\n\ndef generate_video_feed():\n global outputFrame, lock\n\n # Loop over frames from the output stream\n while True:\n # Wait until the lock is acquired\n with lock:\n # Check if the output frame is available, otherwise skip the iteration of the loop\n if outputFrame is None:\n continue\n\n # Encode the frame in JPEG format\n (flag, encodedImage) = cv2.imencode(\".jpg\", outputFrame)\n\n # Ensure the frame was successfully encoded\n if not flag:\n continue\n\n # Yield the output frame in the byte format\n yield (b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n bytearray(encodedImage) + b'\\r\\n')\n","repo_name":"JoaquinRives/IoT_Home_RPi","sub_path":"app/camera_management.py","file_name":"camera_management.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39462247175","text":"# -*- coding: utf-8 -*-\n\nimport os\n\ndata_input_path = os.path.join('..','..', 'sogou_news')\ndata_output_path = os.path.join('..','..', 'sogou_news_line')\n\n\nif __name__=='__main__':\n for idx,file_name in enumerate(os.listdir(data_input_path)):\n with open(os.path.join(data_input_path, file_name), 'r', encoding='utf8') as fin, \\\n open(os.path.join(data_output_path, file_name), 'w', encoding='utf8') as fout :\n for line in fin.readlines():\n elems = line.split('`1`2')\n fout.write(elems[2]+elems[1]+'\\n'+'\\n')\n\n if idx %10 ==0 :\n print('propress %d files..' % idx)\n # break\n\n fout.close()","repo_name":"waywaywayw/rnn_ptb","sub_path":"sogouNewsData_propress/rawNews_to_newsLine.py","file_name":"rawNews_to_newsLine.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19602211209","text":"from os import name\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('cart', views.cart, name='cart'),\n path('checkout', views.checkout, name='checkout'),\n path('contact', views.contact, name='contact'),\n path('about', views.about, name='about'),\n path('wishlist', views.wishlist, name='wishlist'),\n path('shop/', views.shop, name='shop'),\n path('shop/product/', views.product, name='product'),\n path('update_item/', views.updateItem, name= 'update_item'),\n path('register/', views.register, name= 'register'),\n\n]","repo_name":"Shadhin004/Django-ecommerce","sub_path":"store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41690333666","text":"from enum import Enum\nfrom scipy import optimize\n\nfrom resdx.fan import ConstantEfficacyFan\n\nfrom koozie import fr_u, to_u\nfrom ..defrost import DefrostStrategy\n\nfrom .base_model import DXModel\n\nclass Title24DXModel(DXModel):\n\n def __init__(self):\n super().__init__()\n self.allowed_kwargs += [\n \"cap17\",\n \"cap35\",\n \"cop35\",\n \"input_cooling_efficiency_multiplier\",\n ]\n\n @staticmethod\n def CA_regression(coeffs,T_ewb,T_odb,T_edb,V_standard_per_rated_cap):\n return coeffs[0]*T_edb + \\\n coeffs[1]*T_ewb + \\\n coeffs[2]*T_odb + \\\n coeffs[3]*V_standard_per_rated_cap + \\\n coeffs[4]*T_edb*T_odb + \\\n coeffs[5]*T_edb*V_standard_per_rated_cap + \\\n coeffs[6]*T_ewb*T_odb + \\\n coeffs[7]*T_ewb*V_standard_per_rated_cap + \\\n coeffs[8]*T_odb*V_standard_per_rated_cap + \\\n coeffs[9]*T_ewb*T_ewb + \\\n coeffs[10]/V_standard_per_rated_cap + \\\n coeffs[11]\n\n def gross_shr(self, conditions):\n T_iwb = to_u(conditions.indoor.get_wb(),\"°F\") # Cutler curves use °F\n T_odb = to_u(conditions.outdoor.db,\"°F\") # Title 24 curves use °F\n T_idb = to_u(conditions.indoor.db,\"°F\") # Title 24 curves use °F\n CFM_per_ton = to_u(conditions.standard_volumetric_airflow_per_capacity,\"cfm/ton_ref\")\n coeffs = [0.0242020,-0.0592153,0.0012651,0.0016375,0,0,0,-0.0000165,0,0.0002021,0,1.5085285]\n SHR = Title24DXModel.CA_regression(coeffs,T_iwb,T_odb,T_idb,CFM_per_ton)\n return min(1.0, SHR)\n\n @staticmethod\n def eer_rated(seer):\n if seer < 13.0:\n return 10.0 + 0.84 * (seer - 11.5)\n elif seer < 16.0:\n return 11.3 + 0.57 * (seer - 13.0)\n else:\n return 13.0\n\n class MotorType(Enum):\n PSC = 1,\n BPM = 2\n\n @staticmethod\n def fan_efficacy_rated(flow_per_capacity, motor_type=MotorType.PSC):\n if motor_type == Title24DXModel.MotorType.PSC:\n power_per_capacity = fr_u(500,'(Btu/h)/ton_ref')\n else:\n power_per_capacity = fr_u(283,'(Btu/h)/ton_ref')\n return power_per_capacity/flow_per_capacity\n\n def gross_total_cooling_capacity(self, conditions):\n shr = self.gross_shr(conditions)\n T_iwb = to_u(conditions.indoor.get_wb(),\"°F\") # Title 24 curves use °F\n T_odb = to_u(conditions.outdoor.db,\"°F\") # Title 24 curves use °F\n T_idb = to_u(conditions.indoor.db,\"°F\") # Title 24 curves use °F\n CFM_per_ton = to_u(conditions.standard_volumetric_airflow_per_capacity,\"cfm/ton_ref\")\n if shr < 1:\n coeffs = [0,0.009645900,0.002536900,0.000171500,0,0,-0.000095900,0.000008180,-0.000007550,0.000105700,-53.542300000,0.381567150]\n else: # shr == 1\n coeffs = [0.009483100,0,-0.000600600,-0.000148900,-0.000032600,0.000011900,0,0,-0.000005050,0,-52.561740000,0.430751600]\n return 
Title24DXModel.CA_regression(coeffs,T_iwb,T_odb,T_idb,CFM_per_ton)*self.system.rated_gross_total_cooling_capacity[conditions.compressor_speed]\n\n def gross_sensible_cooling_capacity(self, conditions):\n return self.gross_shr(conditions)*self.system.gross_total_cooling_capacity(conditions)\n\n def gross_cooling_power(self, conditions):\n shr = self.gross_shr(conditions)\n T_iwb = to_u(conditions.indoor.get_wb(),\"°F\") # Title 24 curves use °F\n T_odb = to_u(conditions.outdoor.db,\"°F\") # Title 24 curves use °F\n T_idb = to_u(conditions.indoor.db,\"°F\") # Title 24 curves use °F\n CFM_per_ton = to_u(conditions.standard_volumetric_airflow_per_capacity,\"cfm/ton_ref\")\n cap95 = self.system.rated_net_total_cooling_capacity[conditions.compressor_speed]\n q_fan = self.system.rated_cooling_fan_power[conditions.compressor_speed]\n if T_odb < 95.0:\n seer = fr_u(self.system.input_seer,'Btu/Wh')\n if shr < 1:\n seer_coeffs = [0,-0.0202256,0.0236703,-0.0006638,0,0,-0.0001841,0.0000214,-0.00000812,0.0002971,-27.95672,0.209951063]\n cap_coeffs = [0,0.009645900,0.002536900,0.000171500,0,0,-0.000095900,0.000008180,-0.000007550,0.000105700,-53.542300000,0.381567150]\n else: # shr == 1\n seer_coeffs = [0.0046103,0,0.0125598,-0.000512,-0.0000357,0.0000105,0,0,0,0,0,-0.316172311]\n cap_coeffs = [0.009483100,0,-0.000600600,-0.000148900,-0.000032600,0.000011900,0,0,-0.000005050,0,-52.561740000,0.430751600]\n f_cond_seer = Title24DXModel.CA_regression(cap_coeffs,T_iwb,T_odb,T_idb,CFM_per_ton)/Title24DXModel.CA_regression(seer_coeffs,T_iwb,T_odb,T_idb,CFM_per_ton)\n seer_nf = f_cond_seer*(1.09*cap95+q_fan)/(1.09*cap95/seer - q_fan) # unitless\n else:\n seer_nf = 0.0\n if T_odb > 82.0:\n eer = self.system.rated_net_cooling_cop[conditions.compressor_speed]\n if shr < 1:\n eer_coeffs = [0,-0.020225600,0.023670300,-0.000663800,0,0,-0.000184100,0.000021400,-0.000008120,0.000297100,-27.956720000,0.015003100]\n else: # shr == 1\n eer_coeffs = [0.004610300,0,0.012559800,-0.000512000,-0.000035700,0.000010500,0,0,0,0,0,-0.475306500]\n cap_nf = self.system.rated_gross_total_cooling_capacity[conditions.compressor_speed]\n f_cond_eer = Title24DXModel.CA_regression(eer_coeffs,T_iwb,T_odb,T_idb,CFM_per_ton)\n eer_nf = cap_nf/(f_cond_eer*(cap95/eer - q_fan/3.413))\n else:\n eer_nf = 0.0\n if T_odb <= 82.0:\n eer_t = seer_nf\n elif T_odb < 95.0:\n eer_t = seer_nf + (T_odb - 82.0)*(eer_nf - seer_nf)/13.0\n else:\n eer_t = eer_nf\n if \"input_cooling_efficiency_multiplier\" in self.system.kwargs:\n f_eff = self.system.kwargs[\"input_cooling_efficiency_multiplier\"]\n else:\n f_eff = 1.0\n return self.system.gross_total_cooling_capacity(conditions)/(eer_t*f_eff)\n\n @staticmethod\n def cap17_ratio_rated(hspf):\n '''\n Return the ratio of net integrated heating capacity for 47 F / 17 F.\n '''\n if hspf < 7.5:\n return 0.1113 * hspf - 0.22269\n elif hspf < 9.5567:\n return 0.017 * hspf + 0.4804\n elif hspf < 10.408:\n return 0.0982 * hspf - 0.2956\n else:\n return 0.0232 * hspf + 0.485\n\n def get_cap17(self, conditions):\n '''\n Return the net integrated heating capacity at 17 F.\n '''\n # If not already in the model data, initialize the model data\n if \"cap17\" not in self.system.model_data:\n self.system.model_data[\"cap17\"] = [None]*self.system.number_of_heating_speeds\n\n if self.system.model_data[\"cap17\"][conditions.compressor_speed] is not None:\n # If it's already in the model data, return the stored value\n return self.system.model_data[\"cap17\"][conditions.compressor_speed]\n else:\n # If not already in the model 
data then...\n if \"cap17\" in self.system.kwargs:\n # Read from model kwargs (if provided)\n self.system.model_data[\"cap17\"][conditions.compressor_speed] = self.system.kwargs[\"cap17\"][conditions.compressor_speed]\n else:\n # or use the Title 24 default calculation\n cap47 = self.system.rated_net_heating_capacity[conditions.compressor_speed]\n self.system.model_data[\"cap17\"][conditions.compressor_speed] = self.cap17_ratio_rated(self.system.input_hspf)*cap47\n return self.system.model_data[\"cap17\"][conditions.compressor_speed]\n\n def get_cap35(self, conditions):\n if \"cap35\" not in self.system.model_data:\n self.system.model_data[\"cap35\"] = [None]*self.system.number_of_heating_speeds\n\n if self.system.model_data[\"cap35\"][conditions.compressor_speed] is not None:\n return self.system.model_data[\"cap35\"][conditions.compressor_speed]\n else:\n if \"cap35\" in self.system.kwargs:\n self.system.model_data[\"cap35\"][conditions.compressor_speed] = self.system.kwargs[\"cap35\"][conditions.compressor_speed]\n else:\n cap47 = self.system.rated_net_heating_capacity[conditions.compressor_speed]\n cap17 = self.get_cap17(conditions)\n cap35 = cap17 + 0.6*(cap47 - cap17)\n if self.system.defrost.strategy != DefrostStrategy.NONE:\n cap35 *= 0.9\n self.system.model_data[\"cap35\"][conditions.compressor_speed] = cap35\n return self.system.model_data[\"cap35\"][conditions.compressor_speed]\n\n def gross_steady_state_heating_capacity(self, conditions):\n T_odb = to_u(conditions.outdoor.db,\"°F\") # Title 24 curves use °F\n cap47 = self.system.rated_net_heating_capacity[conditions.compressor_speed]\n cap17 = self.get_cap17(conditions)\n slope = (cap47 - cap17)/(47.0 - 17.0)\n return cap17 + slope*(T_odb - 17.0) - self.system.rated_heating_fan_power[conditions.compressor_speed]\n\n def gross_integrated_heating_capacity(self, conditions):\n T_odb = to_u(conditions.outdoor.db,\"°F\") # Title 24 curves use °F\n cap47 = self.system.rated_net_heating_capacity[conditions.compressor_speed]\n cap17 = self.get_cap17(conditions)\n cap35 = self.get_cap35(conditions)\n if self.system.defrost.in_defrost(conditions) and (T_odb > 17.0 and T_odb < 45.0):\n slope = (cap35 - cap17)/(35.0 - 17.0)\n else:\n slope = (cap47 - cap17)/(47.0 - 17.0)\n return cap17 + slope*(T_odb - 17.0) - self.system.rated_heating_fan_power[conditions.compressor_speed]\n\n @staticmethod\n def rated_net_heating_cop(hspf):\n return 0.3225*hspf + 0.9099\n\n def check_hspf(self, conditions, cop17):\n # Calculate region 4 HSPF\n cap47 = self.system.rated_net_heating_capacity[conditions.compressor_speed]\n cop47 = self.system.rated_net_heating_cop[conditions.compressor_speed]\n inp47 = cap47/cop47\n cap35 = self.get_cap35(conditions)\n cap17 = self.get_cap17(conditions)\n inp17 = cap17/cop17\n\n if \"cop35\" in self.system.kwargs:\n cop35 = self.system.kwargs[\"cop35\"][conditions.compressor_speed]\n self.system.model_data[\"cop35\"][conditions.compressor_speed] = cop35\n inp35 = cap35/cop35\n else:\n inp35 = inp17 + 0.6*(inp47 - inp17)\n if self.system.defrost.strategy != DefrostStrategy.NONE:\n inp35 *= 0.985\n cop35 = cap35/inp35\n self.system.model_data[\"cop35\"][conditions.compressor_speed] = cop35\n\n out_tot = 0\n inp_tot = 0\n\n T_bins = [62.0, 57.0, 52.0, 47.0, 42.0, 37.0, 32.0, 27.0, 22.0, 17.0, 12.0, 7.0, 2.0, -3.0, -8.0]\n frac_hours = [0.132, 0.111, 0.103, 0.093, 0.100, 0.109, 0.126, 0.087, 0.055, 0.036, 0.026, 0.013, 0.006, 0.002, 0.001]\n\n T_design = 5.0\n T_edb = 65.0\n C = 0.77 # AHRI \"correction factor\"\n T_off = 0.0 # 
low temp cut-out \"off\" temp (F)\n T_on = 5.0 # low temp cut-out \"on\" temp (F)\n dHRmin = cap47\n\n for i, T_odb in enumerate(T_bins):\n bL = ((T_edb - T_odb) / (T_edb - T_design)) * C * dHRmin\n\n if (T_odb > 17.0 and T_odb < 45.0):\n cap_slope = (cap35 - cap17)/(35.0 - 17.0)\n inp_slope = (inp35 - inp17)/(35.0 - 17.0)\n else:\n cap_slope = (cap47 - cap17)/(47.0 - 17.0)\n inp_slope = (inp47 - inp17)/(47.0 - 17.0)\n cap = cap17 + cap_slope*(T_odb - 17.0)\n inp = inp17 + inp_slope*(T_odb - 17.0)\n\n x_t = min(bL/cap, 1.0)\n PLF = 1.0 - (self.system.c_d_heating * (1.0 - x_t))\n if T_odb <= T_off or cap/inp < 1.0:\n sigma_t = 0.0\n elif T_odb <= T_on:\n sigma_t = 0.5\n else:\n sigma_t = 1.0\n\n inp_tot += x_t*inp*sigma_t/PLF*frac_hours[i] + (bL - (x_t*cap*sigma_t))*frac_hours[i]\n out_tot += bL*frac_hours[i]\n\n return to_u(out_tot/inp_tot,\"Btu/Wh\")\n\n @staticmethod\n def cop47_rated(hspf):\n return 0.3225*hspf + 0.9099\n\n @staticmethod\n def c_d_heating(hspf):\n return max(min(.25 - 0.2*(hspf-6.8)/(10.0-6.8),0.25),0.05)\n\n def calculate_cops(self, conditions):\n if \"cop35\" not in self.system.model_data:\n self.system.model_data[\"cop35\"] = [None]*self.system.number_of_heating_speeds\n\n if \"cop17\" not in self.system.model_data:\n self.system.model_data[\"cop17\"] = [None]*self.system.number_of_heating_speeds\n\n root_fn = lambda cop17 : self.check_hspf(conditions, cop17) - self.system.input_hspf\n cop17_guess = 3.0 #0.2186*hspf + 0.6734\n self.system.model_data[\"cop17\"][conditions.compressor_speed] = optimize.newton(root_fn, cop17_guess)\n\n def get_cop35(self, conditions):\n if \"cop35\" not in self.system.model_data:\n self.calculate_cops(conditions)\n\n return self.system.model_data[\"cop35\"][conditions.compressor_speed]\n\n def get_cop17(self, conditions):\n if \"cop17\" not in self.system.model_data:\n self.calculate_cops(conditions)\n\n return self.system.model_data[\"cop17\"][conditions.compressor_speed]\n\n def gross_steady_state_heating_power(self, conditions):\n T_odb = to_u(conditions.outdoor.db,\"°F\") # Title 24 curves use °F\n cap47 = self.system.rated_net_heating_capacity[conditions.compressor_speed]\n cap17 = self.get_cap17(conditions)\n\n cop47 = self.system.rated_net_heating_cop[conditions.compressor_speed]\n cop17 = self.get_cop17(conditions)\n\n inp47 = cap47/cop47\n inp17 = cap17/cop17\n\n slope = (inp47 - inp17)/(47.0 - 17.0)\n return inp17 + slope*(T_odb - 17.0) - self.system.rated_heating_fan_power[conditions.compressor_speed]\n\n def gross_integrated_heating_power(self, conditions):\n T_odb = to_u(conditions.outdoor.db,\"°F\") # Title 24 curves use °F\n cap47 = self.system.rated_net_heating_capacity[conditions.compressor_speed]\n cap35 = self.get_cap35(conditions)\n cap17 = self.get_cap17(conditions)\n\n cop47 = self.system.rated_net_heating_cop[conditions.compressor_speed]\n cop35 = self.get_cop35(conditions)\n cop17 = self.get_cop17(conditions)\n\n inp47 = cap47/cop47\n inp35 = cap35/cop35\n inp17 = cap17/cop17\n\n if self.system.defrost.in_defrost(conditions) and (T_odb > 17.0 and T_odb < 45.0):\n slope = (inp35 - inp17)/(35.0 - 17.0)\n else:\n slope = (inp47 - inp17)/(47.0 - 17.0)\n return inp17 + slope*(T_odb - 17.0) - self.system.rated_heating_fan_power[conditions.compressor_speed]\n\n # TODO: Default assumptions\n def set_rated_fan_characteristics(self, fan):\n if fan is not None:\n pass\n else:\n # Airflows\n flow_per_cap_default = fr_u(350.,\"cfm/ton_ref\")\n\n self.system.rated_cooling_airflow_per_rated_net_capacity = 
[flow_per_cap_default]\n self.system.rated_heating_airflow_per_rated_net_capacity = [flow_per_cap_default]\n\n def set_fan(self, input):\n if input is not None:\n # TODO: Handle default mappings?\n self.system.fan = input\n else:\n airflows = []\n efficacies = []\n fan_speed = 0\n if self.system.cooling_fan_speed is None:\n set_cooling_fan_speed = True\n self.system.cooling_fan_speed = []\n self.system.rated_cooling_fan_speed = []\n\n if self.system.heating_fan_speed is None:\n set_heating_fan_speed = True\n self.system.heating_fan_speed = []\n self.system.rated_heating_fan_speed = []\n\n rated_fan_efficacy = Title24DXModel.fan_efficacy_rated(fr_u(350.,\"cfm/ton_ref\"))\n for i, cap in enumerate(self.system.rated_net_total_cooling_capacity):\n self.system.rated_cooling_airflow[i] = cap*self.system.rated_cooling_airflow_per_rated_net_capacity[i]\n airflows.append(self.system.rated_cooling_airflow[i])\n efficacies.append(rated_fan_efficacy)\n self.system.rated_cooling_fan_power[i] = self.system.rated_cooling_airflow[i]*rated_fan_efficacy\n if set_cooling_fan_speed:\n self.system.cooling_fan_speed.append(fan_speed)\n self.system.rated_cooling_fan_speed.append(fan_speed)\n fan_speed += 1\n\n for i, cap in enumerate(self.system.rated_net_total_cooling_capacity):\n self.system.rated_heating_airflow[i] = cap*self.system.rated_heating_airflow_per_rated_net_capacity[i]\n airflows.append(self.system.rated_heating_airflow[i])\n efficacies.append(rated_fan_efficacy)\n self.system.rated_heating_fan_power[i] = self.system.rated_heating_airflow[i]*rated_fan_efficacy\n if set_heating_fan_speed:\n self.system.heating_fan_speed.append(fan_speed)\n self.system.rated_heating_fan_speed.append(fan_speed)\n fan_speed += 1\n\n fan = ConstantEfficacyFan(airflows, fr_u(0.20, \"in_H2O\"), design_efficacy=efficacies)\n self.system.fan = fan\n\n def set_net_capacities_and_fan(self, rated_net_total_cooling_capacity, rated_net_heating_capacity, fan):\n self.set_rated_fan_characteristics(fan)\n self.set_rated_net_total_cooling_capacity(rated_net_total_cooling_capacity)\n self.set_rated_net_heating_capacity(rated_net_heating_capacity)\n self.set_fan(fan)\n","repo_name":"bigladder/resdx","sub_path":"resdx/models/title24.py","file_name":"title24.py","file_ext":"py","file_size_in_byte":16256,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"494853223","text":"import logging\nimport os\nimport os.path\n\nfrom PIL import Image, ImageFont, ImageDraw\n\nclass ImageCaptioner(object):\n\tdef __init__(self, homeDir):\n\t\tself.logger = logging.getLogger('2020sHumorBot').getChild('ImageCaptioner')\n\t\tself.homeDir = homeDir\n\t\t\n\t\tself.textColor = (255, 255, 255)\n\t\tself.textStrokeColor = (0, 0, 0)\n\t\tself.textPadding = 20\n\t\tself.startingFontSize = 12\n\t\tself.strokeDivisor = 20\n\t\t\n\t\tself.fontFilename = None # hang onto this in between runs to save ourselves time if we're doing curation\n\t\t\n\tdef writeText(self, imagePath, outputFolder, text):\n\t\tself.logger.info('Writing text \"' + text + '\" onto image from path: ' + imagePath)\n\t\tinputImage = Image.open(imagePath)\n\t\toutputImage = inputImage.copy()\n\t\t\n\t\t# get the path to the font\n\t\tfontPath = self._getFontFilename()\n\t\tself.logger.debug(\"loading font at path: \" + fontPath)\n\t\t\n\t\t# load the font\n\t\tfontSize = self._determineFontSize(fontPath, text, outputImage)\n\t\tfont = ImageFont.truetype(font=fontPath, size=fontSize)\n\t\tsizeX, sizeY = font.getsize(text)\n\t\t\n\t\t# 
determine draw coords\n\t\tstrokeWidth = int(fontSize/self.strokeDivisor)\n\t\tdrawCoords = (int((outputImage.width / 2) - (sizeX / 2)),\n\t\t\t\t\t outputImage.height - sizeY - self.textPadding - strokeWidth)\n\t\tself.logger.info(\"Writing text with size \" + str((sizeX, sizeY)) + \" with stroke width \" + str(strokeWidth) + \" at position \" + str(drawCoords))\n\t\t\n\t\t# write to the image\n\t\tdrawer = ImageDraw.Draw(outputImage)\n\t\tdrawer.text(drawCoords, text, font=font, fill=self.textColor, stroke_width=strokeWidth, stroke_fill=self.textStrokeColor)\n\t\tself.logger.debug(\"successfully written text to image\")\n\t\t\n\t\t# done\n\t\toutputPath = os.path.join(outputFolder, os.path.split(imagePath)[1])\n\t\tself.logger.info(\"Saving output image to path: \" + outputPath)\n\t\toutputImage.save(outputPath)\n\t\toutputImage.close()\n\t\tinputImage.close()\n\t\t\n\t\treturn outputPath\n\t\n\tdef _getFontFilename(self):\n\t\tif self.fontFilename == None:\n\t\t\tfontDir = os.path.join(self.homeDir, \"font\")\n\t\t\tif not os.path.exists(fontDir):\n\t\t\t\traise RuntimeError(\"Font directory does not exist: \" + fontDir)\n\t\t\tfontPathContents = os.listdir(fontDir)\n\t\t\tif len(fontPathContents) > 1:\n\t\t\t\traise RuntimeError(\"More than one file is present in the font directory. Don't know which font to use.\")\n\t\t\telif len(fontPathContents) < 1:\n\t\t\t\traise RuntimeError(\"No font is present in font directory.\")\n\t\t\tfontPath = os.path.join(fontDir, fontPathContents[0])\n\t\t\tself.fontFilename = fontPath\n\t\t\treturn fontPath\n\t\telse:\n\t\t\treturn self.fontFilename\n\t\n\tdef _determineFontSize(self, fontPath, text, outputImage):\n\t\t# step up the size until it's too big, and back off one, because PIL doesn't have a \"write text to fill area\" function.\n\t\t# This is very slow and inefficient, but whatever, object identification is orders of magnitude slower than this will ever be.\n\t\tmaxX = outputImage.width - self.textPadding * 2\n\t\tmaxY = int(outputImage.height/4)\n\t\tself.logger.debug(\"Allowing text to take up a maximum space of size \" + str((maxX, maxY)))\n\t\tcurSize = self.startingFontSize\n\t\tfont = ImageFont.truetype(font=fontPath, size=curSize)\n\t\tsizeX, sizeY = font.getsize(text)\n\t\twhile sizeX <= maxX and sizeY <= maxY:\n\t\t\tcurSize += 1\n\t\t\tfont = ImageFont.truetype(font=fontPath, size=curSize)\n\t\t\tsizeX, sizeY = font.getsize(text)\n\t\t# back off one\n\t\tcurSize -= 1\n\t\tself.logger.info(\"Determined that font size \" + str(curSize) + \" will fit in the allowed area after \" + str(curSize - self.startingFontSize + 2) + \" iterations.\")\n\t\treturn curSize","repo_name":"HelloLobsterDog/2020sHumorBot","sub_path":"TwentyTwentiesHumorBot/ImageCaptioner.py","file_name":"ImageCaptioner.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"40165456219","text":"import os\r\nimport cv2\r\n# import numpy as np\r\n# a=0\r\n# for i in os.listdir('./images'):\r\n# if '_fake_B' in i:\r\n# img=cv2.imread('./images/{}'.format(i))\r\n# A=i.replace('_fake_B.png','.jpg')\r\n# B1=A.replace('A','B1')\r\n# B2 = A.replace('A', 'B2')\r\n# B3 = A.replace('A', 'B3')\r\n#\r\n# imgA=cv2.imread('./images/{}'.format(A))\r\n# imgB1 = cv2.imread('./images/{}'.format(B1))\r\n# imgB2 = cv2.imread('./images/{}'.format(B2))\r\n# imgB3 = cv2.imread('./images/{}'.format(B3))\r\n#\r\n# I=np.hstack((imgA,imgB1,imgB2,imgB3,img))\r\n#\r\n# 
cv2.imwrite('./result/{}.jpg'.format(a),I,[int(cv2.IMWRITE_JPEG_QUALITY),100])\r\n# a+=1\r\n# print(a)\r\n\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\n\r\n\r\n\r\ndef random_crop(input_img, crop_size):\r\n\r\n img_h, img_w, img_c = input_img.shape\r\n crop_h = np.random.randint(0, 512- crop_size)\r\n crop_w = np.random.randint(0, 512- crop_size)\r\n crop_h1 = np.random.randint(0, 512 - crop_size)\r\n crop_w1 = np.random.randint(0, 512 - crop_size)\r\n input_img0 = input_img[crop_h:crop_h + crop_size, crop_w:crop_w + crop_size, :]\r\n input_img1 = input_img[crop_h:crop_h+crop_size, crop_w+512:crop_w+crop_size+512, :]\r\n input_img2 = input_img[crop_h:crop_h + crop_size, crop_w+1024:crop_w + crop_size+1024, :]\r\n input_img3 = input_img[crop_h:crop_h + crop_size, crop_w+1536:crop_w+ crop_size+1536, :]\r\n input_img8 = input_img[crop_h:crop_h + crop_size, crop_w+2048:crop_w+ crop_size+2048, :]\r\n\r\n input_img4 = input_img[crop_h1:crop_h1 + crop_size, crop_w1:crop_w1 + crop_size, :]\r\n input_img5 = input_img[crop_h1:crop_h1 + crop_size, crop_w1 + 512:crop_w1 + crop_size + 512, :]\r\n input_img6 = input_img[crop_h1:crop_h1 + crop_size, crop_w1 + 1024:crop_w1 + crop_size + 1024, :]\r\n input_img7 = input_img[crop_h1:crop_h1 + crop_size, crop_w1 + 1536:crop_w1 + crop_size + 1536, :]\r\n input_img9 = input_img[crop_h1:crop_h1 + crop_size, crop_w1 + 2048:crop_w1 + crop_size + 2048, :]\r\n\r\n return input_img0,input_img1,input_img2,input_img3,input_img4,input_img5,input_img6,input_img7,input_img8,input_img9\r\n\r\na=0\r\nfor i in os.listdir('result'):\r\n path=os.path.join('./result',i)\r\n img=cv2.imread(path)\r\n img0,img1,img2,img3,img4,img5,img6,img7,img8,img9=random_crop(img,256)\r\n\r\n img8=np.hstack((img0,img1,img2,img3,img8))\r\n\r\n cv2.imwrite('./R/{}.jpg'.format(a),img8,[int(cv2.IMWRITE_WEBP_QUALITY),100])\r\n a=a+1\r\n if a%3==0:\r\n img9 = np.hstack((img4, img5, img6, img7,img9))\r\n cv2.imwrite('./R/{}.jpg'.format(a), img9, [int(cv2.IMWRITE_WEBP_QUALITY), 100])\r\n a = a + 1\r\n print(a)\r\n\r\n","repo_name":"liping1997/cycle3","sub_path":"results/FFA/test_latest/123.py","file_name":"123.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11546858258","text":"import functools\nimport hashlib\nimport os\nimport pickle\nimport time\n\nimport xbmcvfs # pylint: disable=import-error\n\nfrom ..constants import ADDON_ID\n\nENABLED = True\nPATH = xbmcvfs.translatePath('special://temp/%s/cache/' % ADDON_ID)\n\n\ndef make_path():\n if not xbmcvfs.exists(PATH):\n xbmcvfs.mkdirs(PATH)\n\n return xbmcvfs.exists(PATH)\n\n\ndef reset_cache():\n xbmcvfs.rmdir(PATH, force=True)\n return make_path()\n\n\ndef _get_filename(name, args, kwargs):\n return hashlib.md5(name.encode('utf-8')).hexdigest() + \\\n hashlib.md5(str(args).encode('utf-8')).hexdigest() + \\\n hashlib.md5(str(kwargs).encode('utf-8')).hexdigest()\n\n\ndef _load(name, args=None, kwargs=None, limit=60):\n if not ENABLED or limit <= 0:\n return False, None\n\n if args is None:\n args = []\n if kwargs is None:\n kwargs = {}\n\n now = time.time()\n max_age = now - limit\n\n filename = os.path.join(PATH, _get_filename(name, args, kwargs))\n if xbmcvfs.exists(filename):\n mtime = xbmcvfs.Stat(filename).st_mtime()\n\n if mtime >= max_age:\n with open(filename, 'rb') as file_handle:\n payload = file_handle.read()\n\n return True, pickle.loads(payload)\n\n return False, None\n\n\ndef _save(name, args=None, kwargs=None, 
result=None):\n if args is None:\n args = []\n if kwargs is None:\n kwargs = {}\n\n try:\n payload = pickle.dumps(result)\n\n filename = os.path.join(PATH, _get_filename(name, args, kwargs))\n with open(filename, 'wb') as file_handle:\n file_handle.write(payload)\n\n return True\n\n except: # pylint: disable=bare-except\n return False\n\n\ndef cache_method(limit):\n def wrap(func):\n\n @functools.wraps(func)\n def memoizer(*args, **kwargs):\n if args:\n klass, rargs = args[0], args[1:]\n name = '%s.%s.%s' % (klass.__module__, klass.__class__.__name__, func.__name__)\n else:\n name = func.__name__\n rargs = args\n\n cached, payload = _load(name, rargs, kwargs, limit=limit)\n if cached:\n return payload\n\n payload = func(*args, **kwargs)\n if ENABLED and limit > 0:\n _save(name, rargs, kwargs, payload)\n\n return payload\n\n return memoizer\n\n return wrap\n\n\ndef cache_function(limit):\n def wrap(func):\n\n @functools.wraps(func)\n def memoizer(*args, **kwargs):\n name = func.__name__\n\n cached, payload = _load(name, args, kwargs, limit=limit)\n if cached:\n return payload\n\n payload = func(*args, **kwargs)\n if ENABLED and limit > 0:\n _save(name, args, kwargs, payload)\n\n return payload\n\n return memoizer\n\n return wrap\n\n\nmake_path()\n","repo_name":"anxdpanic/plugin.video.tubed","sub_path":"resources/lib/src/lib/memoizer.py","file_name":"memoizer.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"37"} +{"seq_id":"1334717108","text":"# we can carry put conditional logic\r\ns = 'text' # this is a string\r\ns = 4 # now it points to an integer\r\ns = (3,) # a tuple\r\ns = 4.2 # a float\r\nif type(s) == str: # check the data type\r\n print('it is a string')\r\nelif type(s) == int: # elif means 'else if'\r\n print(' it is an integer')\r\nelif type(s) == float:\r\n print('it is a float')\r\nelse:\r\n print('not a string or an integer or float')\r\n\r\n# we can type-check fresh data\r\nn = input('Please enter an integer numeric value ')\r\n# check if it's a numeric (i.e. digits only)\r\nif n.isnumeric():\r\n print('You entered', int(float(n)) ) # confidently type cast as in of float (from string)\r\n n = int(float(n)) # safely cast to an integer\r\nelse:\r\n print('that is not numeric')\r\n\r\nsquares = {1, 4, 9, 16, 25}\r\nprimes = (1, 2, 3, 5, 7, 11)\r\n\r\n# we can do other checks\r\nif n/2 == n//2 and n in squares: # check if it is an even number and a square nubmer\r\n print('that is an even square number')\r\nelif n/2 != n//2 or n in primes: # check odd or prime\r\n print('that is an odd or prime number')\r\n\r\n# comparison operators\r\n# ==, !=, <, >, <=, >=\r\n\r\n# the 'while' loop\r\nwhile True:\r\n print('this will never end')\r\n a = input('is it lunch yet? ')\r\n if a == 'yes':\r\n break # breaks out of the loop\r\n","repo_name":"onionmccabbage/Python_Intro_Nov2022","sub_path":"conditionals.py","file_name":"conditionals.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73800070508","text":"##DFS : 깊이 우선 탐색 알고리즘(Depth-First Search)\n##특정 경로로 탐색하다 특정 상황에서 최대한 깊숙이 들어가 노드를 방문한 후\n##다시 돌아가 다른 경로를 탐색한다\n##DFS는 스택 자료구조를 이용하며 구체적 동작은 다음과 같다\n##1. 탐색 시작 노드를 스택에 삽입하고 방문처리\n##2. 
스택의 최상단 노드에 방문하지 않은 인접노드가 있으면 그를 스택에 넣고 방문처리\n##방문하지 않은 인접노드가 없으면 스택에서 최상단 노드 pop\n##3.2번 과정을 끝까지 반복\n##**일반적으로 인접노드중 방문하지 않은 노드가 여러개면 번호가 낮은 순서부터 처리\n##DFS는 스택을 기초로 하기 때문에 구현이 간단하며, 실제로는 스택을 안써도 된다.\n##탐색을 수행함에 있어 데이터개수가 N개인 경우 O(N)의 시간이 소요된다\n##또한 스택을 이용하기에 재귀함수를 이용하면 매우 간결히 구현가능하다.\n##한번 해보자!\ndef dfs(graph, v, visited):\n visited[v] = True ##노드방문을 기록\n print(v, end=' ') ##노드 한번 출력해보기\n \n for i in graph[v]: ##v번 노드에 연결된 모든 노드에 접근한다는 의미\n if not visited[i]: ##만약 v에 연결된 i번째 노드가 미방문이라면?\n dfs(graph, i, visited) ##dfs 진행. 이 노드는 방문처리가 됨과 동\n ##시에 스택구조의 최상단 노드가 된다.\n\n\n\ngraph = [\n [],\n [2,3,4],\n [3,6],\n [4,5, 6,7],\n [5,7],\n [7,8],\n [2,3],\n [3,4,5],\n [5]\n]\n\nvisited = [False]*9##최초 visited는 전부 False 설정. 이 배열의 각 인덱스는\n##해당노드의 번호를 의미.\n\ndfs(graph, 1, visited)\n","repo_name":"csw1511/algorithm_study","sub_path":"Python codingtest bookstudy/3 DFSBFS/3탐색알고리즘DFS.py","file_name":"3탐색알고리즘DFS.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"70950890349","text":"# variant 1\r\n# task 1\r\n\r\nimport re\r\n\r\n# this function reads the file\r\ndef read_file():\r\n with open('mystem.xml', encoding = 'utf-8') as f:\r\n file = f.read()\r\n return file\r\n\r\n# counts the number of lines inside the se tags\r\ndef count_se(file):\r\n i = 0\r\n with open('result.txt', 'w', encoding = 'utf-8') as f:\r\n count_se = re.findall(r'<se>.*?</se>', file, re.DOTALL)\r\n for se in count_se:\r\n se = se.split('\\n')\r\n for line in se:\r\n i += 1\r\n f.write(str(i))\r\n return\r\n\r\n# this is the main function\r\ndef main():\r\n file = read_file()\r\n count_se(file)\r\n return\r\n\r\nmain()\r\n\r\n\r\n","repo_name":"katestratulat1999/PROGRAMMING","sub_path":"Control_07.04.18/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"25560168466","text":"def solution(S):\n print(\"input:{}\".format(S))\n answer = 0\n length=[]\n #n개단위로 짜른 문자열 : n은 len(st//2)까지\n #2부터 하면 안됌. 
예외:aabbaccc\n for n in range(1,len(S)//2+1):\n print()\n print(\"{}개단위로 짜르기\".format(n))\n\n #짜른단위만큼 같은 문자가 몇개 있는지(count X)\n count=1\n result=''\n tmpstr=S[:n]\n #print(\"초기화\")\n #print(tmpstr)\n for i in range(n,len(S),n):\n #print(\"#비교>>>\"+str(i))\n #print(tmpstr,S[i:i+n]) #첫번째, 이후값들\n\n if S[i:i+n]==tmpstr:\n count=count+1\n else:\n if count==1:\n count=\"\"\n result+=str(count)+tmpstr\n tmpstr=S[i:i+n]\n count=1\n #print(\"#최종\")\n #print(result)\n\n #짜르고 남은것들까지 붙인 최종\n if count==1:\n count=\"\"\n result+=str(count)+tmpstr\n #print(\"한번더\")\n #print(result)\n length.append(len(result))\n print(length)\n #print(\"리턴\")\n return min(length)\n\n\n\n '''\n for sb in ssplit:\n count=0\n if sb not in count_dict:\n count=count+1\n count_dict[sb]=count\n else:\n count_dict[sb]=count_dict[sb]+1\n print(count_dict)\n '''\n\n\n #새로운 문자열\n\n\n\n#\"aabbaccc\"\t7\n#\"ababcdcdababcdcd\"\t9\n#\"abcabcdede\"\t8\n#\"abcabcabcabcdededededede\"\t14\n#\"xababcdcdababcdcd\"\t17\n#print(solution(\"aabbaccc\"))\n#print(solution(\"ababcdcdababcdcd\"))\n#print(solution(\"abcabcdede\"))\nprint(solution(\"abcabcabcabcdededededede\"))\n#print(solution(\"xababcdcdababcdcd\"))\n","repo_name":"vvspearlvvs/CodingTest","sub_path":"4.기출문제/카카오/문자열압축/kakao.py","file_name":"kakao.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33653216387","text":"# cs1030\r\n# name: Ian Selby\r\n# 700720666\r\n# Assignment / Exercise: 2.16 Average Acceleration\r\n\r\n# Ask user for input\r\nv0, v1, t = eval(input(\"Please enter Velocity Initial, Velocity Final, and time: \"))\r\n\r\n\r\n# Assign values / calculate\r\n\r\naverage = (v1 - v0) / t\r\n\r\n\r\n# Print results (rounds to 4 decimal places)\r\nprint(f\"The average acceleration is: {round(average, 4)}\")\r\n","repo_name":"1221210/Projects","sub_path":"Python Classwork 2021/HW2/Completed/Ian Selby Problem 2.16.py","file_name":"Ian Selby Problem 2.16.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3282242046","text":"import os\nimport glob\nimport psycopg2\nimport pandas as pd\nfrom sql_queries import *\n\n\ndef process_song_file(cur, filepath):\n \"\"\"\n Process the songs files and insert data into dimension tables: songs and artists.\n :param cur: the database cursor\n :param filepath: the path to the song file\n \"\"\"\n # open song file\n df = pd.read_json(filepath, lines=True)\n\n # insert song record\n song_data = df[['song_id', 'title', 'artist_id', 'year', 'duration']].values[0].tolist()\n cur.execute(song_table_insert, song_data)\n \n # insert artist record0\n artist_data = df[['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']].values[0].tolist()\n cur.execute(artist_table_insert, artist_data)\n\n\ndef process_log_file(cur, filepath):\n \"\"\"\n Process the log files and insert data into dimension tables: time and users.\n Insert data into the facts table songplays.\n :param cur: the database cursor\n :param filepath: the path to the log file\n \"\"\"\n # open log file\n df = pd.read_json(filepath, lines=True)\n\n # filter by NextSong action\n df = df[df['page']=='NextSong']\n\n # convert timestamp column to datetime\n t = pd.to_datetime(df.ts, unit='ms')\n \n # insert time data records\n time_data = [t, t.dt.hour, t.dt.day, t.dt.isocalendar().week, t.dt.month, t.dt.year, t.dt.weekday]\n column_labels = ['start_time', 
'hour', 'day', 'week', 'month', 'year', 'weekday']\n time_df = pd.DataFrame.from_dict(dict(zip(column_labels, time_data)))\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n\n # load user table\n user_df = df[[\"userId\", \"firstName\", \"lastName\", \"gender\", \"level\"]]\n\n # insert user records\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (pd.to_datetime(row.ts, unit='ms'), int(row.userId), row.level, songid, artistid, row.sessionId,\n row.location, row.userAgent)\n cur.execute(songplay_table_insert, songplay_data)\n\n\ndef process_data(cur, conn, filepath, func):\n \"\"\"\n Processes either logs or songs depending on the given function.\n :param cur: the database cursor\n :param conn: the database connection\n :param filepath: the path to the data directory\n :param func: the function (process songs or logs)\n \"\"\"\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))\n\n\ndef main():\n \n \"\"\"\n Make a connection to the localhost, build the cursor, and processes \n data in the `data/song_data` and `data/log_data` folders.\n by inserting it in the database\n \"\"\"\n conn = psycopg2.connect(\"dbname=sparkifydb\")\n cur = conn.cursor()\n\n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Yahia-Ferchouli/Data-Engineer-Nanodegree","sub_path":"Project 1 Data Modeling with PostgreSQL/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"41171750088","text":"from Estructuras.Nodos.NodoDispersa import NodoDispersa\n#import random\n\n#Estructuras adicionales para la matriz dispersa\nclass NodoEncabezado:\n def __init__(self, id):\n self.id = id\n self.siguiente = None\n self.anterior = None\n self.acceso = None\n\nclass ListaEncabezado:\n def __init__(self):\n self.primero = None\n def insertar(self, nuevo):\n if self.primero == None:\n self.primero = nuevo\n elif nuevo.id < self.primero.id:\n nuevo.siguiente = self.primero\n self.primero.anterior = nuevo\n self.primero = nuevo\n else:\n aux = self.primero\n while aux.siguiente != None:\n if nuevo.id < aux.siguiente.id:\n nuevo.siguiente = aux.siguiente\n aux.siguiente.anterior = nuevo\n nuevo.anterior = aux\n aux.siguiente = nuevo\n break\n aux = aux.siguiente\n\n if aux.siguiente == None:\n aux.siguiente = nuevo\n nuevo.anterior = aux\n\n def devolver_encabezado(self, id):\n aux = self.primero\n while aux != None:\n if aux.id == id:\n return aux\n aux = aux.siguiente\n return 
None\n\n#Matriz Dispersa\nclass Dispersa:\n def __init__(self):\n self.encabezado_filas = ListaEncabezado()\n self.encabezado_columnas = ListaEncabezado()\n self.contador_filas = 0\n self.contador_columnas = 0\n #lista de tareas\n def insertar(self, fila, columna, contenido):\n nuevo_nodo = NodoDispersa(fila, columna)\n nuevo_nodo.celdas = contenido\n if (fila > 0 and fila < 25) and (columna > 0 and columna < 32):\n if self.contador_filas < 25:\n efila = self.encabezado_filas.devolver_encabezado(fila)\n if efila == None:\n efila = NodoEncabezado(fila)\n efila.acceso = nuevo_nodo\n self.encabezado_filas.insertar(efila)\n self.contador_filas += 1\n else:\n if nuevo_nodo.columna < efila.acceso.columna:\n nuevo_nodo.derecha = efila.acceso\n efila.acceso.izquierda = nuevo_nodo\n efila.acceso = nuevo_nodo\n else:\n aux = efila.acceso\n while aux.derecha != None:\n if nuevo_nodo.columna < aux.derecha.columna:\n nuevo_nodo.derecha = aux.derecha\n aux.derecha.izquierda = nuevo_nodo\n nuevo_nodo.izquierda = aux\n aux.derecha = nuevo_nodo\n break\n aux = aux.derecha\n if aux.derecha == None:\n aux.derecha = nuevo_nodo\n nuevo_nodo.izquierda = aux\n\n #Insertar encabezado columna\n if self.contador_columnas < 31:\n ecolumna = self.encabezado_columnas.devolver_encabezado(columna)\n if ecolumna == None:\n ecolumna = NodoEncabezado(columna)\n ecolumna.acceso = nuevo_nodo\n self.encabezado_columnas.insertar(ecolumna)\n self.contador_columnas += 1\n else:\n if nuevo_nodo.fila < ecolumna.acceso.fila:\n nuevo_nodo.abajo = ecolumna.acceso\n ecolumna.acceso.arriba = nuevo_nodo\n ecolumna.acceso = nuevo_nodo\n else:\n aux = ecolumna.acceso\n while aux.abajo != None:\n if nuevo_nodo.fila < aux.abajo.fila:\n nuevo_nodo.abajo = aux.abajo\n aux.abajo.arriba = nuevo_nodo\n nuevo_nodo.arriba = aux\n aux.abajo = nuevo_nodo\n break\n aux = aux.abajo\n if aux.abajo == None:\n aux.abajo = nuevo_nodo\n nuevo_nodo.arriba = aux\n\n def recorrerColumnas(self):\n eColumna = self.encabezado_columnas.primero\n print('\\n************ #Recorrido por columnas ************')\n\n while eColumna != None:\n\n actual = eColumna.acceso\n print('\\ncolumna ',str(actual.columna))\n print('fila Valor')\n while actual != None:\n print(str(actual.fila)+\" \"+actual.celdas)\n actual = actual.abajo\n eColumna = eColumna.siguiente\n print('*********** fin recorrido por columnas ************')\n\n def recorrerFilas(self):\n eFila = self.encabezado_filas.primero\n print('\\n***** #Recorrido por filas ****')\n \n while eFila != None:\n\n actual = eFila.acceso\n print('\\nFila',actual.fila)\n while actual != None:\n print(actual.columna)\n actual = actual.derecha\n eFila = eFila.siguiente\n\n def buscarLista(self, fila, columna):\n #Se hace un recorrido por columnas\n eColumna = self.encabezado_columnas.primero #Encabezado columnas\n while eColumna != None:\n actual = eColumna.acceso\n while actual != None:\n if fila == actual.fila and columna == actual.columna:\n #print(str(actual.fila)+\"-\"+str(actual.columna))\n return actual\n actual = actual.abajo\n eColumna = eColumna.siguiente\n return None\n \n ","repo_name":"Diegomrza/EDD_P1_2S_2021","sub_path":"Fase2/Estructuras/Dispersa.py","file_name":"Dispersa.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24161618540","text":"import random\n\n\ndef gen_password(length):\n result = ''.join([\n str(random.randint(0, 9))\n for _ in range(length)\n ])\n return result\n\n\ndef parse_length(request, 
default=10):\n length = request.GET.get('length', str(default))\n\n if not length.isnumeric():\n raise ValueError(\"VALUE ERROR: int\")\n\n length = int(length)\n\n if not 3 < length < 100:\n raise ValueError(\"RANGE ERROR: [4..99]\")\n\n return length\n\n\ndef format_list(lst):\n return '<br>
'.join(\n str(rec)\n for rec in lst\n )\n\n","repo_name":"dbradul/test_django","sub_path":"students/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17542136576","text":"# coding=utf-8\r\nimport json\r\nfrom math import fabs, copysign\r\n\r\nimport numpy as np\r\nimport xlrd\r\nfrom sklearn.feature_selection import f_regression\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\n# plot colre_names:\r\ncnames = {\r\n 'aliceblue': '#F0F8FF',\r\n 'antiquewhite': '#FAEBD7',\r\n 'aqua': '#00FFFF',\r\n 'aquamarine': '#7FFFD4',\r\n 'azure': '#F0FFFF',\r\n 'beige': '#F5F5DC',\r\n 'bisque': '#FFE4C4',\r\n 'black': '#000000',\r\n 'blanchedalmond': '#FFEBCD',\r\n 'blue': '#0000FF',\r\n 'blueviolet': '#8A2BE2',\r\n 'brown': '#A52A2A',\r\n 'burlywood': '#DEB887',\r\n 'cadetblue': '#5F9EA0',\r\n 'chartreuse': '#7FFF00',\r\n 'chocolate': '#D2691E',\r\n 'coral': '#FF7F50',\r\n 'cornflowerblue': '#6495ED',\r\n 'cornsilk': '#FFF8DC',\r\n 'crimson': '#DC143C',\r\n 'cyan': '#00FFFF',\r\n 'darkblue': '#00008B',\r\n 'darkcyan': '#008B8B',\r\n 'darkgoldenrod': '#B8860B',\r\n 'darkgray': '#A9A9A9',\r\n 'darkgreen': '#006400',\r\n 'darkkhaki': '#BDB76B',\r\n 'darkmagenta': '#8B008B',\r\n 'darkolivegreen': '#556B2F',\r\n 'darkorange': '#FF8C00',\r\n 'darkorchid': '#9932CC',\r\n 'darkred': '#8B0000',\r\n 'darksalmon': '#E9967A',\r\n 'darkseagreen': '#8FBC8F',\r\n 'darkslateblue': '#483D8B',\r\n 'darkslategray': '#2F4F4F',\r\n 'darkturquoise': '#00CED1',\r\n 'darkviolet': '#9400D3',\r\n 'deeppink': '#FF1493',\r\n 'deepskyblue': '#00BFFF',\r\n 'dimgray': '#696969',\r\n 'dodgerblue': '#1E90FF',\r\n 'firebrick': '#B22222',\r\n 'floralwhite': '#FFFAF0',\r\n 'forestgreen': '#228B22',\r\n 'fuchsia': '#FF00FF',\r\n 'gainsboro': '#DCDCDC',\r\n 'ghostwhite': '#F8F8FF',\r\n 'gold': '#FFD700',\r\n 'goldenrod': '#DAA520',\r\n 'gray': '#808080',\r\n 'green': '#008000',\r\n 'greenyellow': '#ADFF2F',\r\n 'honeydew': '#F0FFF0',\r\n 'hotpink': '#FF69B4',\r\n 'indianred': '#CD5C5C',\r\n 'indigo': '#4B0082',\r\n 'ivory': '#FFFFF0',\r\n 'khaki': '#F0E68C',\r\n 'lavender': '#E6E6FA',\r\n 'lavenderblush': '#FFF0F5',\r\n 'lawngreen': '#7CFC00',\r\n 'lemonchiffon': '#FFFACD',\r\n 'lightblue': '#ADD8E6',\r\n 'lightcoral': '#F08080',\r\n 'lightcyan': '#E0FFFF',\r\n 'lightgoldenrodyellow': '#FAFAD2',\r\n 'lightgreen': '#90EE90',\r\n 'lightgray': '#D3D3D3',\r\n 'lightpink': '#FFB6C1',\r\n 'lightsalmon': '#FFA07A',\r\n 'lightseagreen': '#20B2AA',\r\n 'lightskyblue': '#87CEFA',\r\n 'lightslategray': '#778899',\r\n 'lightsteelblue': '#B0C4DE',\r\n 'lightyellow': '#FFFFE0',\r\n 'lime': '#00FF00',\r\n 'limegreen': '#32CD32',\r\n 'linen': '#FAF0E6',\r\n 'magenta': '#FF00FF',\r\n 'maroon': '#800000',\r\n 'mediumaquamarine': '#66CDAA',\r\n 'mediumblue': '#0000CD',\r\n 'mediumorchid': '#BA55D3',\r\n 'mediumpurple': '#9370DB',\r\n 'mediumseagreen': '#3CB371',\r\n 'mediumslateblue': '#7B68EE',\r\n 'mediumspringgreen': '#00FA9A',\r\n 'mediumturquoise': '#48D1CC',\r\n 'mediumvioletred': '#C71585',\r\n 'midnightblue': '#191970',\r\n 'mintcream': '#F5FFFA',\r\n 'mistyrose': '#FFE4E1',\r\n 'moccasin': '#FFE4B5',\r\n 'navajowhite': '#FFDEAD',\r\n 'navy': '#000080',\r\n 'oldlace': '#FDF5E6',\r\n 'olive': '#808000',\r\n 'olivedrab': '#6B8E23',\r\n 'orange': '#FFA500',\r\n 'orangered': '#FF4500',\r\n 'orchid': '#DA70D6',\r\n 'palegoldenrod': '#EEE8AA',\r\n 'palegreen': '#98FB98',\r\n 'paleturquoise': '#AFEEEE',\r\n 
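# (matplotlib's named-color table: color name -> hex RGB string)\r\n 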
'palevioletred': '#DB7093',\r\n 'papayawhip': '#FFEFD5',\r\n 'peachpuff': '#FFDAB9',\r\n 'peru': '#CD853F',\r\n 'pink': '#FFC0CB',\r\n 'plum': '#DDA0DD',\r\n 'powderblue': '#B0E0E6',\r\n 'purple': '#800080',\r\n 'red': '#FF0000',\r\n 'rosybrown': '#BC8F8F',\r\n 'royalblue': '#4169E1',\r\n 'saddlebrown': '#8B4513',\r\n 'salmon': '#FA8072',\r\n 'sandybrown': '#FAA460',\r\n 'seagreen': '#2E8B57',\r\n 'seashell': '#FFF5EE',\r\n 'sienna': '#A0522D',\r\n 'silver': '#C0C0C0',\r\n 'skyblue': '#87CEEB',\r\n 'slateblue': '#6A5ACD',\r\n 'slategray': '#708090',\r\n 'snow': '#FFFAFA',\r\n 'springgreen': '#00FF7F',\r\n 'steelblue': '#4682B4',\r\n 'tan': '#D2B48C',\r\n 'teal': '#008080',\r\n 'thistle': '#D8BFD8',\r\n 'tomato': '#FF6347',\r\n 'turquoise': '#40E0D0',\r\n 'violet': '#EE82EE',\r\n 'wheat': '#F5DEB3',\r\n 'white': '#FFFFFF',\r\n 'whitesmoke': '#F5F5F5',\r\n 'yellow': '#FFFF00',\r\n 'yellowgreen': '#9ACD32'}\r\n\r\n'''\r\n输入lab曲线 curve 即可得到lab值\r\n'''\r\n\r\n\r\ndef fun1(x, y, s):\r\n a = np.sum([x[i] * s[i] for i in range(81)])\r\n b = np.sum([y[i] * s[i] for i in range(81)])\r\n res = 100 * a / b\r\n return res\r\n\r\n\r\ndef fun2(x, y, s, r):\r\n a = np.sum([x[i] * s[i] * r[i] for i in range(81)])\r\n b = np.sum([y[i] * s[i] for i in range(81)])\r\n res = a / b\r\n return res\r\n\r\n\r\ndef fun3(Xxn):\r\n if Xxn > 0.008856:\r\n fXxn = copysign(fabs(Xxn) ** (1 / 3), Xxn)\r\n else:\r\n fXxn = 7.787 * Xxn + 16 / 116\r\n\r\n return fXxn\r\n\r\n\r\ndef weights(X, Y, Z, S, Xn, Yn, Zn):\r\n '''\r\n 三个颜色在81维频段内有不同的weights需求.\r\n 所以暂时不针对乘积权值加权lab曲线的loss。\r\n\r\n '''\r\n w1 = [X[i] * S[i] / Xn for i in range(81)]\r\n w2 = [Y[i] * S[i] / Yn for i in range(81)]\r\n w3 = [Z[i] * S[i] / Zn for i in range(81)]\r\n # print(w1.index(min(w1)), w1.index(max(w1)))\r\n # print(w2.index(min(w2)), w2.index(max(w2)))\r\n # print(w3.index(min(w3)), w3.index(max(w3)))\r\n\r\n\r\ndef calculate_Lab(curve):\r\n S = [33.0, 39.92, 47.4, 55.17, 63.3, 71.81, 80.6, 89.53, 98.1, 105.8, 112.4, 117.75, 121.5, 123.45, 124.0, 123.6,\r\n 123.1, 123.3, 123.8, 124.09, 123.9, 122.92, 120.7, 116.9, 112.1, 106.98, 102.3, 98.81, 96.9, 96.78, 98.0,\r\n 99.94, 102.1, 103.95, 105.2, 105.67, 105.3, 104.11, 102.3, 100.15, 97.8, 95.43, 93.2, 91.22, 89.7, 88.83, 88.4,\r\n 88.19, 88.1, 88.06, 88.0, 87.86, 87.8, 87.99, 88.2, 88.2, 87.9, 87.22, 86.3, 85.3, 84.0, 82.21, 80.2, 78.24,\r\n 76.3, 74.36, 72.4, 70.4, 68.3, 66.3, 64.4, 62.8, 61.5, 60.2, 59.2, 58.5, 58.1, 58.0, 58.2, 58.5, 59.1]\r\n XYZ_fun = r'D:\\work\\project\\卡尔蔡司AR镀膜\\文档s\\蔡司资料0615\\Lab计算及膜厚范围.xlsx'\r\n wb = xlrd.open_workbook(XYZ_fun)\r\n data = wb.sheet_by_name(r'色分配函数')\r\n fx = data.col_values(2)[4:]\r\n fy = data.col_values(3)[4:]\r\n fz = data.col_values(4)[4:]\r\n Xn = fun1(fx, fy, S)\r\n Yn = fun1(fy, fy, S)\r\n Zn = fun1(fz, fy, S)\r\n weights(fx, fy, fz, S, Xn, Yn, Zn)\r\n X = fun2(fx, fy, S, curve)\r\n Y = fun2(fy, fy, S, curve)\r\n Z = fun2(fz, fy, S, curve)\r\n Xxn = X / Xn\r\n Yyn = Y / Yn\r\n Zzn = Z / Zn\r\n fXxn = fun3(Xxn)\r\n fYyn = fun3(Yyn)\r\n fZzn = fun3(Zzn)\r\n if Yyn > 0.008856:\r\n L = 116 * copysign(fabs(Yyn) ** (1 / 3), Yyn) - 16\r\n else:\r\n L = 903.3 * Yyn\r\n a = 500 * (fXxn - fYyn)\r\n b = 200 * (fYyn - fZzn)\r\n # print(\"Lab value: L: {}, a: {}, b: {}\".format(L, a, b))\r\n return L, a, b\r\n","repo_name":"jiachen0212/zeiss_rgb2lab","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6823,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} 
+{"seq_id":"4655129274","text":"#Email:fanyucai1@126.com\n#2019.7.5\n\nimport os\nimport subprocess\nimport re\nimport configparser\nimport argparse\nclass Myconf(configparser.ConfigParser):\n def __init__(self, defaults=None):\n configparser.ConfigParser.__init__(self, defaults=defaults)\n def optionxform(self, optionstr):\n return optionstr\n\nout_name=['Chr','Start','End','Ref','Alt','Func.refGene','Gene.refGene','GeneDetail.refGene',\n 'ExonicFunc.refGene',\t'AAChange.refGene',\t'cytoBand',\t'1000g2015aug_all',\t'avsnp150',\t'snp138',\n 'CLNALLELEID','CLNDN','CLNDISDB',\t'CLNREVSTAT','CLNSIG','cosmic88_coding','SIFT_score','SIFT_pred',\n 'Polyphen2_HDIV_score','Polyphen2_HDIV_pred','esp6500siv2_all','ExAC_ALL','ExAC_EAS','1000g2015aug_eas',\n '1000g2015aug_sas','1000g2015aug_afr','1000g2015aug_amr','1000g2015aug_eur','InterVar_automated','GT','AAChange.1',\n 'Ref_Reads',\t'Alt_Reads','Var']\ndef run_hgvs(var_site):\n p1=re.search(r'[A-Z]fs\\*\\d+$',var_site)###匹配移码突变\n p2=re.search(r'del([ACGT]+)ins',var_site)###匹配del和ins\n if p1:\n new=re.sub(r'[A-Z]fs\\*\\d+$',\"\",var_site)\n new=new+\"fs\"\n else:\n new=var_site\n if var_site.endswith(\"X\"):####终止密码子X替换*\n new1= re.sub(r'X$', \"*\", new)\n else:\n new1=new\n if p2:\n new2=re.sub(p2.group(1),\"\",new1,count=1)\n else:\n new2 = new1\n return new2\n\ndef sub_transcript(genename,configfile):\n config = Myconf()\n config.read(configfile)\n clinvar = config.get('database', 'Canonical_transcript_file')\n MSK=config.get('database','msk_transcript')\n genelist={}\n infile=open(MSK,\"r\")\n for line in infile:\n line=line.strip()\n array = re.split(\"\\s\", line)\n genelist[array[0]]=array[1]\n infile.close()\n infile=open(clinvar,\"r\")\n for line in infile:\n line=line.strip()\n array = line.split(\"\\t\")\n if not array[0] in genelist:\n genelist[array[0]]=array[1].split(\".\")[0]\n infile.close()\n if genename in genelist:\n return genelist[genename]\n else:\n return \"no\"\n#######################################################################################\ndef run(vcf,outdir,prefix,configfile):\n config = Myconf()\n config.read(configfile)\n annovar=config.get('software','annovar')\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n out=outdir+\"/\"+prefix\n ##########################run annovar\n par = \" -protocol refGene,cytoBand,snp138,avsnp150,exac03,esp6500siv2_all,1000g2015aug_all,1000g2015aug_eas,gnomad211_exome,gnomad211_genome,cosmic88_coding,clinvar_20190305,ljb26_all,intervar_20180118\"\n par += \",1000g2015aug_sas,1000g2015aug_afr,1000g2015aug_amr,1000g2015aug_eur \"\n par += \" -operation g,r,f,f,f,f,f,f,f,f,f,f,f,f,f,f,f,f \"\n par += \" -nastring . 
-polish \"\n subprocess.check_call(\"perl %s/table_annovar.pl %s %s/humandb -buildver hg19 -out %s -remove %s -vcfinput \" % (annovar, vcf, annovar, out, par), shell=True)\n subprocess.check_call(\"rm -rf %s.hg19_multianno.vcf %s.avinput\" % (out, out), shell=True)\n infile = open(\"%s.hg19_multianno.txt\" % (out), \"r\")\n outfile = open(\"%s.annovar.tsv\" % (out), \"w\")\n for i in range(len(out_name)):\n if i == 0:\n outfile.write(\"%s\" % (out_name[i]))\n else:\n outfile.write(\"\\t%s\" % (out_name[i]))\n outfile.write(\"\\n\")\n dict = {}\n for line in infile:\n line = line.strip()\n array = line.split(\"\\t\")\n name = []\n if line.startswith(\"Chr\"):\n for i in range(len(array)):\n name.append(array[i])\n dict[array[i]] = i\n else:\n GT = array[-1].split(\";\")[0].split(\"=\")[1]\n Ref_Reads = array[-1].split(\";\")[1].split(\"=\")[1]\n Alt_Reads = array[-1].split(\";\")[2].split(\"=\")[1]\n Var = array[-1].split(\";\")[3].split(\"=\")[1]\n ##########################format output knownCanonical transcript\n tmp = array[dict['AAChange.refGene']].split(\",\")\n final_nm =sub_transcript(array[6],configfile)\n if array[dict['AAChange.refGene']]==\".\":\n final_nm =\".\"\n elif final_nm==\"no\":\n final_nm=tmp[0]\n else:\n for key in tmp:\n if re.search(\"%s\"%(final_nm),key):\n final_nm=key\n ##########################\n for l in range(len(out_name)):\n if l == 0:\n outfile.write(\"%s\" % (array[dict[out_name[l]]]))\n elif out_name[l] == \"Var\":\n tmp_num = float(Var) * 100\n outfile.write(\"\\t%.2f\" % (tmp_num) + \"%\")\n elif out_name[l] == \"Alt_Reads\":\n outfile.write(\"\\t%s\" % (Alt_Reads))\n elif out_name[l] == \"Ref_Reads\":\n outfile.write(\"\\t%s\" % (Ref_Reads))\n elif out_name[l] == \"AAChange.1\":\n outfile.write(\"\\t%s\" % (run_hgvs(final_nm)))\n elif out_name[l] == \"GT\":\n outfile.write(\"\\t%s\" % (GT))\n else:\n outfile.write(\"\\t%s\" % (array[dict[out_name[l]]]))\n outfile.write(\"\\n\")\n infile.close()\n outfile.close()\n if os.path.exists(\"%s.hg19_multianno.txt\" % (out)):\n subprocess.check_call(\"rm -rf %s.hg19_multianno.txt\" % (out), shell=True)\n\nif __name__==\"__main__\":\n parser=argparse.ArgumentParser(\"Run annovar\")\n parser.add_argument(\"-v\",\"--vcf\",help=\"vcf file\",required=True)\n parser.add_argument(\"-o\",\"--outdir\",help=\"output directory\",required=True)\n parser.add_argument(\"-p\",\"--prefix\",help=\"prefix of output\",required=True)\n parser.add_argument(\"-c\", \"--config\", help=\"config file\", required=True)\n args=parser.parse_args()\n run(args.vcf,args.outdir,args.prefix,args.config)","repo_name":"fanyucai1/Tumor_BMC","sub_path":"core/anno_vcf.py","file_name":"anno_vcf.py","file_ext":"py","file_size_in_byte":5911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"27142073915","text":"from tkinter import Entry, Button, Tk, END, mainloop, Label\nimport MySQLdb\n\nclass Employee:\n def __init__(self):\n self.root = Tk()\n self.root.title('Employee')\n\n # upload data to db\n def submit(self):\n db = MySQLdb.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"Gman1212!\"\n )\n\n enum = self.employee_num_e.get()\n self.employee_num_e.delete(0, END)\n ssn = self.ssn_e.get()\n self.ssn_e.delete(0, END)\n mentor = self.ismentor_e.get()\n self.ismentor_e.delete(0, END)\n\n c = db.cursor()\n c.execute('USE psych_office_DB')\n c.execute('SET FOREIGN_KEY_CHECKS = 0')\n c.execute(\"INSERT INTO EMPLOYEE VALUES (\" + enum + \",\" + ssn + \",\" + mentor + \")\")\n 
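# NOTE: string-built SQL like the INSERT above is injection-prone; MySQLdb's cursor.execute also accepts parameters, e.g. c.execute(\"INSERT INTO EMPLOYEE VALUES (%s, %s, %s)\", (enum, ssn, mentor))\n 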
c.execute('SET FOREIGN_KEY_CHECKS = 1')\n db.commit()\n c.close()\n db.close()\n\n def run(self):\n self.employee_num_lbl = Label(self.root, text='Employee ID:')\n self.employee_num_lbl.grid(row=0, column=0, padx=5, pady=5)\n self.employee_num_e = Entry(self.root, width=10, borderwidth=1)\n self.employee_num_e.grid(row=0, column=1, padx=5, pady=5)\n\n self.ssn_lbl = Label(self.root, text='SSN:')\n self.ssn_lbl.grid(row=0, column=2, padx=5, pady=5)\n self.ssn_e = Entry(self.root, width=10, borderwidth=1)\n self.ssn_e.grid(row=0, column=3, padx=5, pady=5)\n\n self.ismentor_lbl = Label(self.root, text='Is Mentor:')\n self.ismentor_lbl.grid(row=0, column=4, padx=5, pady=5)\n self.ismentor_e = Entry(self.root, width=10, borderwidth=1)\n self.ismentor_e.grid(row=0, column=5, padx=5, pady=5)\n\n self.submit = Button(self.root, text='Submit', padx=5, pady=5, command=self.submit)\n self.submit.grid(row=1, column=0)\n\n mainloop()\n\nif __name__ == '__main__':\n e = Employee()\n e.run()","repo_name":"grantiod/COSC457","sub_path":"employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71125340266","text":"import sys\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n#receive input\r\ninput_files = sys.argv[1:-3]\r\noutput_file = sys.argv[-3]\r\nx_label = sys.argv[-2]\r\ny_label = sys.argv[-1]\r\n\r\n#plot csv dat and find common x-axis range between them\r\nplt.figure(figsize=(12,5))\r\n\r\nlist_ranges = []\r\n\r\nfor file in input_files:\r\n data = np.loadtxt(file, delimiter=',')\r\n left_val = np.min(data[:,0])\r\n right_val = np.max(data[:,0])\r\n range_val = (left_val, right_val)\r\n list_ranges.append(range_val)\r\n data_label = file[:-4]\r\n plt.plot(data[:,0], data[:,1], label=data_label)\r\n\r\nplt.yscale('log')\r\nplt.legend()\r\nplt.xlabel(x_label)\r\nplt.ylabel(y_label)\r\nplt.savefig(output_file)\r\nplt.show()\r\n\r\nmax_left = max(list_ranges, key = lambda x: x[0])[0]\r\nmin_right = min(list_ranges, key = lambda x: x[1])[1]\r\n\r\nprint(\"Overlap range: \", (max_left, min_right))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"HeatherAn/project-code-for-trainings","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16210131970","text":"from __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport os\nimport re\nfrom collections import defaultdict\n\nfrom six.moves import range\n\nfrom zincutils.zinc_analysis import (APIs, Compilations, CompileSetup, Relations,\n SourceInfos, Stamps, ZincAnalysis)\n\n\nclass ZincAnalysisParser(object):\n \"\"\"Parses a zinc analysis file.\"\"\"\n\n class ParseError(Exception):\n pass\n\n def parse_from_path(self, infile_path):\n \"\"\"Parse a ZincAnalysis instance from a text file.\"\"\"\n with open(infile_path, 'rb') as infile:\n return self.parse(infile)\n\n def parse(self, infile):\n \"\"\"Parse a ZincAnalysis instance from an open text file.\"\"\"\n def parse_element(cls):\n parsed_sections = [self._parse_section(infile, header) for header in cls.headers]\n return cls(parsed_sections)\n\n self._verify_version(infile)\n compile_setup = parse_element(CompileSetup)\n relations = parse_element(Relations)\n stamps = parse_element(Stamps)\n apis = parse_element(APIs)\n source_infos = 
parse_element(SourceInfos)\n compilations = parse_element(Compilations)\n return ZincAnalysis(compile_setup, relations, stamps, apis, source_infos, compilations)\n\n def parse_products(self, infile):\n \"\"\"An efficient parser of just the products section.\"\"\"\n self._verify_version(infile)\n return self._find_repeated_at_header(infile, b'products')\n\n def parse_deps(self, infile, classes_dir):\n self._verify_version(infile)\n # Note: relies on the fact that these headers appear in this order in the file.\n bin_deps = self._find_repeated_at_header(infile, b'binary dependencies')\n src_deps = self._find_repeated_at_header(infile, b'direct source dependencies')\n ext_deps = self._find_repeated_at_header(infile, b'direct external dependencies')\n\n # TODO(benjy): Temporary hack until we inject a dep on the scala runtime jar.\n scalalib_re = re.compile(r'scala-library-\\d+\\.\\d+\\.\\d+\\.jar$')\n filtered_bin_deps = defaultdict(list)\n for src, deps in bin_deps.iteritems():\n filtered_bin_deps[src] = filter(lambda x: scalalib_re.search(x) is None, deps)\n\n transformed_ext_deps = {}\n def fqcn_to_path(fqcn):\n return os.path.join(classes_dir, fqcn.replace(b'.', os.sep) + b'.class')\n for src, fqcns in ext_deps.items():\n transformed_ext_deps[src] = [fqcn_to_path(fqcn) for fqcn in fqcns]\n\n ret = defaultdict(list)\n for d in [filtered_bin_deps, src_deps, transformed_ext_deps]:\n for src, deps in d.items():\n ret[src].extend(deps)\n return ret\n\n def rebase_from_path(self, infile_path, outfile_path, pants_home_from, pants_home_to, java_home=None):\n with open(infile_path, 'rb') as infile:\n with open(outfile_path, 'wb') as outfile:\n self.rebase(infile, outfile, pants_home_from, pants_home_to, java_home)\n\n def rebase(self, infile, outfile, pants_home_from, pants_home_to, java_home=None):\n self._verify_version(infile)\n outfile.write(ZincAnalysis.FORMAT_VERSION_LINE)\n\n def rebase_element(cls):\n for header in cls.headers:\n self._rebase_section(cls, header, infile, outfile, pants_home_from, pants_home_to, java_home)\n\n rebase_element(CompileSetup)\n rebase_element(Relations)\n rebase_element(Stamps)\n rebase_element(APIs)\n rebase_element(SourceInfos)\n rebase_element(Compilations)\n\n def _rebase_section(self, cls, header, lines_iter, outfile,\n pants_home_from, pants_home_to, java_home=None):\n # Booleans describing the rebasing logic to apply, if any.\n rebase_pants_home_anywhere = header in cls.pants_home_anywhere\n rebase_pants_home_prefix = header in cls.pants_home_prefix_only\n filter_java_home_anywhere = java_home and header in cls.java_home_anywhere\n filter_java_home_prefix = java_home and header in cls.java_home_prefix_only\n\n # Check the header and get the number of items.\n line = next(lines_iter)\n if header + b':\\n' != line:\n raise self.ParseError('Expected: \"{}:\". 
Found: \"{}\"'.format(header, line))\n n = self._parse_num_items(next(lines_iter))\n\n # Iterate over the lines, applying rebasing/dropping logic as required.\n rebased_lines = []\n num_rebased_items = 0\n for _ in range(n):\n line = next(lines_iter)\n drop_line = ((filter_java_home_anywhere and java_home in line) or\n (filter_java_home_prefix and line.startswith(java_home)))\n if not drop_line:\n if rebase_pants_home_anywhere:\n rebased_line = line.replace(pants_home_from, pants_home_to)\n elif rebase_pants_home_prefix and line.startswith(pants_home_from):\n rebased_line = pants_home_to + line[len(pants_home_from):]\n else:\n rebased_line = line\n rebased_lines.append(rebased_line)\n num_rebased_items += 1\n if not cls.inline_vals: # These values are blobs and never need to be rebased.\n rebased_lines.append(next(lines_iter))\n elif not cls.inline_vals:\n next(lines_iter) # Also drop the non-inline value.\n\n # Write the rebased lines back out.\n outfile.write(header + b':\\n')\n outfile.write(b'{} items\\n'.format(num_rebased_items))\n chunk_size = 10000\n for i in range(0, len(rebased_lines), chunk_size):\n outfile.write(b''.join(rebased_lines[i:i+chunk_size]))\n\n def _find_repeated_at_header(self, lines_iter, header):\n header_line = header + b':\\n'\n while next(lines_iter) != header_line:\n pass\n return self._parse_section(lines_iter, expected_header=None)\n\n def _verify_version(self, lines_iter):\n version_line = next(lines_iter)\n if version_line != ZincAnalysis.FORMAT_VERSION_LINE:\n raise self.ParseError('Unrecognized version line: ' + version_line)\n\n def _parse_section(self, lines_iter, expected_header=None):\n \"\"\"Parse a single section.\"\"\"\n if expected_header:\n line = next(lines_iter)\n if expected_header + b':\\n' != line:\n raise self.ParseError('Expected: \"{}:\". Found: \"{}\"'.format(expected_header, line))\n n = self._parse_num_items(next(lines_iter))\n relation = defaultdict(list) # Values are lists, to accommodate relations.\n for _ in range(n):\n k, _, v = next(lines_iter).partition(b' -> ')\n if len(v) == 1: # Value on its own line.\n v = next(lines_iter)\n relation[k].append(v[:-1])\n return relation\n\n _num_items_re = re.compile(r'(\\d+) items\\n')\n\n def _parse_num_items(self, line):\n \"\"\"Parse a line of the form ' items' and returns as an int.\"\"\"\n matchobj = self._num_items_re.match(line)\n if not matchobj:\n raise self.ParseError('Expected: \" items\". 
Found: \"{0}\"'.format(line))\n return int(matchobj.group(1))\n","repo_name":"pantsbuild/zincutils","sub_path":"zincutils/zinc_analysis_parser.py","file_name":"zinc_analysis_parser.py","file_ext":"py","file_size_in_byte":6755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28386207149","text":"from aws_cdk import Stack\nfrom aws_cdk import aws_s3 as _s3\nfrom constructs import Construct\n\n\nclass S3DeployStack(Stack):\n def __init__(\n self, scope: Construct, construct_id: str, bucket_id: str, **kwargs\n ) -> None:\n super().__init__(scope, construct_id, **kwargs)\n self.create_bucket(bucket_id)\n\n def create_bucket(self, bucket_id: str):\n _s3.Bucket(\n self,\n bucket_id,\n bucket_name=bucket_id,\n )\n","repo_name":"M-Borsuk/BudgetGuard","sub_path":"budgetguard/core/infrastructure/cdk_infrastructure/s3_buckets/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2826574967","text":"import execjs\r\nfrom main import run\r\nimport random\r\n\r\ndef getdata(id):\r\n def get_js():\r\n f = open(\"setup.js\", 'r', encoding='UTF-8')\r\n line = f.readline()\r\n htmlstr = ''\r\n while line:\r\n htmlstr = htmlstr + line\r\n line = f.readline()\r\n return htmlstr\r\n\r\n\r\n jsstr = get_js()\r\n ctx = execjs.compile(jsstr)\r\n s = str(id)\r\n return(ctx.call('getPrice',s))\r\n\r\nmaxResult = [0,0]\r\nwhile 1:\r\n i = random.randint(1,4294967296)\r\n inputData = getdata(i)\r\n result = run(inputData)\r\n if result > maxResult[1]: maxResult = [i,result]\r\n print(result,'i=',i,', Best: id=',maxResult[0],', Score=',maxResult[1])\r\n","repo_name":"Kyoko-N/MS_algorithm_battle","sub_path":"RunData.py","file_name":"RunData.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28795296875","text":"import os\r\nimport re\r\nfrom xml.etree import ElementTree as ET\r\n\r\nimport LTFuncsHTMLParser\r\n\r\n\r\nclass HTMLType:\r\n ALL, SCV, FO, NLFIT = range(4)\r\n\r\n\r\ndef _func_entries(funcs):\r\n for func in funcs:\r\n yield func.split(\"\\t\" * 10)\r\n\r\n\r\nclass GenerateHTML:\r\n def __init__(self, lang, funcs):\r\n self.funcs = funcs\r\n self.lang = lang\r\n\r\n def Exec(self, htmlType):\r\n s = ''\r\n\r\n fitfunc = False\r\n funcs_done = set()\r\n fitfunc_categoryname = ''\r\n\r\n def check_htmlType_continue():\r\n if fitfunc:\r\n if htmlType == HTMLType.NLFIT and fitfunc_categoryname in ('Implicit', 'PFW', 'Surface Fitting', 'Oberflächenanpassung'):\r\n return True\r\n if htmlType == HTMLType.SCV and fitfunc_categoryname in ('Multiple Variables', '複数の変数', 'Mehrere Variablen'):\r\n return True\r\n else:\r\n if htmlType == HTMLType.FO or htmlType == HTMLType.NLFIT:\r\n return True\r\n return False\r\n\r\n fitting_function_prefixs = {'E': 'Fitting Functions', 'J': 'フィット関数', 'G': 'Anpassungsfunktionen'}\r\n for entries in _func_entries(self.funcs):\r\n if len(entries) == 1: # category\r\n func = entries[0]\r\n funcs_done.clear()\r\n fitfunc = func.startswith(fitting_function_prefixs[self.lang])\r\n if fitfunc:\r\n try:\r\n fitfunc_categoryname = func.split('-')[1].lstrip()\r\n except IndexError:\r\n print(func.encode())\r\n continue\r\n if htmlType != HTMLType.SCV:\r\n func = fitfunc_categoryname\r\n if check_htmlType_continue():\r\n continue\r\n if len(s):\r\n s += '\\n'\r\n s += '\\n \\n' % func.strip()\r\n 
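# a multi-entry row holds (link, name) pairs followed by one shared description string\r\n 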
else:\r\n if check_htmlType_continue():\r\n continue\r\n total = (len(entries) - 1) // 2\r\n for i in range(total):\r\n funclink = entries[i * 2].strip()\r\n funcname = entries[i * 2 + 1].strip()\r\n description = entries[-1].strip()\r\n funcnametest = funcname\r\n if fitfunc:\r\n funcnametest = 'nlf_' + funcnametest\r\n if funcnametest in funcs_done:\r\n continue\r\n funcs_done.add(funcnametest)\r\n if not funclink.startswith('http'):\r\n funclink = 'http://wikis' + funclink\r\n description = description.replace('href=\"/doc', 'href=\"{}/doc'.format(LTFuncsHTMLParser.http_originlab))\r\n s += ' \\n \\n \\n \\n' \\\r\n % (funclink, \"nlf_\" if fitfunc else \"\", funcname[len(\"nlf_\"):] if fitfunc else funcname, description)\r\n\r\n if len(s):\r\n s += '
%s
%s%s
'\r\n s = s.replace(LTFuncsHTMLParser.get_image_path(self.lang), './images/').replace(LTFuncsHTMLParser.image_path_suffix, '')\r\n return s\r\n\r\n\r\nclass GenerateXML:\r\n def __init__(self):\r\n self.lang = 'E'\r\n parser = LTFuncsHTMLParser.MyHTMLParser()\r\n parser.feed(LTFuncsHTMLParser.get_page_source(self.lang))\r\n self.funcs = parser.results\r\n # with open('parse_results_{}.txt'.format(self.lang), encoding='utf-8') as f:\r\n # self.funcs = f.readlines()\r\n\r\n def Exec(self):\r\n imagepath = LTFuncsHTMLParser.get_image_path(self.lang).replace('\\\\', '\\\\\\\\')\r\n p_imagepath = re.compile(r'src=\"({}.+?\\.png)\\?v=0\"'.format(imagepath))\r\n root = ET.Element('Root')\r\n tree = ET.ElementTree(root)\r\n for entries in _func_entries(self.funcs):\r\n if len(entries) == 1: # category\r\n category = ET.SubElement(root, 'Category')\r\n category.set('Label', entries[0])\r\n else:\r\n func = entries[1]\r\n function = ET.SubElement(category, func[:func.find('(')])\r\n function.text = func\r\n images = p_imagepath.findall(entries[2])\r\n if images:\r\n function.set('images', '|'.join(images))\r\n\r\n with open('Functions.xml', 'w', encoding='utf-8') as f:\r\n tree.write(f, encoding='unicode')\r\n","repo_name":"folger/Python","sub_path":"GenerateLabtalkFunctionsHTML/GenerateLTFuncHTML.py","file_name":"GenerateLTFuncHTML.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26628318950","text":"l1=[\"Bhindi\",\"Aloo\",\"Chowmin\",\"Pasta\"]\r\n#there is a prbm.now u take only odd position saman,skip even position\r\n#this is normal programming\r\ni=1\r\nfor item in l1:\r\n if i%2 is not 0:\r\n print(f\"Manisha please buy {item}\")\r\n i+=1\r\n#now we use enumareated\r\n#here it starts from 0 so here we use even position\r\nfor index,item in enumerate(l1):\r\n if index %2==0:\r\n print(f\"Sanu please bye {item}\")\r\n\r\n","repo_name":"manisha-jaiswal/tuple-list-dictionary-enum","sub_path":"enumerated_function.py","file_name":"enumerated_function.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16219371957","text":"from copy import *\nfrom layer import *\n\n\nclass NeuralNetwork:\n \"\"\"This class represents a neural network that can have several layers.\n A neural network takes a given number of inputs and yields the same number\n of outputs as the number of neurons in the last layer\"\"\"\n\n def __init__(self, list_layers):\n \"\"\"\n usage : my_net = neural_network(list_layers)\n Inits a neural network that has list_layers for layers. This function\n also check the validity of such a network (it checks if every layer takes\n as much input as there is neurons in the previous layer).\n :param list_layers: List of layers from which to create the neural network.\n This list is deepcopy'ed.\n \"\"\"\n # check if list_layers is a list of layers\n for a in list_layers:\n if not isinstance(a, Layer):\n raise TypeError(\"In 'NeuralNetwork.__init__()' :\"\n \" The list you provided does not only contain Layers. 
It should.\")\n\n # check validity of layers\n cur_nbr_input = list_layers[0].nbr_input\n for layer in list_layers:\n if layer.nbr_input != cur_nbr_input:\n raise ValueError(\"In 'NeuralNetwork.__init__() : the layers provided do not form a valid network\\n\")\n cur_nbr_input = layer.nbr_neuron\n\n # actual function\n self.layers = deepcopy(list_layers)\n self.nbr_layer = len(self.layers)\n self.nbr_input = list_layers[0].nbr_input\n self.nbr_output = len(list_layers[-1]) # list[-1] -> last element of list\n\n @staticmethod\n def quick_init(list_sizes, nbr_input):\n \"\"\"\n Usage : my_net = NeuralNetwork.random_init(list_sizes, nbr_input)\n Inits a neural network that has 'len(list_sizes)' layers, their respective\n size (number of neurons in the given layer) should be given by list_sizes,\n where list_sizes[n] is the size of the nth layer\n :return: a bot created as described.\n \"\"\"\n if not isinstance(list_sizes, list):\n raise TypeError(\"In 'NeuralNetwork.init_random' : argument 'list_sizes' has to be a list.\")\n for a in list_sizes:\n if not isinstance(a, int):\n raise TypeError(\"In 'NeuralNetwork.init_random' : list_sizes should only contain integers.\")\n if not isinstance(nbr_input, int):\n raise TypeError(\"In 'NeuralNetwork.init_random' : argument 'nbr_input' should be an integer\")\n\n list_layers = []\n cur_nbr_input = nbr_input # needed to make every layer take the same number\n # of input that the previous layer had neurons\n for i in range(len(list_sizes)):\n list_layers.append(Layer.random_init(list_sizes[i], cur_nbr_input))\n cur_nbr_input = list_sizes[i]\n return NeuralNetwork(list_layers)\n\n def get_output(self, inputs):\n \"\"\"\n Feeds the first layer of the net with 'inputs', and then feeds every output\n from one layer to the following. returns the output from the last layer\n \"\"\"\n if not isinstance(inputs, list):\n raise TypeError(\"In 'NeuralNetwork.get_output()' : argument 'inputs' has to be a list.\")\n\n if len(inputs) != self.nbr_input:\n raise TypeError(\n \"In 'NeuralNetwork.get_output()': wrong number of input. Given \" + str(len(inputs)) + \" needed \" + str(\n self.nbr_input) + \".\\n\")\n\n cur_input = inputs # will hold the successive result of each layer\n for layer in self.layers:\n cur_input = layer.get_output(cur_input)\n return cur_input\n\n def __repr__(self):\n res = \"\"\n res += \"This neural network takes \" + str(self.nbr_input) + \" inputs.\\n\"\n for i, layer in enumerate(self.layers):\n res += \"Layer \" + str(i) + \" :\\n\"\n res += str(layer)\n return res\n\n def same_structure(self, brain2):\n \"\"\"\n Returns 1 if brain2 has the same structure as self\n Same structure implies same number of inputs, same number of layers,\n same number of neurons on each layers\n \"\"\"\n if not isinstance(brain2, NeuralNetwork):\n raise TypeError(\"In 'neural_network.crossover()': neural_network expected, \" + str(type(brain2)) + \" given.\")\n if self.nbr_input != brain2.nbr_input:\n return 0\n if len(self.layers) != len(brain2.layers):\n return 0\n\n for i in range(len(self.layers)):\n if len(self.layers[i]) != len(brain2.layers[i]):\n return 0\n\n return 1\n\n def crossover(self, brain2):\n \"\"\"Creates a new neural_network, crossover from self and brain2. 
The new\n network brain takes exactly (and for each layer) half his neurons from 'self'\n and the other half from 'brain2' : only the distribution on each layer\n is random.\n :return: a new bot with, on each layer, as many neurons from bot 1 as bot 2.\n \"\"\"\n # protection\n if not isinstance(brain2, NeuralNetwork):\n raise TypeError(\"In 'neural_network.crossover()': neural_network expected, \" + str(type(brain2)) + \" given.\")\n if not self.same_structure(brain2):\n raise ValueError(\"In 'neural_network.crossover()':\"\n \" the neural_network provided doesn't have the same structure as 'self'\")\n\n list_layer_brain3 = [] # layers of the neural net of brain3\n\n for i_layer, layer in enumerate(self.layers):\n list_neurons = [] # neurons that we will insert in brain3 as a layer\n nbr_neuron_from_self = len(layer)//2\n\n # We have to select nbr_neuron_from_self integers in the sequence range(len(layer))\n index_of_neurons_from_self = []\n for i in range(nbr_neuron_from_self):\n index = randrange(len(layer))\n while index in index_of_neurons_from_self: # We don't want the same index twice\n index = randrange(len(layer))\n index_of_neurons_from_self.append(index)\n\n for i in range(len(layer)):\n if i in index_of_neurons_from_self:\n list_neurons.append(deepcopy(self[i_layer][i]))\n else:\n list_neurons.append(deepcopy(brain2[i_layer][i]))\n\n list_layer_brain3.append(Layer(list_neurons))\n\n return NeuralNetwork(list_layer_brain3)\n\n def mutation(self, avg):\n \"\"\"\n Mutate (or not) the neural network\n :param avg: the avergae of brain who will be mutated if you call this function with a tons of neural network\n :return:\n \"\"\"\n for l in self.layers:\n l.mutation(avg / len(self.layers))\n\n # The following functions were created in order to meet python's protocol for\n # sequences. (protocol = interface in python)\n # with the following function, 'NeuralNetwork' will act as a \"sequence\"\n # (like a list) of layers. eg: 'for layer in neural_network:'\n def __len__(self):\n return self.nbr_layer\n\n def __getitem__(self, key):\n if key >= self.nbr_layer:\n raise ValueError(\"in 'Layer.__getitem__()' : No such layer. There is only \" + str(self.nbr_layer) + \", you asked for 'neural_network[\" + str(key) + \"]'.\\n\")\n return self.layers[key]\n\n def __setitem__(self, key, value):\n if key >= self.nbr_layer:\n raise ValueError(\"in 'Layer.__setitem__()' : No such layer. 
There is only \" + str(self.nbr_layer) + \", you asked for 'neural_network[\" + str(key) + \"]'.\\n\")\n\n if not isinstance(value, Layer):\n raise TypeError(\"in 'Layer.__setitem__()' : neural_network only contains layers, not \" + str(type(value)) + \"s.\\n\")\n\n self.layers[key] = value\n\n def __delitem__(self, key):\n return self.layers.__delitem__(key)\n\n def __iter__(self):\n return self.layers.__iter__()\n\n def __reversed__(self):\n return self.layers.__reversed__()\n","repo_name":"Yoz0/Agar.io-Learning-Bots","sub_path":"neuralnetwork.py","file_name":"neuralnetwork.py","file_ext":"py","file_size_in_byte":8084,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"22744160643","text":"from bwsi_grader.python.three_five import grader\n\ndef student_func(x):\n answer=\"\"\n if x%3==0:\n answer=answer+\"three\"\n if x%5==0:\n answer=answer+\"five\"\n if answer==\"\":\n return x\n return answer\ngrader(student_func) ","repo_name":"hyu-likelion/Y1K3","sub_path":"week1/soonwoo/session/three_five.py","file_name":"three_five.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32006322978","text":"import time\nfrom datetime import datetime\n\nfrom config import configured_sensors, dashboard_update, prediction_update, homeassistant_integration\nfrom sensor_calculations import (\n determine_last_watered,\n determine_next_water,\n poll_sensors,\n)\nfrom streamlit_components import streamlit_init_layout\n\nif homeassistant_integration:\n import json\n import paho.mqtt.client as mqtt\n from config import clientname, hostname, port, timeout, hass_username, hass_password, mqtt_topic_root\n\n\ndef create_hero_string(available_sensors: list, new_vals: list, sensor_time: datetime) -> str:\n str_ar = []\n sensor_list = list(available_sensors)\n for idx in range(0, len(sensor_list)):\n str_ar.append(\n \"\".join(\n [\n f\"
{sensor_list[idx].capitalize()}:
\",\n f\"{new_vals[idx]:.2f}\",\n \"
\",\n ]\n )\n )\n str_ar.append(f\"

{str(sensor_time).split('T')[-1][:8]}

\")\n return \"\".join(str_ar)\n\n\ndef create_info_string(last_watered: datetime, next_water: datetime) -> str:\n s1 = f\"
Last watered: {last_watered}
\"\n s2 = f\"
Water next: {next_water}
\"\n return \"\".join([s1, s2])\n\n\ndef monitor_plants(curr_time: datetime) -> None:\n if homeassistant_integration:\n client = mqtt.Client(clientname)\n client.username_pw_set(hass_username, hass_password)\n client.connect(hostname, port, timeout)\n\n client.loop_start()\n\n print(f\"\\nCurrent Time is {curr_time}\")\n # Define the sensors and current time\n available_sensors = configured_sensors.keys()\n # Initialise from saved data\n sensor_dict, hero, info = streamlit_init_layout(available_sensors, curr_time)\n # Initialise variables\n last_watered = None\n\n # Recieve new data\n while True:\n # Run predictions\n last_watered = determine_last_watered(last_watered)\n next_water, last_watered = determine_next_water(last_watered)\n\n if homeassistant_integration:\n data = {f\"water_next\": str(next_water)}\n client.publish(mqtt_topic_root + \"water_next\", json.dumps(data))\n print(f\"Published {data} to {mqtt_topic_root + 'water_next'} on MQTT\")\n \n data = {f\"water_last\": str(last_watered)}\n client.publish(mqtt_topic_root + \"water_last\", json.dumps(data))\n print(f\"Published {data} to {mqtt_topic_root + 'water_last'} on MQTT\")\n\n # Add information readouts\n info_string = create_info_string(last_watered, next_water)\n info.markdown(info_string, unsafe_allow_html=True)\n\n # Update sensors\n for _ in range(0, prediction_update):\n # time_now = datetime.now()\n updated_sensor_dict, new_vals, sensor_time = poll_sensors(\n sensor_dict, available_sensors\n )\n # Update the 'hero' sensor readouts\n hero_string = create_hero_string(available_sensors, new_vals, sensor_time)\n hero.markdown(\n f\"

{hero_string}

 \",\n            unsafe_allow_html=True,\n        )\n\n        time.sleep(dashboard_update)\n\n\ndef main() -> None:\n    time_start = datetime.now()\n    monitor_plants(time_start)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"andrewginns/plant-watch","sub_path":"src/plant_watch.py","file_name":"plant_watch.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}+{"seq_id":"25127075778","text":"#!/usr/bin/env python\n\nimport sys\n\nimport Script\n\ndef usage():\n    sys.stderr.write(\"TBW\\n\")\n\nP = Script.Script(\"inserts.py\", version=\"1.0\", usage=usage)\n\nclass GTFregions():\n    entries = {}\n    currentChrom = \"\"\n    currentRegions = []\n    span = 1000\n\n    def __init__(self, span=1000):\n        self.entries = {}\n        self.currentChrom = \"\"\n        self.currentRegions = []\n        self.span = span\n\n    def addRegion(self, chrom, position):\n        if chrom != self.currentChrom:\n            if self.currentRegions != []:\n                self.entries[self.currentChrom] = self.currentRegions\n            self.currentChrom = chrom\n            if chrom in self.entries:\n                self.currentRegions = self.entries[self.currentChrom]\n            else:\n                self.currentRegions = []\n        self.currentRegions.append((position - self.span, position + self.span))\n\n    def doneAdding(self):\n        if self.currentRegions != []:\n            self.entries[self.currentChrom] = self.currentRegions\n        for (chrom, regions) in self.entries.items():\n            regions.sort(key=lambda s: s[0])\n            print(\"{}: {} regions.\".format(chrom, len(regions)))\n\n    def parseGTF(self, filename):\n        with open(filename, \"r\") as f:\n            for line in f:\n                if not line.startswith('#'):\n                    fields = line.split('\\t')\n                    if fields[2] == 'transcript':\n                        chrom = fields[0]\n                        if fields[6] == '+':\n                            pos = int(fields[3])\n                        else:\n                            pos = int(fields[4])\n                        self.addRegion(chrom, pos)\n        self.doneAdding()\n\n    def posInRegion(self, chrom, pos):\n        regions = self.entries[chrom]\n        for r in regions:\n            if r[0] > pos:\n                return False\n            elif r[0] <= pos <= r[1]:\n                return True\n        return False\n\nclass Params():\n    maxsize = 1000\n    outfile = None\n    gtffile = None\n    regsize = 1000\n    gtfregions = None\n\n    def __init__(self, args):\n        P.standardOpts(args)\n        next = \"\"\n        for a in args:\n            if next == '-o':\n                self.outfile = a\n                next = \"\"\n            elif next == '-gtf':\n                self.gtffile = P.isFile(a)\n                next = \"\"\n            elif next == '-s':\n                self.maxsize = P.toInt(a)\n                next = \"\"\n            elif next == '-r':\n                self.regsize = P.toInt(a)\n                next = \"\"\n            elif a in ['-o', '-gtf', '-s', '-r']:\n                next = a\n        if self.gtffile:\n            G = GTFregions()\n            G.parseGTF(self.gtffile)\n            self.gtfregions = G\n\ndef main(PA):\n    totlines = 0\n    data = [0]*PA.maxsize\n    G = PA.gtfregions\n\n    while True:\n        line = sys.stdin.readline()\n        if line == '':\n            break\n        totlines += 1\n        fields = line.rstrip(\"\\r\\n\").split(\"\\t\")\n        if fields[6] == '=' and fields[8][0] != '-':\n            c = int(fields[8])\n            if c < PA.maxsize:\n                good = True\n                if G:\n                    chrom = fields[2]\n                    pos = int(fields[3])\n                    good = G.posInRegion(chrom, pos)\n                if good:\n                    data[c] += 1\n\n    if PA.outfile:\n        out = open(PA.outfile, \"w\")\n    else:\n        out = sys.stdout\n    try:\n        for i in range(PA.maxsize):\n            out.write(\"{}\\t{}\\t{}\\n\".format(i, data[i], 1.0*data[i]/totlines))\n    finally:\n        if PA.outfile:\n            out.close()\n\nif __name__ == \"__main__\":\n    PA = Params(sys.argv[1:])\n    main(PA)\n\n","repo_name":"uf-icbr-bioinformatics/bioscripts","sub_path":"inserts.py","file_name":"inserts.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"}
+{"seq_id":"42657835211","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\ndef add_with_carry(val1, val2, carry):\n    helper = val1 + val2 + carry\n    if helper >= 10:\n        carry = helper // 10\n        helper = helper % 10\n    else:\n        carry = 0\n    return helper, carry\n    \n\nclass Solution: #using a class for this is weird and non-pythonic, but it was required on leetcode\n    def addTwoNumbers(self, l1, l2):\n        \"\"\"\n        :type l1: ListNode\n        :type l2: ListNode\n        :rtype: ListNode\n        \"\"\"\n        \n        carry = 0\n        root = True\n        \n        #will work if same size only\n        #l3 = ListNode()\n        \n        while (l1 and l2):\n            helper, carry = add_with_carry(l1.val, l2.val, carry)\n\n            new_node = ListNode(helper)\n\n            if root:\n                l3 = new_node\n                current_node = new_node\n            else:\n                current_node.next = new_node\n                current_node = new_node\n\n            l1 = l1.next\n            l2 = l2.next\n            root = False\n\n        while (l1 and not l2):\n            helper, carry = add_with_carry(l1.val, 0, carry)\n            new_node = ListNode(helper)\n            current_node.next = new_node\n            current_node = new_node\n            l1 = l1.next\n            root = False\n\n        while (l2 and not l1):\n            helper, carry = add_with_carry(0, l2.val, carry)\n            new_node = ListNode(helper)\n            current_node.next = new_node\n            current_node = new_node\n            l2 = l2.next\n            root = False\n\n        if (carry != 0 and l1 is None and l2 is None):\n            new_node = ListNode(carry)\n            current_node.next = new_node\n            #should be done here, no need to set current\n        \n        return l3\n","repo_name":"chrisspencer1013/fundamentals_practice","sub_path":"adding_two_linkedlists.py","file_name":"adding_two_linkedlists.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}+{"seq_id":"3229281256","text":"\"\"\"This Python script counts words in a Markdown / LaTeX document(s).\n\nUsage:\n    python3 count_words.py <filename>\n\nIt will ignore ATX headers, LaTeX & Markdown comments, and LaTeX markup tags.\n\n\"\"\"\nimport sys\nimport os\nimport re\n\n\ndef is_md(filename: str,\n          _ignored_files: set={'README.md', 'build.md'},\n          _legal_extensions: set={'.md', '.markdown', '.tex'},\n) -> bool:\n    \"\"\"\n    Return a boolean determining if the filename is a markdown file.\n\n    Args:\n        filename: the filename to validate\n\n        _legal_extensions: specific extensions to recognize as Markdown\n\n    Returns:\n        true if the filename is a markdown file, false otherwise\n\n    \"\"\"\n    # if the filename is in the Markdown black list, ignore it\n    if filename in _ignored_files:\n        return False\n    # iterate over the extensions in the accepted extensions\n    for markdown_extension in _legal_extensions:\n        if markdown_extension == filename[-len(markdown_extension):]:\n            return True\n    # the filename doesn't have a valid extension, return False\n    return False\n\n\ndef markdown_filenames(directory: str) -> list:\n    \"\"\"\n    Return a list of the filenames in the input directory.\n\n    Args:\n        directory: the input directory\n\n    Returns:\n        a list of the markdown files in the input directory.\n\n    \"\"\"\n    try:\n        # return a sorted list of the files in the given directory if they\n        # are legal markdown files\n        return sorted([file for file in os.listdir(directory) if is_md(file)])\n    except FileNotFoundError:\n        # catch a file not found error if the directory doesn't exist\n        print('{} does not exist!'.format(directory))\n        exit(1)\n\n\ndef clean_line(line: str) -> str:\n    \"\"\"\n    Clean a single line and return it.\n\n    Args:\n        line: the line of Markdown / LaTeX to clean\n\n    Returns:\n        a cleaned 
line of text\n\n    \"\"\"\n    # ignore latex comments, and markdown headers\n    if '%' in line[:1] or '#' in line[:1]:\n        # ignore LaTeX comment lines and Markdown header lines\n        return ''\n    # strip the line of all whitespace and new lines and append a single space\n    return line.rstrip() + ' '\n\n\ndef clean_contents(contents: str) -> str:\n    \"\"\"\n    Clean and return the contents of a LaTeX / Markdown file.\n\n    Args:\n        contents: the contents of the file to clean\n\n    Returns:\n        a file with all markup nonsense removed\n\n    \"\"\"\n    # remove the LaTeX comment blocks\n    contents = re.sub(r'\\\\begin{comment}.+?\\\\end{comment}', '', contents)\n    # remove the general LaTeX markup\n    contents = re.sub(r'\\\\.+?{.+?}(\\[.+?\\])?', '', contents)\n    # remove the markdown comments from the text\n    contents = re.sub(r'<!--.+?-->', '', contents)\n    # remove markdown links and figures\n    contents = re.sub(r'!?\\[.*\\](\\(.+?\\))?(:\\s+?.+)?', '', contents)\n    # remove markdown standard tables\n    contents = re.sub(r'\\|.*\\|', '', contents)\n    # return the clean text\n    return contents\n\n\ndef read_file(filename: str) -> str:\n    \"\"\"\n    Read the contents of a single file.\n\n    Args:\n        filename: the name of the file to read\n\n    Returns:\n        the string contents of the file\n\n    \"\"\"\n    if not is_md(filename):\n        raise ValueError('filename must have a valid markdown extension')\n    # initialize the contents to store from the file\n    contents = ''\n    # open the file into the contents one line at a time\n    with open(filename) as md_file:\n        # iterate over each line in the file and write it to the output\n        for line in md_file:\n            contents += clean_line(line)\n    # clean the entire contents of the file\n    contents = clean_contents(contents)\n\n    return contents\n\n\ndef read_dir(directory: str) -> str:\n    \"\"\"\n    Read the contents of every Markdown / LaTeX file in a directory.\n\n    Args:\n        directory: the name of the directory to read files from\n\n    Returns:\n        the concatenated contents of the files in the directory\n\n    \"\"\"\n    # initialize the contents to store from the files\n    contents = ''\n    # iterate over the files and collect their contents\n    for filename in markdown_filenames(directory):\n        contents += read_file('{}/{}'.format(directory, filename))\n\n    return contents\n\n\ndef read_contents(filename: str) -> str:\n    \"\"\"\n    Read the contents of a file or directory.\n\n    Args:\n        filename: the filename or directory to read\n\n    Returns:\n        the concatenated text from the file(s)\n\n    \"\"\"\n    # if it's a directory, enter it and read the files\n    if os.path.isdir(filename):\n        return read_dir(filename)\n    # otherwise read the file\n    return read_file(filename)\n\n\ndef words(contents: str) -> list:\n    \"\"\"\n    Return the list of words in a file's contents.\n\n    Args:\n        contents: the text to get the words in\n\n    Returns:\n        a list of all the words in the string\n\n    \"\"\"\n    return re.findall(r'\\w+', contents)\n\n\ndef get_filename() -> str:\n    \"\"\"\n    Get the filename from the command line.\n\n    Returns:\n        the first positional argument (the filename)\n\n    \"\"\"\n    try:\n        # try to get the filename\n        return sys.argv[1]\n    except IndexError:\n        # pass the exception along to the next level\n        raise ValueError('no filename positional argument!')\n\n\ndef _main():\n    \"\"\"Execute the code in this module as a standalone script.\"\"\"\n    try:\n        # get the filename from the command line\n        filename = get_filename()\n    except ValueError:\n        # print the usage information and exit\n        print(__doc__)\n        sys.exit(1)\n    # read the contents of the file\n    contents = read_contents(filename)\n    # 
split the contents into words and count them\n word_count = len(words(contents))\n # print the word count to the console\n print('{} words in {}'.format(word_count, filename))\n\n\nif __name__ == '__main__':\n _main()\n","repo_name":"Forecasting-using-ML/power-grid-cost-forecasting","sub_path":"paper/src/python/count_words.py","file_name":"count_words.py","file_ext":"py","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34824009207","text":"#!/usr/bin/env python3\nimport sys\nimport os\nimport pandas as pd\nimport argparse\nimport numpy as np\n### Extract of file Chromosome, Position, Added Windows\n### dic_list_inf : retur dictonnary by chromosome with widos\n### list_inf :return a dictionnary to save result\ndef CheckPosInLPos(listpos, pos):\n for lpos in listpos :\n if pos>=lpos[0] and pos<=lpos[1] :\n return True\n return False\n\ndef read_list_info(file_inf, ChroHeadInf,BpHeadInf, Wind):\n print(\"---- begin : read pos to search in \"+args.list_info+\"----\")\n read=open(file_inf)\n head=read.readline().replace('\\n','')\n spl=head.replace('\"','').split('\\t')\n poschro=spl.index(ChroHeadInf)\n posbp=spl.index(BpHeadInf)\n dic_list_inf={}\n list_inf={}\n for line in read :\n spl=line.replace('\"','').split('\\t')\n if spl[poschro] not in dic_list_inf :\n dic_list_inf[spl[poschro]]=[]\n list_inf[spl[poschro]]={}\n if spl[posbp]!='NA':\n postmp=int(float(spl[posbp]))\n dic_list_inf[spl[poschro]].append([postmp-Wind,postmp+Wind, postmp])\n list_inf[spl[poschro]][postmp]=None\n for chro in dic_list_inf.keys():\n dic_list_inf[chro].sort()\n print(\"---- end : read pos to search in \"+args.list_info+\"----\")\n return (dic_list_inf,list_inf)\n \ndef WriteGWASPlk(file_gwas,infopos, fileplkgwas, filesubgwas,args, infors, ResLD=None) :\n print(\"---- begin : format gwas for plk ----\")\n readgwas=open(file_gwas)\n headgwas=readgwas.readline()\n splhead=headgwas.replace('\\n','').split()\n ChroPosGwas=splhead.index(args.chro_header_gwas)\n BpPosGwas=splhead.index(args.bp_header_gwas)\n RsPosGwas=splhead.index(args.rs_header_gwas)\n PvPosGwas=splhead.index(args.pval_header_gwas)\n cmtline=0\n writegwasplk=open(fileplkgwas,'w')\n writegwassub=open(filesubgwas,'w')\n writegwasplk.write(\"SNP\\tP\\n\")\n listchro=infopos.keys()\n infogwas={}\n writegwas_linfo=open(args.out+'.in.list_info','w')\n writegwas_linfo.write(headgwas)\n writegwassub.write(headgwas)\n othergwaspval=[]\n for line in readgwas:\n spl=line.split()\n chro=spl[ChroPosGwas]\n if spl[BpPosGwas]=='NA':\n continue\n bp=int(float(spl[BpPosGwas]))\n rs=spl[RsPosGwas]\n Pv=spl[PvPosGwas]\n if (spl[ChroPosGwas] in listchro) :\n if bp in infors[chro] :\n infors[chro][bp]=[rs,spl[PvPosGwas]]\n writegwas_linfo.write(line)\n if ResLD :\n if chro not in infogwas :\n infogwas[chro]={}\n if CheckPosInLPos(infopos[chro], float(bp)) or CheckPosInLPos(ResLD[chro], float(bp)): \n infogwas[chro][bp]=[rs,Pv]\n writegwasplk.write(rs+\"\\t\"+Pv+\"\\n\")\n writegwassub.write(line)\n cmtline+=1\n else :\n othergwaspval.append(float(Pv))\n elif CheckPosInLPos(infopos[spl[ChroPosGwas]], float(bp)) :\n if chro not in infogwas :\n infogwas[chro]={}\n infogwas[chro][bp]=[rs,Pv]\n writegwasplk.write(spl[RsPosGwas]+\"\\t\"+Pv+\"\\n\")\n writegwassub.write(line)\n cmtline+=1\n readgwas.close()\n writegwasplk.close()\n writegwassub.close()\n print(\"---- end : format gwas for plk ----\")\n return (infogwas, othergwaspval)\n\n## What done : used position to analyse, to search in 
data_clump what is position most closest\n## write final \n## input :\n### info_pos : list of pos to anlyse\n### data_clum : data have been clumped\n### window : windows around each position \n###\n\ndef GetResByPos(infopos,data_clump, windows_size_kb, out) :\n print(\"---- begin :merge clump and dataI by pos ----\")\n Cmt=0\n listpos=[]\n listposi=[]\n listchro=[]\n for chro in infopos.keys() :\n data_clump_chr=data_clump[data_clump['CHR']==str(chro)]\n for info in infopos[chro] :\n pos=info[2]\n lmin=[abs(x-pos) for x in data_clump_chr['BP']]\n if len(lmin)>0 and min(lmin)=posbeg and pos[2]<=posend : \n posgood+=str(pos[2])+';' \n listpos_a.append(pos)\n if posgood!='':\n if chro not in dicldwind : \n dicldwind[chro]=[]\n infold=[posbeg,posend,posgood, listpos_a]\n #pos[3]=infold\n dicldwind[chro].append(infold)\n return dicldwind \n\nimport time\nimport random\n\nfrom multiprocessing import Process, Queue, current_process, freeze_support\ndef getminpvalrand(args,args2):\n return min(random.sample(args, args2))\n\ndef GetPvalAdj2(pval,listpvali,nsnp, nbrep, nbprocess):\n def calculate(func, args):\n result = func(*args)\n return result\n\n def worker(input, output):\n for func, args in iter(input.get, 'STOP'):\n result = calculate(func, args)\n output.put(result)\n\n TASKS1 = [(getminpvalrand, ([listpvali, nsnp])) for i in range(nbrep)]\n task_queue = Queue()\n done_queue = Queue()\n\n # Submit tasks\n for task in TASKS1:\n task_queue.put(task)\n for i in range(nbprocess):\n Process (target=worker, args=(task_queue, done_queue)).start()\n res=[]\n for i in range(len(TASKS1)):\n res.append(done_queue.get())\n for i in range(nbprocess):\n task_queue.put('STOP')\n\n return len([x for x in res if x<=pval])/float(nbrep)\n\ndef GetResByLDWind(ld_info, data_clump, infors,out, listpval, nbrepet, nbprocess):\n ## we extracted all ld_block with info_pos\n cmtpos=0\n print(' nbprocess '+str(nbprocess))\n print(\"---- begin :merge clump and dataI by block ----\")\n Header=\"Chro\\tBeginBlock\\tEndBlock\\tBPClump\\tPClump\\tPAdjOtherP\\tPWindAdjRand\\tRsClump\\tBPInfo\"\n Write=open(out+\".clump.ldbloc.detail\", 'w')\n Write.write(Header+'\\n')\n for chro in ld_info.keys() :\n data_clump_chr=data_clump[data_clump['CHR']==str(chro)]\n for info in ld_info[chro] :\n posbegin=int(info[0])\n posend=int(info[1])\n for index, row in data_clump_chr.iterrows():\n if int(row['BP'])>= posbegin and int(row['BP'])<=posend :\n TotalNbSnp=int(row['TOTAL'])\n PI=float(row['P'])\n #PvalAdjRandWind=GetPvalAdj2(PI,listpval,TotalNbSnp, nbrepet, nbprocess)\n PvalAdjRandWind=\"NA\"\n nbpossamp=int(len(listpval)/2)\n PvalAdj=len([x for x in random.sample(listpval,nbpossamp) if x max_year or start_year < min_year:\n raise ValueError('startYear must be value between {min} and {max}.'.format(min=min_year, max=max_year))\n\n # validate endYear\n try:\n end_year = int(end_year)\n except ValueError:\n raise ValueError('endYear must be valid integer.')\n\n if end_year > max_year or end_year < min_year:\n raise ValueError('endYear must be value between {min} and {max}.'.format(min=min_year, max=max_year))\n\n if start_year > end_year:\n raise ValueError(\"startYear must not be greater than endYear.\")\n\n # aggregate data\n data = list()\n\n for year in range(start_year, end_year+1):\n for topic in parsed_topics:\n tmp = fetch(topic, year)\n tmp = normalize_confs(tmp, topic, year)\n data.extend(tmp)\n return data\n\n\ndef fetch(topic, year):\n \"\"\"Fetches data from https://github.com/tech-conferences/confs.tech\n\n :type topic: 
str\n :type year: str, int\n \"\"\"\n\n # check if cached\n cache_key = f'raw-{year}-{topic}'\n cached = get_cache(cache_key)\n if cached is not None:\n return cached\n\n if topic not in list(topics.keys()):\n return []\n\n # fetch from api\n src = f'https://raw.githubusercontent.com/tech-conferences/conference-data/master/conferences/{year}/{topic}.json'\n r = requests.get(src)\n try:\n r.raise_for_status()\n confs = r.json()\n except requests.HTTPError:\n confs = []\n\n set_cache(cache_key, confs, fetch_cache)\n return confs\n","repo_name":"Fischerfredl/muperconfs","sub_path":"backend/lib/fetch_data.py","file_name":"fetch_data.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29531068458","text":"\nfrom perfkitbenchmarker.linux_packages import proxy\nfrom absl import flags\nimport json\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('docker_dist_repo', None,\n 'Path to the dockerce repository.')\nflags.DEFINE_string('docker_version', '20.10',\n 'Specify the docker version.')\nflags.DEFINE_list('docker_registry_mirrors', [],\n 'Specify the docker mirrors.')\n\n\ndef YumInstall(vm):\n repo = FLAGS.docker_dist_repo if FLAGS.docker_dist_repo else \"https://download.docker.com/linux/centos\"\n vm.InstallPackages(\"yum-utils device-mapper-persistent-data lvm2\")\n # Package for RHEL8 containerd.io does not yet exist - this is a workaround\n if vm.OS_TYPE == \"centos8\" or vm.OS_TYPE == \"rhel8\":\n cmd = \"sudo yum install -y \" + repo + \"/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm\"\n cmd += \" && sudo yum-config-manager --add-repo \" + repo + \"/docker-ce.repo\"\n else:\n cmd = 'sudo yum-config-manager --add-repo ' + repo + '/docker-ce.repo'\n vm.RemoteCommand(cmd)\n vm.InstallPackages('docker-ce')\n\n proxy.AddProxy(vm, \"docker\")\n AddConfig(vm)\n _AddUserToDockerGroup(vm)\n\n\ndef AptInstall(vm):\n repo = FLAGS.docker_dist_repo if FLAGS.docker_dist_repo else \"https://download.docker.com/linux/ubuntu\"\n vm.InstallPackages(\"apt-transport-https ca-certificates curl gnupg-agent software-properties-common\")\n vm.RemoteCommand(f'curl -fsSL {repo}/gpg | sudo apt-key add -')\n vm.RemoteCommand(f\"bash -c 'sudo -E add-apt-repository \\\"deb [arch=$(dpkg --print-architecture)] {repo} $(grep CODENAME /etc/lsb-release | cut -f2 -d=) stable\\\"'\")\n vm.AptUpdate()\n version, _ = vm.RemoteCommand(f\"sudo apt-cache madison docker-ce | grep {FLAGS.docker_version} | cut -f2 -d'|' | tr -d ' ' | sort -V -r | head -n 1\")\n vm.InstallPackages(f'docker-ce={version.strip()} --allow-change-held-packages')\n\n proxy.AddProxy(vm, \"docker\")\n AddConfig(vm)\n _AddUserToDockerGroup(vm)\n\n\ndef SwupdInstall(vm):\n vm.RemoteCommand(\"sudo swupd update\")\n vm.InstallPackages(\"containers-basic\")\n\n proxy.AddProxy(vm, \"docker\")\n AddConfig(vm)\n _AddUserToDockerGroup(vm)\n\n\ndef AddConfig(vm, config={}):\n config[\"exec-opts\"] = [\"native.cgroupdriver=systemd\"]\n if FLAGS.docker_registry_mirrors:\n config[\"registry-mirrors\"] = FLAGS.docker_registry_mirrors\n\n vm.RemoteCommand(f\"echo '{json.dumps(config)}' | sudo tee /etc/docker/daemon.json\")\n vm.RemoteCommand(f\"sudo systemctl daemon-reload\")\n vm.RemoteCommand(f\"sudo systemctl restart docker\")\n \n \ndef _AddUserToDockerGroup(vm):\n \"\"\"\n Add user to the docker group so docker commands can be executed without sudo\n \"\"\"\n vm.RemoteCommand(\"sudo usermod --append --groups docker {}\".format(vm.user_name))\n 
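# restart the daemon so the fresh docker group membership is visible to later commands\n    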
vm.RemoteCommand(\"sudo systemctl restart docker\")\n\n # SSH uses multiplexing to reuse connections without going through the SSH handshake\n # for a remote host. Typically we need to logout / login after adding the user to\n # the docker group as group memberships are evaluated at login.\n # See: https://docs.docker.com/engine/install/linux-postinstall/\n # This requirement along with the multiplexing causes subsequent docker commands run in the\n # reused session to fail with \"permission denied\" errors.\n # This command will cause the ssh multiplexing for this particular VM to stop causing the next\n # SSH command to the VM to restart a multiplex session with ControlMaster=auto. This new session\n # will start with docker group membership and will be able to execute docker commands without root.\n vm.RemoteCommand('', ssh_args = ['-O', 'stop'])\n\n\ndef IsDocker(vm):\n return \"docker_ce\" in vm._installed_packages\n\n\ndef Uninstall(vm):\n pass\n","repo_name":"przmk0/workload-services-framework","sub_path":"script/cumulus/pkb/perfkitbenchmarker/linux_packages/docker_ce.py","file_name":"docker_ce.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"6559167858","text":"import sys; address = \" \".join(sys.argv[1:])\nfrom geopy.geocoders import Nominatim\n\ndef main():\n geolocator = Nominatim(user_agent=\"WaterFinder\")\n print(address)\n location = geolocator.geocode(address)\n print(location.latitude, location.longitude)\n\nif __name__ == '__main__':\n main()","repo_name":"SharpGuanaco/Hack3Testing","sub_path":"GeoCoding.py","file_name":"GeoCoding.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20478276961","text":"\"\"\"\nTic Tac Toe Player\n\"\"\"\n\nimport math\nimport copy\n\nX = \"X\"\nO = \"O\"\nEMPTY = None\n\n\ndef initial_state():\n \"\"\"\n Returns starting state of the board.\n \"\"\"\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n\n\ndef player(board):\n \"\"\"\n Returns player who has the next turn on a board.\n \"\"\"\n count = 0\n for row in board:\n for line in row:\n if line == X or line == O:\n count += 1\n\n # If in middle of game\n count = count % 2\n if count == 0:\n return X\n else:\n return O\n\n raise NotImplementedError\n\n\ndef actions(board):\n \"\"\"\n Returns set of all possible actions (i, j) available on the board.\n \"\"\"\n actions = set()\n for k in range(3):\n for i in range(3):\n if board[k][i] == None:\n action = (k, i)\n actions.add(action)\n return actions\n\n\n\n raise NotImplementedError\n\n\ndef result(board, action):\n \"\"\"\n Returns the board that results from making move (i, j) on the board.\n \"\"\"\n if board[action[0]][action[1]] != None:\n raise Exception\n\n newBoard = copy.deepcopy(board)\n newBoard[action[0]][action[1]] = player(board)\n return newBoard\n\n\n raise NotImplementedError\n\n\ndef winner(board):\n \"\"\"\n Returns the winner of the game, if there is one.\n \"\"\"\n # horizontal\n for row in board:\n check = set(row)\n if len(check) == 1:\n winner = list(check)\n if winner[0] != None:\n return winner[0]\n\n\n # Vertical\n winner = set()\n for i in range(3):\n for k in range(3):\n winner.add(board[k][i])\n if len(winner) == 1:\n winner = list(winner)\n if winner[0] != None:\n return winner[0]\n else:\n winner = set(winner)\n winner.clear()\n else:\n winner.clear()\n\n # Diagonal\n if 
board[0][0] == board[1][1] == board[2][2] or board[0][2] == board[1][1] == board[2][0]:\n if board[1][1] != None:\n return board[1][1]\n\n\n return None\n raise NotImplementedError\n\n\ndef terminal(board):\n \"\"\"\n Returns True if game is over, False otherwise.\n \"\"\"\n # check for winner\n if winner(board) == None:\n # check if board is filled\n \n for row in board:\n if None in row:\n return False\n\n return True\n \n else:\n return True\n \n\n \n raise NotImplementedError\n\n\ndef utility(board):\n \"\"\"\n Returns 1 if X has won the game, -1 if O has won, 0 otherwise.\n \"\"\"\n if winner(board) == None:\n return 0\n elif winner(board) == 'X':\n return 1\n else:\n return -1\n\n raise NotImplementedError\n\n\ndef minimax(board):\n \"\"\"\n Returns the optimal action for the current player on the board.\n \"\"\"\n\n if terminal(board):\n return None\n\n if player(board) == 'X':\n values = []\n minScore = 99\n maxScore = -99\n for action in actions(board):\n newBoard = result(board, action)\n values.append(minValue(newBoard,minScore,maxScore))\n\n for i in range(len(values)):\n if max(values) == values[i]:\n bestAction = list(actions(board))\n bestAction = bestAction[i]\n return bestAction\n\n elif player(board) == 'O':\n values = []\n maxScore = -99\n minScore = 99\n for action in actions(board):\n newBoard = result(board,action)\n values.append(maxValue(newBoard, minScore,maxScore))\n\n for i in range(len(values)):\n if min(values) == values[i]:\n bestAction = list(actions(board))\n bestAction = bestAction[i]\n return bestAction\n\n\n\n\ndef maxValue(board, minScore, maxScore):\n if terminal(board):\n return utility(board)\n else:\n scores = []\n \n for action in actions(board):\n newBoard = result(board,action)\n score = minValue(newBoard, minScore, maxScore)\n\n # lowest score possible from this board\n if score > maxScore:\n maxScore = score\n if score > minScore:\n scores.append(score)\n break\n \n scores.append(score)\n return max(scores)\n\n\n\ndef minValue(board, minScore, maxScore):\n if terminal(board):\n return utility(board)\n else:\n scores = []\n\n for action in actions(board):\n newBoard = result(board,action)\n score = maxValue(newBoard, minScore, maxScore)\n\n # get the highest score achievable from this board\n if score < minScore:\n minScore = score\n if score < maxScore:\n scores.append(score)\n break\n\n scores.append(score)\n return min(scores)\n \n\n\n raise NotImplementedError\n","repo_name":"Seranie/CS50","sub_path":"tictactoe/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28889043309","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom rospy import loginfo, logwarn, logerr\nfrom motivational_model.classes.motivation import Motivation\nfrom motivational_model.classes.homeostaticvariable import HomeostaticVariable\nfrom motivational_model.classes.stimulus import Stimulus\nfrom motivational_model.classes.action import Action\nfrom motivational_model.classes.agent import Agent\nfrom motivational_model.classes.effect import Effect\nfrom motivational_model.classes.state import State\n\nclass RelateObject():\n\n def __get_int_list(self, related_var):\n \"\"\"\n Transforms str data from database into int lists\n @ related_var str: data variable\n - Splits str (TEXT) data from database\n - Checks all entries are digits\n - Converts all entries to int\n \"\"\"\n aux_var = list()\n x_list = list()\n\n if 
type(related_var) is unicode and \",\" in related_var:\n            aux_var = related_var.split(\",\")\n        elif bool(related_var):\n            aux_var.append(related_var)\n\n        for x in aux_var:\n\n            if x.isdigit():\n                x_list.append(x)\n            else:\n                logerr(\"related_var given is not correct\")\n        \n        related_var = map(int, x_list)\n\n        return related_var\n\n    def create_hv_list(self, hv, std, time_step=1, logging=False):\n        \"\"\"\n        Defining Homeostatic Variables and organizing them into a list\n        @ hv dict: homeostatic variable data from database\n        @ std dict: standard evolution parameter data from database\n        \"\"\"\n        hv_list = list()\n\n        for idx, i in enumerate(list(hv)):\n            for idy, std_var in enumerate(list(std)):\n                if std_var[\"id\"] == i[\"std_evol\"]:\n                    params_std = {\"te_id\": std_var['type'], \"slope\": std_var['slope'], \"tau\": std_var['tau'], \"step\": std_var['step']}\n                    break\n\n            hv_list.append(HomeostaticVariable(i['id'], str(i['name']), i['initial_value'], i['ideal_value'], i['upper_limit'], i['lower_limit'], i['satisfaction_time'], params_std, time_step, logging))\n\n        return hv_list\n\n    def create_mot_list(self, mot, homeostatic_variables, stimuli, agents, logging=False):\n        \"\"\"\n        Defining motivations and organizing them into a list\n        @ mot dict: motivation data from database\n        @ homeostatic_variables list: homeostatic variable objects list\n        @ stimuli list: stimulus objects\n        @ agents list: agent objects\n        \"\"\"\n        mot_list = list()\n        aux_var = None\n\n        for idx, i in enumerate(list(mot)):\n            aux_var = Motivation(i['id'], str(i['name']), i[\"threshold\"], logging)\n\n            for hv in homeostatic_variables:\n                if hv.get_id() in self.__get_int_list(i[\"related_hv\"]):\n                    aux_var.add_hv(hv)\n\n            for sti in stimuli:\n                if sti.get_id() in self.__get_int_list(i[\"related_sti\"]):\n                    aux_var.add_sti(sti)\n\n            for ag in agents:\n                if ag.get_id() in self.__get_int_list(i[\"related_ag\"]):\n                    aux_var.add_ag(ag)\n\n            mot_list.append(aux_var)\n        \n        return mot_list\n\n    def create_eff_list(self, effects, homeostatic_variables, std, con):\n        \"\"\"\n        Defining Effects and organizing them into a list\n        @ effects list: effect objects list\n        @ homeostatic_variables list: homeostatic variable objects list\n        @ std dict: standard evolution parameter data from database\n        @ con dict: constancy data from database\n        \"\"\"\n        eff_list = list()\n        aux_var = None\n        x_list = list()\n\n        for idx, i in enumerate(list(effects)):\n            for idy, std_var in enumerate(list(std)):\n                if std_var[\"id\"] == i[\"std_evol\"]:\n                    params_std = {\"te_id\": std_var['type'], \"slope\": std_var['slope'], \"tau\": std_var['tau'], \"step\": std_var['step']}\n                    break\n            \n            aux_var = Effect(i['id'], i['constancy'], params_std)\n            \n            for idw, con_var in enumerate(list(con)):\n                if con_var[\"id\"] == i[\"constancy\"]:\n                    i[\"constancy\"] = con_var[\"name\"]\n                    break\n\n            for hv in homeostatic_variables:\n                if hv.get_id() in self.__get_int_list(i[\"related_hv\"]):\n                    aux_var.add_hv(hv.get_id())\n\n            eff_list.append(aux_var)\n\n        return eff_list\n\n    def create_sta_list(self, sta, std, time_step=1, logging=False):\n        \"\"\"\n        Defining States and organizing them into a list\n        @ sta dict: state data from database\n        @ std dict: standard evolution parameter data from database\n        \"\"\"\n        sta_list = list()\n\n        for idx, i in enumerate(list(sta)):\n            for idy, std_var in enumerate(list(std)):\n                if std_var[\"id\"] == i[\"activation_evol\"]:\n                    params_act = {\"te_id\": std_var['type'], \"slope\": std_var['slope'], \"tau\": std_var['tau'], \"step\": std_var['step']}\n                if std_var[\"id\"] == 
i[\"deactivation_evol\"]:\n params_deact = {\"te_id\": std_var['type'], \"slope\": std_var['slope'], \"tau\": std_var['tau'], \"step\": std_var['step']}\n\n sta_list.append(State(i['id'], str(i['name']), i[\"related_ag\"], i[\"related_sti\"], params_act, params_deact, time_step, logging))\n\n return sta_list\n\n def create_sti_list(self, sti, states):\n \"\"\"\n Defining Stimuli and organizing them into a list\n @ sti dict: stimulus data from database\n @ states list: state objects list\n \"\"\"\n sti_list = list()\n aux_states = list()\n\n for idx, i in enumerate(list(sti)):\n aux_states = list()\n for sta in states:\n if sta.get_related_sti() == i[\"id\"]:\n aux_states.append(sta)\n if sta.get_id() == i[\"current_state\"]:\n current_stimuli_state = sta\n\n sti_list.append(Stimulus(i['id'], str(i['name']), current_stimuli_state, i['topic'], i['msg'], i['pkg']), aux_states)\n\n return sti_list\n\n def create_act_list(self, act, end_exo, effects, ag):\n \"\"\"\n Defining Actions and organizing them into a list\n @ act dict: action data from database\n @ end_exo dict: endogenous/exogenous data from database\n @ effects list: effect objects list\n @ con dict: constancy data from database\n \"\"\"\n act_list = list()\n aux_var = None\n x_list = list()\n\n for idx, i in enumerate(list(act)):\n aux_var = Action(i['id'], str(i['name']), i ['type'], i['related_ag'])\n\n for idy, e_var in enumerate(list(end_exo)):\n if e_var[\"id\"] == i[\"type\"]:\n i[\"type\"] = e_var[\"name\"]\n break\n\n for eff in effects:\n if eff.get_id() in self.__get_int_list(i[\"effects\"]):\n aux_var.add_eff(eff)\n \n act_list.append(aux_var)\n\n return act_list\n\n def create_ag_list(self, ag, states, actions, homeostatic_variables):\n \"\"\"\n Defining Agents and organizing them into a list\n @ ag dict: agent data from database\n @ states list: state objects list\n @ actions list: action objects list\n @ homeostatic_variables list: homeostatic variable objects list\n \"\"\"\n ag_list = list()\n aux_states = list()\n aux_actions = list()\n\n for idx, i in enumerate(list(ag)):\n aux_states = list()\n for sta in states:\n if sta.get_related_ag() == i[\"id\"]:\n aux_states.append(sta)\n if sta.get_id() == i[\"current_state\"]:\n current_agent_state = sta\n aux_actions = list()\n for act in actions:\n if i['id'] == act.get_related_ag():\n aux_actions.append(act)\n \n ag_list.append(Agent(i['id'], str(i['name']), current_agent_state, i['topic'], i['msg'], i['pkg'], aux_states, aux_actions))\n\n return ag_list","repo_name":"juanmolera/mini-robot-motivational-model","sub_path":"relateobject.py","file_name":"relateobject.py","file_ext":"py","file_size_in_byte":8213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74220677866","text":"from sanic import Request\nfrom shared import app\nfrom utils import render_template\nfrom helpers import statshelper, userhelper\nfrom objects import GameType, Team\nfrom helpers.statshelper import sentry_trace\nfrom db.models import SM5Game, Player, EntityEnds, LaserballGame\nfrom tortoise.expressions import F\nfrom sanic.log import logger\n\n@app.get(\"/stats\")\n@sentry_trace\nasync def stats(request: Request) -> str:\n logger.info(\"Loading stats page\")\n\n logger.debug(\"Loading general stats\")\n\n total_players = await Player.all().count()\n total_games = await SM5Game.all().count() + await LaserballGame.all().count()\n ranked_games = await SM5Game.filter(ranked=True).count() + await LaserballGame.filter(ranked=True).count()\n 
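# note: EntityEnds appears to hold one row per entity per game, so this counts player appearances rather than distinct games\n    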
total_games_played = await EntityEnds.all().count()\n ranking_accuracy = await statshelper.get_ranking_accuracy()\n\n logger.debug(\"Loading SM5 stats\")\n\n sm5_red_wins = await SM5Game.filter(winner=Team.RED, ranked=True).count()\n sm5_green_wins = await SM5Game.filter(winner=Team.GREEN, ranked=True).count()\n points_scored = await statshelper.get_points_scored()\n nukes_launched = await statshelper.get_nukes_launched()\n nukes_cancelled = await statshelper.get_nukes_cancelled()\n medic_hits = await statshelper.get_medic_hits()\n own_medic_hits = await statshelper.get_own_medic_hits()\n\n logger.debug(\"Loading SM5 role stats\")\n\n top_commanders = await statshelper.get_top_commanders()\n top_heavies = await statshelper.get_top_heavies()\n top_scouts = await statshelper.get_top_scouts()\n top_ammos = await statshelper.get_top_ammos()\n top_medics = await statshelper.get_top_medics()\n\n logger.debug(\"Loading laserball stats\")\n\n laserball_red_wins = await LaserballGame.filter(winner=Team.RED, ranked=True).count()\n laserball_blue_wins = await LaserballGame.filter(winner=Team.BLUE, ranked=True).count()\n goals_scored = await statshelper.get_goals_scored()\n assists = await statshelper.get_assists()\n passes = await statshelper.get_passes()\n steals = await statshelper.get_steals()\n clears = await statshelper.get_clears()\n blocks = await statshelper.get_blocks()\n\n logger.debug(\"Rendering stats page\")\n\n return await render_template(request,\n \"stats.html\",\n zip=zip,\n\n # general stats\n\n total_players=total_players,\n total_games=total_games,\n ranked_games=ranked_games,\n total_games_played=total_games_played,\n ranking_accuracy=ranking_accuracy,\n\n # sm5 stats\n \n sm5_red_wins=sm5_red_wins,\n sm5_green_wins=sm5_green_wins,\n points_scored=points_scored,\n nukes_launched=nukes_launched,\n nukes_cancelled=nukes_cancelled,\n medic_hits=medic_hits,\n own_medic_hits=own_medic_hits,\n\n # sm5 role stats\n\n top_commanders=top_commanders,\n top_heavies=top_heavies,\n top_scouts=top_scouts,\n top_ammos=top_ammos,\n top_medics=top_medics,\n\n # laserball stats\n\n laserball_red_wins=laserball_red_wins,\n laserball_blue_wins=laserball_blue_wins,\n goals_scored=goals_scored,\n assists=assists,\n passes=passes,\n steals=steals,\n clears=clears,\n blocks=blocks\n )","repo_name":"spookybear0/laserforce_ranking","sub_path":"handlers/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"18968159283","text":"# An episode should halt when either a) the cart successfully reaches the rightmost point, where pos = 0.6,\n# or b) 1000 actions have been taken.\n\n\n# TODO: Implement visualization of 100 episodes of mountain-car simulations, with a maximum of 1000 steps allowed per episode.\n# TODO: Illustration of mountain-car simulation status (normally visualized as a movie) with the curved line depicting the landscape and the oval denoting the mountain car.\n# TODO: Visualize the reward function\n\nfrom agent.critic import Critic\nfrom agent.actor import Actor\nfrom environment.environment import Environment\nimport yaml\nimport matplotlib.pyplot as plt\nfrom copy import copy\nfrom tqdm import tqdm # Progressbar\n\nconfig = yaml.full_load(open(\"configs/config.yml\"))\nenv_cfg = config[\"Environment\"]\nactor_cfg = config[\"Actor\"]\ncritic_cfg = config[\"Critic\"]\ntraining_cfg = config[\"Training\"]\n\n\ndef plot_learning(steps_per_episode):\n \"\"\"\n Plots 
the number of steps used in each episode during a full run of training\n    The step count should fall toward the minimum as the agent learns\n    \"\"\"\n    episode = [i for i in range(len(steps_per_episode))]\n    plt.plot(episode, steps_per_episode)\n    plt.xlabel(\"Episode number\")\n    plt.ylabel(\"Steps\")\n    plt.show()\n\n\ndef main():\n    \"\"\"\n    Sets the parameters for the Environment, Critic, and Actor according to the imported config file.\n    Creates an environment where a predefined number of episodes can be performed.\n    Instantiates an actor to keep track of the policy, and a critic to keep track of the value at each state\n    Runs a predefined number of episodes, creating a new simulation for each episode.\n    For each episode, the actor and the critic are updated according to the Actor-Critic model.\n    Finally, epsilon is set to zero, and a final simulation is run with the learned policy.\n    \"\"\"\n\n    env = Environment(env_cfg)\n    granularity = env_cfg[\"granularity\"]\n    critic = Critic(critic_cfg, granularity)\n    actor = Actor(actor_cfg)\n\n    episodes = training_cfg[\"number_of_episodes\"]\n    visualize_episodes = training_cfg[\"visualize_episodes\"]\n    steps_per_episode = []\n\n    for episode in tqdm(range(episodes), desc=f\"Playing {episodes} episodes\", colour='#39ff14'):\n        env.new_simulation()\n        path = []\n        positions = []\n        critic.reset_eli_dict()\n        actor.reset_eli_dict()\n        while not env.reached_top() and not env.reached_max_steps():\n            env.update_steps()\n            current_state = copy(env.get_state())\n            legal_actions = env.get_actions()\n            action = actor.get_action(\n                state=current_state, legal_actions=legal_actions)\n            path.append((str(current_state), str(action)))\n            reward = env.perform_action(action=action)\n\n            td_err = critic.compute_td_err(\n                current_state=current_state, next_state=env.get_state(), reward=reward)\n\n            # Previous states on the path are updated as well during the call to train() by eligibility traces\n            critic.train(state=current_state, td_error=td_err)\n            critic.update_eligs()\n\n            # Update actor beliefs on SAPs for all pairs seen thus far in the episode\n            for i, sap in enumerate(reversed(path)):\n                actor.update_eli_dict(\n                    state=str(sap[0]), action=str(sap[1]), i=i)\n                actor.update_policy_dict(\n                    state=str(sap[0]), action=str(sap[1]), td_err=td_err)\n\n            positions.append(env.get_position())\n\n        print(\"steps used in this episode\", env.steps)\n        if episode in visualize_episodes:\n            env.visualize_landscape(positions)\n        steps_per_episode.append(env.steps)\n\n    plot_learning(steps_per_episode)\n\n    # Enable history tracking to visualize final simulation\n    env.new_simulation()\n\n    print(f\"Actor final epsilon: {actor.epsilon}\")\n    actor.epsilon = 0  # Set exploration to 0\n    print(\"Attempting final simulation to show you how smart I am now\")\n    while not env.reached_top() and not env.reached_max_steps():\n        current_state = env.get_state()\n        legal_actions = env.get_actions()\n        action = actor.get_action(current_state, legal_actions)\n        env.perform_action(action)\n\n\nmain()\n","repo_name":"fredrikwaaler/AiProg3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}+{"seq_id":"69947784749","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 22 21:29:50 2020\n\n@author: hihyun\n\"\"\"\n\nimport sys\n#num=int(sys.stdin.readline())\nnum=int(input())\ng=[0]*(num*2+1)\n\ntotal=1\nfor i in range(num-1):\n    
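# running sum of the first num-1 odd numbers, so total ends at 1 + (num-1)**2\n    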
total+=(2*i+1)\n\n\n\n\nprint(((g[1])+1)%9901)","repo_name":"hyeinhyun/alg_prac","sub_path":"boj/1309_3.py","file_name":"1309_3.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33318050362","text":"import ics\nimport kontest\n\n\ndef make_ical(provider: str):\n cal = ics.Calendar()\n contest_data = kontest.get_provider_contests(provider)\n \n for contest in contest_data:\n cal.events.add(ics.Event(\n name=contest.name,\n begin=contest.start_time,\n end=contest.end_time,\n url=contest.url\n ))\n \n return cal\n\n\nif __name__ == '__main__':\n ical = make_ical(input(\"Contest: \"))\n print(ical)\n","repo_name":"uint0/contest-calendar","sub_path":"src/ical.py","file_name":"ical.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35685686078","text":"import threading\n\nimport sqlalchemy\nfrom sqlalchemy.engine.url import make_url\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.schema import MetaData\nfrom sqlalchemy import orm\nfrom sqlalchemy.orm.exc import UnmappedClassError\n\ndef _create_scoped_session(db, query_cls):\n session = orm.sessionmaker(autoflush=True, autocommit=False,\n query_cls=query_cls)\n return orm.scoped_session(session)\n\ndef _tablemaker(db):\n def make_sa_table(*args, **kwargs):\n if len(args) > 1 and isinstance(args[1], db.Column):\n args = (args[0], db.metadata) + args[1:]\n return sqlalchemy.Table(*args, **kwargs)\n\n return make_sa_table\n\ndef _include_sqlalchemy(db):\n for module in sqlalchemy, sqlalchemy.orm:\n for key in module.__all__:\n if not hasattr(db, key):\n setattr(db, key, getattr(module, key))\n db.Table = _tablemaker(db)\n db.event = sqlalchemy.event\n\n\nclass BaseQuery(orm.Query):\n\n def get_or_error(self, uid, error):\n \"\"\"Like :meth:`get` but raises an error if not found instead of\n returning `None`.\n \"\"\"\n rv = self.get(uid)\n if rv is None:\n if isinstance(error, Exception):\n raise error\n return error()\n return rv\n\n def first_or_error(self, error):\n \"\"\"Like :meth:`first` but raises an error if not found instead of\n returning `None`.\n \"\"\"\n rv = self.first()\n if rv is None:\n if isinstance(error, Exception):\n raise error\n return error()\n return rv\n\n def paginate(self, **kwargs):\n \"\"\"Paginate this results.\n Returns a :class:`Pagination` object.\n \"\"\"\n return Pagination(self, **kwargs)\n\n\nclass _QueryProperty(object):\n\n def __init__(self, db):\n self.db = db\n\n def __get__(self, obj, type):\n try:\n mapper = orm.class_mapper(type)\n if mapper:\n return type.query_class(mapper, session=self.db.session())\n except UnmappedClassError:\n return None\n\n\nclass EngineConnector(object):\n\n def __init__(self, sa_obj):\n self._sa_obj = sa_obj\n self._engine = None\n self._connected_for = None\n self._lock = threading.Lock()\n\n def get_engine(self):\n with self._lock:\n uri = self._sa_obj.uri\n info = self._sa_obj.info\n options = self._sa_obj.options\n echo = options.get('echo')\n if (uri, echo) == self._connected_for:\n return self._engine\n self._engine = engine = sqlalchemy.create_engine(info, **options)\n self._connected_for = (uri, echo)\n return engine\n\n\nclass Model(object):\n \"\"\"Baseclass for custom user models.\n \"\"\"\n\n #: the query class used. The :attr:`query` attribute is an instance of\n #: this class. 
By default a :class:`BaseQuery` is used.\n    query_class = BaseQuery\n\n    #: an instance of :attr:`query_class`. Can be used to query the database\n    #: for instances of this model.\n    query = None\n\n    def __iter__(self):\n        \"\"\"Returns an iterable that supports .next() so we can\n        do dict(sa_instance).\n        \"\"\"\n        for k in self.__dict__.keys():\n            if not k.startswith('_'):\n                yield (k, getattr(self, k))\n\n    def __repr__(self):\n        return '<%s>' % self.__class__.__name__\n\n\nclass SQLAlchemy(object):\n    \"\"\"This class is used to instantiate a SQLAlchemy connection to a database.\n    \"\"\"\n\n    def __init__(self, query_cls=BaseQuery):\n        self.uri = None\n        self.info = None\n        self.options = None\n        self.connector = None\n        self._engine_lock = threading.Lock()\n        self.session = _create_scoped_session(self, query_cls=query_cls)\n\n        self.Model = self.make_declarative_base()\n\n        _include_sqlalchemy(self)\n\n    def make_declarative_base(self):\n        \"\"\"Creates the declarative base.\"\"\"\n        base = declarative_base(cls=Model, name='Model')\n        base.db = self\n        base.query = _QueryProperty(self)\n        return base\n\n    def _cleanup_options(self, **kwargs):\n        options = dict([\n            (key, val)\n            for key, val in kwargs.items()\n            if val is not None\n        ])\n        return self._apply_driver_hacks(options)\n\n    def _apply_driver_hacks(self, options):\n        if self.info.drivername == 'mysql':\n            self.info.query.setdefault('charset', 'utf8')\n            options.setdefault('pool_size', 10)\n            options.setdefault('pool_recycle', 7200)\n\n        elif self.info.drivername == 'sqlite':\n            no_pool = options.get('pool_size') == 0\n            memory_based = self.info.database in (None, '', ':memory:')\n            if memory_based and no_pool:\n                raise ValueError(\n                    \"SQLite in-memory database with an empty pool\"\n                    \" (pool_size = 0) is not possible due to data loss.\"\n                )\n        return options\n\n    def configure(self, uri='sqlite://', app=None, echo=False, pool_size=None,\n                  pool_timeout=None, pool_recycle=None, convert_unicode=True):\n        self.uri = uri\n        self.info = make_url(uri)\n        self.options = self._cleanup_options(\n            echo = echo,\n            pool_size = pool_size,\n            pool_timeout = pool_timeout,\n            pool_recycle = pool_recycle,\n            convert_unicode = convert_unicode\n        )\n        self.session.configure(bind=self.engine)\n        self.Model.metadata.bind = self.engine\n\n    @property\n    def engine(self):\n        \"\"\"Gives access to the engine.\"\"\"\n        if self.info is None:\n            raise ValueError(\n                \"You must configure SQLAlchemy via .configure() method before\"\n                \" working with any connection to database.\"\n            )\n        with self._engine_lock:\n            connector = self.connector\n            if connector is None:\n                connector = EngineConnector(self)\n                self.connector = connector\n            return connector.get_engine()\n\n    @property\n    def metadata(self):\n        \"\"\"Proxy for Model.metadata\"\"\"\n        return self.Model.metadata\n\n    @property\n    def query(self):\n        \"\"\"Proxy for session.query\"\"\"\n        return self.session.query\n\n    def add(self, *args, **kwargs):\n        \"\"\"Proxy for session.add\"\"\"\n        return self.session.add(*args, **kwargs)\n\n    def add_all(self, *args, **kwargs):\n        \"\"\"Proxy for session.add_all\"\"\"\n        return self.session.add_all(*args, **kwargs)\n\n    def flush(self, *args, **kwargs):\n        \"\"\"Proxy for session.flush\"\"\"\n        return self.session.flush(*args, **kwargs)\n\n    def commit(self):\n        \"\"\"Proxy for session.commit\"\"\"\n        return self.session.commit()\n\n    def rollback(self):\n        \"\"\"Proxy for session.rollback\"\"\"\n        return self.session.rollback()\n\n    def create_all(self):\n        \"\"\"Creates all tables.\"\"\"\n        self.Model.metadata.create_all(bind=self.engine)\n\n    def 
drop_all(self):\n        \"\"\"Drops all tables.\"\"\"\n        self.Model.metadata.drop_all(bind=self.engine)\n\n    def reflect(self, meta=None):\n        \"\"\"Reflects tables from the database.\n        \"\"\"\n        meta = meta or MetaData()\n        meta.reflect(bind=self.engine)\n        return meta\n\n    def __repr__(self):\n        return \"<SQLAlchemy('{}')>\".format(\n            self.uri if self.uri is not None else ''\n        )\n","repo_name":"coyotevz/anviz-sync","sub_path":"anviz_sync/saw.py","file_name":"saw.py","file_ext":"py","file_size_in_byte":7525,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"}+{"seq_id":"42840550830","text":"from django.db import models\nfrom django.utils import timezone\nfrom datetime import datetime\nfrom staff.models import MyUser\n\nFINISHJOB=3\nMIDDLEPRI=1\n\n# Create your models here.\nclass ProInfo(models.Model):\n    province_choice = (\n        (11, '北京'),\n        (12, '天津'),\n        (13, '河北'),\n        (14, '山西'),\n        (15, '内蒙'),\n        (21, '辽宁'),\n        (22, '吉林'),\n        (23, '黑龙江'),\n        (31, '上海'),\n        (32, '江苏'),\n        (33, '浙江'),\n        (34, '安徽'),\n        (35, '福建'),\n        (36, '江西'),\n        (37, '山东'),\n        (41, '河南'),\n        (42, '湖北'),\n        (43, '湖南'),\n        (44, '广东'),\n        (45, '广西'),\n        (46, '海南'),\n        (50, '重庆'),\n        (51, '四川'),\n        (52, '贵州'),\n        (53, '云南'),\n        (54, '西藏'),\n        (61, '陕西'),\n        (62, '甘肃'),\n        (63, '青海'),\n        (64, '宁夏'),\n        (65, '新疆'),\n        (71, '台湾'),\n        (81, '香港'),\n        (82, '澳门'),\n        (91, '国外'),\n    )\n    province = models.PositiveSmallIntegerField('省市', choices = province_choice, default = 33)\n    pro_name = models.CharField('项目名称', max_length = 24, unique = True)\n    pro_ctime = models.DateField('创建时间', default = timezone.now)\n    pro_link = models.URLField('link')\n\n    def get_event_status(self):\n        for item in EventInfo.objects.filter(pro_id=self.id):\n            if not item.event_priority == FINISHJOB:\n                return False\n        return True\n    get_event_status.boolean = True\n    get_event_status.short_description = 'Done'\n\n    def __str__(self):\n        return self.pro_name\n\n    class Meta:\n        verbose_name = '1-项目信息'\n        verbose_name_plural = '1-项目信息'\n\n\nclass EventInfo(models.Model):\n    event_priority_choices = (\n        (0, '高'),\n        (MIDDLEPRI, '中'),\n        (2, '低'),\n        (FINISHJOB, '已完成'),\n        (4, '不处理'),\n    )\n    pro = models.ForeignKey(ProInfo, verbose_name = '所属项目', on_delete = models.PROTECT)\n    event_name = models.CharField(verbose_name = '事件名称', max_length=24)\n    event_priority = models.PositiveSmallIntegerField('优先级', choices = event_priority_choices, default = MIDDLEPRI)\n\n    def __str__(self):\n        return self.event_name\n\n    class Meta:\n        verbose_name = '2-事件信息'\n        verbose_name_plural = '2-事件信息'\n\n    def get_task_status(self):\n        finish = 0\n        total = 0\n        for item in TaskInfo.objects.filter(task_id_id=self.id):\n            total += 1\n            if item.task_status: finish += 1\n        return '{}/{}'.format(finish, total)\n    get_task_status.short_description = '任务状态'\n\n\nclass TaskInfo(models.Model):\n    task_priority_choices = (\n        (0, '高'),\n        (MIDDLEPRI, '中'),\n        (2, '低'),\n        (3, 'pass'),\n    )\n    task_id = models.ForeignKey(\n        EventInfo,\n        verbose_name = '所属事件',\n        on_delete = models.CASCADE\n    )\n    task_dealer = models.ForeignKey(\n        MyUser, to_field = 'num',\n        verbose_name = '处理人',\n        default = 824,\n        on_delete = models.DO_NOTHING\n    )\n    task_name = models.CharField(verbose_name = '任务名称', max_length=24)\n    task_status = models.BooleanField(verbose_name = '任务状态', default = False)\n    task_stime = models.DateField('开始时间', auto_now_add = True)\n    task_mtime = models.DateField(verbose_name = '修改时间', auto_now = True)\n    task_ftime = models.DateField(verbose_name = '计划完成时间')\n    task_nice = models.BooleanField(verbose_name = '按时', default = False, blank = True)\n    
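# optional free-text remark; default='' and blank=True keep it optional in admin forms\n    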
task_note = models.CharField('备注', max_length = 100, default='', blank=True)\n\n    def __str__(self):\n        return self.task_name\n\n    class Meta:\n        verbose_name = '3-任务信息'\n        verbose_name_plural = '3-任务信息'\n\n    def get_pro_name(self):\n        return self.task_id.pro\n    get_pro_name.short_description = '所属项目'\n\n","repo_name":"nizijing/Django-worknote","sub_path":"pro/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}+{"seq_id":"4307371545","text":"# solution without overlap\nimport numpy as np\n\ndef Count(Text,Pattern):\n    count = 0\n    lenText = len(Text)\n    lenPatt = len(Pattern)\n    listText = list(Text)\n    listPatt = list(Pattern)\n    for i in range(lenText-lenPatt+1):\n        if listText[i:i+lenPatt] == listPatt[0:lenPatt]:\n            count += 1\n            listText[i:i+lenPatt] = [\"X\"] * lenPatt\n    return count\n    \n#---------------------------------------------------#\n#---------------------------------------------------#\n#---------------------------------------------------#\n#---------------------------------------------------#\n\nsample2 = \"ACAACTATGCATACTATCGGGAACTATCCT\"\nsample3 = \"CGATATATCCATAG\"\npattern = \"ATA\"\ncountem = Count(sample3,pattern)\n\nallData = np.loadtxt('rosalind_ba1a.txt',dtype='str')\n\ntext = allData[0]\npattern = allData[1]\n\nprint( Count(text,pattern) )\n","repo_name":"teyter/bioinfo","sub_path":"rosalind/problem02/hacky.py","file_name":"hacky.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}+{"seq_id":"8284848493","text":"# Problem Statement\n# Given an array of sorted numbers and a target sum, find a pair in the array whose sum is equal to the given target.\n\n# Write a function to return the indices of the two numbers (i.e. 
the pair) such that they add up to the given target.\n\n# Example 1:\n\n# Input: [1, 2, 3, 4, 6], target=6\n# Output: [1, 3]\n# Explanation: The numbers at index 1 and 3 add up to 6: 2+4=6\n# Example 2:\n\n# Input: [2, 5, 9, 11], target=11\n# Output: [0, 2]\n# Explanation: The numbers at index 0 and 2 add up to 11: 2+9=11\n\n\n\n\ndef find_pair(arr, target):\n if not arr:\n return None\n start = 0\n end = len(arr)-1\n\n while start < end:\n pair_sum = arr[start] + arr[end]\n if pair_sum < target:\n start +=1\n elif pair_sum > target:\n end -= 1\n else:\n return [start, end]\n\n return [-1, -1]\n\n\nassert find_pair([1, 2, 3, 4, 6], target=6 ) == [1,3]\nassert find_pair([2, 5, 9, 11], target=11) == [0, 2]\nassert find_pair([1,2,3,4], 15) == [-1,-1]\nassert find_pair([1,2,3], 6) == [-1,-1]\n\n# Time complexity = O(n)\n# Space complexity = O(1)\n# where n is the number of element in the array \n\n\n\n# Hash table approach\ndef hash_find_pair(arr, target):\n if not arr:\n return None\n seen = dict()\n\n for i in range(len(arr)):\n if target - arr[i] in seen:\n return [seen[target - arr[i]], i]\n else:\n seen[arr[i]] = i\n return [-1,-1]\n\nassert hash_find_pair([1, 2, 3, 4, 6], target=6) == [1,3]\nassert hash_find_pair([2, 5, 9, 11], target=11) == [0, 2]\nassert hash_find_pair([1,2,3,4], 15) == [-1,-1]\nassert hash_find_pair([1,2,3], 6) == [-1,-1]\n\n# Time complexity = O(n)\n# Space complexity = O(n)\n# where n is the number of element in the array \n\n\n# using binary search\ndef binary_search(arr, num, start):\n # start = 0\n stop = len(arr)-1\n\n while start <= stop:\n mid = (start + stop)//2\n if num > arr[mid]:\n stop = mid-1\n elif num < arr[mid]:\n start = mid + 1\n else:\n return mid\n return None\n\ndef bin_search_find_pair(arr, target):\n for i in range(len(arr)):\n find_other_num = binary_search(arr, target-arr[i], i+1)\n \n if find_other_num:\n return [i, find_other_num]\n\n return [-1,-1]\n\nassert bin_search_find_pair([1, 2, 3, 4, 6], target=6) == [1,3]\nassert bin_search_find_pair([2, 5, 9, 11], target=11) == [0, 2]\nassert bin_search_find_pair([1,2,3,4], 15) == [-1,-1]\nassert bin_search_find_pair([1,2,3], 6) == [-1,-1]\nassert bin_search_find_pair([1, 2, 3, 4, 6], target=10) == [3,4]\n\n# Time complexity = O(nlog(n)) because we perform a binary search(log(n)) on all elements\n# Space complexity = O(1)\n# where n is the number of element in the array \n\n\n# using brute force\ndef brute_find_pair(arr, target):\n\n for i in range(len(arr)-1):\n for j in range(i+1, len(arr)):\n if arr[i] + arr[j] == target:\n return [i,j]\n \n return [-1,-1]\nassert brute_find_pair([1, 2, 3, 4, 6], target=6) == [1,3]\nassert brute_find_pair([2, 5, 9, 11], target=11) == [0, 2]\nassert brute_find_pair([1,2,3,4], 15) == [-1,-1]\nassert brute_find_pair([1,2,3], 6) == [-1,-1]\nassert brute_find_pair([1, 2, 3, 4, 6], target=10) == [3,4]\n\n# Time complexity = O(n^2)\n# Space complexity = O(1)\n# where n is the number of element in the array ","repo_name":"bolu-tife/Data-Structures-and-Algorithms","sub_path":"Two Pointers/pair_with_target_sum.py","file_name":"pair_with_target_sum.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70612523628","text":"from os import system\nfrom math import factorial\nsystem(\"cls\")\n'''\nУсловие:\nНа входной двери подъезда установлен кодовый замок, содержащий десять кнопок с цифрами от 0 до 9. Код содержит три цифры, которые нужно нажать одновременно. 
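Editor's note on the record above: binary_search has its comparisons inverted for an ascending array (num > arr[mid] must move start up, not stop down), so for example binary_search([1, 2, 3, 4, 6], 2, 0) returns None even though 2 is present; the record's asserts happen to pass anyway. bin_search_find_pair also tests the result with a truthiness check, which would reject a valid index 0. A hedged corrected sketch:

def binary_search(arr, num, start):
    """Return the index of num in ascending arr[start:], or None."""
    stop = len(arr) - 1
    while start <= stop:
        mid = (start + stop) // 2
        if num > arr[mid]:
            start = mid + 1  # target lies to the right of mid
        elif num < arr[mid]:
            stop = mid - 1   # target lies to the left of mid
        else:
            return mid
    return None

def bin_search_find_pair(arr, target):
    for i in range(len(arr)):
        j = binary_search(arr, target - arr[i], i + 1)
        if j is not None:  # explicit None check: index 0 is falsy but valid
            return [i, j]
    return [-1, -1]

assert binary_search([1, 2, 3, 4, 6], 2, 0) == 1
assert bin_search_find_pair([1, 2, 3, 4, 6], 6) == [1, 3]
assert bin_search_find_pair([2, 5, 9, 11], 11) == [0, 2]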
Какова вероятность того, что человек, не знающий код, откроет дверь с первой попытки?\n'''\n# Формула подсчета количества сочетаний по k элементов из множества n\n# C(nk) = n!/(k!*(n - k)!)\n\n\ndef f_combination(n, k):\n return (factorial(n) / (factorial(k) * factorial(n-k)))\n\n\n'''\n P(A)=m/n - общее количество исходов определяется кол-ом сочетаний из 10 возможных 3,\n а количество благоприятных исходов открытия замка 1\n'''\nm = 1 # m = 1\nn = f_combination(10, 3) # n = из \"10\" \"3\"\nP = m / n\nprint(\n f'\\n<<< Вероятность того, что замок откроется с первой попытки = {round(P*100,3)}% >>>')\n","repo_name":"YuriyOzornin/probabiality_theory_math_stat","sub_path":"DZ1/DZ1_2.py","file_name":"DZ1_2.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36686245026","text":"# Implement an algorithm that takes as input an array of distinct\n# elements and a size, and returns a subset of\n# the given size of the array elements. All subsets should be\n# equally likely. Return result in input array.\n\nimport random\n\ndef random_sampling(k, A):\n for i in range(k):\n # generate a random index in [i, len(A) - 1]\n r = random.randint(i, len(A) - 1)\n A[i], A[r] = A[r], A[i]\n\n# time complexity is O(k) to select the elements\n# runs in O(1) space\n\n","repo_name":"linkel/epi-python","sub_path":"Arrays/5.12-sampleoffline.py","file_name":"5.12-sampleoffline.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"37"} +{"seq_id":"10264656163","text":"import cv2\nimport numpy as np\n\nMIN_CONTOUR_AREA = 5000\nRECTANGLE_NUM_SIZE = 4\n\n\ndef auto_canny(image, sigma=0.33):\n # compute the median of the single channel pixel intensities\n v = np.median(image)\n # apply automatic Canny edge detection using the computed median\n lower = int(max(0, (1.0 - sigma) * v))\n upper = int(min(255, (1.0 + sigma) * v))\n edged = cv2.Canny(image, lower, upper)\n # return the edged image\n return edged\n\n\ndef find_biggest_contour_approx(contours, min_contour_area=MIN_CONTOUR_AREA):\n biggest_contour_approx = np.array([])\n max_area = 0\n for contour in contours:\n contour_area = cv2.contourArea(contour)\n if contour_area > min_contour_area:\n contour_perimeter = cv2.arcLength(contour, closed=True)\n contour_approx = cv2.approxPolyDP(contour, 0.02 * contour_perimeter, closed=True)\n if contour_area > max_area and len(contour_approx) == RECTANGLE_NUM_SIZE:\n max_area, biggest_contour_approx = contour_area, contour_approx\n return biggest_contour_approx, max_area\n\n\ndef reorder_contour_approx(contour):\n points = contour.reshape((4, 2))\n points_result = np.zeros((4, 1, 2), dtype=np.int32)\n add = points.sum(1)\n\n points_result[0] = points[np.argmin(add)]\n points_result[3] = points[np.argmax(add)]\n diff = np.diff(points, axis=1)\n points_result[1] = points[np.argmin(diff)]\n points_result[2] = points[np.argmax(diff)]\n\n return points_result\n\n\ndef remove_shadow(img):\n dilated_img = cv2.dilate(img, np.ones((5, 5), np.uint8))\n bg_img = cv2.medianBlur(dilated_img, 83)\n diff_img = 255 - cv2.absdiff(img, bg_img)\n norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)\n _, thr_img = cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC)\n cv2.normalize(thr_img, thr_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)\n _, thr_img = cv2.threshold(thr_img, 230, 255, 
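Editor's note on the Russian-language DZ1_2.py record above, summarized in English: a door lock has ten buttons and its code is three buttons pressed simultaneously, so order does not matter and the sample space is C(10, 3) = 10! / (3! * 7!) = 120 equally likely button triples. Exactly one triple opens the door, so P = m/n = 1/120, about 0.833%, matching the script's printed result. A one-line check with the standard library:

from math import comb  # Python 3.8+

assert comb(10, 3) == 120
print(f"P = 1/{comb(10, 3)} = {1 / comb(10, 3):.3%}")  # P = 1/120 = 0.833%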
cv2.THRESH_BINARY)\n return thr_img\n","repo_name":"khaidongduc/document-scanner","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11987800379","text":"from datetime import datetime\nimport tkinter\n\n\n#merge test\n\n\ndef gettime():\n \"\"\"\n test\n\n\n Returns: None\n\n \"\"\"\n timestr = datetime.now().strftime(\"%H:%M:%S\") # 获取当前的时间并转化为字符串\n lb.configure(text=timestr)\n time_datetype = datetime.strptime(timestr, \"%H:%M:%S\")\n root.after(5000, gettime) # 每隔1s调用函数gettime自身获取时间\n # 设置标识符\n flag = 0\n for i in timedict:\n temp = datetime.strptime(i, \"%H:%M:%S\")\n if time_datetype >= temp: # 这里会赋值多次,但是结果是可以的\n lb2.configure(text=timedict[i])\n flag = 1\n if flag == 1:\n flag = 0\n else:\n lb2.configure(text=\"睡觉\")\n\n# def speak(text):\n# #语音\n# import pyttsx3\n# speaker=pyttsx3.init()\n# speaker.setProperty('rate',120)\n# speaker.setProperty('volume',1)#音量\n# speaker.say(text)\n# speaker.runAndWait()\n# #音乐\n\n\nif __name__ == '__main__':\n timedict = {'08:00:00': '早餐', '08:30:00': '口语', '09:00:00': '数学', '11:30:00': '午饭', '12:00:00': '单词',\n '12:30:00': '午觉', '13:30:00': '英语', '16:00:00': '力扣', '17:00:00': '公考与国策', '18:00:00': '晚饭',\n '18:30:00': '休息', '19:00:00': '健身', '20:30:00': '洗澡', '21:00:00': '钢琴', '22:00:00': '阅读',\n '22:30:00': '回复消息', '23:00:00': '睡觉'}\n root = tkinter.Tk()\n root.title(\"时钟\")\n # root.iconbitmap('head.ico')\n\n lb = tkinter.Label(root, text='', fg='black', font=(\"黑体\", 80))\n lb2 = tkinter.Label(root, text='', fg='black', font=(\"黑体\", 60))\n\n lb.pack()\n lb2.pack()\n gettime()\n root.mainloop()\n","repo_name":"MaynardDavison/timmer","sub_path":"time_main.py","file_name":"time_main.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"43820832669","text":"import pygame\n\n\nclass DrawnLines:\n\n def __init__(self):\n\n self.line_list = []\n self.highlighted_line = None\n\n self.selected_line_list = []\n\n def add_line_to_line_list(self, line):\n\n self.line_list.append([line.x1,\n line.y1,\n line.x2,\n line.y2,\n line.m,\n line.c,\n line.is_inverted])\n\n def highlight_drawn_line_if_mouse_touches_line(self, x, y):\n\n self.highlighted_line = None\n\n for line in self.line_list:\n\n self.check_if_mouse_is_touching_line(x, y, line)\n\n def check_if_mouse_is_touching_line(self, x, y, line):\n\n x1 = line[0]\n y1 = line[1]\n x2 = line[2]\n y2 = line[3]\n\n is_in_range = self.check_if_mouse_is_between_both_ends_of_line(x1, y1, x2, y2, x, y)\n\n if not is_in_range:\n\n return\n\n is_inverted = line[6]\n\n x, y = self.invert_x_y_if_drawn_line_x_y_was_inverted(x, y, is_inverted)\n\n m = line[4]\n c = line[5]\n\n is_touching_line = self.check_if_distance_between_mouse_and_line_is_small(m, c, x, y)\n\n if is_touching_line:\n\n self.create_highlighted_line(x1, y1, x2, y2)\n\n @staticmethod\n def invert_x_y_if_drawn_line_x_y_was_inverted(x, y, is_inverted):\n\n if is_inverted:\n y, x = x, y\n\n return x, y\n\n @staticmethod\n def check_if_mouse_is_between_both_ends_of_line(x1, y1, x2, y2, x, y):\n\n small_x = min(x1, x2)\n big_x = max(x1, x2)\n\n is_in_range = small_x - 10 < x < big_x + 10\n\n if not is_in_range:\n return is_in_range\n\n small_y = min(y1, y2)\n big_y = max(y1, y2)\n\n is_in_range = small_y - 10 < y < big_y + 10\n\n return is_in_range\n\n @staticmethod\n def 
check_if_distance_between_mouse_and_line_is_small(m, c, x, y):\n\n c_mouse = y - m * x\n distance_to_line = abs(c - c_mouse)\n\n is_touching_line = distance_to_line < 10\n\n return is_touching_line\n\n def create_highlighted_line(self, x1, y1, x2, y2):\n\n self.highlighted_line = [x1, y1, x2, y2]\n\n def if_line_is_clicked_select_or_deselect_line_if_clicked(self):\n\n if self.highlighted_line is not None:\n\n is_selected = self.check_whether_line_is_already_selected()\n self.select_or_deselect_line(is_selected)\n\n def check_whether_line_is_already_selected(self):\n\n if self.selected_line_list.count(self.highlighted_line) == 1:\n is_selected = True\n\n else:\n is_selected = False\n\n return is_selected\n\n def select_or_deselect_line(self, is_selected):\n\n if is_selected:\n\n self.deselect_line()\n\n else:\n\n self.select_line()\n\n def deselect_line(self):\n\n self.selected_line_list.remove(self.highlighted_line)\n\n def select_line(self):\n\n self.selected_line_list.append(self.highlighted_line)\n\n def remove_selected_lines_if_typed_delete(self, event):\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_DELETE:\n self.remove_selected_lines()\n\n def remove_selected_lines(self):\n\n lines_to_remove = self.find_lines_in_line_list_equal_to_selected_lines()\n\n self.remove_lines_from_line_list(lines_to_remove)\n\n def find_lines_in_line_list_equal_to_selected_lines(self):\n\n lines_to_remove = []\n\n for selected_line in self.selected_line_list:\n\n for line in self.line_list:\n\n if selected_line == line[0:4]:\n\n lines_to_remove.append(line)\n\n return lines_to_remove\n\n def remove_lines_from_line_list(self, lines_to_remove):\n\n for line in lines_to_remove:\n\n self.line_list.remove(line)\n\n self.deselect_all_lines()\n\n def deselect_all_lines(self):\n\n self.selected_line_list = []\n\n\n\n# connected_lines\n# when last point of connected line is equal to first point of connected line --> polygon_list\n\n# Not that simple\n# branches\n\n# each connection create a new list of lines\n\n","repo_name":"PeterJBooth/DrawingUI","sub_path":"window_directory/drawing_surface/drawn_lines.py","file_name":"drawn_lines.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"42323247705","text":"\nfrom scipy.cluster.vq import *\nfrom numpy import *\n\n#import vlfeat as sift\n# from cyvlfeat import sift\nfrom PCV.localdescriptors import sift\n\nclass Vocabulary(object):\n def __init__(self, name):\n self.name = name\n self.voc = []\n self.idf = []\n self.trainingdata = []\n self.nbr_words = 0\n\n def train(self, featurefiles, k=100, subsampling=10):\n nbr_images = len(featurefiles)\n descr = []\n \n descr.append(sift.read_features_from_file(featurefiles[0])[1]) \n descriptors = descr[0]\n for i in range(1, nbr_images):\n descr.append(sift.read_features_from_file(featurefiles[i])[1]) \n descriptors = vstack((descriptors, descr[i]))\n\n self.voc,distortion = kmeans(descriptors[::subsampling,:],k,1)\n self.nbr_words = self.voc.shape[0]\n\n imwords = zeros((nbr_images, self.nbr_words))\n\n for i in range( nbr_images ):\n imwords[i] = self.project(descr[i])\n\n nbr_occurences = sum((imwords > 0)*1, axis=0)\n self.idf = log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )\n self.trainingdata = featurefiles\n\n def project(self, descriptors):\n\n imhist = zeros((self.nbr_words)) \n words, distance = vq(descriptors, self.voc) \n for w in words:\n imhist[w] += 1\n \n return 
imhist\n\n\n","repo_name":"anaf007/flask_cv","sub_path":"vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29424918218","text":"import os\nimport csv\nimport json\n\ncommittee_for_analysis = \"actblue\"\n\nDATAFILE_DIRECTORY = f\"../data/{committee_for_analysis}\"\nOUTFILE_DESTINATION = f\"../data/processed_data/contributions/consolidated_{committee_for_analysis}.csv\"\n# saving some file size by removing \"employer\", \"occupation\" for now, but\n# ...some version of this processed data may eventually need to include these\nRETAINED_FIELDS = [\"first_name\", \"last_name\", \"zip\", \"date\", \"amount\", \"cycle\"]\n\ndirect_file_field_mapping = {\n \"contributor_first_name\": \"first_name\",\n \"contributor_last_name\": \"last_name\",\n \"contribution_date\": \"date\",\n \"contribution_amount\": \"amount\",\n \"contributor_zip_code\": \"zip\",\n \"contribution_purpose_descrip\": \"memo_text\"\n}\n\n\ndef process_row(row, ccl_mappings, campaign_data, direct=False):\n if direct:\n for field in direct_file_field_mapping.keys():\n row[direct_file_field_mapping[field]] = row[field]\n\n row[\"zip\"] = row[\"zip\"][:5]\n row[\"cycle\"] = \"2022\"\n # On WinRed/ActBlue filings, contributions towards committees contain the destination committee in the memo text\n # (e.g. \"Earmarked for NRCC (C00075820)\"). Here, we'll want to filter out any refunds, etc (non-contributions)\n # and any rows where row is a contribution, but the committee isn't specified in parentheses (very rare)\n if \"Earmarked for \" not in row[\"memo_text\"] or \"(\" not in row[\"memo_text\"]:\n return None\n\n donor_id = row[\"first_name\"].lower() + \"~\" + \\\n row[\"last_name\"].lower() + \"~\" + row[\"zip\"]\n destination_committee = row[\"memo_text\"].split(\"(\")[1].strip(\")\")\n\n out_data = {k: v for k, v in row.items(\n ) if k in RETAINED_FIELDS and k in row.keys()}\n out_data[\"donor_id\"] = donor_id\n out_data[\"destination_committee\"] = destination_committee\n\n # For now, if the destination committee isn't associated with any particular campaign in\n # ...our mapping, we'll just ignore the row, since we won't be able to assign it to a candidate\n # ...for overlap analysis anyway. We'll definitely want to include these rows in other projects\n affiliated_campaign = ccl_mappings.get(destination_committee)\n if affiliated_campaign == None:\n return None\n else:\n out_data[\"destination_campaign\"] = affiliated_campaign\n\n if campaign_data.get(affiliated_campaign) == None:\n return None\n\n affiliated_campaign_cycles = str(campaign_data.get(\n affiliated_campaign)[\"all_cycles\"])\n\n # This ensures that our consolidated dataset only includes \"in-cycle\" contributions\n # (e.g. 
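Editor's note on the vocabulary.py record above (a bag-of-visual-words vocabulary): train() stacks SIFT descriptors from all feature files, k-means a subsample of them into k visual words, histograms each training image over those words with vq, and weights words by idf = log(N / (n_occurrences + 1)). A hedged usage sketch; the feature-file paths are hypothetical, and the call shapes just follow the record's own use of the PCV sift module:

featurefiles = ['img%02d.sift' % i for i in range(10)]  # hypothetical precomputed SIFT files

voc = Vocabulary('test_vocab')
voc.train(featurefiles, k=100, subsampling=10)

# Project one image's descriptors onto the vocabulary: a length-k word histogram.
locs, descr = sift.read_features_from_file(featurefiles[0])
hist = voc.project(descr)
weighted = hist * voc.idf  # tf-idf style weighting for retrieval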
we won't consider contributions to 2022 senate candidates during the 2020 cycle)\n # Including these contributions skews overlap data in a way that's misleading\n if str(row[\"cycle\"]) not in affiliated_campaign_cycles:\n return None\n else:\n out_data[\"cycle\"] = str(row[\"cycle\"])\n return out_data\n\n\ndef process_file(file, ccl_mappings, campaign_data, out_csv, direct=False):\n row_count = 0\n\n reader = csv.DictReader(file)\n rows_remaining = True\n while rows_remaining:\n try:\n current_row = next(reader)\n processed_row_data = process_row(\n current_row, ccl_mappings, campaign_data, direct)\n if processed_row_data:\n out_csv.writerow(processed_row_data)\n except StopIteration:\n rows_remaining = False\n\n if (row_count % 1000000 == 0):\n print(f\"Finished processing {row_count:,} rows\")\n row_count += 1\n\n\ndef main():\n with open(\"../data/processed_data/ccl_mapping.json\", \"r\") as f:\n ccl_mappings = json.load(f)\n\n with open(\"../data/processed_data/campaign_data.json\", \"r\") as f:\n campaign_data = json.load(f)\n\n with open(OUTFILE_DESTINATION, 'w') as out_file:\n all_fields = [\"first_name\", \"last_name\", \"zip\", \"date\", \"amount\",\n \"cycle\"] + [\"donor_id\", \"destination_committee\", \"destination_campaign\"]\n out_csv = csv.DictWriter(out_file, fieldnames=all_fields)\n out_csv.writeheader()\n\n winred_files = [x for x in os.listdir(\n DATAFILE_DIRECTORY) if \".csv\" in x]\n for file in sorted(winred_files):\n with open(DATAFILE_DIRECTORY + \"/\" + file, 'r') as f:\n print(f\"===== FILE: {file} =====\")\n\n direct = \"direct\" in file\n process_file(f, ccl_mappings, campaign_data, out_csv, direct)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Financial-Times/fec-donor-overlaps","sub_path":"scripts/process_contributions.py","file_name":"process_contributions.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"38"} +{"seq_id":"39159358476","text":"# Escribe un programa que solicite al usuario ingresar el nombre de un mes y determine cuántos días\n# tiene ese mes. 
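Editor's note on process_file in the contributions record above: the manual while/next() loop around csv.DictReader re-implements plain iteration, and because row_count is incremented after the progress check, the first message reports 0 rows. A hedged idiomatic sketch (it still relies on the record's process_row):

import csv

def process_file(file, ccl_mappings, campaign_data, out_csv, direct=False):
    for row_count, row in enumerate(csv.DictReader(file), start=1):
        processed = process_row(row, ccl_mappings, campaign_data, direct)
        if processed:
            out_csv.writerow(processed)
        if row_count % 1_000_000 == 0:
            print(f"Finished processing {row_count:,} rows")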
Utiliza estructuras condicionales para asociar cada mes con la cantidad\n# correspondiente de días y muestra un mensaje con el resultado.\nmeses31 = ['enero', 'marzo', 'diciembre', 'octubre', 'mayo', 'julio', 'agosto']\nmeses30 = ['noviembre', 'abril', 'septiembre', 'junio']\n\nmes = input(\"Ingresa el nombre del mes y te dare los dias: \")\nif mes.lower() in meses31:\n print(\"Tu mes tiene 31 dias\")\nif mes.lower() in meses30:\n print(\"Tu mes tiene 30 dias\")\nif (mes.lower() == 'febrero'):\n print(\"su mes tiene 28 o 29 dias.\")\n","repo_name":"Ewin24/python","sub_path":"taller1/ejercicio11.py","file_name":"ejercicio11.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"72040987952","text":"#!/usr/bin/python3\n\"\"\"A module that writes an Object to a text file,\n using a JSON representation\"\n\"\"\"\n\n\nimport json\n\n\ndef save_to_json_file(my_obj, filename):\n \"\"\" A function use json \"\"\"\n\n with open(filename, 'w', encoding='utf-8') as f:\n string = json.dumps(my_obj)\n f.write(string)\n","repo_name":"SSun97/holbertonschool-higher_level_programming","sub_path":"0x0B-python-input_output/5-save_to_json_file.py","file_name":"5-save_to_json_file.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27969020434","text":"def is_equal(c1,c2):\n if c1==\"(\" and c2==\")\":\n return True\n if c1==\"{\" and c2==\"}\":\n return True\n if c1==\"[\" and c2==\"]\":\n return True\n return False\n\n\ndef valid_paranthesis(s:str)->bool:\n st=[]\n for character in s:\n if (len(st)!=0):\n li =st[-1]\n if (is_equal(li, character)):\n st.pop()\n continue\n st.append(character)\n return len(st)==0\n\n\n","repo_name":"qalmaqihir/50_leetcode_problems_challenge","sub_path":"valid_paranthesis/valid_paranthesis.py","file_name":"valid_paranthesis.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22610448888","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('inseriscinumero', views.index, name='InserisciNumero'),\n path('contatti', views.contatti, name='Contatti'),\n path('chat/', views.chat, name='chat'),\n path('setcontatti//', views.setcontatti, name='setcontatti'),\n path('setcontatti/', views.setcontatti, name='setcontatti'),\n path('setcontatti/', views.setcontatti, name='setcontatti'),\n path('chatdebug//', views.chatdebug, name='chatdebug'),\n path('chatdebug/', views.chatdebug, name='chatdebug'),\n path('contattidebug/', views.contattidebug, name='contattidebug'),\n path('chat/update', views.update, name='update'),\n #path('bottone',views.contatti_bottone, name='bottone'),\n #path('button_listener/', views.button_listener, name='button_listener'),\n #path('button_listener/update_bottone', views.update_bottone, name='update_bottone')\n]\n","repo_name":"castiglioniwalter/aTelegram","sub_path":"aTelegram/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37753387027","text":"import argparse\n\nfrom msccl.language import *\nfrom msccl.topologies import *\nfrom msccl.language.collectives import Collective\n\nclass ReduceGather(Collective):\n def __init__(self, num_ranks, chunk_factor, inplace, groups):\n Collective.__init__(self, num_ranks, chunk_factor, inplace)\n self.groups = groups\n self.gpus_per_group = num_ranks // groups\n assert chunk_factor == 1, \"Only supports chunks == number of ranks\"\n\n def init_buffers(self):\n assert self.chunk_factor == 1\n rank_buffers = []\n chunks_per_node = self.num_ranks\n for r in range(self.num_ranks):\n input_buffer = [None] * self.gpus_per_group\n output_buffer = [None] * chunks_per_node\n for c in range(self.groups):\n input_buffer[c] = Chunk(r, c, -1, c)\n buffers = {Buffer.input : input_buffer, \n Buffer.output : output_buffer}\n rank_buffers.append(buffers)\n return rank_buffers\n \n\n def check(self, prog):\n expected_chunks = []\n for r in range(self.num_ranks):\n chunk = ReduceChunk([])\n for x in range(self.groups):\n y = r // self.groups\n next = y * self.groups + x\n chunk = chunk.reduce(Chunk(next, r % self.gpus_per_group))\n expected_chunks.append(chunk)\n\n correct = True\n for r in range(self.num_ranks):\n output = prog.buffers[r][Buffer.output]\n for c in range(self.num_ranks):\n chunk = output[c]\n if chunk is None or chunk != expected_chunks[c]:\n print(f'Rank {r} chunk {c} is incorrect should be {expected_chunks[c]} given {chunk}')\n correct = False\n return correct\n\n\ndef program(num_ranks, groups, instances, protocol):\n gpus_per_group = num_ranks // groups\n topology = fully_connected(num_ranks)\n chunk_factor = 1\n inplace = False\n collective = ReduceGather(num_ranks, chunk_factor, inplace, groups)\n\n with MSCCLProgram(\"reduce-gather\", topology, collective, instances, protocol, threadblock_policy=ThreadblockPolicy.manual):\n\n # Per group reduce scatter\n for y in range(groups):\n for x in range(gpus_per_group):\n output_index = y * groups + x\n input_index = x\n gpu = y * groups + (x+1) % gpus_per_group\n c = chunk(gpu, Buffer.input, input_index)\n # Use the input buffer to perform reduction across groups\n for x_ in range(1, gpus_per_group):\n c = c.reduce(y * groups + (x + 1 + x_) % gpus_per_group, Buffer.input, input_index, sendtb=0, recvtb=0, ch=0)\n # Copy reduced chunk into the output buffer\n c = c.send(c.rank, Buffer.output, output_index, sendtb=0, 
recvtb=0, ch=0)\n\n\n # Ring Allgather\n for r in range(num_ranks):\n c = chunk(r, Buffer.output, r)\n next = (r + 1) % num_ranks\n while next != r:\n c = c.send(next, Buffer.output, r, sendtb=1, recvtb=1, ch=1)\n next = (next + 1) % num_ranks\n\n Check()\n XML()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('num_ranks', type=int, help ='number of ranks')\n parser.add_argument('groups', type=int, help='number of reduction groups')\n parser.add_argument('--instances', type=int, default=1, help='number of instances')\n parser.add_argument('--protocol', type=str, default='Simple', \n choices=['Simple', 'LL', 'LL128'], help ='NCCL protocol')\n args = parser.parse_args()\n\n assert args.num_ranks % args.groups == 0\n\n program(args.num_ranks, args.groups, args.instances, args.protocol)\n","repo_name":"microsoft/msccl-tools","sub_path":"examples/mscclang/reducegather.py","file_name":"reducegather.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"38"} +{"seq_id":"26798165652","text":"def straight(ranks):\r\n if len(set(ranks))==5 and (max(ranks)-min(ranks)==4):\r\n return True\r\n return False\r\n \r\ndef flush(suits):\r\n if len(set(suits))==1:\r\n return True\r\n return False\r\n\r\ndef kind(n,ranks):\r\n for r in ranks:\r\n if ranks.count(r)==n:\r\n return r\r\n return None\r\n\r\ndef two_pair(ranks):\r\n highcard=kind(2,ranks)\r\n locard=kind(2,tuple(reversed(ranks)))\r\n\r\n if highcard!=locard:\r\n return (highcard, locard)\r\n return None\r\n\r\ndef card_ranks(hand):\r\n ranks=['--23456789TJQKA'.index(r) for r,s in hand]\r\n ranks.sort(reverse=True)\r\n return ranks\r\n\r\ndef card_suits(hand):\r\n return [s for r,s in hand]\r\n\r\ndef hand_rank(hand):\r\n ranks=card_ranks(hand)\r\n suits=card_suits(hand)\r\n\r\n if straight (ranks) and flush(suits):\r\n return (8,max(ranks))\r\n elif kind(4,ranks):\r\n return (7, kind(4, ranks), kind(1,ranks))\r\n elif kind (3, ranks) and kind (2, ranks):\r\n return (6, kind(3, ranks), kind(2, ranks))\r\n elif flush(suits):\r\n return (5, max(ranks))\r\n elif straight(ranks):\r\n return (4, max(ranks))\r\n elif kind(3,ranks):\r\n return(3, kind(3,ranks),kind(1,ranks))\r\n elif two_pair(ranks):\r\n return(2,kind(2,ranks),rank(2,sorted(ranks)),kind(1,ranks))\r\n elif kind(2,ranks):\r\n return (2, kind(2,ranks),kind(1,ranks))\r\n else:\r\n return max(ranks)\r\n\r\ndef poker(hands):\r\n return max(hands, key=hand_rank)\r\n\r\nhands=[]\r\nline = input(\"Enter first Hand:\\n\")\r\nwhile(line !=''):\r\n hands.append(tuple(line.split()))\r\n print(\"Enter new Hand\")\r\n line=input()\r\nprint(\"Winner is:\")\r\nprint(poker(hands))\r\n\r\nif __name__==\"__main__\":\r\n assert(straight([6,5,4,3,2])==True)\r\n assert(straight([6,5,5,3,2])==False)\r\n \r\n \r\n \r\n","repo_name":"Radhika10128/Projects","sub_path":"poker.py","file_name":"poker.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11406424033","text":"\"\"\"This is the base module for repository\"\"\"\n\nfrom typing import Optional, TypeVar, Union\n\nfrom fastapi.encoders import jsonable_encoder\nfrom pydantic import BaseModel\n\nfrom repos.db.database import Base\n\nModelType = TypeVar(\"ModelType\", bound=Base)\nCreateSchemaType = TypeVar(\"CreateSchemaType\", bound=BaseModel)\nUpdateSchemaType = TypeVar(\"UpdateSchemaType\", bound=BaseModel)\n\n\nclass BaseRepo:\n 
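Editor's note on the poker.py record above: in hand_rank, the two-pair branch calls rank(2, sorted(ranks)), a NameError (the helper is named kind, and two_pair already returns both pair values); the one-pair branch reuses rank tag 2, so one pair would compare equal to two pair; and the high-card fallback returns a bare int, which cannot be compared with the tuples under Python 3. A hedged sketch of the corrected low branches, with Norvig-style tags assumed (2 two pair, 1 one pair, 0 high card); it relies on the record's kind and two_pair helpers:

def low_hand_ranks(ranks):
    """Corrected tail of hand_rank for two pair / one pair / high card."""
    if two_pair(ranks):
        high_pair, low_pair = two_pair(ranks)
        return (2, high_pair, low_pair, kind(1, ranks))
    elif kind(2, ranks):
        return (1, kind(2, ranks), kind(1, ranks))
    else:
        return (0,) + tuple(ranks)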
\"\"\"This handles base repository operations\"\"\"\n\n def __init__(self, db) -> None:\n self.db = db\n\n def get(self, model: ModelType, id: str) -> Optional[ModelType]:\n \"\"\"Get object by id from DB\"\"\"\n return self.db.query(model).filter(model.id == id).first()\n\n def add(self, model: ModelType, obj_in: Union[dict, CreateSchemaType]) -> ModelType:\n \"\"\"Create an object into the DB\"\"\"\n db_obj = model(**obj_in)\n self.db.add(db_obj)\n self.db.commit()\n return db_obj\n\n def update(\n self, db_obj: ModelType, obj_in: Union[dict, UpdateSchemaType]\n ) -> ModelType:\n \"\"\"Update an object into the DB\"\"\"\n obj_data = jsonable_encoder(db_obj)\n if isinstance(obj_in, dict):\n update_data = obj_in\n else:\n update_data = obj_in.dict(exclude_unset=True)\n\n for field in obj_data:\n if field in update_data and not isinstance(getattr(db_obj, field), Base):\n setattr(db_obj, field, update_data[field])\n\n self.db.commit()\n self.db.refresh(db_obj)\n return db_obj\n","repo_name":"acs195/minesweeper","sub_path":"backend/app/repos/db/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"4270079990","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef normal_init(m):\n if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):\n m.weight.data.normal_(mean=0, std=0.02)\n if m.bias.data is not None:\n m.bias.data.zero_()\n\n\ndef reparametrize(mu, logvar):\n std = logvar.div(2).exp()\n eps = std.data.new(std.size()).normal_()\n return mu + std * eps\n\n\nclass View(nn.Module):\n def __init__(self, size):\n super(View, self).__init__()\n self.size = size\n\n def forward(self, tensor):\n return tensor.view(self.size)\n\n\nclass Encoder(nn.Module):\n def __init__(self, c_dim=10, nc=3, infodistil_mode=False):\n super(Encoder, self).__init__()\n self.c_dim = c_dim\n self.nc = nc\n self.infodistil_mode = infodistil_mode\n self.layer = nn.Sequential(\n nn.Conv2d(nc, 32, 4, 2, 1), # B, 32, 32, 32\n nn.ReLU(True),\n nn.Conv2d(32, 32, 4, 2, 1), # B, 32, 16, 16\n nn.ReLU(True),\n nn.Conv2d(32, 64, 4, 2, 1), # B, 64, 8, 8\n nn.ReLU(True),\n nn.Conv2d(64, 64, 4, 2, 1), # B, 64, 4, 4\n nn.ReLU(True),\n nn.Conv2d(64, 256, 4, 1), # B, 256, 1, 1\n nn.ReLU(True),\n View((-1, 256 * 1 * 1)), # B, 256\n nn.Linear(256, c_dim * 2), # B, c_dim*2\n )\n\n def forward(self, x):\n if self.infodistil_mode:\n x = x.add(1).div(2)\n if (x.size(2) > 64) or (x.size(3) > 64):\n x = F.adaptive_avg_pool2d(x, (64, 64))\n\n h = self.layer(x)\n return h\n\n\nclass Decoder(nn.Module):\n def __init__(self, c_dim=10, nc=3):\n super(Decoder, self).__init__()\n self.c_dim = c_dim\n self.nc = nc\n self.layer = nn.Sequential(\n nn.Linear(c_dim, 256), # B, 256\n View((-1, 256, 1, 1)), # B, 256, 1, 1\n nn.ReLU(True),\n nn.ConvTranspose2d(256, 64, 4), # B, 64, 4, 4\n nn.ReLU(True),\n nn.ConvTranspose2d(64, 64, 4, 2, 1), # B, 64, 8, 8\n nn.ReLU(True),\n nn.ConvTranspose2d(64, 32, 4, 2, 1), # B, 32, 16, 16\n nn.ReLU(True),\n nn.ConvTranspose2d(32, 32, 4, 2, 1), # B, 32, 32, 32\n nn.ReLU(True),\n nn.ConvTranspose2d(32, nc, 4, 2, 1), # B, nc, 64, 64\n )\n\n def forward(self, c):\n x = self.layer(c)\n return x\n\n\nclass Generator(nn.Module):\n def __init__(self, z_dim, size, nfilter=64, nfilter_max=512, **kwargs):\n super().__init__()\n self.z_dim = z_dim\n\n s0 = self.s0 = 4\n nf = self.nf = nfilter\n nf_max = self.nf_max = nfilter_max\n\n # Submodules\n 
nlayers = int(np.log2(size / s0))\n self.nf0 = min(nf_max, nf * 2 ** nlayers)\n\n self.fc = nn.Linear(z_dim, self.nf0 * s0 * s0)\n\n blocks = []\n for i in range(nlayers):\n nf0 = min(nf * 2 ** (nlayers - i), nf_max)\n nf1 = min(nf * 2 ** (nlayers - i - 1), nf_max)\n blocks += [\n ResnetBlock(nf0, nf1),\n nn.Upsample(scale_factor=2)\n ]\n\n blocks += [\n ResnetBlock(nf, nf),\n ]\n\n self.resnet = nn.Sequential(*blocks)\n self.conv_img = nn.Conv2d(nf, 3, 3, padding=1)\n\n def forward(self, z):\n batch_size = z.size(0)\n out = self.fc(z)\n out = out.view(batch_size, self.nf0, self.s0, self.s0)\n out = self.resnet(out)\n out = self.conv_img(out)\n out = torch.tanh(out)\n return out\n\n\nclass Discriminator(nn.Module):\n def __init__(self, z_dim, size, nfilter=64, nfilter_max=512):\n super().__init__()\n s0 = self.s0 = 4\n nf = self.nf = nfilter\n nf_max = self.nf_max = nfilter_max\n\n # Submodules\n nlayers = int(np.log2(size / s0))\n self.nf0 = min(nf_max, nf * 2 ** nlayers)\n\n blocks = [\n ResnetBlock(nf, nf)\n ]\n\n for i in range(nlayers):\n nf0 = min(nf * 2 ** i, nf_max)\n nf1 = min(nf * 2 ** (i + 1), nf_max)\n blocks += [\n nn.AvgPool2d(3, stride=2, padding=1),\n ResnetBlock(nf0, nf1),\n ]\n\n self.conv_img = nn.Conv2d(3, 1 * nf, 3, padding=1)\n self.resnet = nn.Sequential(*blocks)\n self.fc = nn.Linear(self.nf0 * s0 * s0, 1)\n\n def forward(self, x):\n batch_size = x.size(0)\n out = self.conv_img(x)\n out = self.resnet(out)\n out = out.view(batch_size, self.nf0 * self.s0 * self.s0)\n out = self.fc(out)\n return out\n\n\nclass ResnetBlock(nn.Module):\n def __init__(self, fin, fout, fhidden=None, is_bias=True):\n super().__init__()\n # Attributes\n self.is_bias = is_bias\n self.learned_shortcut = (fin != fout)\n self.fin = fin\n self.fout = fout\n if fhidden is None:\n self.fhidden = min(fin, fout)\n else:\n self.fhidden = fhidden\n\n # Submodules\n self.conv1 = nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(self.fhidden)\n self.nonlin1 = nn.LeakyReLU()\n self.conv2 = nn.Conv2d(self.fhidden, self.fout, 3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(self.fout)\n self.nonlin2 = nn.LeakyReLU()\n self.bypass = fin == fout\n\n def forward(self, x):\n dx = self.conv1(x)\n dx = self.bn1(dx)\n dx = self.nonlin1(dx)\n dx = self.conv2(dx)\n dx = self.bn2(dx)\n out = self.nonlin2(dx)\n if self.bypass:\n out = x + out\n\n return out\n\n\nclass BetaVAE_H(nn.Module):\n \"\"\"Model proposed in original beta-VAE paper(Higgins et al, ICLR, 2017).\"\"\"\n\n def __init__(self, c_dim=10, channels=3, infodistil_mode=False):\n super(BetaVAE_H, self).__init__()\n self.c_dim = c_dim\n self.channels = channels\n self.encoder = Encoder(c_dim, channels, infodistil_mode)\n self.decoder = Decoder(c_dim, channels)\n self.apply(normal_init)\n\n def forward(self, x=None, c=None, encode_only=False, decode_only=False):\n assert int(encode_only) + int(decode_only) != 2\n if encode_only:\n c, mu, logvar = self._encode(x)\n return c, mu, logvar\n elif decode_only:\n x_recon = self._decode(c)\n return x_recon\n else:\n c, mu, logvar = self._encode(x)\n x_recon = self._decode(c)\n return x_recon, c, mu, logvar\n\n def _encode(self, x):\n distributions = self.encoder(x)\n mu = distributions[:, :self.c_dim]\n logvar = distributions[:, self.c_dim:]\n c = reparametrize(mu, logvar)\n return c, mu, logvar\n\n def _decode(self, c):\n return 
self.decoder(c)\n","repo_name":"MarQuisCheshire/idgan","sub_path":"models/mini_models.py","file_name":"mini_models.py","file_ext":"py","file_size_in_byte":6906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"6808441934","text":"#!/usr/bin/env python3\nimport datetime\nimport bitcoin\nfrom sys import argv\nimport csv\nfrom butype import *\nfrom butypes import *\nfrom appmaker import make_app\n\napp, db=make_app(fresh=False)\n\nbu_developer=\"theZerg\"\nbu_president=\"solex\"\nbu_secretary=\"Peter_R\"\n\ncsv_file=open(argv[1], \"r\")\n\ndef date2epoch(s):\n if s == \"never\":\n return 0.0\n return datetime.datetime.strptime(s, \"%d-%b-%y\").timestamp()\n\n\nentries = []\nmembers = []\nfor row in csv.reader(csv_file):\n try:\n member_idx =int(row[0])\n\n nick = (row[1]\n .replace(\" \", \"_\") # sorry ...\n ).replace(\".\", \"_\") # and sorry again ..\n\n joined = date2epoch(row[3])\n app_text = row[4]\n\n addr = row[5].strip()\n bitcoin.b58check_to_hex(addr)\n\n last_vote = date2epoch(row[6])\n\n last_action = max(joined, last_vote)\n\n print(\"Adding:\", nick, addr, \"?\")\n\n while True:\n m = Member.by_name(nick)\n b = Member.by_address(addr)\n if m is not None:\n print(\"by name:\", m.name, m.address)\n\n if b is not None:\n print(\"by address:\", b.name, b.address)\n\n if m != b:\n print(\"XXXX Change in address, adding _!!\")\n nick+=\"_\"\n else:\n break\n\n if m is None and b is None:\n print(\"add!\")\n m = Member(nick, addr)\n Global.set_member_last_vote_time(m, last_action)\n members.append(m)\n db.session.add(m)\n else:\n m = Member.by_name(nick)\n Global.set_member_last_vote_time(m, last_action)\n members.append(m)\n print(\"Not adding %s as in db already\" % m.name)\n\n entries.append((nick, addr, last_action))\n except Exception as e:\n print (\"XXXX Not enough data:\", row[1], e)\n\nif 1:\n developer=Member.by_name(bu_developer)\n president=Member.by_name(bu_president)\n secretary=Member.by_name(bu_secretary)\n\n print (\"dev:\", developer)\n print (\"president:\", president)\n print (\"secretary:\", secretary)\n\n ml = MemberList(\n members = members,\n secretary = secretary,\n president = president,\n developer = developer)\n\n Global.set_votemaster_rules([\"secretary\", \"president\"])\n\n db.session.add(ml)\n Global.set_current_member_list(ml)\n db.session.commit()\n","repo_name":"BitcoinUnlimited/BitcoinUnlimitedVotingWebService","sub_path":"import_members_from_csv.py","file_name":"import_members_from_csv.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"6816280756","text":"# -*- coding: utf-8 -*-\n\n###############################################################################\n# This source file is part of the Tomviz project, https://tomviz.org/.\n# It is released under the 3-Clause BSD License, see \"LICENSE\".\n###############################################################################\nimport inspect\nimport pkgutil\n\nimport tomviz.io\nimport tomviz.io.formats\n\n\ndef list_python_readers():\n\n readers = []\n for importer, name, _ in pkgutil.iter_modules(tomviz.io.formats.__path__,\n prefix='tomviz.io.formats.'):\n m = importer.find_spec(name).loader.load_module()\n for _, c in inspect.getmembers(m, inspect.isclass):\n if inspect.getmodule(c) is m:\n if issubclass(c, tomviz.io.Reader):\n readers.append(\n [\n c.file_type().display_name,\n c.file_type().extensions,\n c\n ]\n )\n return 
readers\n\n\ndef create_reader_instance(reader_class):\n return reader_class()\n\n\ndef execute_reader(obj, path):\n return obj.read(path)\n\n\ndef list_python_writers():\n writers = []\n for importer, name, _ in pkgutil.iter_modules(tomviz.io.formats.__path__,\n prefix='tomviz.io.formats.'):\n m = importer.find_spec(name).loader.load_module()\n for _, c in inspect.getmembers(m, inspect.isclass):\n if inspect.getmodule(c) is m:\n if issubclass(c, tomviz.io.Writer):\n writers.append(\n [\n c.file_type().display_name,\n c.file_type().extensions,\n c\n ]\n )\n return writers\n\n\ndef create_writer_instance(writer_class):\n return writer_class()\n\n\ndef execute_writer(obj, path, data):\n obj.write(path, data)\n","repo_name":"OpenChemistry/tomviz","sub_path":"tomviz/python/tomviz/io/_internal.py","file_name":"_internal.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":304,"dataset":"github-code","pt":"38"} +{"seq_id":"71993082349","text":"import os\nimport requests\nimport sys\n\n\ndef handle(req):\n \"\"\"handle a request to the function\n Args:\n req (str): request body\n \"\"\"\n\n # uses a default of \"gateway\" for when \"gateway_hostname\" is not set\n gateway_hostname = os.getenv(\"gateway_hostname\", \"gateway\")\n result = req\n for i in (1, 2):\n\n resp = requests.get(\"http://\" + gateway_hostname + \":8080/function/appender\" + str(i), data=result)\n\n if resp.status_code != 200:\n sys.exit(\"Error with appender%d, expected: %d, got: %d\\n\" % (i, 200, resp.status_code))\n\n result = resp.text\n\n return result\n","repo_name":"simonefalvo/openfaas-functions","sub_path":"appendflow/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1327790270","text":"import matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\n\nimport numpy as np\nimport h5py\n\nfrom compute_similarity import Similarity\n\ndef _qa(trace, thresh=30):\n crossings = np.diff((trace > thresh).astype('int'))\n return np.sum(crossings == 1) # It has positive and negative ones for falling/rising edges\n\ndef draw_scale(ax, center=(0, 0), x_len=1, y_len=1, x_unit=None, y_unit=None):\n x, y = center\n ax.plot([0+x, 0+x], [0+y, y_len+y], color='k')\n ax.plot([0+x, x_len+x], [0+y, 0+y], color='k')\n\n if x_unit is not None:\n ax.text(x, y-29, '{} {}'.format(x_len, x_unit), fontsize=6)\n\n if y_unit is not None:\n ax.text(x-7, y, '{} {}'.format(y_len, y_unit), rotation=90, fontsize=6, verticalalignment='bottom')\n\n\nif __name__ == '__main__':\n\n FILE_i = 0\n\n NTRACES = 2\n NMODELS = 3\n\n fig = plt.figure(figsize=(NTRACES*4, NMODELS))\n\n heights = [0.2, 1] * NMODELS\n top_gs = gridspec.GridSpec(NMODELS*2, 2, figure=fig, hspace=0.5, height_ratios=heights, wspace=0)\n # top_gs = gridspec.GridSpec(1, NMODELS, figure=fig, hspace=0.5)\n\n simpredfiles = [\n h5py.File(x, 'r') for x in (\n '/data/izhi/izhi_4pv6c-ML693-izhi_4pv6c/{}/cellRegr.sim.pred.h5'.format(FILE_i),\n # '/data/izhi/hh_ballstick_4pEv3-ML693-hh_ballstick_4pEv3/{}/cellRegr.sim.pred.h5'.format(FILE_i),\n # '/data/izhi/hh_ballstick_4pHv3-ML693-hh_ballstick_4pHv3/{}/cellRegr.sim.pred.h5'.format(FILE_i),\n '/data/izhi/hh_ballstick_7pv3-ML693-hh_ballstick_7pv3/{}/cellRegr.sim.pred.h5'.format(FILE_i),\n )\n ]\n models = [\n 'izhi',\n # 'hh_ball_stick_4param_easy',\n # 'hh_ball_stick_4param_hard',\n 'hh_ball_stick_7param_latched',\n ]\n modelnames = ['Izhikevich',\n 
# 'Hodgkin-Huxley 4 param (easy)', 'Hodgkin-Huxley 4 param (hard)',\n 'Hodgkin-Huxley 7 param']\n # dist_cutoffs = [(.0001, .0013), (.0008, .0015), (.003, .05)]\n # dist_cutoffs = [(1, 20), (2.0, 7.5), (2.0, 7.5), (5, 5)]\n # dist_cutoffs = [(.01, .064), (.01, .104), (.01, .059), (.03, .188)] # mse params\n dist_cutoffs = [(750, 1400),\n # (2427, 3121), (2406, 3202),\n (2324, 3267)] # mse traces\n \n # good_traces = [1, 7, 6, 2]\n # bad_traces = [23, 25, 25, 25]\n \n good_traces = [97,\n # 6, 146,\n 291] \n bad_traces = [38,\n # 20, 22,\n 110]\n\n for model_i, (model, pname, infile, (d1, d2), good_tr_i, bad_tr_i) in enumerate(zip(models, modelnames, simpredfiles, dist_cutoffs, good_traces, bad_traces)):\n trace_gs = gridspec.GridSpecFromSubplotSpec(1, NTRACES, subplot_spec=top_gs[model_i*2+1, :], wspace=0)\n print(model)\n sim = Similarity(model, 'stims/chirp23a.csv')\n t_axis = np.arange(0, 0.02*9000, 0.02)\n\n ax_i = -1 # within each model, how many have we plotted\n trace_i = -1 # within each file, how many traces have we checked\n prev_ax = None\n\n mindist, maxdist = 999, -1\n min_i, max_i = None, None\n nsamples = infile['trace2D'].shape[0]\n \n # for trace_i in range(NTRACES):\n \n # while ax_i < NTRACES-1:\n # trace_i += 1\n # if trace_i > nsamples-1:\n # break\n\n for trace_i, col in zip((good_tr_i, bad_tr_i), (('lime', 'green'),('cyan', 'blue'))):\n ax_i += 1\n \n v_truth = infile['trace2D'][trace_i, ...].squeeze()\n unit_pred = infile['unitPred2D'][trace_i, ...]\n unit_truth = infile['unitTruth2D'][trace_i, ...]\n phys_par = infile['physPred2D'][trace_i, ...]\n v_pred = sim._data_for(*phys_par)\n\n if not _qa(v_truth) > 1 or not _qa(v_pred) > 1:\n print(\"ONE FAILED QA\")\n continue\n\n # dist = sim._similarity(v_truth, v_pred)\n # dist = np.sqrt(sum( (x-y)**2 for (x, y) in zip(unit_pred, unit_truth) ))\n dist = np.sqrt(sum( (x-y)**2 for (x, y) in zip(v_pred, v_truth) ))\n # print(dist)\n if dist < mindist:\n mindist = dist\n min_i = trace_i\n elif dist > maxdist:\n maxdist = dist\n max_i = trace_i\n \n # if dist < d1:\n # print(\"got one\")\n # ax_i += 1\n # elif dist > d2:\n # if dist > d2:\n # ax_i += 1\n # else:\n # continue\n\n ax = plt.subplot(trace_gs[:, ax_i], sharex=prev_ax)#, sharey=prev_ax)\n ax.axis('off')\n \n ax.plot(t_axis, v_truth, color='k', linewidth=0.5, label='True')\n # ax.plot(t_axis, v_pred, color='red', linewidth=0.5, label='Predicted', linestyle='--')\n\n # ax.text(80, 0, \"trace #{}\".format(trace_i))\n\n # if model_i == 2:\n if ax_i == 0 and model_i == 0:\n draw_scale(ax, center=(160, -30), x_len=10, y_len=40, x_unit='ms', y_unit='mV')\n\n # if trace_i == NTRACES-1:\n # ax.set_xlabel('Time (ms)')\n # else:\n # ax.set_xticklabels([])\n\n if ax_i == 0:\n # ax.set_title(pname)\n title_gs = gridspec.GridSpecFromSubplotSpec(\n 1, 2, subplot_spec=top_gs[2*model_i, :])\n title_ax = plt.subplot(title_gs[:])\n title_ax.axis('off')\n title_ax.text(0.5, 0, pname, horizontalalignment='center',\n verticalalignment='center', transform=title_ax.transAxes)\n\n if ax_i == NTRACES-1 and model_i == 0:\n ax.legend(bbox_to_anchor=(0.8, 0.9), loc=2, prop={'size': 6}, frameon=False)\n\n if model_i == NMODELS-1:\n ax.text(0.5, -0.3, \"Similar\" if ax_i == 0 else 'Dissimilar',\n horizontalalignment='center', verticalalignment='center',\n transform=ax.transAxes)\n\n prev_ax = ax\n\n \n # good_v_truth = infile['trace2D'][good_i, ...]\n # good_phys_par = infile['physPred2D'][good_i, ...]\n # good_v_pred = sim._data_for(*good_phys_par)\n\n # bad_v_truth = infile['trace2D'][bad_i, 
...]\n # bad_phys_par = infile['physPred2D'][bad_i, ...]\n # bad_v_pred = sim._data_for(*bad_phys_par)\n\n # good_ax = plt.subplot(gs[0, 3*model_i:3*(model_i+1)])\n # good_ax.plot(t_axis, good_v_truth, color='lime', linewidth=0.5, label='True params')\n # good_ax.plot(t_axis, good_v_pred, color='green', linewidth=0.5, label='Predicted')\n # good_\n # good_ax.set_title(pname)\n\n # bad_ax = plt.subplot(gs[1, 3*model_i:3*(model_i+1)])\n # bad_ax.plot(t_axis, bad_v_truth, color='cyan', linewidth=0.5, label='True params')\n # bad_ax.plot(t_axis, bad_v_pred, color='blue', linewidth=0.5, label='Predicted')\n # bad_ax.set_xlabel('Time (ms)')\n\n\n\n\n # BOTTOM ROW FROM ROYS DATA\n roysdata = np.genfromtxt('figures/forVyassa.csv')\n pred_v_lg = roysdata[0][5500:14500]\n truth_v_lg = roysdata[1][5500:14500]\n pred_v_sm = roysdata[2][5500:14500]\n truth_v_sm = roysdata[3][5500:14500]\n \n trace_gs = gridspec.GridSpecFromSubplotSpec(1, NTRACES, subplot_spec=top_gs[2*NMODELS-1, :], wspace=0)\n ax = plt.subplot(trace_gs[0], sharex=prev_ax)#, sharey=prev_ax)\n ax.axis('off')\n ax.plot(t_axis, truth_v_sm, color='k', linewidth=0.5, label='True')\n ax.plot(t_axis, pred_v_sm, color='red', linewidth=0.5, linestyle='--', label='Predicted')\n ax.text(0.5, -0.3, \"Similar\",\n horizontalalignment='center', verticalalignment='center',\n transform=ax.transAxes)\n\n ax = plt.subplot(trace_gs[1], sharex=prev_ax)#, sharey=prev_ax)\n ax.axis('off')\n ax.plot(t_axis, truth_v_lg, color='k', linewidth=0.5)\n ax.plot(t_axis, pred_v_lg, color='red', linewidth=0.5, linestyle='--')\n ax.text(0.5, -0.3, \"Dissimilar\",\n horizontalalignment='center', verticalalignment='center',\n transform=ax.transAxes)\n\n title_gs = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=top_gs[2*NMODELS-2, :])\n title_ax = plt.subplot(title_gs[:])\n title_ax.axis('off')\n title_ax.text(0.5, 0, \"Mainen 10 param\", horizontalalignment='center',\n verticalalignment='center', transform=title_ax.transAxes)\n\n # plt.savefig('trace_compare.png'.format(FILE_i)) \n plt.show()\n \n # for phys_pred, unit_pred, unit_truth, v_truth in sim.iter_sim_predictions([simpredfile]):\n # v_pred = self._data_for(*phys_pred)\n # isi = self._similarity(v_truth, v_pred)\n\n \n\n \n","repo_name":"VBaratham/DL4neurons","sub_path":"figures/trace_pair.py","file_name":"trace_pair.py","file_ext":"py","file_size_in_byte":8774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"6460810544","text":"#!/usr/bin/env python3\n# Script to train neural network model.\n# Author: Brian Wisniewski\n# Date: March 29th 2022\n# CHANGELOG\n# - Initial creation \nfrom cv2 import normalize\nimport tensorflow as tf\nimport cv2\n\n#Import script from a different directory\nimport os\nos.sys.path.insert(1, '../src')\nfrom spell_detector_nn import spell_detector_model\n\ntrain_ds = tf.keras.utils.image_dataset_from_directory(\n '../../training_data/',\n validation_split=0.2,\n subset=\"training\",\n seed=123,\n image_size=(20,20),\n batch_size=35\n)\n\nspells = train_ds.class_names\n\nval_ds = tf.keras.utils.image_dataset_from_directory(\n '../../training_data/',\n validation_split=0.2,\n subset=\"validation\",\n seed=123,\n image_size=(20,20),\n batch_size=35\n)\n\nAUTOTUNE = tf.data.AUTOTUNE\n\ntrain_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)\nval_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)\n\ncheckpoint_path = \"../src/spell_detector_model_parameters/cp.ckpt\"\ncp_callback = 
tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n verbose=1)\nmodel = spell_detector_model(2)\n\nmodel.fit(\n train_ds,\n validation_data=val_ds,\n epochs=3,\n callbacks=[cp_callback] # Pass callback to training\n)\n\nprint(spells)\n","repo_name":"BWizz/cv_wand_tracking","sub_path":"scripts/ml-trainer.py","file_name":"ml-trainer.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15166885255","text":"from train_curveadj import CurveAdj\nfrom matplotlib.pyplot import subplots\n\nclass PlotCurveAdj(CurveAdj):\n def __init__(self, population_size: int, tournament_size: float, n_generations: int, range_considered, mutation_allowed: bool, **kwargs) -> None:\n super().__init__(population_size, tournament_size, n_generations, range_considered, mutation_allowed)\n\n self.fig, self.axes = subplots(nrows=1, ncols=2, **kwargs)\n self.func_x, self.func_y = [], []\n\n\n def plot_curves(self, i: int, **kwargs) -> None:\n self.axes[0].clear()\n self.axes[0].plot(self.actual_curve, color='blue')\n\n est_curve = self.curve_values(self.top_winners[i])\n self.axes[0].plot(est_curve, **kwargs)\n self.axes[0].set_ylim(top=max(self.actual_curve)*1.1)\n\n winner_func = self.function_to_eval(self.top_winners[i]).replace('\"','')\n self.axes[0]\n self.axes[0].set_title(winner_func)\n\n\n def plot_error(self, i: int) -> None:\n min_error, max_error = self.top_errors[-1], self.top_errors[0]\n top_error = self.top_errors[i]\n\n self.func_x.append(i)\n self.func_y.append(top_error)\n\n self.axes[1].clear()\n \n error = f'Gen #{str(i+1).zfill(3)} error: {top_error:.2f}'\n self.axes[1].set_title(error)\n # self.axes[1].set_xlim([0, self.n_gen])\n self.axes[1].set_ylim([min_error*0.9, max_error])\n\n self.axes[1].plot(self.func_x, self.func_y, color='blue')\n\n\n def plot_curveadj(self, i, **kwargs) -> None:\n self.plot_curves(i, **kwargs)\n self.plot_error(i)\n","repo_name":"Afroefras/UAG_MCC","sub_path":"inteligencia_artificial/curveadj/plot_curveadj.py","file_name":"plot_curveadj.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"73473323950","text":"from django.http import JsonResponse\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .models import Contact\nfrom .serializers import ContactSerializer\n \n@api_view(['GET', 'POST'])\ndef contact_list(request):\n \"\"\"\n List all snippets, or create a new snippet. 
\n \"\"\"\n if request.method == 'GET':\n contacts = Contact.objects.all()\n serializer = ContactSerializer(contacts, many=True)\n print('serializer.data :')\n print(serializer.data)\n data = [{\"firstname\": \"Andrew\", \"lastname\": \"Reid\"}, {\"firstname\": \"Benny\", \"lastname\": \"Benassi\"}]\n return Response(data)\n \n elif request.method == 'POST':\n print(request.data)\n\n serializer = ContactSerializer(data=request.data)\n \n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)","repo_name":"gnudrew/EdisonRoom","sub_path":"Room-10_django-rest-framework/myproject/contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40106332713","text":"#! /usr/bin/env python3\n\n\"\"\" Various basic deepseq-related utilities I wrote - see separate function docstrings. For importing only.\n --Weronika Patena, 2011-2021\n\"\"\"\n\n# basic libraries\nfrom __future__ import division\nimport unittest\n# other packages\nimport HTSeq # no longer available for python2\nimport matplotlib.pyplot as mplt\n# my modules\nimport basic_seq_utilities\n\n\n## NOTE: for fasta/fastq (raw data) utilities see basic_seq_utilities.py\n\n\n################## aligned data utilities ################## \n\n### NOTE: I'm currently using HTSeq for parsing SAM-format data, but I could try something else - pysam seems good.\n\n# SAM and GFF are 1-based, with the end being the last base; HTSeq is 0-based, with the end being the base AFTER last.\n\n######### NOTES ON THE SAM FORMAT\n### Header:\n# MAYBE-TODO do something useful with the SAM header? Or at least copy it to outfile?\n### Alignment line fields:\n# * query template name\n# * bitwise flag (relevant bits: 4 = unmapped, 16 = reverse-complement, 512 = failed quality control)\n# * reference sequence name (* = unmapped)\n# * leftmost mapping position (1-based) (0 = unmapped)\n# * mapping quality (\"-10 * log10(probability that position is wrong)\"; 255 = unknown)\n# * CIGAR string - descriptions of alignment matches/mismatches/etc (M/= match, I/D ins/del, X mismatch, S/H clipping)\n# * (PE only - reference name of the mate fragment)\n# * (PE only - position of the mate fragment)\n# * template length\n# * fragment sequence\n# * ASCII of Phred-scaled base quality + 33 (original deepseq read quality)\n# * OPTIONAL FIELDS, lots of different possibilities: MD is mismatch info string, NM is edit distance to reference\n# (for info on MD field format see SAM manual footnote, and \n# sam_MD_field_examples_*.txt files in experiments/reference_data/aligner_format_info)\n#########\n\nCIGAR_TYPES_MATCH = ['=']\nCIGAR_TYPES_NOOP = ['S','H','P']\nCIGAR_TYPES_MUTATION = ['X','I','D']\nCIGAR_TYPES_INTRON = ['N'] # 'N' is for introns, but we shouldn't be paying attention to those for genomic DNA seq\nCIGAR_TYPES_UNKNOWN = ['M']\n# MAYBE-TODO HTSeq doesn't appear aware of the = and X operations... 
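Editor's note on the contacts views.py record above: the GET branch builds serializer.data, prints it, then discards it and returns a hardcoded two-entry stub, so the endpoint never serves the database contents. A hedged sketch of the presumably intended branch:

if request.method == 'GET':
    contacts = Contact.objects.all()
    serializer = ContactSerializer(contacts, many=True)
    return Response(serializer.data)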
http://www-huber.embl.de/users/anders/HTSeq/doc/alignments.html#HTSeq.CigarOperation - I emailed the author about it, no response\n\n# SAM format flag meanings, from https://samtools.github.io/hts-specs/SAMv1.pdf\nFLAG_MEANINGS = {1: 'the read is part of a pair', \n 2: 'the read is part of a pair that properly aligned as a pair', \n 4: 'unmapped', \n 8: 'the read is part of a pair and the other mate in the pair is unmapped', \n 16: 'alignment is reverse-complement', \n 32: 'the other mate in the pair aligned reverse-complement', \n 64: 'the read is mate 1 in a pair', \n 128: 'the read is mate 2 in a pair', \n 256: 'secondary alignment', \n 512: 'not passing filters, such as platform/vendor quality controls', \n 1024: 'PCR or optical duplicate', \n 2048: 'supplementary alignment (whatever that is)'\n }\n\n\nclass DeepseqError(Exception):\n \"\"\" Exception in this module; no special behavior.\"\"\"\n pass\n\n### Parsing two fastq files in parallel (for paired-end deepseq data)\n\ndef parse_2fastq_parallel(file1, file2):\n \"\"\" Parse two fastq files in parallel - generator yielding (name, seq1, seq2, qual1, qual2) tuples.\n\n Doesn't check that the readnames match.\n \"\"\"\n from Bio.SeqIO.QualityIO import FastqGeneralIterator # Bio is the biopython package\n with open(file1) as INFILE1:\n with open(file2) as INFILE2:\n generator1 = FastqGeneralIterator(INFILE1)\n generator2 = FastqGeneralIterator(INFILE2)\n if_finished_1, if_finished_2 = False, False\n while True:\n try: name1, seq1, qual1 = next(generator1)\n except StopIteration: if_finished_1 = True\n try: name2, seq2, qual2 = next(generator2)\n except StopIteration: if_finished_2 = True\n name = name1.split()[0]\n if not if_finished_1 and not if_finished_2:\n yield (name, seq1, seq2, qual1, qual2)\n elif if_finished_1 and if_finished_2:\n return\n else:\n raise DeepseqError(\"One file finished but the other one didn't! Read name %s\"%(\n name if if_finished_2 else name2.split()[0]))\n # TODO unit-test!\n\n\ndef parse_fastx_sam_parallel(fastx_infile, sam_infile):\n \"\"\" Parse fastx and resulting sam file in parallel - generator yielding (name, seq, alignment_list) tuples.\n\n The sam file may contain multiple alignments per read. Program checks that the readnames match.\n \"\"\"\n fastx_generator = basic_seq_utilities.name_seq_generator_from_fasta_fastq(fastx_infile)\n sam_generator = iter(HTSeq.bundle_multiple_alignments(HTSeq.SAM_Reader(sam_infile)))\n if_finished_fastx, if_finished_sam = False, False\n while True:\n try: name, seq = next(fastx_generator) # used to be generator.next() in python2\n except StopIteration: if_finished_fastx = True\n try: alns = next(sam_generator)\n except StopIteration: if_finished_sam = True\n # if both finished, good, we're done\n if if_finished_fastx and if_finished_sam:\n return\n # if one file was finished but the other wasn't, error!\n elif if_finished_fastx or if_finished_sam:\n raise DeepseqError(\"Parsing seq/aln files in parallel - inconsistent finished states! \"\n +\"(If finished: %s %s, %s %s)\"%(fastx_infile, if_finished_fastx, sam_infile, if_finished_sam))\n # if all the files still contained data, yield it\n else:\n name = name.split()[0]\n name2 = alns[0].read.name.split()[0]\n if not name2 == name:\n raise DeepseqError(\"Non-matching readnames between files! 
%s in %s, %s in %s\"%(fastx_infile, name, \n sam_infile, name2))\n yield (name, seq, alns)\n\n\n### Getting mutation counts from various SAM alignment format fields, as read by HTSeq\n\ndef _get_HTSeq_optional_field_either_version(val_or_tuple):\n \"\"\" Different HTSeq versions return either val or (name,val) from aln.optional_field(name) - convert either to val. \"\"\"\n if isinstance(val_or_tuple, tuple): return val_or_tuple[1]\n else: return val_or_tuple\n\n\ndef get_HTSeq_optional_field(HTSeq_alignment, field_name):\n \"\"\" Return value of optional field (like NM, XM, etc). \"\"\"\n return _get_HTSeq_optional_field_either_version(HTSeq_alignment.optional_field(field_name))\n\n\ndef check_mutation_count_by_CIGAR_string(HTSeq_alignment, treat_unknown_as='unknown', ignore_introns=False):\n \"\"\" Return number of mutations in HTSeq_alignment, based on CIGAR string; -1 if unknown ('M') by default.\n If treat_unknown_as is 'unknown', return -1 whenever an unknown (M, may be match or mismatch) operation is found; \n if treat_unknown_as is 'mutation' or 'match', count unknowns accordingly. Return -1 if read is unaligned.\n If ignore_introns is False, count introns (N) as mutations; otherwise don't.\"\"\"\n global CIGAR_TYPES_MUTATION, CIGAR_TYPES_INTRON, CIGAR_TYPES_UNKNOWN\n # just return -1 for unaligned reads\n if HTSeq_alignment.cigar is None:\n return -1\n # figure out whether to consider intron-skipping ('N') as a mutation or not, based on argument\n if ignore_introns:\n # (need this []+ here so it's a copy, not a reference, and modifying it later doesn't modify the original)\n cigar_types_mutation = [] + CIGAR_TYPES_MUTATION\n else:\n cigar_types_mutation = CIGAR_TYPES_MUTATION + CIGAR_TYPES_INTRON \n # figure out how to treat unknown matches ('M'), based on argument\n if treat_unknown_as=='unknown':\n # (need this []+ here so it's a copy, not a reference, and modifying it later doesn't modify the original)\n cigar_types_unknown = [] + CIGAR_TYPES_UNKNOWN\n elif treat_unknown_as=='mutation':\n cigar_types_mutation += CIGAR_TYPES_UNKNOWN\n cigar_types_unknown = []\n elif treat_unknown_as=='match':\n cigar_types_unknown = []\n else:\n raise ValueError(\"treat_unknown_as argument value must be 'mutation', 'match' or 'unknown'\")\n # count the mutations, return total count (or instantly return -1 on finding an unknonw)\n mutations = 0\n for cigar_op in HTSeq_alignment.cigar:\n if cigar_op.type in cigar_types_mutation:\n mutations += cigar_op.size\n # if there's an unknown, just return -1, no need to count\n elif cigar_op.type in cigar_types_unknown:\n return -1\n return mutations\n\n\ndef check_mutation_count_by_optional_NM_field(HTSeq_alignment, negative_if_absent=True):\n \"\"\" Return #errors in HTSeq_alignment, based on optional NM field; -1 or exception if field missing.\"\"\"\n # for unalign reads NM field is missing - returns -1\n try: return get_HTSeq_optional_field(HTSeq_alignment, 'NM')\n except KeyError: \n if negative_if_absent: return -1\n else: raise DeepseqError(\"Optional NM field missing in read %s - can't determine #errors!\"%\n HTSeq_alignment.read.name)\n\n\ndef check_mutation_count_by_optional_MD_field(HTSeq_alignment):\n \"\"\" Return number of mutations in HTSeq_alignment, based on optional MD field; -1 if unknown (MD field missing).\"\"\"\n # for info on MD field format see SAM manual footnote, \n # and sam_MD_field_examples_*.txt files in experiments/reference_data/aligner_format_info\n # basically a number means matches, a letter means a mismatch to reference 
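\n    # Worked example (added for illustration): in the MD string '10A5^AC6' the\n    # non-digit, non-caret characters are A, A, C, so this function returns 3.\n    # As noted, a letter means a mismatch 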
(or insertion? is that different?), \n # letters preceded by ^ mean deletion from the reference\n try: mutation_string = get_HTSeq_optional_field(HTSeq_alignment, 'MD')\n except KeyError: return -1\n # for unalign reads MD field is missing - returns -1\n mutation_letters = [c for c in mutation_string if not (c.isdigit() or c=='^')]\n # (^ is used in describing a mutation but it shouldn't be counted as a separate mutation - only letters count.)\n return len(mutation_letters)\n\n\ndef check_mutation_count_try_all_methods(HTSeq_alignment, treat_unknown_as='unknown', ignore_introns=False):\n \"\"\" Return number of mutations in HTSeq_alignment (look at CIGAR string and NM and MD optional fields); -1 if unknown.\n First check the CIGAR string but only accept the answer if there are no unknown ('M') characters; \n then check the NM and MD fields and return the result if those fields exist.\n If the CIGAR string is ambiguous and neither of the optional fields exist:\n - if treat_unknown_as is 'unknown', return -1\n - if treat_unknown_as is 'mutation' or 'match', return the CIGAR string result with unknowns counted accordingly.\n If ignore_introns is False, count introns (N) in CIGAR string as mutations; otherwise don't .\n Does NOT guarantee returning a sensible value if the CIGAR, NM and MD fields contain inconsistent information.\n \"\"\"\n mutation_count = check_mutation_count_by_CIGAR_string(HTSeq_alignment, treat_unknown_as='unknown', \n ignore_introns=ignore_introns)\n if not mutation_count==-1: return mutation_count\n mutation_count = check_mutation_count_by_optional_NM_field(HTSeq_alignment)\n if not mutation_count==-1: return mutation_count\n mutation_count = check_mutation_count_by_optional_MD_field(HTSeq_alignment)\n if not mutation_count==-1: return mutation_count\n if treat_unknown_as=='unknown': return -1\n return check_mutation_count_by_CIGAR_string(HTSeq_alignment, treat_unknown_as=treat_unknown_as, ignore_introns=ignore_introns)\n\n\n### Other SAM alignment utilities\n\ndef aln_read_coverage(HTSeq_alignment):\n \"\"\" Given an HTSeq alignment, return (start,end) of the part of the read it covers (using CIGAR soft-padding info). \"\"\"\n cigar = HTSeq_alignment.cigar\n if cigar[0].type == 'S': start = cigar[1].query_from\n else: start = 0\n if cigar[-1].type == 'S': end = cigar[-2].query_to\n else: end = cigar[-1].query_to\n # if the alignment is -strand, the CIGAR operation positions use a reverse-complement of the read, so reverse them properly\n if HTSeq_alignment.iv.strand == '-':\n # sometimes only one alignment per read has the actual sequence - in those cases if this read doesn't have the sequence,\n # deduce the length from the CIGAR position info - I checked this on real data and it's correct.\n # also note that sequences are annoyingly often \"bytes\" objects (e.g. b'ATG' instead of 'ATG'), need to check for that.\n if HTSeq_alignment.read.seq in ('', '*', b'', b'*'): read_length = max(max(c.query_from, c.query_to) for c in cigar)\n else: read_length = len(HTSeq_alignment.read.seq)\n start, end = read_length - end, read_length - start\n return start, end\n\ndef aln_read_coverage_fraction(HTSeq_alignment, percent_string=False):\n \"\"\" Given an HTSeq alignment, return fraction of the read it covers (or percentage string). 
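\n\n    Illustrative example (added): a 100 bp read soft-clipped by 10 bp at each end covers\n    (10, 90), so this returns 0.8 -- or '80%' when percent_string is True.\n    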
\"\"\"\n s, e = aln_read_coverage(HTSeq_alignment)\n fraction = (e-s)/len(HTSeq_alignment.read.seq)\n if percent_string: return \"%.0f%%\"%(fraction*100)\n else: return fraction\n\n\ndef read_coverage_all_alns(alns):\n \"\"\" Given a list of HTSeq alignment objects, return (start,end) tuple describing the part of the read covered by any of them. \"\"\"\n starts, ends = zip(*[aln_read_coverage(a) for a in alns])\n return min(starts), max(ends)\n\n\ndef read_coverage_fraction_all_alns(alns):\n \"\"\" Given a list of HTSeq alignment objects, return fraction of the read covered by any of them. \"\"\"\n starts, ends = zip(*[aln_read_coverage(a) for a in alns])\n # there's probably a more elegant way to do interval overlaps, but this is simple and works\n if_covered = [0 for _ in alns[0].read.seq]\n for aln in alns:\n start, end = aln_read_coverage(aln)\n for i in range(start, end):\n if_covered[i] = 1\n return sum(if_covered)/len(if_covered)\n\n\ndef print_aln_list_info(alns, sort=True):\n \"\"\" Return string of useful information about a list of alignments, optionally sorted. \"\"\"\n if not alns: return \"no alignments!\"\n aln_data = [('-' if a.not_primary_alignment else '+', '1' if a.pe_which=='first' else '2', \n aln_read_coverage_fraction(a, True), a.optional_field('NM'), \n a.iv.chrom, a.iv.start, a.iv.end) \n for a in alns]\n if sort: aln_data.sort(key=lambda x: (x[0], x[1], -int(x[2][:-1]), x[3]))\n # TODO make this a nicer string instead of just str() of a list!\n return alns[0].read.name + '\\n' + '\\n'.join(str(x) for x in aln_data)\n\n\ndef find_best_aln(alns, min_coverage=0.8, bad_coverage=.5, max_errors=.02, bad_errors=0.05):\n \"\"\" If there's exactly one alignment that meets min_coverage and max_errors \n while all other alignments are bad_coverage and bad_errors or worse, return that; otherwise return None.\n \"\"\"\n acceptable_alns = [a for a in alns if aln_read_coverage_fraction(a) > bad_coverage \n and check_mutation_count_try_all_methods(a)/len(a.read.seq) < bad_errors]\n if len(acceptable_alns) != 1: \n return None\n a = acceptable_alns[0]\n if aln_read_coverage_fraction(a) > min_coverage and check_mutation_count_try_all_methods(a)/len(a.read.seq) < max_errors:\n return a\n else:\n return None\n\n\ndef primary_or_best_aln(alns, min_coverage=0.8, bad_coverage=.5, max_errors=.02, bad_errors=0.05):\n \"\"\" Return primary alignment if there is one, or the only alignment if there's only one, otherwise same as find_best_aln. \"\"\"\n if len(alns) == 1: return alns[0]\n primary = [a for a in alns if not a.not_primary_alignment]\n if len(primary) == 1: return primary[0]\n elif len(primary) > 1: print(\"Multiple primary alignments for %s - shouldn't happen!\"%a.read.name)\n return find_best_aln(alns, min_coverage, bad_coverage, max_errors, bad_errors)\n\n\ndef interpret_flags(flag_number, details=False):\n \"\"\" Given an overall flag number (like 35), split it into bits (32, 2, 1), optionally with verbal interpretations. \"\"\"\n flags = []\n for i, bit in enumerate(reversed(bin(flag_number))):\n if bit=='1': flags.append(2**i)\n elif bit=='0': pass\n elif bit=='b': break\n else: print('This binary interpretation has weird bits! %s -> %s'%(flag_number, bin(flag_number)))\n if details: \n for f in flags:\n print('%s - %s'%(f, FLAG_MEANINGS[f]))\n return flags\n\n\ndef plot_alignments(alns, cassette_length):\n \"\"\" Plot a set of alignments against a read - one row per chromosome, cassette with a gradient, darker grey = fewer errors. 
\"\"\"\n filtered_alns = alns # I could do filtering by chromosome or error rate or something\n all_chroms = sorted(set(aln.iv.chrom for aln in filtered_alns))\n for i, chrom in enumerate(all_chroms):\n curr_alns = [aln for aln in filtered_alns if aln.iv.chrom == chrom]\n if 'cassette' in chrom:\n for aln in curr_alns:\n color1 = aln.iv.start/cassette_length*256\n color2 = aln.iv.end/cassette_length*256\n start, end = aln_read_coverage(aln)\n mplt.imshow([[color1, color2], [color1, color2]], interpolation='bicubic', cmap='cool',\n extent=(start, end, -i-0.4, -i+0.4), alpha=1, aspect='auto', vmin=0, vmax=255)\n else:\n # TODO make the colors into parameters\n aln_starts_ends = [aln_read_coverage(aln) for aln in curr_alns]\n aln_error_rates = [check_mutation_count_try_all_methods(aln)/(end-start)\n for aln,(start,end) in zip(curr_alns, aln_starts_ends)]\n mplt.barh(y = [-i-0.4 for aln in curr_alns],\n width = [end-start for start,end in aln_starts_ends], height = 0.8, align = 'edge', \n left = [start for start,end in aln_starts_ends], edgecolor='None',\n color=['0.4' if e<0.1 else ('0.6' if e<0.2 else '0.8') for e in aln_error_rates])\n # plot the read at the end to keep the xrange from getting screwed up by imshow\n read_len = len(alns[0].read.seq)\n mplt.barh(0.6, read_len, 0.8, 0, align='edge', color='black')\n mplt.xticks(range(0, int(read_len), 100), [])\n # TODO I'd prefer X ticks to be up from the X axis rather than down, in this case, to save space & avoid confusion between subplots\n mplt.yticks([])\n mplt.xlim(read_len * -0.02, read_len * 1.02)\n mplt.ylabel('%s'%read_len, rotation=90)\n # TODO it might be nice to always arrange the reads so the cassette fragment is at the beginning - can I easily do that?\n\n\ndef seqs_and_mismatches_from_CIGAR_match(c, aln, full_read_seq, ref_chromosome_seq):\n \"\"\" Given a CIGAR match c, return the corresponding read and reference sequences (RC if needed) and mismatch positions. \"\"\"\n if c.type != 'M': raise Exception(\"look_at_CIGAR_match intended only for M operations!\")\n # sometimes sequences are \"bytes\" instead of strings (b'ATG' vs 'ATG') and then they don't compare properly, UGH\n if type(full_read_seq) is bytes: full_read_seq = full_read_seq.decode()\n if type(ref_chromosome_seq) is bytes: ref_chromosome_seq = ref_chromosome_seq.decode()\n # apparently the CIGAR info uses read_as_aligned, not the original read sequence as written in the SAM file, so RC it if needed\n if aln.iv.strand == '-': full_read_seq = basic_seq_utilities.reverse_complement(full_read_seq)\n readseq = full_read_seq[c.query_from:c.query_to].upper()\n refseq = ref_chromosome_seq[c.ref_iv.start:c.ref_iv.end].upper()\n if not len(readseq) == len(refseq): \n raise Exception(\"read and ref lengths of a CIGAR match are different! %s\\n%s\\n%s\"%(c, readseq, refseq))\n mismatches = [i for i,(x,y) in enumerate(zip(readseq, refseq)) if x!=y]\n # TODO I should have multiple options for this - sometimes I want it in read orientation and sometimes in reference orientation!\n return readseq, refseq, mismatches\n\n\ndef extract_sub_alignment_read_seq(aln, ref_start, ref_end):\n \"\"\" Given an HTSeq alignment and start/end positions, return the read seq corresponding to that range, in reference orientation. 
\n    \"\"\"\n    # TODO TODO TODO implement this!\n\n\ndef plot_alignments_CIGAR(alns, chromosome_seqs):\n    \"\"\" Plot a set of alignments against a read, one row per chromosome, with mismatches/indels marked \"\"\"\n    # plot the alignment bars using plot_alignments; calculate cassette_length purely to pass to plot_alignments\n    cassette_lens = [len(seq) for chrom,seq in chromosome_seqs.items() if 'cassette' in chrom]\n    if len(cassette_lens) > 1: raise Exception(\"multiple cassette chromosomes, need to disambiguate which length!\")\n    elif len(cassette_lens)==1: cassette_length = cassette_lens[0]\n    else: cassette_length = 0 # since in this case it doesn't matter\n    filtered_alns = alns # I could do filtering by chromosome or error rate or something\n    plot_alignments(alns, cassette_length)\n    # now plot mismatch/indel symbols on top of that!\n    all_chroms = sorted(set(aln.iv.chrom for aln in filtered_alns))\n    for i, chrom in enumerate(all_chroms):\n        curr_alns = [aln for aln in filtered_alns if aln.iv.chrom == chrom]\n        for aln in curr_alns:\n            start, end = aln_read_coverage(aln)\n            curr_readpos = start\n            # TODO TODO TODO fix this to deal with the thing where CIGAR positions use RC reads for -strand alignments! IF I CARE.\n            for c in aln.cigar:\n                plot_kwargs = dict(markeredgecolor='k', markerfacecolor='k', linestyle='None')\n                if c.type == 'S': continue\n                elif c.type =='D': # deletion in read - will be a single position regardless of deletion size\n                    mplt.plot(c.query_from, -i, marker='x', **plot_kwargs)\n                elif c.type =='I': # insertion in read - may be multiple bases\n                    mplt.plot(range(c.query_from, c.query_to), [-i for x in range(c.query_from,c.query_to)], marker='+', **plot_kwargs)\n                elif c.type =='M': # match OR mismatch - need to look up the reference sequence and compare\n                    readseq, refseq, mismatches = seqs_and_mismatches_from_CIGAR_match(c, aln, alns[0].read.seq, chromosome_seqs[chrom])\n                    mplt.plot([x+c.query_from for x in mismatches], [-i for _ in mismatches], marker='d', **plot_kwargs)\n                else: raise Exception(\"CIGAR type %s not implemented!\"%c.type)\n\n\ndef plot_alignments_100(aln_sets, cassette_length):\n    \"\"\" Plot alignments for a set of 100 reads in subplots, using plot_alignments. \"\"\"\n    mplt.figure(figsize=(30,16))\n    n = 0\n    for alns in sorted(aln_sets, key = lambda x: len(x[0].read.seq)):\n        n += 1\n        mplt.subplot(25, 4, n)\n        plot_alignments(alns, cassette_length)\n    mplt.suptitle(\"black=read, aqua-pink gradient=cassette 5'-3', grey=chromosome (dark error rate <10%, pale >20%); \"\n                  +\"xtick=100bp; Y labels are read lengths in bp\", y=.915)\n    mplt.subplots_adjust(wspace=0.05, hspace=0.1)\n\n\n\n################## unit tests ################## \n\nclass Fake_deepseq_objects:\n    \"\"\" Fake deepseq data objects for testing. \"\"\"\n    # NOTE: not all of those are used in the unit-tests for this module, but they're also imported elsewhere!\n\n    class Fake_HTSeq_cigar_op:\n        \"\"\" Fake CIGAR operation, mimicking HTSeq cigar object.\"\"\"\n        size = 1\n        def __init__(self,string): \n            self.type = string\n\n    class Fake_HTSeq_genomic_pos:\n        \"\"\" Fake HTSeq.GenomicPosition. \"\"\"\n        def __init__(self, chrom, strand, start, end):\n            self.chrom = chrom\n            self.strand = strand\n            self.start = start\n            self.end = end\n\n    class Fake_HTSeq_read:\n        \"\"\" Fake read, as in HTSeq_alignment.read. 
\"\"\"\n def __init__(self,seq='AAA',name='test'):\n self.seq = seq\n self.name = name\n\n class Fake_HTSeq_alignment:\n \"\"\" Fake HTSeq.Alignment object.\"\"\"\n\n def __init__(self, seq='AAA', readname='test', unaligned=False, pos=('chr_','+',0,0), \n cigar_string=None, optional_field_data={}): \n self.read = Fake_deepseq_objects.Fake_HTSeq_read(seq,readname)\n if unaligned:\n self.aligned = False\n self.iv = None\n else:\n self.aligned = True\n self.iv = Fake_deepseq_objects.Fake_HTSeq_genomic_pos(*pos)\n self.optional_field_data = optional_field_data\n if cigar_string is None: self.cigar = None\n else: self.cigar = [Fake_deepseq_objects.Fake_HTSeq_cigar_op(c) for c in cigar_string]\n\n def optional_field(self,field): \n return self.optional_field_data[field]\n\n\nclass Testing(unittest.TestCase):\n \"\"\" Unit-tests for all the functions/classes in this module. \"\"\"\n\n def test__parse_fastx_sam_parallel(self):\n output = list(parse_fastx_sam_parallel('_test_inputs/test_parallel2.fq', '_test_inputs/test_parallel2.sam'))\n assert len(output) == 3\n assert [len(x) for x in output] == [3, 3, 3]\n assert [len(x[2]) for x in output] == [2, 1, 1]\n assert output[0][2][0].read.name == output[0][0] == 'ROCKFORD:4:1:1680:975#0/1'\n # this .decode() is needed because the first one is a bytes type: b'ACTAATACGCGGCCTGGAGCTGGACGTTGGAACCAA'\n assert output[0][2][0].read.seq.decode() == output[0][1] == 'ACTAATACGCGGCCTGGAGCTGGACGTTGGAACCAA'\n # the generator isn't really run until you ask for its results, so I have to run list on it to get the error\n self.assertRaises(DeepseqError, list, parse_fastx_sam_parallel('_test_inputs/test.fq', '_test_inputs/test_parallel2.sam'))\n\n def test__check_mutation_count_by_CIGAR_string(self):\n # no alignment (CIGAR is None)\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment()\n assert check_mutation_count_by_CIGAR_string(fake_alignment) == -1\n # CIGAR is unambiguous, no MD or NM given (or needed)\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='==')\n assert check_mutation_count_by_CIGAR_string(fake_alignment) == 0\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='XX')\n assert check_mutation_count_by_CIGAR_string(fake_alignment) == 2\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='DD')\n assert check_mutation_count_by_CIGAR_string(fake_alignment) == 2\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='S=')\n assert check_mutation_count_by_CIGAR_string(fake_alignment) == 0\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='SX')\n assert check_mutation_count_by_CIGAR_string(fake_alignment) == 1\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='N=')\n assert check_mutation_count_by_CIGAR_string(fake_alignment, ignore_introns=True) == 0\n assert check_mutation_count_by_CIGAR_string(fake_alignment, ignore_introns=False) == 1\n # CIGAR is ambiguous (contains M's) - return -1, 2 or 0 depending on what treat_unknown_as is set to\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='MM')\n assert check_mutation_count_by_CIGAR_string(fake_alignment, treat_unknown_as='unknown') == -1\n assert check_mutation_count_by_CIGAR_string(fake_alignment, treat_unknown_as='mutation') == 2\n assert check_mutation_count_by_CIGAR_string(fake_alignment, treat_unknown_as='match') == 0\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='M=')\n assert 
check_mutation_count_by_CIGAR_string(fake_alignment, treat_unknown_as='unknown') == -1\n assert check_mutation_count_by_CIGAR_string(fake_alignment, treat_unknown_as='mutation') == 1\n assert check_mutation_count_by_CIGAR_string(fake_alignment, treat_unknown_as='match') == 0\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='MX')\n assert check_mutation_count_by_CIGAR_string(fake_alignment, treat_unknown_as='unknown') == -1\n assert check_mutation_count_by_CIGAR_string(fake_alignment, treat_unknown_as='mutation') == 2\n assert check_mutation_count_by_CIGAR_string(fake_alignment, treat_unknown_as='match') == 1\n\n def test__check_mutation_count_by_optional_NM_field(self):\n \"\"\" the tested function should return -1 if no NM field, otherwise return value of NM field. \"\"\"\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment()\n assert check_mutation_count_by_optional_NM_field(fake_alignment, negative_if_absent=True) == -1\n self.assertRaises(DeepseqError, check_mutation_count_by_optional_NM_field, fake_alignment, negative_if_absent=False)\n for x in range(10):\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(optional_field_data={'NM':x})\n assert check_mutation_count_by_optional_NM_field(fake_alignment) == x\n\n def test__check_mutation_count_by_optional_MD_field(self):\n \"\"\" see ~/experiments/reference_data/aligner_format_info/* files for MD field examples.\"\"\"\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment({})\n assert check_mutation_count_by_optional_MD_field(fake_alignment) == -1\n for s in [str(x) for x in range(30)]:\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(optional_field_data={'MD': s })\n assert check_mutation_count_by_optional_MD_field(fake_alignment) == 0\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(optional_field_data={'MD': s+s })\n assert check_mutation_count_by_optional_MD_field(fake_alignment) == 0\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(optional_field_data={'MD': s+'A'+s })\n assert check_mutation_count_by_optional_MD_field(fake_alignment) == 1\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(optional_field_data={'MD': s+'A0G'+s })\n assert check_mutation_count_by_optional_MD_field(fake_alignment) == 2\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(optional_field_data={'MD': s+'A2G'+s })\n assert check_mutation_count_by_optional_MD_field(fake_alignment) == 2\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(optional_field_data={'MD': s+'A2G2T2C2N'+s })\n assert check_mutation_count_by_optional_MD_field(fake_alignment) == 5\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(optional_field_data={'MD': s+'^A'+s })\n assert check_mutation_count_by_optional_MD_field(fake_alignment) == 1\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(optional_field_data={'MD': s+'^AGC'+s })\n assert check_mutation_count_by_optional_MD_field(fake_alignment) == 3\n\n def test__check_mutation_count_try_all_methods(self):\n \"\"\" The order of check is CIGAR, NM, MD; CIGAR is skipped if ambiguous; NM and MD skipped if inexistent. 
\n Not attempting to deal with inconsistent states sensibly.\"\"\"\n # all measures agree there are no mutations (with 0-2 of NM/MD fields present)\n for opt_data in [{'NM':0, 'MD':'10'}, {'NM':0}, {'MD':'10'}, {}]:\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='='*10, optional_field_data=opt_data)\n assert check_mutation_count_try_all_methods(fake_alignment) == 0\n # all measures agree there is a mutation (with 0-2 of NM/MD fields present)\n for opt_data in [{'NM':1, 'MD':'A9'}, {'NM':1}, {'MD':'A9'}, {}]:\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='X'+'='*9,optional_field_data=opt_data)\n assert check_mutation_count_try_all_methods(fake_alignment) == 1\n # CIGAR is ambiguous, there are no mutations according to NM/MD (NM, MD or both are present)\n for opt_data in [{'NM':0, 'MD':'10'}, {'NM':0}, {'MD':'10'}]:\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='M'*10, optional_field_data=opt_data)\n assert check_mutation_count_try_all_methods(fake_alignment) == 0\n # CIGAR is ambiguous, there is a mutation according to NM/MD (NM, MD or both are present)\n for opt_data in [{'NM':1, 'MD':'A9'}, {'NM':1}, {'MD':'A9'}]:\n fake_alignment = Fake_deepseq_objects.Fake_HTSeq_alignment(cigar_string='M'*10, optional_field_data=opt_data)\n assert check_mutation_count_try_all_methods(fake_alignment) == 1\n\n def test__interpret_flags(self):\n for i in range(1,12):\n self.assertEqual(interpret_flags(2**i), [2**i])\n self.assertEqual(interpret_flags(2**i + 1), [1,2**i])\n self.assertEqual(interpret_flags(2**i - 1), [2**j for j in range(i)])\n self.assertEqual(interpret_flags(1), [1])\n self.assertEqual(interpret_flags(3), [1,2])\n self.assertEqual(interpret_flags(4), [4])\n # and the details option I just tested by hand\n\n\nif __name__ == \"__main__\":\n \"\"\" Allows both running and importing of this file. \"\"\"\n print(\"*** This is a module to be imported to other files - running the built-in test suite. 
***\")\n unittest.main()\n\n","repo_name":"Jonikas-Lab/basic-bioinf-utilities","sub_path":"deepseq_utilities.py","file_name":"deepseq_utilities.py","file_ext":"py","file_size_in_byte":34289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"14913767173","text":"#!/usr/bin/python3\n''' Author: dshah1091@gmail.com | learning paramiko'''\n\nimport os\n\nimport paramiko\nSRVRS = [{'ip' : '10.10.2.3', 'un' : 'bender'},{'ip' : '10.10.2.4' , 'un' : 'fry'}]\nwith open('cmds2issue.txt', 'r') as cmds:\n CMDLIST = cmds.readlines()\n\n\ndef cmdissue(sshsession, commandtoissue):\n ssh_stdin, ssh_stdout, ssh_stderr = sshsession.exec_command(commandtoissue)\n return ssh_stdout.read()\n\ndef main():\n # harvest RSA key (SSH private key)\n myprivkey = paramiko.RSAKey.from_private_key_file(\"/home/student/.ssh/id_rsa\")\n \n for server in SRVRS: \n # Initage connection to remote mahine\n sshsession = paramiko.SSHClient()\n sshsession.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n sshsession.connect(hostname=server['ip'], username=server['un'], pkey=myprivkey)\n\n # touch two files\n\n # get uptime of server\n for commandtoissue in CMDLIST:\n result = cmdissue(sshsession, commandtoissue)\n if result != \"\":\n logfile= server['ip'].replace('.','') + '.log'\n with open(logfile, 'a' ) as svrlog:\n print('COMMAND ISSUED -', commandtoissue, file=svrlog)\n print(result, file=svrlog)\n print('', file=svrlog)\n\n # close the connection\n sshsession.close()\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"dshah1091/mycodebed","sub_path":"07-learningingssh.py","file_name":"07-learningingssh.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"32058199633","text":"from gbk.gbk import GBK as Model\n\nmodel = Model()\n# model.load(\"merged.json\")\nmodel.load(\"sportsmodelTFIDF.json\")\n\ndocument1 = \"At election time the game of politics is played\"\ndocument2 = \"A very close sport game was played \"\n\n\nresult1 = model.predict('model',document1).getTopics()\nresult2 = model.predict('model',document2).getTopics()\n\n\n\nprint(\"Result1:{}\\nResult2:{}\".format(result1,result2))\n\n\n","repo_name":"DavidDexterCharles/GBK-Topic-Modeler","sub_path":"iDocumentation/experiment0/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34436762286","text":"from dynamics.get_reduction_errors import *\r\n# from plots.plots_setup import *\r\nimport matplotlib.pyplot as plt\r\nimport json\r\nimport numpy as np\r\n# from tqdm import tqdm\r\n# from mpl_toolkits.axes_grid1.inset_locator import inset_axes\r\n\r\nfirst_community_color = \"#2171b5\"\r\nsecond_community_color = \"#f16913\"\r\nreduced_first_community_color = \"#9ecae1\"\r\nreduced_second_community_color = \"#fdd0a2\"\r\nreduced_third_community_color = \"#a1d99b\"\r\ntotal_color = \"#525252\"\r\nfontsize = 12\r\ninset_fontsize = 9\r\nfontsize_legend = 12\r\nlabelsize = 12\r\ninset_labelsize = 9\r\nlinewidth = 2\r\ns = 50\r\nplt.rc('text', usetex=True)\r\nplt.rc('font', family='serif')\r\n\r\n\r\nwith open('data/kuramoto/errors_kuramoto/2019_10_02_17h44min36sec_big_sim_data'\r\n '_parameters_dictionary_for_error_kuramoto_2D_vs_N.json'\r\n ) as json_data:\r\n R_big_dictionary = json.load(json_data)\r\n\r\np_out_array_big = 
np.array(R_big_dictionary[\"p_out_array\"])\r\nomega1_array_big = np.array(R_big_dictionary[\"omega1_array\"])\r\nN_array_big = np.array(R_big_dictionary[\"N_array\"])\r\nnb_p_out_big = len(p_out_array_big)\r\nnb_omega1_big = len(omega1_array_big)\r\nplot_transitions_big = 0\r\n\r\nL1_spec_vs_N_big, var_L1_spec_vs_N_big = \\\r\n get_error_vs_N_kuramoto_bipartite(R_big_dictionary, p_out_array_big,\r\n omega1_array_big, N_array_big,\r\n plot_transitions_big)\r\n\r\nwith open('data/kuramoto/errors_kuramoto/2019_10_03_15h29min13sec_'\r\n 'test_data_parameters_dictionary_for_error_kuramoto_2D_vs_N.json'\r\n ) as json_data:\r\n R_dictionary = json.load(json_data)\r\n\r\np_out_array = np.array(R_dictionary[\"p_out_array\"])\r\nomega1_array = np.array(R_dictionary[\"omega1_array\"])\r\nN_array = np.array(R_dictionary[\"N_array\"])\r\nnb_p_out = len(p_out_array)\r\nnb_omega1 = len(omega1_array)\r\nplot_transitions = 0\r\n\r\nL1_spec_vs_N, var_L1_spec_vs_N = \\\r\n get_error_vs_N_kuramoto_bipartite(R_dictionary, p_out_array, omega1_array,\r\n N_array, plot_transitions)\r\n\r\nwith open('data/kuramoto/errors_kuramoto/2019_10_11_14h25min01sec_N5000_data_'\r\n 'parameters_dictionary_for_error_kuramoto_2D_vs_N.json'\r\n ) as json_data:\r\n R_dictionary_5000 = json.load(json_data)\r\n\r\np_out_array_5000 = np.array(R_dictionary_5000[\"p_out_array\"])\r\nomega1_array_5000 = np.array(R_dictionary_5000[\"omega1_array\"])\r\nN_array_5000 = np.array(R_dictionary_5000[\"N_array\"])\r\nplot_transitions_5000 = 0\r\n\r\nL1_spec_vs_N5000, var_L1_spec_vs_N5000 = \\\r\n get_error_vs_N_kuramoto_bipartite(R_dictionary_5000, p_out_array_5000,\r\n omega1_array_5000,\r\n N_array_5000, plot_transitions_5000)\r\n\r\nwith open('data/kuramoto/errors_kuramoto/2019_10_14_07h38min20sec_N10000_data_'\r\n 'parameters_dictionary_for_error_kuramoto_2D_vs_N.json'\r\n ) as json_data:\r\n R_dictionary_10000 = json.load(json_data)\r\n\r\np_out_array_10000 = np.array(R_dictionary_10000[\"p_out_array\"])\r\nomega1_array_10000 = np.array(R_dictionary_10000[\"omega1_array\"])\r\nN_array_10000 = np.array(R_dictionary_10000[\"N_array\"])\r\nplot_transitions_10000 = 0\r\n\r\nL1_spec_vs_N10000, var_L1_spec_vs_N10000 = \\\r\n get_error_vs_N_kuramoto_bipartite(R_dictionary_10000, p_out_array_10000,\r\n omega1_array_10000,\r\n N_array_10000, plot_transitions_10000)\r\n\r\nplt.figure(figsize=(6, 3))\r\nax1 = plt.subplot(121)\r\n# plt.scatter(N_array, RMSE_freq_vs_N, label=\"RMSE$_{freq}$\", s=s,\r\n# color=reduced_first_community_color)\r\n# plt.scatter(N_array, RMSE_spec_vs_N, label=\"RMSE$_{spec}$\", s=s,\r\n# color=reduced_third_community_color)\r\n# plt.errorbar(N_array, L1_freq_vs_N, yerr=var_L1_freq_vs_N, fmt='o',\r\n# color=reduced_first_community_color,\r\n# label=\"$\\\\langle L_1 \\\\rangle_{freq}$\")\r\nplt.title(\"(a)\", fontsize=fontsize)\r\nplt.errorbar(N_array_big, L1_spec_vs_N_big, yerr=var_L1_spec_vs_N_big, fmt='o',\r\n color=reduced_third_community_color,\r\n label=\"$\\\\langle L_1 \\\\rangle_{spec}$\")\r\nplt.tick_params(axis='both', which='major', labelsize=labelsize)\r\n# plt.legend(loc=1, fontsize=fontsize_legend)\r\nylab = plt.ylabel(\"$\\\\langle L_1 \\\\rangle$\", fontsize=fontsize, labelpad=15)\r\nylab.set_rotation(0)\r\nplt.xlabel(\"$N$\", fontsize=fontsize)\r\nplt.xscale('symlog')\r\nplt.ylim([0, 0.1])\r\n\r\nax2 = plt.subplot(122)\r\n# plt.scatter(N_array, RMSE_freq_vs_N, label=\"RMSE$_{freq}$\", s=s,\r\n# color=reduced_first_community_color)\r\n# plt.scatter(N_array, RMSE_spec_vs_N, label=\"RMSE$_{spec}$\", s=s,\r\n# 
color=reduced_third_community_color)\r\n# plt.errorbar(N_array, L1_freq_vs_N, yerr=var_L1_freq_vs_N, fmt='o',\r\n# color=reduced_first_community_color,\r\n# label=\"$\\\\langle L_1 \\\\rangle_{freq}$\")\r\nplt.title(\"(b)\", fontsize=fontsize)\r\nplt.errorbar(N_array, L1_spec_vs_N, yerr=var_L1_spec_vs_N, fmt='o',\r\n color=reduced_third_community_color,\r\n label=\"$\\\\langle L_1 \\\\rangle_{spec}$\")\r\nplt.errorbar(N_array_5000, L1_spec_vs_N5000, yerr=var_L1_spec_vs_N5000,\r\n fmt='o', color=reduced_third_community_color)\r\nplt.errorbar(N_array_10000, L1_spec_vs_N10000, yerr=var_L1_spec_vs_N10000,\r\n fmt='o', color=reduced_third_community_color)\r\nplt.tick_params(axis='both', which='major', labelsize=labelsize)\r\n# plt.legend(loc=1, fontsize=fontsize_legend)\r\nylab = plt.ylabel(\"$\\\\langle L_1 \\\\rangle$\", fontsize=fontsize, labelpad=15)\r\nylab.set_rotation(0)\r\nplt.xlabel(\"$N$\", fontsize=fontsize)\r\nplt.xscale('symlog')\r\nplt.ylim([0, 0.4])\r\n\r\n\r\n# axins1 = inset_axes(ax, width=\"70%\", height=\"70%\",\r\n# bbox_to_anchor=(.45, .50, .5, .5), # (-0.12, .7, .5, .5),\r\n# bbox_transform=ax.transAxes, loc=4)\r\n# # plt.errorbar(N_array, L1_freq_vs_N, yerr=var_L1_freq_vs_N, fmt='o',\r\n# # color=reduced_first_community_color)\r\n# plt.errorbar(N_array, L1_spec_vs_N, yerr=var_L1_spec_vs_N, fmt='o',\r\n# color=reduced_third_community_color)\r\n# plt.xlim([600, 10550])\r\n# plt.ylim([0, 0.005])\r\n# plt.xticks([1000, 5000, 10000])\r\n# plt.tick_params(axis='both', which='major', labelsize=labelsize)\r\n# ylab = plt.ylabel(\"$\\\\langle L_1 \\\\rangle$\", fontsize=fontsize, labelpad=15)\r\n# ylab.set_rotation(0)\r\n# plt.xlabel(\"$N$\", fontsize=fontsize)\r\n\r\nplt.tight_layout()\r\n\r\nplt.show()\r\n","repo_name":"VinceThi/threefold-way-dimension-reduction-synchronization","sub_path":"simulations/plot_error_kuramoto_2D_vs_N.py","file_name":"plot_error_kuramoto_2D_vs_N.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"20266937024","text":"import os\nimport signal\nimport subprocess\nimport time\nimport traceback\nimport traci\nfrom rlsumo.simulator.network_generator.network_gen import NetworkGenerator\n\n# Number of retries on restarting SUMO before giving up\nRETRIES_ON_ERROR = 10\nSUMO_SLEEP = 1.0\n\n\nclass SimulationKernel:\n\n def __init__(self, simulation_params):\n self.sumo_proc = None\n self.simulation_params = simulation_params\n self.traci_connection = None\n self.network_gen = NetworkGenerator()\n\n def simulation_step(self):\n if self.traci_connection is not None:\n self.traci_connection.simulationStep()\n\n def start_simulation(self):\n\n self.network_gen.generate_network()\n error = None\n for _ in range(RETRIES_ON_ERROR):\n try:\n # port number the sumo instance will be run on\n port = traci.getFreeSocketPort()\n\n sumo_binary = \"sumo-gui\" if self.simulation_params.render is True \\\n else \"sumo\"\n\n # command used to start sumo\n sumo_call = [\n sumo_binary, \"-c\", self.network_gen.network.cfg,\n \"--remote-port\", str(port),\n \"--step-length\", str(self.simulation_params.sim_step)\n ]\n\n # use a ballistic integration step (if request)\n if self.simulation_params.use_ballistic:\n sumo_call.append(\"--step-method.ballistic\")\n\n # ignore step logs (if requested)\n if self.simulation_params.no_step_log:\n sumo_call.append(\"--no-step-log\")\n\n # add the lateral resolution of the sublanes (if requested)\n if 
self.simulation_params.lateral_resolution is not None:\n sumo_call.append(\"--lateral-resolution\")\n sumo_call.append(str(self.simulation_params.lateral_resolution))\n\n if self.simulation_params.overtake_right:\n sumo_call.append(\"--lanechange.overtake-right\")\n sumo_call.append(\"true\")\n\n # specify a simulation seed (if requested)\n if self.simulation_params.seed is not None:\n sumo_call.append(\"--seed\")\n sumo_call.append(str(self.simulation_params.seed))\n\n # if not self.simulation_params.print_warnings:\n # sumo_call.append(\"--no-warnings\")\n # sumo_call.append(\"true\")\n\n # set the time it takes for a gridlock teleport to occur\n sumo_call.append(\"--time-to-teleport\")\n sumo_call.append(str(int(self.simulation_params.teleport_time)))\n\n sumo_call.append(\"--collision.action\")\n sumo_call.append(\"warn\")\n\n # check collisions at intersections\n sumo_call.append(\"--collision.check-junctions\")\n sumo_call.append(\"true\")\n\n sumo_call.append(\"--emissions.volumetric-fuel\")\n sumo_call.append(\"true\")\n\n # Opening the I/O thread to SUMO\n self.sumo_proc = subprocess.Popen(\n sumo_call,\n stdout=subprocess.DEVNULL\n )\n\n # wait a small period of time for the subprocess to activate\n # before trying to connect with traci\n if os.environ.get(\"TEST_FLAG\", 0):\n time.sleep(0.1)\n else:\n time.sleep(SUMO_SLEEP)\n\n traci_connection = traci.connect(port)\n # traci_connection.setOrder(0)\n self.traci_connection = traci_connection\n traci_connection.simulationStep()\n\n return traci_connection, self.network_gen.length\n except Exception as e:\n print(\"Error during start: {}\".format(traceback.format_exc()))\n error = e\n self.teardown_sumo()\n raise error\n\n def reset(self):\n self.close()\n self.network_gen = NetworkGenerator()\n return self.start_simulation()\n\n def teardown_sumo(self):\n \"\"\"Kill the sumo subprocess instance.\"\"\"\n try:\n if self.sumo_proc is not None:\n self.sumo_proc.kill()\n os.killpg(self.sumo_proc.pid, signal.SIGTERM)\n except Exception as e:\n print(\"Error during teardown: {}\".format(e))\n\n def close(self):\n if self.traci_connection is not None:\n self.traci_connection.close()\n self.network_gen.close()\n del self.network_gen\n # if self.sumo_proc is not None:\n # self.teardown_sumo()\n","repo_name":"Vamsi995/rl-sumo","sub_path":"rlsumo/simulator/traci_simulator.py","file_name":"traci_simulator.py","file_ext":"py","file_size_in_byte":4735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"27618771678","text":"import cdms2 as cdms,MV2 as MV, numpy as np,cdutil\nimport os,sys\nimport calendar\nfrom helpers import get_prefix_name\n\nif len(sys.argv) == 1:\n year = 2016\nelse:\n year = sys.argv[1]\n\n###################### control variables ###################### \ncase_name = get_prefix_name(int(year), False)+str(year)\nisleap = calendar.isleap(int(year))\nif isleap == True:\n leap_year = 1\nelse:\n leap_year = 0\ndata_path=\"/lustre/scratch/leiduan/MERRA2_data/Wind/\"\nlat_num = 361\nlon_num = 576\n###############################################################\n\nif leap_year == 0:\n hour_in_years=8760\n month_days={1:31,2:28,3:31,4:30,5:31,6:30,7:31,8:31,9:30,10:31,11:30,12:31}\n month_days_array = np.array([31,28,31,30,31,30,31,31,30,31,30,31])\nelif leap_year == 1:\n hour_in_years=8784\n month_days={1:31,2:29,3:31,4:30,5:31,6:30,7:31,8:31,9:30,10:31,11:30,12:31}\n month_days_array = np.array([31,29,31,30,31,30,31,31,30,31,30,31])\n\nfd_cam=os.listdir(data_path)\n\nu_ci = 
3 # cut in speed in m/s\nu_r = 12 # rated speed in m/s\nu_co = 25 # cut out speed in m/s\n\nwcf = MV.array(np.zeros([hour_in_years,lat_num,lon_num])); wcf.id='wcf'; wcf.units='1'; wcf.missing_value = 1e20\ncount_num=1\nfor file in fd_cam:\n    if file[:-8] == case_name and file[-4:]=='.nc4':\n        print (file, count_num)\n        f=cdms.open(data_path+file)\n        month = int(file[-8:-6])\n        days = int(file[-6:-4])\n        if month == 1:\n            position = (0 + days-1) * 24\n        else:\n            position = (np.sum(month_days_array[:month-1]) + (days-1)) *24\n        print (month, days, position)\n\n        lat = f.getAxis('lat')\n        lon = f.getAxis('lon')\n        if len(lat) != lat_num or len(lon) != lon_num:\n            print ('lat/lon number error')\n            sys.exit()\n\n        u50m_tmp = f('U50M')\n        v50m_tmp = f('V50M')\n        u10m_tmp = f('U10M')\n        v10m_tmp = f('V10M')\n        ws10m = np.hypot(u10m_tmp,v10m_tmp)\n        ws50m = np.hypot(u50m_tmp,v50m_tmp)\n        wsc = (np.log(ws50m)-np.log(ws10m)) / (np.log(50.)-np.log(10.))\n        ws100m_tmp = ws10m * (100./10.)**wsc\n        ws100m = MV.filled(ws100m_tmp,0.) \n        \n        for hr_idx in range(24):\n            # wind_speed < u_ci: CF = 0.\n            # wind_speed > u_co: CF = 0.\n            # u_ci <= wind_speed < u_r: CF = v**3/u_r**3\n            # u_r <= wind_speed <= u_co: CF = 1.\n            wcf[position+hr_idx, ws100m[hr_idx] < u_ci ] = 0.\n            wcf[position+hr_idx, (ws100m[hr_idx] >= u_ci) & (ws100m[hr_idx] < u_r) ] = ws100m[hr_idx, (ws100m[hr_idx] >= u_ci) & (ws100m[hr_idx] < u_r)]**3 / (u_r**3)\n            wcf[position+hr_idx, (ws100m[hr_idx] >= u_r) & (ws100m[hr_idx] <= u_co) ] = 1.\n            wcf[position+hr_idx, ws100m[hr_idx] > u_co ] = 0.\n        f.close()\n        count_num = count_num+1\n\n# use NetCDF3 Classic format\n#cdms.setNetcdfShuffleFlag(0) # netcdf3 classic...\n#cdms.setNetcdfDeflateFlag(0) # netcdf3 classic...\n#cdms.setNetcdfDeflateLevelFlag(0) # netcdf3 classic...\n\nfout=cdms.open(case_name+'_wcf100m031225.nc','w')\nfout.write(wcf)\nfout.close()\n\nwcf_annual = cdutil.averager(wcf,axis=0,weights='equal')\nwcf_annual.id='wcf_annual'\ngout=cdms.open(case_name+'_wcf100m031225_annual.nc','w')\ngout.write(wcf_annual)\ngout.close()\n\n","repo_name":"carnegie/Create_Wind_and_Solar_Resource_Files","sub_path":"example_MERRA2/example_get_CFs/step0_get_windCF.py","file_name":"step0_get_windCF.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"38"}
{"seq_id":"14625323857","text":"class Solution:\n    def sortArrayByParityII(self, nums: List[int]) -> List[int]:\n        ei, oi = 0, 1\n        while ei < len(nums) and oi < len(nums):\n            if nums[ei] % 2 != 0 and nums[oi] % 2 == 0:\n                nums[ei], nums[oi] = nums[oi], nums[ei]\n                ei += 2\n                oi += 2\n            elif nums[ei] % 2 == 0 and nums[oi] % 2 != 0:\n                ei += 2\n                oi += 2\n            elif nums[ei] % 2 == 0 and nums[oi] % 2 == 0:\n                ei += 2\n            else:\n                oi += 2\n        \n        return nums","repo_name":"kshittijagrawal/Solved-Problems","sub_path":"sort-array-by-parity-ii/sort-array-by-parity-ii.py","file_name":"sort-array-by-parity-ii.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
{"seq_id":"38375583932","text":"lst = [10,3,1,5,19,4]\r\ndef sortList(lst):\r\n    for i in range(0,len(lst)-1):\r\n        for j in range(0,len(lst)-1-i):\r\n            if lst[j]>lst[j+1]:\r\n                lst[j],lst[j+1]=lst[j+1],lst[j]\r\n    return lst\r\ndef findNthlargeElement(lst,sortedList,position):\r\n    #sortList(lst)\r\n    res = sortedList(lst)\r\n    return 
res[-abs(position)]\r\nprint(findNthlargeElement(lst, sortList, 3))\r\n\r\n","repo_name":"psrath/practice-memo","sub_path":"find-nth-highest-element-in-list.py","file_name":"find-nth-highest-element-in-list.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"9966754860","text":"#! /usr/bin/env python\nfrom rouge import Rouge\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport time\nimport datetime\nfrom data_helpers import get_iterator, load_vocab, load_embedding\nfrom model import model\nfrom config import FLAGS\nimport math\n\ndef train(): \n\n    with tf.Graph().as_default():\n        session_conf = tf.ConfigProto(\n            allow_soft_placement=FLAGS.allow_soft_placement,\n            log_device_placement=FLAGS.log_device_placement)\n        sess = tf.Session(config=session_conf)\n        with sess.as_default():\n            # load the vocab and embedding files\n            vocab_table, vocab, vocab_size = load_vocab(FLAGS.vocab_file)\n            embeddings = load_embedding(FLAGS.embed_file, vocab)\n            train_iterator, train_next_batch = get_iterator(FLAGS.train_data_file, vocab_table, FLAGS.batch_size, FLAGS.max_seq_len, padding=True)\n            dev_iterator, dev_next_batch = get_iterator(FLAGS.dev_data_file, vocab_table, 10000000, FLAGS.max_seq_len, padding=True)\n            \n            mode = tf.estimator.ModeKeys.TRAIN\n            mymodel = model(vocab_size, l2_reg_lambda=FLAGS.l2_reg_lambda, mode=mode)\n\n            global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n            learning_rate = 0.001\n            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n            grads_and_vars = optimizer.compute_gradients(mymodel.loss)\n            # clip the gradient norms:\n            cliped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in grads_and_vars]\n            train_op = optimizer.apply_gradients(cliped_gvs, global_step=global_step)\n            \n            # Keep track of gradient values and sparsity (optional)\n            grad_summaries = []\n            for g, v in grads_and_vars:\n                if g is not None:\n                    grad_hist_summary = tf.summary.histogram(\"{}/grad/hist\".format(v.name), g)\n                    sparsity_summary = tf.summary.scalar(\"{}/grad/sparsity\".format(v.name), tf.nn.zero_fraction(g))\n                    grad_summaries.append(grad_hist_summary)\n                    grad_summaries.append(sparsity_summary)\n            grad_summaries_merged = tf.summary.merge(grad_summaries)\n\n            # Output directory for models and summaries\n            # timestamp = str(int(time.time()))\n            out_dir = os.path.abspath(os.path.join(os.path.curdir, tf.flags.FLAGS.model +\"_runs\"))\n            print(\"Writing to {}\\n\".format(out_dir))\n\n            # Summaries for loss\n            loss_summary = tf.summary.scalar(\"loss\", mymodel.loss)\n\n            # Train Summaries\n            train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])\n            train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n            train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n            \n\n            # Dev summaries\n            dev_summary_op = tf.summary.merge([loss_summary])\n            dev_summary_dir = os.path.join(out_dir, \"summaries\", \"dev\")\n            dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)\n\n            # Checkpoint directory. 
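\n            # (Added note) The tf.train.Saver created below keeps at most\n            # FLAGS.num_checkpoints recent checkpoint files under <model>_runs/checkpoints/.\n            # 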
Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)\n def train_step(): \n \"\"\"\n A single training step\n \"\"\"\n [batch] = sess.run([train_next_batch])\n feed_dict = {\n mymodel.tokens: batch['tokens'],\n mymodel.surf_features:batch['features'] ,\n mymodel.input_y: batch['scores'],\n mymodel.batchsize: batch['tokens'].shape[0]\n }\n _, step, summaries, loss = sess.run(\n [train_op, global_step, train_summary_op, mymodel.loss], feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}\".format(time_str, step, loss))\n train_summary_writer.add_summary(summaries, step)\n\n def dev_step(step, writer=None):\n \"\"\"\n Evaluates model on a dev set\n \"\"\"\n sess.run(dev_iterator.initializer)\n while True:\n try:\n [batch] = sess.run([dev_next_batch])\n feed_dict = {\n mymodel.tokens: batch['tokens'],\n mymodel.surf_features:batch['features'],\n mymodel.input_y: batch['scores'],\n mymodel.batchsize: batch['tokens'].shape[0] \n }\n summaries, loss = sess.run(\n [ dev_summary_op, mymodel.loss], feed_dict)\n print('--- dev loss: ', loss)\n if writer:\n writer.add_summary(summaries, step)\n except tf.errors.OutOfRangeError:\n print(\"End of dataset\")\n break\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}\".format(time_str, step, loss))\n if writer:\n writer.add_summary(summaries, step)\n\n # Initialize all variables\n init_ops = [tf.global_variables_initializer(),\n tf.local_variables_initializer(), tf.tables_initializer()]\n sess.run(init_ops)\n for epoch in range(FLAGS.num_epochs): \n # initialize going through dataset\n sess.run(train_iterator.initializer)\n while True:\n try:\n train_step()\n current_step = tf.train.global_step(sess, global_step)\n # evaluate on dev set \n if current_step % FLAGS.evaluate_every == 0:\n print(\"\\nEvaluation:\")\n dev_step(current_step, writer=dev_summary_writer)\n print(\"\")\n \n if current_step % FLAGS.checkpoint_every == 0:\n path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))\n except tf.errors.OutOfRangeError:\n print(\"End of dataset\")\n break\n print('-'*100)\ndef main(argv=None):\n start = time.time()\n train()\n end = time.time()\n print('RUNNING TIME IS: ', end-start)\n\nif __name__ == '__main__':\n tf.app.run()\n\n","repo_name":"atharsefid/Automatic-Slide-Generation","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7034,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"38"} +{"seq_id":"28115676545","text":"class Solution:\r\n def moveZeroes(self, nums: List[int]) -> None:\r\n \"\"\"\r\n Do not return anything, modify nums in-place instead.\r\n \"\"\"\r\n l, r = 0, 0\r\n while r < len(nums):\r\n if nums[r] != 0:\r\n nums[l], nums[r] = nums[r], nums[l]\r\n l += 1\r\n r += 1\r\n#using a for loop instead of a while loop would be little faster so use that.............................\r\n","repo_name":"apratyush777/LeetCode","sub_path":"Easy/283 Move Zeroes.py","file_name":"283 Move 
Zeroes.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"14688229411","text":"from itertools import chain\n\n# cost, damage, armor\nweapons = [(8,4,0), (10,5,0), (25,6,0), (40,7,0), (74,8,0)]\narmors = [(13,0,1), (31,0,2), (53,0,3), (75,0,4), (102,0,5)]\nrings = [(25,1,0), (50,2,0), (100,3,0), (20,0,1), (40,0,2), (80,0,3)]\n\nfor equipment in [armors, rings]:\n equipment.append((0, 0, 0))\n\ndef all_pairs(n):\n return ((i, j) for i in range(n) for j in range(i+1, n))\n\n# return an iterator of (cost, damage, armor)\ndef ring_sets():\n def one_or_two_rings():\n for i1, i2 in all_pairs(len(rings)):\n c1, d1, a1 = rings[i1]\n c2, d2, a2 = rings[i2]\n yield (c1+c2, d1+d2, a1+a2)\n def no_rings():\n yield (0, 0, 0)\n return chain(no_rings(), one_or_two_rings())\n\ndef overall_stats():\n for weapon in weapons:\n for armor in armors:\n for ring_set in ring_sets():\n yield tuple(x+y+z for x,y,z in zip(weapon, armor, ring_set))\n\n# each character is a 3-tuple: (hp, damage, armor)\ndef deal_damage(attacker, defender):\n damage = attacker[1] - defender[2]\n if damage < 0:\n damage = 1\n return (defender[0] - damage, defender[1], defender[2])\n\n# simulate the fight between player and boss\n# returns: the player's health at the end of the fight\ndef simulate_fight(player, boss):\n player_turn = True\n while player[0] > 0 and boss[0] > 0:\n # print(player, boss)\n if player_turn:\n boss = deal_damage(player, boss)\n else:\n player = deal_damage(boss, player)\n player_turn = not player_turn\n return player[0]\n\nsimulate_fight\n\n# hp, damage, armor\nboss = (103, 9, 2)\n\nmin_cost = 1e8\nmax_cost = -1\nfor stat_set in overall_stats():\n cost = stat_set[0]\n result = simulate_fight((100, stat_set[1], stat_set[2]), boss)\n if cost < min_cost and result > 0:\n min_cost = cost\n if cost > max_cost and result <= 0:\n print(stat_set)\n max_cost = cost\n\nprint(min_cost, max_cost) # 121 201","repo_name":"ajnirp/advent-2015","sub_path":"21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34358407874","text":"from abc import ABC\n\nimport numpy as np\nfrom privacy_preserving_svms.abstract_data_privatiser import ABCPrivacyPreserver\n\n\n# class which is responsible for preparing\n# dataset for privatisation and adding\n# laplace noise to increase the privacy of\n# individual entries\nclass LaplacePrivacyPreserver(ABCPrivacyPreserver, ABC):\n # default values\n mean_value = 0.0\n epsilon_value = 1.0\n\n def __init__(self, epsilon_val=1.0):\n # check if provided epsilon value is valid\n if type(epsilon_val) != float:\n raise ValueError('Epsilon value has to be a float')\n if epsilon_val <= 0.0:\n raise ValueError('Epsilon value has to be >0.0')\n self.epsilon_value = epsilon_val\n\n def privatise_single_value(self, data, sensitivity_level=1.0):\n # convert it to a float\n try:\n float_value = float(data)\n except:\n raise ValueError('The value to be sanitised has to be float')\n # define the sensitivity value:\n # how much of an impact can an individual value\n # have on the outcome of the queries?\n sensitivity_level = max(0.001, sensitivity_level)\n # add noise to the value:\n # epsilon attribute represents the privacy budget,\n # which is a measure of how much privacy is being\n # provided to the data. 
Together with the sensitivity\n        # it determines the scale of the noise added to the data.\n        # the noise is drawn from the Laplace distribution\n        noise_value = np.random.laplace(self.mean_value, sensitivity_level / self.epsilon_value, 1)[0]\n        return float(float_value + noise_value)\n\n\nclass DataConverter:\n    # convert np array into a list\n    def convert_from_original(self, original_data):\n        if type(original_data) == np.ndarray:\n            converted_data = []\n            # loop through all the data entries\n            for value in original_data:\n                converted_data.append(self.convert_from_original(value))\n            return converted_data\n        else:\n            return self.convert_to_float(original_data)\n\n    # convert the data to a float\n    def convert_to_float(self, data):\n        try:\n            return float(data)\n        except (TypeError, ValueError):\n            # inform the user if errors occur\n            raise ValueError('Data could not be converted to float')\n\n","repo_name":"Preffet/PPSVM","sub_path":"privacy_preserving_svms/Laplace_dataset_privatiser.py","file_name":"Laplace_dataset_privatiser.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"6460813301","text":"from flask import request\nimport os\nfrom redis import Redis\n\nclass FrameSession():\n    def __init__(self, redisconn: Redis):\n        self.r = redisconn\n    def read(self):\n        token = request.args.get('framesess', '')\n        contents = self.r.get('ndk:framesess:'+token)\n        if contents is None:\n            token = None\n        return token, contents\n    def create(self, initial):\n        assert initial is not None\n        token = os.urandom(8).hex()\n        self.r.set('ndk:framesess:'+token, initial, ex=30*60)\n        return token\n    def write(self, content):\n        assert content is not None\n        token, _ = self.read()\n        if token is not None:\n            self.r.set('ndk:framesess:'+token, content, ex=30*60)\n","repo_name":"szymonszl/nodoka","sub_path":"framesession.py","file_name":"framesession.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"14913767173","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# najczestszy.py\nfrom random import randint\n\n\ndef losuj(n, zakres):\n    lista = []\n    for i in range(n):\n        lista.append(randint(0, zakres))\n    return lista\n\n\ndef policz_elementy(lista, n):\n    wystapienia = dict()\n    \n    for i in lista:\n        licznik = 0\n        if i not in wystapienia:\n            for j in lista:\n                if i == j:\n                    licznik += 1\n            wystapienia[i] = licznik\n    \n    print(wystapienia)\n    \n\ndef main(args):\n    ile = 20\n    zakres = 30\n    lista = losuj(ile, zakres)\n    print(lista)\n    policz_elementy(lista, ile)\n    \n    return 0\n\nif __name__ == '__main__':\n    import sys\n    sys.exit(main(sys.argv))\n","repo_name":"lo1cgsan/rok202021","sub_path":"2AP4_2/python_2/najczestszy.py","file_name":"najczestszy.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"23681335127","text":"#!/usr/bin/python3\n#receives data from a uart rf module and prints what it gets\nimport serial\nfrom time import sleep\nser = serial.Serial('/dev/ttyUSB0', 9600, timeout=None)\ndef pnw(): #testing function reads all from buffer and prints its length and contents\n    n = ser.read_all()\n    print(len(n))\n    print(n)\n    sleep(0.5)\n\ndef getRF(rf_uart, size_of_payload): #added argument to make it more function-like\n    ser.setDTR(True) #if the extra pins on the ttl usb are connected to m0 & m1 on the ebyte module\n    ser.setRTS(True) #then these two lines will send low 
logic to both which puts the module in transmit mode 0\n    while True:\n        n = rf_uart.read(1) #read bytes one at a time\n        if n == b's': #throw away bytes until start byte is encountered\n            data = rf_uart.read(size_of_payload) #read fixed number of bytes\n            n = rf_uart.read(1) #the following byte should be the stop byte\n            if n == b'f':\n                print('success')\n                print(data)\n            else: #if that last byte wasn't the stop byte then something is out of sync\n                print(\"failure\")\n                return -1\n            return data\nprint(\"start\")\nwhile True:\n    #pass serial object to receive from\n    #and size of payload IN BYTES\n    serial_data = getRF(ser, 2) #returns the raw payload bytes (or -1 on a framing error)\n    print(serial_data)\n","repo_name":"CSUFTitanRover/TitanRover2018","sub_path":"homebase/paddle/relay/tests/rec.py","file_name":"rec.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"38"}
{"seq_id":"26758159819","text":" \nimport os\nimport re\nimport glob\n\n\"\"\"\ntry :\n    from setuptools import setup\n    print \"installation with setuptools\"\nexcept :\n\"\"\"\n\nfrom distutils.core import setup\n\n\n\n\ndata_files=[('/usr/share/maggy/data', glob.glob('maggy/data/*.*')),\n            ('/usr/share/maggy/config/demo2', glob.glob('maggy/config/demo2/*.*')),\n            ('/usr/share/maggy/documentation', glob.glob('maggy/documentation/*.*')), \n            ('/usr/share/applications', ['maggy/data/maggy.desktop']),\n            ('/usr/share/locale/fr/LC_MESSAGES', glob.glob('maggy/locale/fr/LC_MESSAGES/*.*')),\n            ('/usr/share/pixmaps', ['maggy/data/maggy$.png']),\n            ('/usr/share/maggy/icons/hicolor/scalable', ['maggy/data/maggy$.svg'])]\n\n\n\"\"\"\n# Freshly generate .mo from .po, add to data_files:\nif os.path.isdir('mo/'):\n    os.system ('rm -r mo/')\nfor name in os.listdir('po'):\n    m = re.match(r'(.+)\\.po$', name)\n    if m != None:\n        lang = m.group(1)\n        out_dir = 'mo/%s/LC_MESSAGES' % lang\n        out_name = os.path.join(out_dir, 'pdfshuffler.mo')\n        install_dir = 'share/locale/%s/LC_MESSAGES/' % lang\n        os.makedirs(out_dir)\n        os.system('msgfmt -o %s po/%s' % (out_name, name))\n        data_files.append((install_dir, [out_name]))\n\"\"\"\nsetup(name='maggy',\n      version='2.3.0.12',\n      author='GAF Software',\n      author_email='Averell7 at sourceforge dot net',\n      description='A gui generator for Sqlite and MySQL',\n      url = 'https://sourceforge.net/projects/maggy',\n      license='GNU GPL-3',\n      scripts=['bin/maggy'],\n      packages=['maggy'],\n      data_files=data_files,\n      #requires=['python-poppler'],     # for distutils\n      #install_requires=['python-poppler'] # for setuptools\n      )\n\"\"\"\n# Clean up temporary files\nif os.path.isdir('mo/'):\n    os.system ('rm -r mo/')\nif os.path.isdir('build/'):\n    os.system ('rm -r build/')\n\"\"\"\n","repo_name":"Averell7/maggy2","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
{"seq_id":"18418091885","text":"\n#1. Escribir un programa que permita procesar datos de pasajeros de viaje en una lista de tuplas con la siguiente\n#forma: (nombre, dni, destino). Por ejemplo:\n#[('Manuel Juarez', 12345678, 'San Juan'), ('Silvana Paredes', 62258472, 'Mendoza')]\n#Además en otra lista de tuplas se almacenan los datos de cada ciudad y el país al que pertenecen. 
Ejemplo:\n#*(‘Buenos Aires’, ‘Argentina’), (‘Lisboa’, ‘Portugal’), (‘Mendoza’, ‘Argentina’)+\n#Hacer un menú iterativo que permita al usuario realizar las siguientes operaciones:\n# - Agregar pasajeros a la lista de viajeros.\n# - Agregar ciudades a la lista de ciudades.\n# - Dado el DNI de un pasajero, ver a qué ciudad viaja.\n# - Dada una ciudad, mostrar la cantidad de pasajeros que viajan a esa ciudad.\n# - Dado el DNI de un pasajero, ver a qué país viaja.\n# - Dado un país, mostrar cuántos pasajeros viajan a ese país.\n# - Salir del programa\n\nlista_pasajeros = []\nlista_ciudades = []\n\nwhile True:\n    print(\"\\nMenú:\")\n    print(\"Ingrese 1 para agregar pasajeros a la lista\")\n    print(\"Ingrese 2 para agregar ciudades a la lista\")\n    print(\"Ingrese 3 para ingresar el dni de la persona para ver a que ciudad viaja\")\n    print(\"Ingrese 4 para mostrar la cantidad de pasajeros que van a tal ciudad\")\n    print(\"Ingrese 5 para ingresar el dni de la persona para ver a que pais viaja\")\n    print(\"Ingrese 6 para mostrar la cantidad de pasajeros que van a tal pais\")\n    print(\"Ingrese 7 para salir del programa\")\n\n    option = int(input(\"Ingrese la opcion deseada: \"))\n\n    if option == 1:\n        name = str(input(\"Ingrese el nombre de la persona: \"))\n        dni = int(input(\"Ingrese el DNI de la persona: \"))\n        destino = str(input(\"Ingrese el destino del pasajero: \"))\n        lista_pasajeros.append((name, dni, destino))\n    \n    elif option == 2:\n        ciudad = str(input(\"Agrege ciudades: \"))\n        pais = str(input(\"Ingrese el pais de la ciudad: \"))\n        lista_ciudades.append((ciudad, pais))\n\n    elif option == 3:\n        dni = int(input(\"Ingrese el DNI del pasajero: \"))\n        found = False\n        for pasajero in lista_pasajeros:\n            if pasajero[1] == dni:\n                destiny_city = pasajero[2]\n                found = True\n                break\n        if (found):\n            print(f\"El pasajero viaja a {destiny_city}\")\n        else:\n            print(\"DNI no encontrado\")\n\n    elif option == 4:\n        city = str(input(\"Ingrese la ciudad para ver cuantas personas viajan ahi: \"))\n        city = city.lower()\n        counter = 0\n        for i in lista_pasajeros:\n            if (i[2].lower()==city):\n                counter=counter+1\n        print(f\"La cantidad de viajeros que van a {city} son {counter}\")\n\n    elif option == 5:\n        dni = int(input(\"Ingrese el DNI de la persona para ver a que pais viaja: \"))\n        found = False\n        for pasajero in lista_pasajeros:\n            if pasajero[1] == dni:\n                destiny_city = pasajero[2]\n                found = True\n                break\n        if (found):\n            for ciudad, pais in lista_ciudades:\n                if ciudad == destiny_city:\n                    print(f\"El pasajero va a viajar a {pais}\")\n                    break\n            \n        else:\n            print(\"DNI no encontrado\")\n\n    elif option == 6:\n        pais = str(input(\"Ingrese pais para ver cuantas personas viajan ahi: \"))\n        pais = pais.lower()\n        counter = 0\n        for ciudad, pais_ciudad in lista_ciudades:\n            if (pais_ciudad.lower() == pais):\n                for pasajero in lista_pasajeros:\n                    if (pasajero[2] == ciudad):\n                        counter = counter + 1\n        print(f\"La cantidad de viajeros que van a {pais} son {counter}\")\n\n    elif option == 7:\n        print(\"Saliendo del programa\")\n        break\n    else:\n        print(\"Ingrese una opcion valida: \")\n\n\n#2. Suponer una lista con datos de las compras hechas por clientes de una empresa a lo largo de un mes, la cual\n#contiene tuplas con información de cada venta: (cliente, día del mes, monto, domicilio del cliente). Ejemplo:\n#*(‘Nuria Costa’, 5, 1234.5,’Calle 1 – 456’), (‘Jorge Russo’, 7, 3999, ‘Calle 2 – 741’)+\n#Escribir una función que reciba como parámetro una lista con el formato mencionado anteriormente y\n#retorne los domicilios de cada cliente al cual se le debe enviar una factura de compra. Notar que cada cliente\n#puede haber hecho más de una compra en el mes, por lo que la función debe retornar una estructura que\n#contenga cada domicilio una sola vez.\n\nlista_compras =[(\"Santiago Bazan\", 19, 10.500, \"Carrillo 3090\"), (\"Jorge Garcia\", 2, 18.573, \"Lemos 59\"), (\"Mariela Gonzales\", 15, 18.574, \"En su casa\")]\n\ndef domicilios_a_facturar(compras):\n    # un conjunto garantiza que cada domicilio aparezca una sola vez\n    return list({venta[3] for venta in compras})\n\nlista_home = domicilios_a_facturar(lista_compras)\nprint(lista_home)","repo_name":"santibazan/programacion-Rigoni","sub_path":"ejercicio_en_clase_variables_dimensionadas.py","file_name":"ejercicio_en_clase_variables_dimensionadas.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"27703700359","text":"#!/usr/bin/python\n\nimport json\nimport time\nfrom prometheus_client import start_http_server\nfrom prometheus_client.core import GaugeMetricFamily, REGISTRY\nimport argparse\nimport yaml\nfrom objectpath import *\nimport logging\nimport dateutil.parser\nimport asyncio\nimport aiohttp\nimport urllib.parse\n# -*- coding: utf-8 -*-\n\nclass MyRenaultServiceException(Exception):\n    # raised when a Renault/Gigya endpoint answers with an error message\n    pass\n\nclass JsonPathCollector(object):\n    def __init__(self, config):\n        self._config = config\n\n    def collect(self):\n        config = self._config\n        result_tree = Tree(JSON)\n        for metric_config in config['metrics']:\n            metric_name = \"{}_{}\".format(config['metric_name_prefix'], metric_config['name'])\n            metric_description = metric_config.get('description', '')\n            metric_path = metric_config['path']\n            value = result_tree.execute(metric_path)\n            if(metric_name == \"zoe_lastUpdateTime\"):\n                value = dateutil.parser.parse(result_tree.execute(metric_path)).strftime('%s')\n                logging.debug(\"metric_name: {}, value for '{}' : {}\".format(metric_name, metric_path, value))\n                metric = GaugeMetricFamily(metric_name, metric_description, value=value)\n                yield metric\n            else:\n                logging.debug(\"metric_name: {}, value for '{}' : {}\".format(metric_name, metric_path, value))\n                metric = GaugeMetricFamily(metric_name, metric_description, value=value)\n                yield metric\n\nasync def get_android_config(session, location):\n    url = 'https://renault-wrd-prod-1-euw1-myrapp-one.s3-eu-west-1.amazonaws.com/configuration/android/config_' + location + '.json'\n    async with session.get(url) as response:\n        responsetext = await response.text()\n        if responsetext == '':\n            responsetext = '{}'\n        jsonresponse = json.loads(responsetext)\n        if 'message' in jsonresponse:\n            raise MyRenaultServiceException(jsonresponse['message'])\n        print('done')\n        return jsonresponse\n\nasync def get_gigyasession(session, gigyarooturl, gigyaapikey, loginID, password):\n    payload = {'loginID': loginID, 'password': password, 'apiKey': gigyaapikey}\n    url = gigyarooturl + '/accounts.login?' + urllib.parse.urlencode(payload)\n    async with session.get(url) as response:\n        responsetext = await response.text()\n        if responsetext == '':\n            responsetext = '{}'\n        jsonresponse = json.loads(responsetext)\n        if 'message' in jsonresponse:\n            raise MyRenaultServiceException(jsonresponse['message'])\n        return jsonresponse\n\nasync def get_gigyaaccount(session, gigyarooturl, gigyaapikey, gigyacookievalue):\n    payload = {'oauth_token': gigyacookievalue}\n    url = gigyarooturl + '/accounts.getAccountInfo?' 
+ urllib.parse.urlencode(payload)\n    async with session.get(url) as response:\n        responsetext = await response.text()\n        if responsetext == '':\n            responsetext = '{}'\n        jsonresponse = json.loads(responsetext)\n        if 'message' in jsonresponse:\n            raise MyRenaultServiceException(jsonresponse['message'])\n        return jsonresponse\n\nasync def get_gigyajwt(session, gigyarooturl, gigyaapikey, gigyacookievalue):\n    payload = {'oauth_token': gigyacookievalue, 'fields': 'data.personId,data.gigyaDataCenter', 'expiration': 900}\n    url = gigyarooturl + '/accounts.getJWT?' + urllib.parse.urlencode(payload)\n    async with session.get(url) as response:\n        responsetext = await response.text()\n        if responsetext == '':\n            responsetext = '{}'\n        jsonresponse = json.loads(responsetext)\n        if 'message' in jsonresponse:\n            raise MyRenaultServiceException(jsonresponse['message'])\n        return jsonresponse\n\nasync def get_kamereonperson(session, kamereonrooturl, kamereonapikey, gigya_jwttoken, personId):\n    payload = {'country': 'FR'}\n    headers = {'x-gigya-id_token': gigya_jwttoken, 'apikey': kamereonapikey}\n    url = kamereonrooturl + '/commerce/v1/persons/' + personId + '?' + urllib.parse.urlencode(payload)\n    async with session.get(url, headers=headers) as response:\n        responsetext = await response.text()\n        if responsetext == '':\n            responsetext = '{}'\n        jsonresponse = json.loads(responsetext)\n        if 'message' in jsonresponse:\n            raise MyRenaultServiceException(jsonresponse['message'])\n        return jsonresponse\n\nasync def get_kamereontoken(session, kamereonrooturl, kamereonapikey, gigya_jwttoken, accountId):\n    payload = {'country': 'FR'}\n    headers = {'x-gigya-id_token': gigya_jwttoken, 'apikey': kamereonapikey}\n    url = kamereonrooturl + '/commerce/v1/accounts/' + accountId + '/kamereon/token?' 
+ urllib.parse.urlencode(payload)\n    async with session.get(url, headers=headers) as response:\n        responsetext = await response.text()\n        if responsetext == '':\n            responsetext = '{}'\n        jsonresponse = json.loads(responsetext)\n        if 'message' in jsonresponse:\n            raise MyRenaultServiceException(jsonresponse['message'])\n        return jsonresponse\n\nasync def get_status(session, kamereonrooturl, kamereonapikey, gigya_jwttoken, kamereonaccesstoken, vin, typeapi):\n    headers = {'x-gigya-id_token': gigya_jwttoken, 'apikey': kamereonapikey, 'x-kamereon-authorization' : 'Bearer ' + kamereonaccesstoken}\n    url = kamereonrooturl + '/commerce/v1/accounts/kmr/remote-services/car-adapter/v1/cars/' + vin + '/' + typeapi\n    async with session.get(url, headers=headers) as response:\n        responsetext = await response.text()\n        if responsetext == '':\n            responsetext = '{}'\n        jsonresponse = json.loads(responsetext)\n        if 'message' in jsonresponse:\n            raise MyRenaultServiceException(jsonresponse['message'])\n        return jsonresponse\n\nasync def main():\n    async with aiohttp.ClientSession() as session:\n        await mainwithsession(session)\n\nasync def mainwithsession(session):\n    global JSON\n    android_config = await get_android_config(session, CREDENTIALS_RenaultServiceLocation)\n    with open('android_config.json', 'w') as outfile:\n        json.dump(android_config, outfile)\n    \n    gigyarooturl = android_config['servers']['gigyaProd']['target']\n    gigyaapikey = android_config['servers']['gigyaProd']['apikey']\n\n    kamereonrooturl = android_config['servers']['wiredProd']['target']\n    kamereonapikey = android_config['servers']['wiredProd']['apikey']\n    \n    gigya_session = await get_gigyasession(session, gigyarooturl, gigyaapikey, CREDENTIALS_RenaultServicesUsername, CREDENTIALS_RenaultServicesPassword)\n    with open('gigya_session.json', 'w') as outfile:\n        json.dump(gigya_session, outfile)\n    \n    gigyacookievalue = gigya_session['sessionInfo']['cookieValue']\n\n    gigya_account = await get_gigyaaccount(session, gigyarooturl, gigyaapikey, gigyacookievalue)\n    with open('gigya_account.json', 'w') as outfile:\n        json.dump(gigya_account, outfile)\n\n    gigya_jwt = await get_gigyajwt(session, gigyarooturl, gigyaapikey, gigyacookievalue)\n    with open('gigya_jwt.json', 'w') as outfile:\n        json.dump(gigya_jwt, outfile)\n    \n    gigya_jwttoken = gigya_jwt['id_token']\n    \n    kamereonpersonid = gigya_account['data']['personId']\n    \n    kamereon_person = await get_kamereonperson(session, kamereonrooturl, kamereonapikey, gigya_jwttoken, kamereonpersonid)\n    with open('kamereon_person.json', 'w') as outfile:\n        json.dump(kamereon_person, outfile)\n    kamereonaccountid = kamereon_person['accounts'][0]['accountId']\n\n    kamereon_token = await get_kamereontoken(session, kamereonrooturl, kamereonapikey, gigya_jwttoken, kamereonaccountid)\n    with open('kamereon_token.json', 'w') as outfile:\n        json.dump(kamereon_token, outfile)\n    \n    kamereonaccesstoken = kamereon_token['accessToken']\n\n    kamereon_battery = await get_status(session, kamereonrooturl, kamereonapikey, gigya_jwttoken, kamereonaccesstoken, CREDENTIALS_VIN,\"battery-status\")\n    JSON = json.loads(json.dumps(kamereon_battery))\n\n    kamereon_cockpit = await get_status(session, kamereonrooturl, kamereonapikey, gigya_jwttoken, kamereonaccesstoken, CREDENTIALS_VIN, \"cockpit\")\n    jsonToAppend = json.loads(json.dumps(kamereon_cockpit))\n    JSON['data']['attributes'].update(jsonToAppend['data']['attributes'])\n\n\ndef getRenaultAPI():\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(main())\n\nif 
__name__ == \"__main__\":\n global CREDENTIALS_RenaultServiceLocation,CREDENTIALS_RenaultServicesUsername,CREDENTIALS_RenaultServicesPassword,CREDENTIALS_VIN\n parser = argparse.ArgumentParser(description='Expose metrics bu jsonpath for configured url')\n parser.add_argument('config_file_path', help='Path of the config file')\n args = parser.parse_args()\n with open(args.config_file_path) as config_file:\n config = yaml.load(config_file, yaml.SafeLoader)\n log_level = config.get('log_level')\n CREDENTIALS_RenaultServiceLocation = config.get('RenaultServiceLocation')\n CREDENTIALS_RenaultServicesUsername = config.get('RenaultServiceUsername')\n CREDENTIALS_RenaultServicesPassword = config.get('RenaultServicePassword')\n CREDENTIALS_VIN = config.get('RenaultServiceVIN')\n API_Refresh = config.get('RenaultServiceAPIRefresh')\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.getLevelName(log_level.upper()))\n exporter_port = config.get('exporter_port')\n logging.debug(\"Config %s\", config)\n logging.info('Starting server on port %s', exporter_port)\n start_http_server(exporter_port)\n getRenaultAPI()\n REGISTRY.register(JsonPathCollector(config))\n while True:\n time.sleep(API_Refresh)\n getRenaultAPI()","repo_name":"LacazeThomas/renault_api_exporter","sub_path":"renault_api_exporter/exporter.py","file_name":"exporter.py","file_ext":"py","file_size_in_byte":9809,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"3595976689","text":"from Base_backtest import *\r\nimport pandas as pd\r\n\r\n\r\n\r\nclass level_adjust_strategy(base_strategy):\r\n def __init__(self,initial,save_result_to_folder,code_list, s_code, iv_type = 'vix',cost_opt_trading = None):\r\n super().__init__(initial,save_result_to_folder,code_list)\r\n # gamma的范围\r\n self.gamma_float_up = 1.15\r\n self.gamma_float_down = 0.85\r\n # vega的范围\r\n self.vega_float_up = 1.15\r\n self.vega_float_down = 0.85\r\n \r\n #回撤范围\r\n self.res_param()\r\n self.s_code = s_code\r\n\r\n #换仓最少天数\r\n self.change_day = 7\r\n #Vega, Gamma信号的excel\r\n self.df_vega_signal = pd.read_csv('D:/Harper/option_strategy/signal_20210802/vega_敞口3_20210802.csv',date_parser=True)\r\n self.df_gamma_signal = pd.read_csv('D:/Harper/option_strategy/signal_20210804/gamma_1500_调整平滑方式_iv调整为365年化.csv',date_parser=True)\r\n self.df_vega_signal['Date'] =pd.to_datetime(self.df_vega_signal['Date'])\r\n self.df_gamma_signal['Date'] = pd.to_datetime(self.df_gamma_signal['Date'])\r\n\r\n\r\n self.iv_type = iv_type\r\n\r\n if self.iv_type != 'vix':\r\n self.df_iv = pd.read_excel('D:/Harper/实习文件整理_张依依/HV_percentile/iv_insert_50etf_0728.xlsx',date_parser=True)\r\n self.df_iv['Date'] = pd.to_datetime(self.df_iv['Date'],format ='%Y%m%d')\r\n if cost_opt_trading != None:\r\n self.cost_opt_trading = cost_opt_trading\r\n\r\n def res_param(self):\r\n\r\n \"\"\"\r\n Parameters\r\n ----------\r\n vol_up : float -- 确定vega敞口(-)\r\n 波动率上行压力测试\r\n vol_down : float -- 确定vega敞口(+)\r\n 波动率下行压力测试\r\n s_up : float -- 确定gamma敞口(-)\r\n underlying 涨跌幅\r\n s_down : float -- 确定gamma敞口(+)\r\n underlying 涨跌幅\r\n \"\"\"\r\n self.vol_up = 0.1\r\n self.vol_down = 0.05\r\n self.s_up = 0.05\r\n self.s_down = 0.05\r\n #vega 总敞口的限制\r\n self.vega_min,self.vega_max = self.vega_res()\r\n\r\n def vega_res(self, max_drawdown_short=0.02, max_drawdown_long=0.01):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n max_drawdown_short : float(>0), optional\r\n short vega最大回撤. 
The default is 0.02.\r\n max_drawdown_long : float(>0), optional\r\n long vega最大回撤. The default is 0.01.\r\n\r\n Returns\r\n -------\r\n vega_min : float\r\n vega敞口限制(short).\r\n vega_max : float\r\n vega敞口限制(long).\r\n \"\"\"\r\n #原来的方法\r\n vega_min = - (self.initial * max_drawdown_short) / self.vol_up\r\n vega_max = (self.initial * max_drawdown_long) / self.vol_down\r\n return (vega_min*3, vega_max/3)\r\n\r\n\r\n def vega_30days(self,gamma,s,iv,T = 30):\r\n vega_cal = gamma*iv*s**2 * T/365\r\n return vega_cal\r\n\r\n\r\n def gamma_res(self, s, iv, max_drawdown_up_s=0.02, max_drawdown_down=0.01, t=5):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n s : float\r\n underlying\r\n iv : float\r\n implied volatility\r\n max_drawdown_up_s : float(>0), optional\r\n short gamma最大回撤. The default is 0.02.\r\n max_drawdown_down : float(>0), optional\r\n long gamma最大回撤(Theta PnL 决定). The default is 0.01.\r\n t : TYPE, optional\r\n DESCRIPTION. The default is 5.\r\n\r\n Returns\r\n -------\r\n gamma_min : float\r\n gamma敞口限制(short).\r\n gamma_max : float\r\n gamma敞口限制(long).\r\n\r\n \"\"\"\r\n # 原来的方法\r\n gamma_min = - 2 * (self.initial * max_drawdown_up_s) / (self.s_up * s) ** 2\r\n # use Theta PnL来确定 Gamma(+) 敞口, theta = 0.5*gamma*(s)**2*(iv)**2\r\n theta_down = max_drawdown_down * self.initial\r\n theta = theta_down / (t / 365)\r\n gamma_max = 2 * theta / (s ** 2) / (iv ** 2)\r\n return (gamma_min*3, gamma_max*3)\r\n\r\n\r\n\r\n def signal(self, date):\r\n vega_signal = self.df_vega_signal.loc[self.df_vega_signal['Date']==pd.to_datetime(date),'vega_iv'].values[0]\r\n gamma_signal = self.df_gamma_signal.loc[self.df_gamma_signal['Date'] == pd.to_datetime(date), 'gamma_signal'].values[0]\r\n return (vega_signal, gamma_signal)\r\n\r\n def load_all_df(self):\r\n self.df_option,self.df_volatility,self.df_rf = self.data_prepare(database_address='85', database_name=['contract_info_daily','df_vol_50etf','rf'],\r\n condition = [\"where 期权标的 = '%s'\"%self.s_code.upper(),'None','None'])\r\n\r\n self.df_s = self.data_prepare(database_address='wind', code=self.s_code.upper(), symbol=self.code_list[self.s_code]['symbol'],\r\n start_date=self.start_date.strftime('%Y%m%d'))\r\n\r\n self.df_option['日期'] = pd.to_datetime(self.df_option['日期'])\r\n self.df_option['期权代码'] = self.df_option['期权代码'].astype('str')\r\n self.df_volatility['日期'] = pd.to_datetime(self.df_volatility['日期'])\r\n self.df_rf['日期'] = pd.to_datetime(self.df_rf['日期'])\r\n self.df_s['交易日期'] = pd.to_datetime(self.df_s['交易日期'])\r\n\r\n def cal_position(self,date,s,hv,rf,pre_position, focus = 'vega'):\r\n #选定近月+远月合约,决定要不要换仓\r\n\r\n df_t1 = self.select_option_contract(date, self.df_option, ex_A=True, require_maturity=self.change_day, require_expire_date='t1', require_k=s, s = s, hv=hv, rf=rf)\r\n df_t2 = self.select_option_contract(date, self.df_option, ex_A=True, require_maturity=self.change_day,\r\n require_expire_date='t2', require_k=s, s=s, hv=hv, rf=rf)\r\n #判断是不是没有不除权除息的合约\r\n if len(df_t1) == 0:\r\n df_t1 = self.select_option_contract(date, self.df_option, ex_A=False, require_maturity=self.change_day,\r\n require_expire_date='t1', require_k=s, s=s, hv=hv, rf=rf)\r\n if len(df_t2) == 0:\r\n df_t2 = self.select_option_contract(date, self.df_option, ex_A=False, require_maturity=self.change_day,\r\n require_expire_date='t2', require_k=s, s=s, hv=hv, rf=rf)\r\n\r\n\r\n call_t1 = df_t1.loc[df_t1['交易代码'].str.contains('C')].to_dict('records')[0]\r\n put_t1 = df_t1.loc[df_t1['交易代码'].str.contains('P')].to_dict('records')[0]\r\n call_t2 = 
df_t2.loc[df_t2['交易代码'].str.contains('C')].to_dict('records')[0]\r\n put_t2 = df_t2.loc[df_t2['交易代码'].str.contains('P')].to_dict('records')[0]\r\n t1,t2 = df_t1.loc[:,'maturity_days'].drop_duplicates().values[0],df_t2.loc[:,'maturity_days'].drop_duplicates().values[0]\r\n\r\n if pre_position is not None: # 不是第一天开仓(已经有持仓)\r\n df_pre_position = self.select_option_contract(date,self.df_option, selected_code = pre_position['Code'].tolist(), ex_A=False)\r\n pre_position_t = df_pre_position.loc[:,'maturity_days'].drop_duplicates().sort_values().reset_index(drop=True)\r\n\r\n if len(pre_position_t) == 1:\r\n if self.atm(df_pre_position.loc[df_pre_position['maturity_days']==pre_position_t[0],'行权价'].drop_duplicates().values[0],s): \r\n if pre_position_t[0] == t1: #近月不用换合约\r\n call_t1 = df_pre_position.loc[df_pre_position['交易代码'].str.contains('C')].to_dict('records')[0]\r\n put_t1 = df_pre_position.loc[df_pre_position['交易代码'].str.contains('P')].to_dict('records')[0]\r\n elif pre_position_t[0] == t2: #远月不用换合约\r\n call_t2 = df_pre_position.loc[df_pre_position['交易代码'].str.contains('C')].to_dict('records')[0]\r\n put_t2 = df_pre_position.loc[df_pre_position['交易代码'].str.contains('P')].to_dict('records')[0]\r\n else:\r\n pre_position_t1 = pre_position_t[0]\r\n pre_position_t2 = pre_position_t[1]\r\n\r\n #决定要不要换仓\r\n if self.atm(df_pre_position.loc[df_pre_position['maturity_days']==pre_position_t1,'行权价'].drop_duplicates().values[0],s): \r\n if pre_position_t1 == t1: #近月不用换合约\r\n call_t1 = df_pre_position.loc[(df_pre_position['交易代码'].str.contains('C')) \r\n & (df_pre_position['maturity_days']==pre_position_t1)].to_dict('records')[0]\r\n put_t1 = df_pre_position.loc[(df_pre_position['交易代码'].str.contains('P')) \r\n & (df_pre_position['maturity_days']==pre_position_t1)].to_dict('records')[0]\r\n elif pre_position_t1== t2: #远月不用换合约\r\n call_t2 = df_pre_position.loc[(df_pre_position['交易代码'].str.contains('C')) \r\n & (df_pre_position['maturity_days']==pre_position_t2)].to_dict('records')[0]\r\n put_t2 = df_pre_position.loc[(df_pre_position['交易代码'].str.contains('P')) \r\n & (df_pre_position['maturity_days']==pre_position_t2)].to_dict('records')[0]\r\n \r\n \r\n if self.atm(df_pre_position.loc[df_pre_position['maturity_days']==pre_position_t2,'行权价'].drop_duplicates().values[0],s): \r\n if pre_position_t2 == t1: #近月不用换合约\r\n call_t1 = df_pre_position.loc[(df_pre_position['交易代码'].str.contains('C'))\r\n & (df_pre_position['maturity_days']==pre_position_t2)].to_dict('records')[0]\r\n put_t1 = df_pre_position.loc[(df_pre_position['交易代码'].str.contains('P'))\r\n & (df_pre_position['maturity_days']==pre_position_t2)].to_dict('records')[0]\r\n elif pre_position_t2== t2: #远月不用换合约\r\n call_t2 = df_pre_position.loc[(df_pre_position['交易代码'].str.contains('C')) \r\n & (df_pre_position['maturity_days']==pre_position_t2)].to_dict('records')[0]\r\n put_t2 = df_pre_position.loc[(df_pre_position['交易代码'].str.contains('P')) \r\n & (df_pre_position['maturity_days']==pre_position_t2)].to_dict('records')[0]\r\n\r\n\r\n\r\n for each in [call_t1,put_t1,call_t2,put_t2]:\r\n\r\n each['Delta'],each['Gamma'],each['Theta'],each['Vega'] = self.Greeks(each['maturity_days']/365, s, hv, rf, each['行权价'], each['option_type'], choice = 'all')\r\n\r\n #delta对冲后远近月的gamma,vega\r\n delta_ratio_t1,delta_ratio_t2 = abs(call_t1['Delta']/put_t1['Delta']),abs(call_t2['Delta']/put_t2['Delta'])\r\n gamma_t1,gamma_t2 = call_t1['Gamma'] * call_t1['合约单位'] + delta_ratio_t1 * put_t1['Gamma'] * put_t1['合约单位'],\\\r\n call_t2['Gamma'] * call_t2['合约单位'] + delta_ratio_t2 * 
put_t2['Gamma'] * put_t2['合约单位']\r\n vega_t1,vega_t2 = call_t1['Vega'] * call_t1['合约单位'] + delta_ratio_t1 * put_t1['Vega'] * put_t1['合约单位'],\\\r\n call_t2['Vega'] * call_t2['合约单位'] + delta_ratio_t2 * put_t2['Vega'] * put_t2['合约单位']\r\n\r\n #判断近月满足target_vega之后满不满足 gamma 敞口, 不满足用 t2 配\r\n\r\n if focus == 'vega':\r\n minor = 'gamma'\r\n elif focus == 'gamma':\r\n minor = 'vega'\r\n \r\n ratio_t1= eval('self.target_%s'%focus) / eval('%s_t1'%focus)\r\n test_minor_t1 = ratio_t1 * eval('%s_t1'%minor)\r\n \r\n if test_minor_t1 in range(round(eval('self.target_%s_down'%minor)-1),round((eval('self.target_%s_up'%minor)+1))):\r\n call_t1['No'],put_t1['No'] = round(ratio_t1), round(ratio_t1*delta_ratio_t1)\r\n call_t1['Num'], put_t1['Num'] = call_t1['No'] * call_t1['合约单位'],put_t1['No'] * put_t1['合约单位']\r\n return(call_t1,put_t1)\r\n else:\r\n #如果t2>65天,不做t2 合约 并且gamma限制住\r\n if t2 > 65:\r\n ratio_onet_minor = eval('self.target_%s'%minor) / eval('%s_t1'%minor)\r\n call_t1['No'], put_t1['No'] = round(ratio_onet_minor), round(ratio_onet_minor * delta_ratio_t1)\r\n call_t1['Num'], put_t1['Num'] = call_t1['No'] * call_t1['合约单位'], put_t1['No'] * put_t1['合约单位']\r\n return (call_t1, put_t1)\r\n\r\n\r\n ratio_t1, ratio_t2 = self.cal_ratio([self.target_vega,self.target_gamma],[vega_t1, gamma_t1],[vega_t2, gamma_t2])\r\n\r\n call_t1['No'],put_t1['No'] = round(ratio_t1), round(ratio_t1*delta_ratio_t1)\r\n call_t1['Num'], put_t1['Num'] = call_t1['No'] * call_t1['合约单位'],put_t1['No'] * put_t1['合约单位']\r\n\r\n call_t2['No'],put_t2['No'] = round(ratio_t2), round(ratio_t2*delta_ratio_t2)\r\n call_t2['Num'], put_t2['Num'] = call_t2['No'] * call_t2['合约单位'],put_t2['No'] * put_t2['合约单位']\r\n\r\n # 如果只有一边有仓位的话, 另一边也为0;不看这个的仓位\r\n if call_t1['No'] * put_t1['No'] == 0:\r\n return (call_t2,put_t2)\r\n\r\n if call_t2['No'] * put_t2['No'] == 0:\r\n return (call_t1,put_t1)\r\n\r\n return (call_t1,put_t1,call_t2,put_t2)\r\n\r\n \r\n '''\r\n #原来的写法\r\n ratio_t1 = self.target_vega / vega_t1\r\n test_gamma_t1 = ratio_t1 * gamma_t1\r\n\r\n if (test_gamma_t1 in range(round(self.target_gamma_down)-1,round(self.target_gamma_up)+1)):\r\n call_t1['No'],put_t1['No'] = round(ratio_t1), round(ratio_t1*delta_ratio_t1)\r\n call_t1['Num'], put_t1['Num'] = call_t1['No'] * call_t1['合约单位'],put_t1['No'] * put_t1['合约单位']\r\n return(call_t1,put_t1)\r\n else:\r\n #如果t2>65天,不做t2 合约 并且gamma限制住\r\n if t2 > 65:\r\n ratio_onet_gamma = self.target_gamma / gamma_t1\r\n call_t1['No'], put_t1['No'] = round(ratio_onet_gamma), round(ratio_onet_gamma * delta_ratio_t1)\r\n call_t1['Num'], put_t1['Num'] = call_t1['No'] * call_t1['合约单位'], put_t1['No'] * put_t1['合约单位']\r\n return (call_t1, put_t1)\r\n\r\n\r\n ratio_t1, ratio_t2 = self.cal_ratio([self.target_vega,self.target_gamma],[vega_t1, gamma_t1],[vega_t2, gamma_t2])\r\n\r\n call_t1['No'],put_t1['No'] = round(ratio_t1), round(ratio_t1*delta_ratio_t1)\r\n call_t1['Num'], put_t1['Num'] = call_t1['No'] * call_t1['合约单位'],put_t1['No'] * put_t1['合约单位']\r\n\r\n call_t2['No'],put_t2['No'] = round(ratio_t2), round(ratio_t2*delta_ratio_t2)\r\n call_t2['Num'], put_t2['Num'] = call_t2['No'] * call_t2['合约单位'],put_t2['No'] * put_t2['合约单位']\r\n\r\n return (call_t1,put_t1,call_t2,put_t2)\r\n '''\r\n\r\n # 判断钱够不够用\r\n def is_moneny_enough(self,s,s_yes,position_option, position_fut = None,initial_require = 0.8):\r\n '''\r\n :param s: 标的今天价格\r\n :param s_yes: 标的t-1价格\r\n :param position_option: option的持仓\r\n :param position_fut: future的持仓\r\n :param initial_require: 最多占规模的%\r\n :return: dict --- 和cal position 一样\r\n '''\r\n 
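# position_option arrives as the list of option-leg dicts built in cal_position (carrying keys such as\r\n        # '收盘价', 'Num', '到期日', '交易代码'); converting it to a DataFrame lets the option-value and\r\n        # per-maturity margin sums below be computed column-wise.\r\n        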
position_option = pd.DataFrame(position_option)\r\n position_option.loc[:,'Margin'] = 0\r\n option_value = (position_option['收盘价']*position_option['Num']).sum()\r\n t_list = position_option['到期日'].drop_duplicates().reset_index(drop=True)\r\n test_margin = 0\r\n self.remark = ''\r\n for each_t in t_list:\r\n each_call = position_option.loc[(position_option['到期日']==each_t)&(position_option['交易代码'].str.contains('C'))].to_dict('records')[0]\r\n each_put = position_option.loc[(position_option['到期日'] == each_t) & (position_option['交易代码'].str.contains('P'))].to_dict('records')[0]\r\n test_margin += self.margin(strategy_type = 'straddle_delta_hedge',strike = each_call['行权价'],settle_yes_call= each_call['前结算价'], settle_yes_put= each_put['前结算价'],\r\n num_call= each_call['Num'], num_put= each_put['Num'],s_yes= s_yes, s_today= s)\r\n if position_fut != None:\r\n position_fut.loc[:, 'Margin'] = 0\r\n test_margin += self.margin(strategy_type='futures', fut_price= position_fut['fut_price'], no_fut= position_fut['no_fut'], contract_unit= position_fut['contract_unit'])\r\n\r\n # 计算保证金和option卖出的钱 够不够用\r\n if (option_value + test_margin > self.initial * initial_require) & (option_value + test_margin > 0):\r\n restrict_ratio = (self.initial * initial_require) / (option_value + test_margin)\r\n self.remark = '原仓位所需钱 > %s'%initial_require\r\n print('----',self.remark,'----')\r\n position_option.loc[:,'No'] = (position_option.loc[:,'No'] * restrict_ratio).astype('int')\r\n position_option.loc[:, 'Num'] = position_option.loc[:,'No'] * position_option.loc[:,'合约单位']\r\n\r\n #计算最后的保证金\r\n for each_t in t_list:\r\n each_call = position_option.loc[\r\n (position_option['到期日'] == each_t) & (position_option['交易代码'].str.contains('C'))].to_dict('records')[0]\r\n each_put = position_option.loc[\r\n (position_option['到期日'] == each_t) & (position_option['交易代码'].str.contains('P'))].to_dict('records')[0]\r\n position_option.loc[position_option['到期日'] == each_t, 'Margin'] = 1/2 * self.margin(strategy_type='straddle_delta_hedge', strike=each_call['行权价'],\r\n settle_yes_call=each_call['前结算价'], settle_yes_put=each_put['前结算价'],\r\n num_call=each_call['Num'], num_put=each_put['Num'], s_yes=s_yes, s_today=s)\r\n\r\n if position_fut != None:\r\n position_fut.loc[:,'no_fut'] = (position_fut.loc[:,'no_fut'] * restrict_ratio).astype('int')\r\n position_fut.loc[:,'Margin'] = self.margin(strategy_type='futures', fut_price=position_fut['fut_price'], no_fut=position_fut['no_fut'],\r\n contract_unit=position_fut['contract_unit'])\r\n return position_option.to_dict(orient='records'), position_fut.to_dict(orient='records')\r\n else:\r\n return position_option.to_dict(orient='records')\r\n\r\n\r\n\r\n # 给定vega, gamma 求解二元方程式/不等式\r\n def cal_ratio(self, target, t1, t2):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n target : [target_vega, target_gamma]\r\n\r\n t1 :[vega_t1, gamma_t1]\r\n 近月到期合约--delta 对冲之后,一个lot的straddle下的vega和gamma\r\n t2 : [vega_t2, gamma_t2]\r\n 远月到期合约--delta 对冲之后,一个lot的straddle下的vega和gamma\r\n\r\n Returns\r\n -------\r\n t1, t2 的配比\r\n\r\n \"\"\"\r\n # target = [target_vega, target_gamma]\r\n a = Symbol('a')\r\n b = Symbol('b')\r\n formula = [a * t1[i] + b * t2[i] - target[i] for i in range(len(target))]\r\n result = solve(formula, [a, b])\r\n #print(result)\r\n\r\n # if target[0] * result[a] < 0:\r\n # result[a] = 0\r\n # result[b] = target[0]/t2[0]\r\n\r\n # if target[0] * result[b] < 0:\r\n # result[b] = 0\r\n # result[a] = target[0]/t2[0]\r\n\r\n return (result[a], result[b])\r\n\r\n def atm(self, k_old, etf_close):\r\n if 
etf_close <= 3:\r\n tick = 0.05\r\n elif etf_close > 3 and etf_close <= 5:\r\n tick = 0.1\r\n elif etf_close > 5 and etf_close <= 10:\r\n tick = 0.25\r\n elif etf_close > 10 and etf_close <= 20:\r\n tick = 0.5\r\n elif etf_close > 20 and etf_close <= 50:\r\n tick = 1\r\n elif etf_close > 50 and etf_close <= 100:\r\n tick = 2.5\r\n elif etf_close > 100:\r\n tick = 5\r\n\r\n if abs(etf_close - k_old) <= tick:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n def back_test(self,focus = 'vega',consider_money = False, data_type = None, vwap_or_twap_price_type = None,vwap_or_twap = None,start_time='14:30', end_time='14:50'):\r\n #data prepare\r\n self.load_all_df()\r\n self.data_type,self.vwap_or_twap_price_type,self.vwap_or_twap = data_type, vwap_or_twap_price_type,vwap_or_twap\r\n self.datelist = self.df_gamma_signal['Date'].drop_duplicates().sort_values().reset_index(drop=True)\r\n #self.datelist = pd.to_datetime(self.df_option['日期']).drop_duplicates().sort_values().reset_index(drop=True)\r\n pre_position = None\r\n #for i in range(10):\r\n for i in range(len(self.datelist)-1):\r\n print(i,'/',len(self.datelist)-2,'----',self.datelist[i],'----')\r\n date = self.datelist[i]\r\n tmr = self.datelist[i + 1]\r\n #1. 计算信号和敞口(vega,gamma)\r\n (self.vega_signal, self.gamma_signal) = self.signal(date)\r\n '''\r\n #原来target_vega的定义\r\n self.target_vega = self.vega_signal * abs(self.vega_min) if self.vega_signal < 0 else self.vega_signal * self.vega_max\r\n '''\r\n\r\n s = self.get_info('etf',self.df_s, date=date, return_type='收盘价')\r\n s_yes = self.get_info('etf',self.df_s, date=date, return_type='前收盘价')\r\n\r\n hv = self.df_volatility.loc[self.df_volatility['日期']==date,'HV'].values[0]/100\r\n if self.iv_type == 'vix':\r\n iv = self.df_volatility.loc[self.df_volatility['日期']==date,'iVIX'].values[0]/100\r\n else:\r\n iv = self.df_iv.loc[self.df_iv['Date']==date,'iv_insert'].values[0]/100\r\n\r\n rf = self.df_rf[self.df_rf['日期']==date]['中债国债到期收益率:1年'].values[0] / 100\r\n (self.gamma_min,self.gamma_max) = self.gamma_res(s, iv)\r\n self.target_gamma = self.gamma_signal * abs(self.gamma_min) if self.gamma_signal < 0 else self.gamma_signal * self.gamma_max\r\n self.target_gamma_down,self.target_gamma_up = self.target_gamma*self.gamma_float_down,self.target_gamma*self.gamma_float_up\r\n\r\n # 更新的30days target_vega(根据gamma而定)\r\n self.target_vega = self.vega_30days(self.target_gamma,s,iv, T=30)\r\n self.target_vega_down, self.target_vega_up = self.target_vega * self.vega_float_down, self.target_vega * self.vega_float_up\r\n\r\n # 2. 
筛选option + 计算仓位\r\n\r\n result = self.cal_position(date, s, hv, rf, pre_position, focus)\r\n if consider_money != False:\r\n result = self.is_moneny_enough(s, s_yes, result, position_fut=None, initial_require=0.8)\r\n\r\n for code in result:\r\n #持仓都是期权\r\n code['Symbol'] = 'option'\r\n if self.vwap_or_twap == None:\r\n self.df_records = self.df_records.append(\r\n {'Date': date, 'Code': code['期权代码'], 'Symbol': code['Symbol'], 'option_type':code['option_type'],'K':code['行权价'],\r\n 'Num': code['Num'], 'No': code['No'],'Lot': code['合约单位'],\r\n 'Margin': code['Margin'] if 'Margin' in code.keys() else 0,\r\n 'Price_t': code['收盘价'],\r\n 'Price_t1': self.get_info(asset_type=code['Symbol'], df=self.df_option, code = code['期权代码'],date=tmr,\r\n return_type='price'),\r\n\r\n 'Delta':code['Delta'] * code['Num'],'Gamma':code['Gamma']* code['Num'],'Vega':code['Vega']* code['Num'],'Theta':code['Theta']* code['Num'],\r\n 'T':code['maturity_days'],'Maturity':code['到期日'].strftime('%Y-%m-%d')\r\n }, ignore_index=True)\r\n else:\r\n self.df_records = self.df_records.append(\r\n {'Date': date, 'Code': code['期权代码'], 'Symbol': code['Symbol'],\r\n 'option_type': code['option_type'], 'K': code['行权价'],\r\n 'Num': code['Num'], 'No': code['No'], 'Lot': code['合约单位'],\r\n 'Margin': code['Margin'] if 'Margin' in code.keys() else 0,\r\n 'Price_t': code['收盘价'],\r\n 'Price_t1': self.get_info(asset_type=code['Symbol'], df=self.df_option, code=code['期权代码'],\r\n date=tmr,\r\n return_type='price'),\r\n 'Price_t_%s_%s' % (self.vwap_or_twap_price_type, self.vwap_or_twap): self.get_info(\r\n asset_type=code['Symbol'], df=self.df_option, code=code['期权代码'],\r\n date=date,\r\n return_type='price', data_type=data_type, vwap_or_twap=self.vwap_or_twap,\r\n start_time=start_time, end_time=end_time,\r\n vwap_or_twap_price_type=self.vwap_or_twap_price_type),\r\n\r\n 'Price_t1_%s_%s'%(self.vwap_or_twap_price_type,self.vwap_or_twap): self.get_info(asset_type=code['Symbol'], df=self.df_option, code=code['期权代码'],\r\n date=tmr,\r\n return_type='price', data_type=data_type, vwap_or_twap=self.vwap_or_twap,\r\n start_time=start_time, end_time=end_time,\r\n vwap_or_twap_price_type=self.vwap_or_twap_price_type),\r\n\r\n\r\n 'Delta': code['Delta'] * code['Num'], 'Gamma': code['Gamma'] * code['Num'],\r\n 'Vega': code['Vega'] * code['Num'], 'Theta': code['Theta'] * code['Num'],\r\n 'T': code['maturity_days'], 'Maturity': code['到期日'].strftime('%Y-%m-%d')\r\n }, ignore_index=True)\r\n\r\n\r\n pre_position = self.df_records.loc[self.df_records['Date'] == date]\r\n if consider_money != False:\r\n self.df_records.loc[self.df_records['Date'] == date, 'Remark'] = self.remark\r\n self.df_records.loc[self.df_records['Date'] == date, 'S'] = s\r\n self.df_records.loc[self.df_records['Date'] == date,'Vega_signal'] = self.vega_signal\r\n self.df_records.loc[self.df_records['Date'] == date, 'Gamma_signal'] = self.gamma_signal\r\n self.df_records.loc[self.df_records['Date'] == date,'Target_vega'] = self.target_vega\r\n self.df_records.loc[self.df_records['Date'] == date, 'Target_gamma'] = self.target_gamma\r\n\r\n\r\n # self.df_records = self.df_records[\r\n # ['Date', 'Code', 'Symbol', 'Num', 'No', 'Lot','t(days)','maturity', 'Margin', 'Price_t', 'Price_t1']]\r\n\r\n self.df_records.loc[:, 'No'] = self.df_records.loc[:, 'No'].astype(int)\r\n self.df_records.loc[:, 'Num'] = self.df_records.loc[:, 'Num'].astype(int)\r\n self.df_records.loc[:, ['Delta','Gamma','Vega','Theta']] = self.df_records.loc[:, ['Delta','Gamma','Vega','Theta']].astype(float)\r\n if 'Remark' in 
self.df_records.columns:\r\n if 'Price_t_%s_%s' %(self.vwap_or_twap_price_type, self.vwap_or_twap) in self.df_records.columns:\r\n self.df_records = self.df_records[\r\n ['Date','Code', 'Symbol', 'K', 'S', 'No', 'Lot', 'Num', 'Price_t', 'Price_t1',\r\n 'Price_t_%s_%s' %(self.vwap_or_twap_price_type, self.vwap_or_twap),\r\n 'Price_t1_%s_%s' %(self.vwap_or_twap_price_type, self.vwap_or_twap),\r\n 'Maturity', 'T',\r\n 'Margin', 'Delta', 'Gamma', 'Theta', 'Vega',\r\n 'option_type', 'Vega_signal', 'Gamma_signal', 'Target_vega',\r\n 'Target_gamma','Remark']]\r\n else:\r\n self.df_records = self.df_records[\r\n ['Date','Code', 'Symbol', 'K', 'S', 'No', 'Lot', 'Num', 'Price_t', 'Price_t1',\r\n 'Maturity', 'T',\r\n 'Margin', 'Delta', 'Gamma', 'Theta', 'Vega',\r\n 'option_type', 'Vega_signal', 'Gamma_signal', 'Target_vega',\r\n 'Target_gamma','Remark']]\r\n\r\n else:\r\n self.df_records = self.df_records[['Date','Code', 'Symbol','K','S', 'No','Lot','Num','Price_t', 'Price_t1',\r\n 'Maturity', 'T',\r\n 'Margin', 'Delta', 'Gamma', 'Theta', 'Vega',\r\n 'option_type', 'Vega_signal', 'Gamma_signal', 'Target_vega',\r\n 'Target_gamma']]\r\n\r\n\r\n\r\n#%%\r\nif __name__ == \"__main__\":\r\n test = level_adjust_strategy(initial = 5000000,\r\n save_result_to_folder= 'D:/Harper/option_strategy/backtest_result',\r\n code_list = {'510050.sh':{'symbol':'etf','database_address':'wind'}},\r\n s_code = '510050.sh',\r\n iv_type='iv',\r\n cost_opt_trading = 2)\r\n test.init_test_period(start_date = '20150201', end_date = None)\r\n test.back_test(focus='gamma', consider_money=True)\r\n #test.back_test(focus = 'gamma',consider_money = True,data_type = 'minbar', vwap_or_twap_price_type = 'mid',vwap_or_twap = 'twap',start_time='14:30', end_time='14:50')\r\n df_records = test.df_records.copy()\r\n test.daily_cost_calculate(fillna_previous_column = ['S','Vega_signal','Gamma_signal','Target_vega','Target_gamma'],only_open_short_no_cost = True)\r\n #test.daily_pnl_calculate(price_name = '%s_%s' %(test.vwap_or_twap_price_type, test.vwap_or_twap))\r\n test.daily_pnl_calculate()\r\n test.daily_return_calculate()\r\n test.df_pnl = pd.merge(test.df_pnl,df_records.loc[:,['Date','Vega_signal','Gamma_signal','Target_vega','Target_gamma','Remark']].drop_duplicates(subset=['Date']),on='Date',how = 'left')\r\n test.annual_return_analysis()\r\n\r\n test.save_result(file_name = 'test_信号0804_1500_调整平滑方式_iv调整为365年化_vega敞口gamma定_gamma3倍_65_7天不开_保证金限制_仅short0cost_冲击成本2_twap',sheet_name_and_df_dict = {'持仓':test.df_records,\r\n 'PnL': test.df_pnl,\r\n 'Result': test.df_annual_result})\r\n\r\n\r\n\r\n","repo_name":"HarperRui/QuantChina","sub_path":"1.期权/回测框架/level_adjust_strategy.py","file_name":"level_adjust_strategy.py","file_ext":"py","file_size_in_byte":32062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30163113465","text":"from sys import stdin, stdout\ni = stdin.readline\nres = []\nfor _ in range(int(i())):\n n = int(i())\n dic = {}\n sc = [int(s) for s in i().split()]\n q = int(i())\n for j in range(q):\n qu = [int(x) for x in i().split()]\n if qu[0] == 1:\n x, y = qu[1], qu[2]\n if x == y or (x == 0 or y == 0):\n continue\n if sc[x - 1] > sc[y - 1]:\n y, x = x, y\n\n sc[y - 1] += sc[x - 1]\n sc[x - 1] = 0\n try:\n dic[y - 1][1] += dic[x - 1][1]\n dic[x - 1][0] = y\n dic[x - 1][1] = 0\n except:\n dic[y - 1] = [y-1, 1]\n dic[x - 1] = [x-1, 1]\n dic[y - 1][1] += dic[x - 1][1]\n dic[x - 1][0] = y\n dic[x - 1][1] = 0\n\n elif qu[0] == 2:\n try:\n res.append(str(dic[qu[1] - 
1][1]))\n except:\n res.append('1')\n elif qu[0] == 3:\n try:\n res.append(str(dic[qu[1] - 1][0]))\n except:\n res.append(str(qu[1]))\nstdout.write('\\n'.join(res))\n\n","repo_name":"jahir-raihan/solved_problems_from_different_platforms","sub_path":"hackerrank/pen fight.py","file_name":"pen fight.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"5234746276","text":"import socket\nimport sys\n\n#This function sets up the client program and readies to foward a file to the server\ndef client(host, port, file_name): \n # Resolve the IP Address given the hostname and the port number\n try:\n ai = socket.getaddrinfo(host,port)\n except socket.gaierror: \n # insert error handling code because we couldn't resolve host name\n print(\"We had an error.\")\n\n # Create a socket object with the proper socket specifications.\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # Send over a new file name\n # The first thing sent to the server is the name of the new file where everything is going to be stored.\n strTest = 'some.txt'.encode('ascii', 'ignore')\n sock.sendto(strTest, (ai[0][4]))\n\n # Open the file to transfer and start sending data using the created socket object\n # At the end, you should send a control message, just an empty string to notify the end of file\n with open(file_name, 'r') as f:\n for line in f:\n #Have to encode the string we're sending over \n msg = line.encode('ascii', 'ignore')\n sock.sendto(msg, (ai[0][4]))\n #Send an empty string at the end of it to notify the end of file.\n sock.sendto(\" \".encode('ascii','ignore'),(ai[0][4]))\n # Close the socket\n sock.close()\n\nif __name__ == '__main__':\n if len (sys.argv) > 3:\n try:\n host = sys.argv[1]\n port = int(sys.argv[2])\n file_name = sys.argv[3]\n client (host, port, file_name)\n except ValueError:\n #print ('Usage: python3 client.py host port file_name')\n raise\n else:\n print ('Usage: python3 client.py host port')\n","repo_name":"tchung777/School-Projects","sub_path":"COEN146_Computer Networks/Lab 3/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"20470244535","text":"'''\nDescripttion: \n1、引入模块,首先需要有一个包,包下一定要有一个文件 __init__.py,然后就可以引用该包下的模块,即 .py文件\n2、或者在模块搜索路径中加入需要引用的文件的路径\n\n记录一个现象,获取LogTest中的日志器时:\n\n1、如果都使用LogTest的属性logger,日志打印不会出现问题\n2、如果都使用LogTest的方法logger1,会出现问题:重复打印日志,因为会重复创建日志器(日志器名称是同一个)\n3、一个使用方法logger1,一个使用方法logger2,并且两个方法中的日志器名称不同,日志打印不会出现问题\n\n总结: 一个日志类,定义了一个方法,方法里创建一个日志器;\n 如果引入该日志类,调用类中的方法获取日志器的时候,一旦重复调用了,不管是在一个方法里调用两次,还是在不同的方法里分别调用一次\n 只要调用次数大于 1,就会导致同一个名称的日志器重复创建,日志就会重复输出\n解决: 1 - 定义不同的方法,方法中的日志器名称不能相同,每个方法只能调用一次\n 2 - 不使用方法,使用全局属性,重复获取该属性,不会导致日志器重复创建\n\nversion: 1.0\nAuthor: xieyupeng\nDate: 2020-08-07 16:47:59\nLastEditors: xieyupeng\nLastEditTime: 2020-08-26 13:36:32\n'''\nimport threading\nimport asyncio\nimport sys\nimport LogTest\nsys.path.append('/app1/utils') # 在模块搜索路径中加入 logTest的路径,才能直接引用\n\n@asyncio.coroutine\ndef hello():\n logger = LogTest.logger1()\n # logger = LogTest.logger\n logger.debug('start....(%s)' % threading.currentThread())\n yield from asyncio.sleep(2) # 用睡眠模拟执行时长\n logger.debug('end....(%s)' % threading.currentThread())\n\n\n@asyncio.coroutine\ndef hello1():\n logger = LogTest.logger2()\n # logger = LogTest.logger1()\n # logger = LogTest.logger\n logger.debug('start1....(%s)' % threading.currentThread())\n yield from 
asyncio.sleep(2)\n logger.debug('end1....(%s)' % threading.currentThread())\n\n\ndef main():\n loop = asyncio.get_event_loop()\n tasks = [hello(), hello1()]\n loop.run_until_complete(asyncio.wait(tasks))\n loop.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"xieyupengzZ/python3","sub_path":"app1/utils/ProcessThread/AsynicoTest.py","file_name":"AsynicoTest.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"38287174326","text":"\"\"\"\nhttps://leetcode.com/problems/deepest-leaves-sum/\n\"\"\"\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def deepestLeavesSum(self, root: Optional[TreeNode]) -> int:\n last_floor = list()\n floor = list()\n floor.append(root)\n while len(floor) > 0:\n last_floor = floor\n floor = list()\n for node in last_floor:\n if node.left is not None:\n floor.append(node.left)\n if node.right is not None:\n floor.append(node.right)\n result = 0\n for node in last_floor:\n result += node.val\n return result","repo_name":"AnhaoROMA/leetcode","sub_path":"Tree/1302 deepest leaves sum.py","file_name":"1302 deepest leaves sum.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39661309783","text":"\nnum = 0\ntot = 0.0\nwhile True:\n sval = input (\"Enter a Number: \")\n if sval == \"done\": ## if user enter \"done\" Exit From this loop\n break\n try: ## Check if the input is the float or string\n fval = float(sval)\n except: ## if the input is string print this message and continue the Loop\n print(\"Invalid Input\")\n continue\n print (fval)\n num +=1 # Number of iterations\n tot += fval ## Compute the Total for all Number\nprint (\"ALL DONE\")\nprint (tot , num , tot/num)\n","repo_name":"AhmedMohamedEid/Python_For_Everybody","sub_path":"python_for_everybody_get_startting/ex_05.01/ex_05.01.py","file_name":"ex_05.01.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"15381577008","text":"from __future__ import annotations\n\nfrom cve_bin_tool.checkers import Checker\n\n\nclass RdesktopChecker(Checker):\n CONTAINS_PATTERNS: list[str] = []\n FILENAME_PATTERNS: list[str] = []\n VERSION_PATTERNS = [\n r\"rdesktop: A Remote Desktop Protocol client.\\r?\\nVersion ([0-9]+\\.[0-9]+\\.[0-9]+)\"\n ]\n VENDOR_PRODUCT = [(\"rdesktop\", \"rdesktop\")]\n","repo_name":"intel/cve-bin-tool","sub_path":"cve_bin_tool/checkers/rdesktop.py","file_name":"rdesktop.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":950,"dataset":"github-code","pt":"38"} +{"seq_id":"5451667804","text":"\nFIELD_WIDTH = 20\nFIELD_HEIGHT = 20\nFIELD_DIM = 2\n\nN_HIDDEN = 10\nFC_H_SIZE = 256\n\nEPOCHS = 600\n\nBATCH_SIZE = 20\nLEARNING_RATE = 1e-5\nINEQ_DEPTH = 100\nEQ_DEPTH = 100\n\nTEST_SIZE = 500\nRENDER_INDEX = 8\n\nMODEL_TYPE = \"optnet\"\n#MODEL_TYPE = \"fc\"\n\n#ACTION = \"generate\"\n#ACTION = \"train\"\nACTION = \"render\"\n\nPATHS = {\n \"magnets-train\": \"data/it2/magnets-500-train.npy\",\n \"fields-train\": \"data/it2/fields-500-train.npy\",\n \"magnets-test\": \"data/it2/magnets-500-test.npy\",\n \"fields-test\": \"data/it2/fields-500-test.npy\",\n \"save-results\": \"data/results/500-it6\" + 
MODEL_TYPE + \".npy\",\n \"model\": \"models/final-\" + MODEL_TYPE + \".pt\"\n}\n\n","repo_name":"hatfield-c/optnet-magnet","sub_path":"Parameters.py","file_name":"Parameters.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40869361974","text":"from itertools import chain\nfrom jarbas.chamber_of_deputies.fields import ArrayField, DateAsStringField, FloatField, IntegerField\nfrom jarbas.chamber_of_deputies.models import Reimbursement\n\n\nINTEGERS = (\n 'applicant_id',\n 'batch_number',\n 'congressperson_document',\n 'congressperson_id',\n 'document_id',\n 'document_type',\n 'installment',\n 'month',\n 'subquota_group_id',\n 'subquota_number',\n 'term',\n 'term_id',\n 'year'\n)\n\nFLOATS = (\n 'document_value',\n 'remark_value',\n 'total_net_value',\n 'total_value'\n)\n\nTYPES = tuple(chain(\n ((field, IntegerField) for field in INTEGERS),\n ((field, FloatField) for field in FLOATS),\n (('issue_date', DateAsStringField),),\n (('numbers', ArrayField),),\n))\n\n\ndef serialize(row):\n \"\"\"Read the dict generated by the reimbursement command and returns a\n Reimbursement model instance.\"\"\"\n for key, type_ in TYPES:\n value = row.get(key)\n row[key] = type_.deserialize(value)\n\n for field in FLOATS:\n row[field] = row[field] if row[field] else 0.0\n\n if row['issue_date']:\n return Reimbursement(**row)\n","repo_name":"okfn-brasil/serenata-de-amor","sub_path":"jarbas/chamber_of_deputies/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":4484,"dataset":"github-code","pt":"38"} +{"seq_id":"26027762944","text":"class InvalidFeeError(Exception):\n def __init__(self, fee):\n self.fee = fee\n\n def __str__(self):\n return f\"Invalid Course Fee {self.fee}. It must be >= 0\"\n\n\nclass Course:\n taxrate = 18\n\n def __init__(self, name, fee=5000, duration=36):\n if fee < 0:\n raise InvalidFeeError(fee)\n\n if duration <= 0:\n raise ValueError(f\"Invalid Duration --> {duration}. 
It must be > 0\")\n\n self.name = name\n self.fee = fee\n self.duration = duration\n\n def getnetfee(self):\n return self.fee + self.fee * Course.taxrate // 100\n\n\nc = Course(\"AWS\", -5000, 30)\n\nc = Course(\"AWS\", duration=24)\nprint(c.getnetfee())\n","repo_name":"srikanthpragada/PYTHON_05_JUN_2023","sub_path":"demo/oop/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"33000826747","text":"import tensorflow as tf\nimport cv2\nimport os\nimport numpy as np\n\npackage_directory = os.path.dirname(os.path.abspath(__file__))\n\nclass_indices = {'cyrillic': ['1', '2', '3', '4', '5', '6', '7', '8', '9',\n 'А', 'Б', 'В', 'Г', 'Д', 'Е', 'Ё', 'Ж', 'З',\n 'И', 'Й', 'К', 'Л', 'М', 'Н', 'О', 'П', 'Р',\n 'С', 'Т', 'У', 'Ф', 'Х', 'Ц', 'Ч', 'Ш', 'Щ',\n 'Ъ', 'Ы', 'Ь', 'Э', 'Ю', 'Я'],\n }\n\nclass Recognizer:\n def __init__(self, model_path, indices):\n self.model = tf.keras.models.load_model(model_path)\n self.indices = indices\n self.image_size = self.model.get_input_shape_at(0)[1:3]\n\n def recognize(self, img):\n data = self.convert_cv2_to_tf(img)\n result = self.model.predict(data)[0]\n best = np.argmax(result)\n return self.indices[best], result[best]\n\n def convert_cv2_to_tf(self, img):\n resized = cv2.resize(img, self.image_size)\n processed = resized / 255\n tensor = tf.convert_to_tensor(processed, dtype=tf.float32)\n return tf.reshape(tensor, (1, *self.image_size, 1))\n\nclass CyrillicRecognizer(Recognizer):\n def __init__(self):\n super().__init__(os.path.join(package_directory, 'models/cyrillic'),\n class_indices['cyrillic'])\n","repo_name":"povle/ege-scanner","sub_path":"lib/recognition/recognize.py","file_name":"recognize.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"5720326335","text":"import json\n\nimport pytest\nimport tornado.httpclient\n\nfrom demo import config, schemas, runner\n\n\n@pytest.mark.usefixtures(\"init\")\nclass TestHandlers:\n @pytest.mark.gen_test\n async def test_post_events(self, http_client, base_url, input_schema):\n event_handler = runner.EventHandler.get_instance()\n request = tornado.httpclient.HTTPRequest(\n base_url + \"/events\",\n method=\"POST\",\n headers={\"X-Auth-Token\": config.POST_SECRET},\n body=input_schema.json().encode(\"utf-8\"))\n response = await http_client.fetch(request)\n assert response.code == 200\n assert json.loads(response.body)[\"result\"] == \"success\"\n assert len(event_handler._events_queue) == 10\n assert all(\n isinstance(e, schemas.output.EventV1)\n for e in event_handler._events_queue)\n\n @pytest.mark.gen_test\n async def test_post_events_missing_secret(\n self, http_client, base_url, input_schema):\n request = tornado.httpclient.HTTPRequest(\n base_url + \"/events\",\n method=\"POST\",\n headers={},\n body=input_schema.json().encode(\"utf-8\"))\n response = await http_client.fetch(request, raise_error=False)\n assert response.code == 403\n assert response.reason == \"Forbidden\"\n\n @pytest.mark.gen_test\n async def test_post_events_incorrect_secret(\n self, http_client, base_url, input_schema):\n request = tornado.httpclient.HTTPRequest(\n base_url + \"/events\",\n method=\"POST\",\n headers={\"X-Auth-Token\": \"INCORRECT\"},\n body=input_schema.json().encode(\"utf-8\"))\n response = await http_client.fetch(request, raise_error=False)\n assert response.code == 403\n assert 
response.reason == \"Forbidden\"\n\n @pytest.mark.gen_test\n async def test_post_events_incorrect_body(self, http_client, base_url):\n incorrect_input_schema = {\"events\": [{\"id\": 1, \"x\": 1}]}\n request = tornado.httpclient.HTTPRequest(\n base_url + \"/events\",\n method=\"POST\",\n headers={\"X-Auth-Token\": config.POST_SECRET},\n body=json.dumps(incorrect_input_schema).encode(\"utf-8\"))\n response = await http_client.fetch(request, raise_error=False)\n assert response.code == 422\n assert response.reason == \"Invalid body\"\n\n @pytest.mark.gen_test\n async def test_base_path(self, http_client, base_url):\n request = tornado.httpclient.HTTPRequest(\n base_url + \"/\",\n method=\"POST\",\n body=b\"\")\n response = await http_client.fetch(request, raise_error=False)\n assert response.code == 404\n","repo_name":"samm0ss/tornado-demo","sub_path":"test/test_handlers.py","file_name":"test_handlers.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1581021586","text":"class Solution:\n def partition(self, s: str) -> List[List[str]]:\n self.res = []\n self.backtrack([], s)\n return self.res\n\n def backtrack(self, sol, s):\n if not s:\n self.res.append(sol)\n return\n for i in range(1, len(s) + 1):\n if s[:i] != s[:i][::-1]:\n continue\n self.backtrack(sol + [s[:i]], s[i:]) ##### 注意这里把sol与[s[:i]]相加,实现两个list相加\n\n\n\n\n\n\n","repo_name":"Lmyxxn/JZoffer","sub_path":"code/分割回文串.py","file_name":"分割回文串.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"22283541346","text":"from django.contrib import admin\r\nfrom apps.goods.models import Goods,GoodsType,GoodsSKU,IndexGoodsBanner,IndexPromotionBanner,IndexTypeGoodsBanner,GoodsImage\r\nfrom django.core.cache import cache\r\n# Register your models here.\r\n\r\nclass BaseModelAdmin(admin.ModelAdmin):\r\n def save_model(self, request, obj, form, change):\r\n '''新增或更新时候调用'''\r\n # 调用ModelAdmin中的save_model方法实现\r\n super().save_model(request, obj, form, change)\r\n\r\n # 附加操作1:发出生成静态首页的任务\r\n\r\n # 附加操作2,:清除首页缓存\r\n cache.delete('index_page_data')\r\n \r\n def delete_model(self, request, obj):\r\n '''删除数据时调用'''\r\n super().delete_model(request, obj)\r\n # 附加操作: 清除首页缓存\r\n cache.delete('index_page_data')\r\n\r\nclass GoodsTypeAdmin(BaseModelAdmin):\r\n \"\"\"商品种类模型admin管理类\"\"\"\r\n list_display = ('name','logo')\r\n\r\nclass GoodsSKUInfo(admin.StackedInline):\r\n model = GoodsSKU\r\n extra = 1\r\n\r\nclass GoodsAdmin(BaseModelAdmin):\r\n inlines = [GoodsSKUInfo]\r\n \"\"\"商品种类模型admin管理类\"\"\"\r\n list_display = ('name',)\r\n\r\nclass GoodsSKUAdmin(BaseModelAdmin):\r\n \"\"\"商品种类模型admin管理类\"\"\"\r\n search_fields=('name', 'unite','price','stock','sales')\r\n list_display = ('name', 'unite', 'price', 'stock', 'sales')\r\n\r\nclass GoodsImageAdmin(BaseModelAdmin):\r\n '''商品图片'''\r\n pass\r\n\r\nclass IndexGoodsBannerAdmin(BaseModelAdmin):\r\n \"\"\"首页轮播商品模型admin管理类\"\"\"\r\n list_display = ('sku','index')\r\n\r\n\r\nclass IndexTypeGoodsBannerAdmin(BaseModelAdmin):\r\n list_filter = ('category','display_type')\r\n \"\"\"首页分类商品展示模型admi管理类\"\"\"\r\n list_display = ('category','sku','display_type','index')\r\n\r\n\r\nclass IndexPromotionBannerAdmin(BaseModelAdmin):\r\n \"\"\"首页促销活动admin管理类\"\"\"\r\n list_display = ('name','url','index')\r\n\r\nadmin.site.register(GoodsType, GoodsTypeAdmin)\r\nadmin.site.register(Goods, 
GoodsAdmin)\r\nadmin.site.register(GoodsSKU, GoodsSKUAdmin)\r\nadmin.site.register(GoodsImage,GoodsImageAdmin)\r\nadmin.site.register(IndexGoodsBanner, IndexGoodsBannerAdmin)\r\nadmin.site.register(IndexPromotionBanner, IndexPromotionBannerAdmin)\r\nadmin.site.register(IndexTypeGoodsBanner, IndexTypeGoodsBannerAdmin)\r\n","repo_name":"sh829/dailyfresh","sub_path":"apps/goods/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10349262185","text":"import sys\r\nimport os\r\nsys.path.append(\"../\")\r\nimport AcraNetwork.Chapter10.Chapter10 as ch10\r\nimport AcraNetwork.Chapter10.Chapter10UDP as ch10udp\r\nimport AcraNetwork.Pcap as pcap\r\nimport AcraNetwork.SimpleEthernet as eth\r\nimport argparse\r\nimport sys\r\n\r\n\r\ndef create_parser():\r\n #----------------------------------\r\n # Setup the command line parser\r\n #----------------------------------\r\n parser = argparse.ArgumentParser(description='Covert a chapter 10 file to a pcap')\r\n parser.add_argument('--pcap', required=True, help='The output pcap file')\r\n parser.add_argument('--ch10', required=True, help='The input chapter 10 file')\r\n return parser\r\n\r\ndef encapsulate_udppayload_in_eth(udp_payload: bytes):\r\n \"\"\"\r\n Encapsulte the udp payload in an Ethernet packet\r\n \"\"\"\r\n ethpkt = eth.Ethernet()\r\n ethpkt.dstmac = 0x01005e000001\r\n ethpkt.srcmac = 0x000c4dac7aaa\r\n ethpkt.type = eth.Ethernet.TYPE_IP\r\n #\r\n ippkt = eth.IP()\r\n ippkt.dstip = \"235.0.0.2\"\r\n ippkt.srcip = \"127.0.0.1\"\r\n # Stick a UDP packet in the payload\r\n udppkt = eth.UDP()\r\n udppkt.dstport = 51001\r\n udppkt.srcport = 51001\r\n\r\n udppkt.payload = udp_payload\r\n # packet the udp packet into the ethernet payload\r\n ippkt.payload = udppkt.pack()\r\n ethpkt.payload = ippkt.pack()\r\n return ethpkt.pack()\r\n\r\ndef main(args):\r\n\r\n pf = pcap.Pcap(args.pcap, mode='w')\r\n fp = ch10.FileParser(args.ch10)\r\n\r\n idx = 0\r\n with fp as ch10file:\r\n for idx, pkt in enumerate(ch10file):\r\n pr = pcap.PcapRecord()\r\n pr.set_current_time()\r\n udp = ch10udp.Chapter10UDP()\r\n udp.format = 3\r\n udp.sourceid_len = 0\r\n udp.sequence = idx\r\n udp.offset_pkt_start = 0\r\n udp.payload = pkt.pack()\r\n pr.payload = encapsulate_udppayload_in_eth(udp.pack())\r\n pf.write(pr)\r\n\r\n pf.close()\r\n print(f\"Create a pcap with {idx} records\")\r\n\r\nif __name__ == '__main__':\r\n parser = create_parser()\r\n args = parser.parse_args()\r\n ret = main(args)\r\n sys.exit(ret)\r\n","repo_name":"diarmuidcwc/AcraNetwork","sub_path":"examples/ch10_to_pcap.py","file_name":"ch10_to_pcap.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"72424591146","text":"from pandas.api.types import is_numeric_dtype\nfrom source_code.converter import list_to_string\n\ndef clean_sale_column(sale_data_column):\n \n sale_column = sale_data_column\n if is_numeric_dtype(sale_column):\n return (sale_column)\n cleaned_column = sale_column.astype(\"str\")\n cleaned_column = cleaned_column.str.replace(',', '')\n cleaned_column = cleaned_column.str.extract(\"([\\d.,]+)\")\n cleaned_column= cleaned_column.astype(float)\n return (cleaned_column)\n\ndef cal_total_cost_price(df,mapper):\n formular = None\n try:\n df[\"total_cost_price\"] = df[mapper[\"qty_sold\"]].values * df[mapper[\"unit_cp\"]].values\n formular = f\"We got 
total_cost_price by multiplying {mapper['qty_sold']} with {mapper['unit_cp']}\"\n        return df,formular\n    except KeyError:\n        formular = None\n        return df,formular\n    \ndef cal_total_selling_price(df,mapper):\n    \n    possible_features = [\"sp\",\"discount_per\",\"discount_amount\"]\n    formular = None\n    for feature in possible_features:\n        try:\n            formular = f\"We got total_selling_price by multiplying {mapper['qty_sold']} with \"\n            if feature == \"discount_per\":\n                discounted = None\n                if df[mapper[feature]].max() > 1:\n                    discounted = df[mapper[\"sp\"]].values - (df[mapper[\"sp\"]].values * df[mapper[feature]].div(100) ) \n                    formular += f\"{mapper['sp']} - ( {mapper['sp']} * ({mapper[feature]} / 100)\"\n                else:\n                    discounted = df[mapper[\"sp\"]].values - (df[mapper[\"sp\"]].values * df[mapper[feature]].values ) \n                    formular += f\"{mapper['sp']} - ( {mapper['sp']} * ({mapper[feature]})\"\n                \n                df[\"total_selling_price\"] = df[mapper[\"qty_sold\"]].values * discounted\n                return df,formular\n            elif feature == \"discount_amount\":\n                df[\"total_selling_price\"] = df[mapper[\"qty_sold\"]].values * (df[mapper[\"sp\"]].values - df[mapper[feature]].values)\n                formular += f\"({mapper['sp']} - {mapper[feature]} )\"    \n                return df,formular\n            else:\n                df[\"total_selling_price\"] = df[mapper[\"qty_sold\"]].values * df[mapper[\"sp\"]].values\n                formular += f\"{mapper['sp']}\"    \n                return df,formular\n            \n        except KeyError:\n            formular = None\n            continue\n        \n    return df,formular\n\ndef cal_gross_cost_price(df,mapper,multiple_features):\n    formular = None\n    features = []\n    try:\n        \n        df['gross_cost_price'] = df['total_cost_price'].values + df[mapper[\"extra_cost\"]]\n        features.append(mapper['extra_cost'])\n        try:\n            for multiple_key in multiple_features[\"extra_cost\"]:\n                df['gross_cost_price'] = df['gross_cost_price'].values + df[mapper[multiple_key]] \n                features.append(mapper[multiple_key])    \n        except KeyError:\n            pass\n    except KeyError:\n        pass\n    \n    try:\n        init = \"gross_cost_price\" if features else 'total_cost_price'\n        df['gross_cost_price'] = df[init].values + (df[mapper[\"extra_cost_pu\"]].values * df[mapper[\"qty_sold\"]].values)\n        features.append(f\"({mapper['extra_cost_pu']} * {mapper['qty_sold']})\")\n        try:\n            for multiple_key in multiple_features[\"extra_cost\"]:\n                df['gross_cost_price'] = df['gross_cost_price'].values + (df[mapper[multiple_key]] * df[mapper[\"qty_sold\"]].values) \n                features.append({mapper[multiple_key]}) \n                features.append(f\"({mapper[multiple_key]} * {mapper['qty_sold']})\")  \n        except KeyError:\n            pass\n    except KeyError:\n        pass\n    \n    if features:  \n        formular = f\"We got gross_cost_price by adding total_cost_price with \"\n        string,_ = list_to_string(features)\n        formular += string\n    \n    return df,formular\n    \ndef cal_delivery_profit(df,mapper):\n    formular = None\n    try:\n        df[\"delivery_profit\"] = df[mapper[\"del_charge\"]].values - df[mapper[\"del_cost\"]].values\n        formular = f\"We got delivery_profit by subtracting {mapper['del_cost']} from {mapper['del_charge']}\"\n    except KeyError:\n        pass\n    return df,formular\n\ndef cal_profit(df,mapper,multiple_features,total_cp,total_sp):\n    derivatives = []\n    new_cols = []\n    if not total_cp :\n        df,total_cp_formular = cal_total_cost_price(df,mapper)\n        total_cp = True if total_cp_formular else False\n        if total_cp:\n            derivatives.append(total_cp_formular)\n            new_cols.append(\"total_cost_price\")\n    \n    if not total_sp :\n        df,total_sp_formular = cal_total_selling_price(df,mapper)\n        total_sp = True if total_sp_formular else False\n        if total_sp:\n            derivatives.append(total_sp_formular)\n            
new_cols.append(\"total_selling_price\")\n\n    df,delivery_profit_formular = cal_delivery_profit(df,mapper)\n    if delivery_profit_formular:\n        derivatives.append(delivery_profit_formular)\n        new_cols.append(\"delivery_profit\")\n    if total_cp:\n        df,total_gcp_formular = cal_gross_cost_price(df,mapper,multiple_features)\n        if total_gcp_formular:\n            derivatives.append(total_gcp_formular)\n            new_cols.append(\"gross_cost_price\")\n    if total_gcp_formular and total_sp and delivery_profit_formular:\n        df[\"profit\"] = df[\"total_selling_price\"] - df[\"gross_cost_price\"] + df[\"delivery_profit\"]\n        df['returns(%)'] = (( df[\"profit\"].values/df[\"gross_cost_price\"].values) * 100).round(2)\n        formular = \"We got profit by subtracting (gross_cost_price + delivery_profit) from total_selling_price \\\n                    then derived returns(%) by multiplying (profit/gross_cost_price) by 100\"\n        derivatives.append(formular)\n        new_cols += [\"profit\",\"returns(%)\"]\n    elif total_gcp_formular and total_sp:\n        df[\"profit\"] = df[\"total_selling_price\"] - df[\"gross_cost_price\"]\n        df['returns(%)'] = ((df[\"profit\"].values / df[\"gross_cost_price\"].values ) * 100).round(2)\n        formular = \"We got profit by subtracting gross_cost_price from total_selling_price \\\n                    then derived returns(%) by multiplying (profit/gross_cost_price) by 100\"\n        derivatives.append(formular)\n        new_cols += [\"profit\",\"returns(%)\"]\n    elif total_cp and total_sp and delivery_profit_formular:\n        df[\"profit\"] = df[\"total_selling_price\"] - df[\"total_cost_price\"] + df[\"delivery_profit\"]\n        df['returns(%)'] = (( df[\"profit\"].values / df[\"total_cost_price\"].values) * 100).round(2)\n        formular = \"We got profit by subtracting (total_cost_price + delivery_profit) from total_selling_price \\\n                    then derived returns(%) by multiplying (profit/total_cost_price) by 100\"\n        derivatives.append(formular)\n        new_cols += [\"profit\",\"returns(%)\"]\n    elif total_cp and total_sp:\n        df[\"profit\"] = df[\"total_selling_price\"] - df[\"total_cost_price\"]\n        df['returns(%)'] = ((df[\"profit\"].values / df[\"total_cost_price\"].values ) * 100).round(2)\n        formular = \"We got profit by subtracting total_cost_price from total_selling_price \\\n                    then derived returns(%) by multiplying (profit/total_cost_price) by 100\"\n        derivatives.append(formular)\n        new_cols += [\"profit\",\"returns(%)\"]\n    total_sp = False\n    total_cp = False   \n    return df,derivatives,new_cols   \n    ","repo_name":"Emeka-Onwuepe/Automated_Sales_Data_Analysis","sub_path":"source_code/clean/sales.py","file_name":"sales.py","file_ext":"py","file_size_in_byte":7539,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
+{"seq_id":"470855890","text":"import pulp as p\r\n#from p import *\r\n\r\nBillboards = ['BB1', 'BB2', 'SS1', 'SS2', 'SS3']\r\n\r\ncosts = {'BB1': 126.19,\r\n         'BB2': 102.54, \r\n         'SS1': 316.60,\r\n         'SS2': 388.62,\r\n         'SS3': 359.34\r\n         }\r\ncontacts = {'BB1': 1967,\r\n            'BB2': 1599,\r\n            'SS1': 3290,\r\n            'SS2': 4039,\r\n            'SS3': 3735\r\n            }\r\n# Create a variable\r\nx1 = p.LpVariable(\"BB1\", lowBound=4, upBound=4, cat=p.LpInteger) \r\nx2 = p.LpVariable(\"BB2\", lowBound=4, upBound=10, cat=p.LpInteger) \r\ny1 = p.LpVariable(\"SS1\", lowBound=4, upBound=18, cat=p.LpInteger) \r\ny2 = p.LpVariable(\"SS2\", lowBound=4, upBound=18, cat=p.LpInteger) \r\ny3 = p.LpVariable(\"SS3\", lowBound=4, upBound=4, cat=p.LpInteger) \r\n\r\nDifference_constraint_BB1_BB2 = {\"BB1\": -1.0,\r\n                                 \"BB2\": 1.0,\r\n                                 \"SS1\": 0.0,\r\n                                 \"SS2\": 0.0,\r\n                                 \"SS3\": 0.0\r\n                                 
}\r\nDifference_constraint_BB1_SS1 = {\"BB1\": -1.0,\r\n \"BB2\": 0.0,\r\n \"SS1\": 1.0,\r\n \"SS2\": 0.0,\r\n \"SS3\": 0.0\r\n }\r\nDifference_constraint_BB1_SS2 = {\"BB1\": -1.0,\r\n \"BB2\": 0.0,\r\n \"SS1\": 0.0,\r\n \"SS2\": 1.0,\r\n \"SS3\": 0.0\r\n }\r\nDifference_constraint_BB1_SS3 = {\"BB1\": -1.0,\r\n \"BB2\": 0.0,\r\n \"SS1\": 0.0,\r\n \"SS2\": 0.0,\r\n \"SS3\": 1.0\r\n } \r\nDifference_constraint_BB2_SS1 = {\"BB1\": 0.0,\r\n \"BB2\": -1.0,\r\n \"SS1\": 1.0,\r\n \"SS2\": 0.0,\r\n \"SS3\": 0.0\r\n } \r\nDifference_constraint_BB2_SS2 = {\"BB1\": 0.0,\r\n \"BB2\": -1.0,\r\n \"SS1\": 0.0,\r\n \"SS2\": 1.0,\r\n \"SS3\": 0.0\r\n } \r\nDifference_constraint_BB2_SS3 = {\"BB1\": 0.0,\r\n \"BB2\": 1.0,\r\n \"SS1\": 0.0,\r\n \"SS2\": 0.0,\r\n \"SS3\": -1.0\r\n } \r\nDifference_constraint_SS1_SS2 = {\"BB1\": 0.0,\r\n \"BB2\": 0.0,\r\n \"SS1\": 1.0,\r\n \"SS2\": -1.0,\r\n \"SS3\": 0.0\r\n } \r\n\r\nDifference_constraint_SS1_SS3 = {\"BB1\": 0.0,\r\n \"BB2\": 0.0,\r\n \"SS1\": 1.0,\r\n \"SS2\": 0.0,\r\n \"SS3\": -1.0\r\n } \r\nDifference_constraint_SS2_SS3 = {\"BB1\": 0.0,\r\n \"BB2\": 0.0,\r\n \"SS1\": 0.0,\r\n \"SS2\": 1.0,\r\n \"SS3\": -1.0\r\n } \r\nprob = p.LpProblem(\"The number of display problem\", p.LpMaximize)\r\ningredient_vars = p.LpVariable.dicts(\"Ingr\", Billboards, 2, cat=p.LpInteger)\r\n# Objective Function\r\nprob += p.lpSum([costs[i]*ingredient_vars[i] for i in Billboards])\r\n# Constraints: \r\nprob += p.lpSum([Difference_constraint_BB1_BB2[i] * ingredient_vars[i] for i in Billboards]) <= 6\r\nprob += p.lpSum([Difference_constraint_BB1_SS1[i] * ingredient_vars[i] for i in Billboards]) <= 14\r\nprob += p.lpSum([Difference_constraint_BB1_SS2[i] * ingredient_vars[i] for i in Billboards]) <= 14\r\n#prob += p.lpSum([Difference_constraint_BB1_SS3[i] * ingredient_vars[i] for i in Billboards]) <= 0\r\nprob += p.lpSum([Difference_constraint_BB2_SS1[i] * ingredient_vars[i] for i in Billboards]) <= 8\r\nprob += p.lpSum([Difference_constraint_BB2_SS2[i] * ingredient_vars[i] for i in Billboards]) <= 8\r\nprob += p.lpSum([Difference_constraint_BB2_SS3[i] * ingredient_vars[i] for i in Billboards]) <= 6\r\n#prob += p.lpSum([Difference_constraint_SS1_SS2[i] * ingredient_vars[i] for i in Billboards]) <= 0\r\nprob += p.lpSum([Difference_constraint_SS1_SS3[i] * ingredient_vars[i] for i in Billboards]) <= 14\r\nprob += p.lpSum([Difference_constraint_SS2_SS3[i] * ingredient_vars[i] for i in Billboards]) <= 14\r\nprob += p.lpSum([contacts[i]*ingredient_vars[i] for i in Billboards]) <= 81848.85\r\n# The problem data is written to an .lp file\r\nprob.writeLP(\"The number of display problem.lp\")\r\n\r\n# The problem is solved using PuLP's choice of Solver\r\nprob.solve()\r\n\r\n# The status of the solution is printed to the screen\r\nprint(\"Status:\", p.LpStatus[prob.status])\r\n\r\n\r\n# Each of the variables is printed with it's resolved optimum value\r\nfor v in prob.variables():\r\n if v.varValue > 0:\r\n print(v.name, \"=\", v.varValue)\r\n\r\n\r\n# The optimised objective function value is printed to the screen \r\nprint(p.value(prob.objective))","repo_name":"vchernova12/python-project","sub_path":"real_example_1.py","file_name":"real_example_1.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8423453434","text":"# vim: set fileencoding=utf-8 :\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport logbook\nimport netaddr\n\nimport 
stethoscope.configurator\n\n\nlogger = logbook.Logger(__name__)\n\n\nclass VPNFilter(stethoscope.configurator.Configurator):\n\n config_keys = (\n 'VPN_CIDRS',\n )\n\n def __init__(self, *args, **kwargs):\n super(VPNFilter, self).__init__(*args, **kwargs)\n self._networks = netaddr.IPSet(self.config['VPN_CIDRS'])\n\n def augment(self, events):\n for event in events:\n event['vpn'] = (netaddr.IPAddress(event['ip_address']) in self._networks)\n return events\n","repo_name":"stasfilin/stethoscope","sub_path":"stethoscope/plugins/transform/vpnfilter.py","file_name":"vpnfilter.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"5052447585","text":"from google.appengine.ext import webapp\n\nfrom utils import auth, tasks\n\nimport urllib\nimport hashlib\n\nPOST = 'POST'\n\t\nclass PostMessage(webapp.RequestHandler):\n\tdef post(self):\n\t\targs = self.request.arguments()\n\t\targs.sort()\n\t\tparams = {}\n\t\tfor arg in args:\n\t\t\tparams[arg] = self.request.get(arg)\n\n\t\ttry:\n\t\t\ttoken = self.request.headers['Mail-Engine-Auth-Token']\n\t\texcept:\n\t\t\ttoken = ''\n\n\t\tif auth.check(token, urllib.urlencode(params), self.request.remote_addr):\n\t\t\ttasks.add('/build', params=params)\n","repo_name":"ghinch/Mail-Engine","sub_path":"handlers/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"27970793140","text":"#!python\r\n\r\nimport cgi, os\r\nform = cgi.FieldStorage()\r\npageId = form.getvalue(\"pageId\")\r\ntitle = form.getvalue(\"title\")\r\ndescription = form.getvalue(\"description\")\r\n\r\nopened_file = open('data/'+pageId, 'w')\r\nopened_file.write(description)\r\nopened_file.close()\r\n\r\nos.rename('data/'+pageId, 'data/'+title)\r\n\r\n#Redirection\r\nprint(\"Location: index.py?id=\"+title)\r\nprint()\r\n","repo_name":"Action2theFuture/Database-MySQL","sub_path":"process_update.py","file_name":"process_update.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71390467627","text":"import torch as th\nimport torch.nn as nn\nimport pytorch_lightning as pl\n\n\nclass FourierFeatures(nn.Module):\n def __init__(self, levels: int):\n super().__init__()\n\n self.levels = levels\n\n\n def forward(self, x: th.Tensor):\n scale = (2**th.arange(self.levels) * th.pi) \\\n .repeat(x.shape[1]) \\\n .to(x.device)\n args = x.repeat_interleave(self.levels, dim=1) * scale\n\n # NOTE: Sines and cosines on the same level are not adjacent \n # as in the original paper. Network should be invariant to this,\n # so there should be no loss difference. 
Computation is faster though.\n return th.hstack((th.cos(args), th.sin(args)))\n\n\nclass Nerf2d(pl.LightningModule):\n def __init__(\n self, \n width: int, \n height: int, \n fourier_levels: int,\n learning_rate: float = 1e-3,\n learning_rate_decay: float = 0.5,\n learning_rate_decay_patience: int = 20,\n weight_decay: float = 0.0,\n ):\n super().__init__()\n self.save_hyperparameters()\n\n self.width = width\n self.height = height\n \n self.fourier_levels = fourier_levels\n \n self.learning_rate = learning_rate\n self.learning_rate_decay = learning_rate_decay\n self.learning_rate_decay_patience = learning_rate_decay_patience\n \n self.weight_decay = weight_decay\n \n \n self.model = nn.Sequential(\n FourierFeatures(levels=self.fourier_levels),\n nn.Linear(2*2*self.fourier_levels, 256),\n \n # nn.Linear(2, 256),\n \n nn.Tanh(),\n nn.Linear(256, 256),\n nn.Tanh(),\n nn.Linear(256, 256),\n nn.Tanh(),\n nn.Linear(256, 3),\n nn.Sigmoid()\n )\n\n\n def forward(self, x: th.Tensor):\n return self.model(x)\n\n\n def training_step(self, batch: tuple[th.Tensor, th.Tensor], batch_idx: int):\n x, y = batch\n\n y_hat = self(x)\n loss = nn.functional.mse_loss(y_hat, y)\n self.log(\"train_loss\", loss)\n\n return loss\n\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n\n y_hat = self(x)\n loss = nn.functional.mse_loss(y_hat, y)\n self.log('val_loss', loss)\n\n\n def configure_optimizers(self):\n optimizer = th.optim.Adam(self.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)\n scheduler = th.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, \n mode=\"min\", \n factor=self.learning_rate_decay, \n patience=self.learning_rate_decay_patience\n )\n\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": scheduler,\n \"monitor\": \"train_loss\"\n }\n","repo_name":"sarphiv/nerf-experiments","sub_path":"2d-reconstruction/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39123274900","text":"#!/usr/bin/env python\nimport ngram\nimport filters\nimport csv\nimport random\n\nngram.good_turing =lambda x:x\n\ndef trial(train,n,p,probs):\n\t\"\"\"n is n-gram size. 
p is whether it's log or arbitrary precision\"\"\"\n\tngram.set_fractions(p)\n\tsentence=ngram.make_sentence(probs[0:(n+1)])\n\tpp=ngram.perplexity(probs[0:(n+1)],sentence)\n\ttime='NA'\n\treturn [n,p,pp,time,' '.join(sentence)]\n\ndef sentence_generation(train,filename,nmax,reps,probs_ap,probs_log):\n\t#Open results file\n\tout_fh = open(filename, 'wb')\n\tout=csv.writer(out_fh, delimiter='|', quotechar='&', quoting=csv.QUOTE_NONE)\n\tout.writerow(['n','use fractions','perplexity','time','sentence'])\n\t\n\t#Write perplexities and other output\n\tfor i in range(0,reps):\n\t\tfor n in range(1,nmax+1):\n\t\t\ttry:\n\t\t\t\tout.writerow(trial(train,n,True,probs_ap))\n\t\t\t\tout.writerow(trial(train,n,False,probs_log))\n\t\t\t\tout_fh.flush()\n\t\t\texcept(AttributeError):\n\t\t\t\tpass\n\ndef run(train,nmax,reps,out):\n\t#Get probabilities with arbitrary precision\n\tfh = open(train)\n\tngram.set_fractions(True)\n\tprobs_ap=ngram.probabilities(ngram.good_turing(ngram.ngram(nmax,filters.unk(filters.shakespeare(fh)))))\n\t\n\t#Get probabilities with logs\n\tfh = open(train)\n\tngram.set_fractions(False)\n\tprobs_log=ngram.probabilities(ngram.good_turing(ngram.ngram(nmax,filters.unk(filters.shakespeare(fh)))))\n\t\n\t#Make sentences\n\tsentence_generation(train,out,nmax,reps,probs_ap,probs_log)\n\ndef main():\n\tnmax=5\n\treps=30\n\t#run('War and Peace/short.txt',nmax,reps,'War and Peace results.csv')\n\t#run('Shakespeare/short.txt',nmax,reps,'Shakespeare results.csv')\n\trun('War and Peace/Train.txt',nmax,reps,'War and Peace results.csv')\n\trun('Shakespeare/Train.txt',nmax,reps,'Shakespeare results.csv')\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"astory/cs4740_1","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"13159201061","text":"import numpy as np\nfrom data_tree import series, logger\n\n\ndef test_hdf5_lock():\n\n s = series(np.arange(100))\n s1 = s.hdf5(\"test_s1.hdf5\")\n s2 = s.hdf5(\"test_s2.hdf5\")\n for s in [s1, s2]:\n s.clear()\n s.ensure()\n from itertools import islice\n # for some reason, islice stops execution\n logger.info(list(islice(zip(*(i.batch_generator(16,preload=5) for i in [s1, s2])),0,1))[0])\n\n","repo_name":"proboscis/data_tree","sub_path":"test/test_hdf5_zip.py","file_name":"test_hdf5_zip.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"4782664865","text":"# Given a sorted array and a target value, return the index if the target is found. 
\n# If not, return the index where it would be if it were inserted in order.\n# You may assume no duplicates in the array.\n\n\nimport unittest\nclass Solution:\n def searchInsert(self, nums, target):\n left = 0\n right = len(nums) - 1\n while left <= right:\n index = (right + left) // 2\n if nums[index] == target:\n return index\n elif nums[index] < target:\n left = index + 1\n print('higher', index)\n else:\n right = index - 1\n print('lower', index)\n\n print('index final:', index)\n if target < nums[index]:\n return index\n else:\n return index + 1\n\n \nclass TestStringMethods(unittest.TestCase):\n def test_isLocated(self):\n s = Solution()\n result = s.searchInsert([1,3,5], 5)\n self.assertEqual(result, 2)\n\n def test_located2(self):\n s = Solution()\n result = s.searchInsert([1, 3, 5], 6)\n self.assertEqual(result, 3)\n\n def test_located3(self):\n s = Solution()\n result = s.searchInsert([1, 3, 5, 6], 2)\n self.assertEqual(result, 1)\n \n\n def test_located4(self):\n s = Solution()\n result = s.searchInsert([1, 3, 5, 6], 0)\n self.assertEqual(result, 0)\n\nunittest.main(exit=False)\n","repo_name":"bukandu1/code-challenge-practice","sub_path":"leet_code/search_target.py","file_name":"search_target.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12675474656","text":"import numpy as np\nimport pandas as pd\nSOURCE_DATA_PATH = '../data/train.csv'\nDATASET_FALL_PATH = '../data/dataset/sensor_data.csv'\n\ndata = pd.read_csv(SOURCE_DATA_PATH,index_col=False)\nprint(data.shape)\n# data = data.drop(columns=['label'])\nvalues = data.iloc[0:1, 1:1201].values\nprint(data.shape)\nprint(values)\ndata = pd.DataFrame(values)\ndata.to_csv(DATASET_FALL_PATH,index=False)","repo_name":"Ruksana-RASHEED/FD-CNN-1","sub_path":"utils/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30865442867","text":"class Solution:\n def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\n nums.sort()\n res=[]\n \n subset=[]\n def dfs(i):\n if i>=len(nums):\n res.append(subset.copy())\n return \n \n # decision to include the element at nums[i]\n subset.append(nums[i])\n dfs(i+1)\n\n # decision NOT to include the element at nums[i]\n subset.pop()\n while i+1 files...\")\n c3Folder = None\n\n if self.pFiles['files'][fileKey]['metaData']:\n if self.pFiles['files'][fileKey]['supported']:\n \n \n if fileValue['c3File']:\n c3Folder = self.pFiles['c3proj'][fileKey]\n else:\n c3Folder = self.pFiles['c3proj']['rootFileFolders'][fileKey]\n \n self.loadFiles(fileKey, c3Folder, [])\n\n else:\n\n folderPath = os.path.join(self.tempPath, fileValue['folderName'])\n if os.path.exists(folderPath):\n for fileName in os.listdir(folderPath):\n\n fullPath = os.path.join(folderPath, fileName)\n fileContent = None\n with open(fullPath, 'rb') as file:\n fileContent = file.read()\n\n c3_file = C3File(fileName, fileContent, None, [])\n\n fileValue['fList'].append(c3_file)\n \n\n if (not fileValue['supported']) and len(fileValue['fList']) > 0:\n raise NameError(\"Projects with the <\" + fileKey + \"> file type are not supported\")\n\n shutil.rmtree(self.tempPath)\n\n def loadFiles(self, fileType, c3Folder, c3Dir):\n \n if 'name' in c3Folder:\n c3Dir.append(c3Folder['name'])\n\n for item in c3Folder['items']:\n c3_file = None\n dirName = self.pFiles['files'][fileType]['folderName']\n \n\n if fileType in 
self.c3FileList:\n tempName = item.lower() + '.json'\n logger.info('file name: ' + tempName)\n with open(os.path.join(self.tempPath, dirName, tempName), encoding='utf8') as json_file:\n tempJson = json.load(json_file)\n \n c3_file = C3File(tempName, tempJson, None, c3Dir)\n\n else:\n tempName = item['name']\n tempType = item['type']\n with open(os.path.join(self.tempPath, dirName, tempName), 'rb') as file:\n tempJson = file.read()\n\n c3_file = C3File(tempName, tempJson, tempType, c3Dir)\n\n self.pFiles['files'][fileType]['fList'].append(c3_file)\n\n for folder in c3Folder['subfolders']:\n \n self.loadFiles(fileType, folder, c3Dir.copy())\n\n self.pFiles['files'][fileType]['subfolders'].append(c3Dir)\n #logger.info(c3Dir) \n c3Dir = []\n \n def exportProject(self, export_path='export', name='c3project', one_file=True):\n \n logger.info(\"Exporting files...\")\n \n #creating main folder and c3proj file\n if not os.path.exists(self.tempPath):\n os.makedirs(self.tempPath)\n \n #updating c3proj file\n self.updateC3proj()\n\n logger.info(\"Writing c3proj file\")\n with open(os.path.join(self.tempPath, 'project.c3proj'), 'w') as outfile:\n json.dump(self.pFiles['c3proj'], outfile, indent=4)\n\n #creating c3 files\n \n for fileKey, fileValue in self.pFiles['files'].items():\n \n if len(fileValue['fList']) > 0:\n logger.info(\"Writing <\" + fileKey + \">files...\")\n folderName = os.path.join(self.tempPath, fileValue['folderName'])\n if not os.path.exists(folderName):\n os.makedirs(folderName)\n \n for currentFile in fileValue['fList']:\n logger.info(\"File name: \" + currentFile.name)\n if fileKey in self.c3FileList:\n with open(os.path.join(folderName, currentFile.name), 'w') as outfile:\n json.dump(currentFile.content, outfile, indent=4)\n else:\n with open(os.path.join(folderName, currentFile.name), \"wb\") as binary_file:\n binaryFormat = bytearray(currentFile.content)\n binary_file.write(binaryFormat)\n\n #exporting project\n if not os.path.exists(export_path):\n os.makedirs(export_path)\n\n if one_file:\n logger.info(\"Compressing project into .c3p file...\")\n zipPath = os.path.join(export_path, name + \".c3p\")\n dirPath = self.tempPath\n \n zipf = zipfile.ZipFile(zipPath , mode='w')\n lenDirPath = len(dirPath)\n for root, _ , files in os.walk(dirPath):\n for file in files:\n filePath = os.path.join(root, file)\n zipf.write(filePath , filePath[lenDirPath :] )\n zipf.close()\n\n else:\n logger.info(\"Exporting project to target parth...\")\n self.copyFiles(self.tempPath, export_path)\n\n if one_file:\n logger.info(\"File created '\" + name + \".c3p' at '\" + export_path + \"'\")\n else:\n logger.info(\"Project exported at path: \" + export_path) \n shutil.rmtree(self.tempPath)\n \n\n def copyFiles(self, src, dst):\n logger.info('src: ' + src)\n fileList = []\n for f in os.listdir(src):\n fileList.append(f)\n\n for filename in fileList:\n filename= os.path.join(src, filename)\n if os.path.isfile(filename):\n shutil.copy(filename, dst)\n else:\n destFolder = os.path.join(dst, os.path.basename(filename)) \n if not os.path.exists(destFolder):\n os.mkdir(destFolder)\n self.copyFiles( filename, destFolder )\n \n #updates c3proj file and sids according to the loaded files\n def updateC3proj(self):\n\n logger.info(\"Updating .c3proj file\")\n for fKey, fValue in self.pFiles['files'].items():\n if fValue['supported'] and fValue['metaData']:\n \n updatedDir = {'items' : [], 'subfolders' : []}\n \n\n for c3File in self.pFiles['files'][fKey]['fList']: \n\n tempDir = updatedDir\n\n #for each file's 
directory\n                    for fDir in c3File.dir:\n                        \n                        createDir = True\n                        \n                        tempDir = tempDir['subfolders']\n                        for j, c3Dir in enumerate(tempDir):\n                            \n                            if fDir == c3Dir['name']:\n                                tempDir = tempDir[j]\n                                createDir = False\n                                break\n                        \n                        if createDir:\n                            logger.info(\"new dir: \" + fDir)\n                            tempDir.append({'items' : [], 'subfolders' : [], 'name' : fDir})\n                            tempDir = tempDir[len(tempDir)-1]\n                    \n                    if fValue['c3File'] == True:\n                        tempDir['items'].append(c3File.content['name'])\n                    else:\n                        tempDir['items'].append({'name' : c3File.name, 'type' : c3File.type, 'sid' : 0})\n                \n                \n            if fValue['c3File']:\n                self.pFiles['c3proj'][fKey] = updatedDir\n            else:\n                self.pFiles['c3proj']['rootFileFolders'][fKey] = updatedDir\n\n        # Set new values to project sids and uids. Very unlikely for this to be necessary, it's probably safer to leave this commented\n        # self.setProjectIds()\n        \n\n\n\n    def addC3File(self, c3File):\n        pass\n\n    def setProjectIds(self):\n        \n        logger.info(\"Setting sids for c3proj...\")\n        self.pFiles['c3proj'] = self.setFileSid(self.pFiles['c3proj'])\n        \n        \n        for key, value in self.pFiles['files'].items():\n            \n            if value['supported'] and value['c3File']:\n                logger.info(\"Setting sids for files <\" + key + \">\")\n                for f in value['fList']:\n                    \n                    logger.info(\"file: \" + f.name)\n                    f.content = self.setFileSid(f.content)\n\n        logger.info (\"All sids set\")\n\n    def setFileSid(self, jsonFile):\n\n        jsonStr = json.dumps(jsonFile)\n\n        pattern = re.compile(r'(\"sid\": *)(\\d+)') \n        result = re.subn(pattern, self.newSid, jsonStr)\n\n        pattern = re.compile(r'(\"uid\": *)(\\d+)') \n        result = re.subn(pattern, self.newUid, result[0]) \n\n        jsonFile = json.loads(result[0])\n        return jsonFile\n    \n\n    def newSid(self, string):\n        self.sid += 1\n        return '\"sid\": ' + str(self.sid)\n    \n    def newUid(self, string):\n        self.uid += 1\n        return '\"uid\": ' + str(self.uid)\n\n    def getGlobalVarList(self):\n\n        globalVarList = []\n\n        for f in self.pFiles['files']['eventSheets']['fList']:\n            for event in f.content['events']:\n                if event['eventType'] == 'variable':\n                    globalVarList.append(event['name'])\n\n        return globalVarList\n\n    def getGroupList(self):\n\n        groupList = []\n\n        for f in self.pFiles['files']['eventSheets']['fList']:\n            groupList += self.getGroupInEvents(f.content['events'])\n        \n        return groupList\n\n    def getGroupInEvents(self, events):\n\n        groupList = []\n\n        for event in events:\n            if event['eventType'] == 'group':\n                groupList.append(event['title'])\n\n            if 'children' in event:\n                groupList += self.getGroupInEvents(event['children'])\n\n        return groupList\n\n\ndef main():\n    \n    '''\n    targetProjectA = C3Project('test/files/projects/oneFileProject.c3p')\n    targetProjectA.exportProject(export_path='export/projectA', one_file=False)\n    \n    targetProjectB = C3Project('test/files/projects/testProject.c3p')\n\n    targetProjectB.removeC3File('objectTypes', 'keyboard.json')\n    targetProjectB.removeC3File('sound', 'sfx_sample.webm') \n\n    \n    targetProjectB.exportProject(export_path='export/projectB', one_file=False)\n    '''\n    \n    targetProjectC = C3Project('test/spookids2.c3p')\n    targetProjectC.exportProject(export_path='export/projectC', one_file=False)\n    \n    '''\n    targetProjectD = C3Project('test/files/projects/isoengine.c3p')\n    targetProjectD.exportProject(export_path='export/projectD', one_file=False)\n    '''\n\nif __name__== \"__main__\":\n    main()","repo_name":"cb130felix/C3PM","sub_path":"C3Project.py","file_name":"C3Project.py","file_ext":"py","file_size_in_byte":15124,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
+{"seq_id":"28875305481","text":"import matplotlib.pyplot as plt\nimport sys\n\nfilename = sys.argv[1]\nf = open(filename)\n# x-axis: data size\nX = f.readline().split(\":\")[1].split()\nX = list(map(int, X))\n# time unit of Y\ntunit = f.readline().split(\":\")[1]\nalgorithms = []\nrunTimes = []\n# sorting time of each algorithm\nfor line in f.readlines():\n\talgo, Ts = line.split(\":\")\n\talgorithms.append(algo.strip())\n\trunTime = list(map(float, Ts.split()))\n\trunTimes.append(runTime)\n\n# plot graph\nfor i in range(len(runTimes)):\n\tplt.plot(X, runTimes[i], marker='o', label=algorithms[i])\nplt.yscale('log', basey=2)\nplt.xscale('log', basex=2)\nplt.legend(loc='upper left')\nplt.xlabel('Input size (N)')\nplt.ylabel('T(N) %s' % (tunit))\nplt.title('Running time of sorting algorithms (log-log plot)')\nplt.savefig(filename.split('.')[0] + '.png')\nplt.show()\t","repo_name":"kulwadeeso/sit-csc209_256002","sub_path":"plot_sorting.py","file_name":"plot_sorting.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"29176768556","text":"\ndef _run_pipeline(steps, origin, **kwargs):\n \"\"\"Given a list of functions runs them in a pipeline.\n\n Each function receives the output of the previous function as\n its input and all the keyword arguments provided to the\n pipeline. Output **must be a dictionary**.\n\n \"\"\"\n output = origin\n for step in steps:\n output, kwargs = step(output, **kwargs)\n return output\n","repo_name":"nvsr07/yucca-storage","sub_path":"mongocluster/yuccausage/yuccausage/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42076539660","text":"import itertools\n\nimport importlib.util\nimport os\nfrom tempfile import NamedTemporaryFile\nfrom contextlib import contextmanager\n\n\nclass KRecord:\n \"\"\"\n A smoother namedtuple -- like https://pythonhosted.org/pyrecord but using the existing class syntax.\n Like a 3.7 dataclass, but don't need to decorate each derived class\n\n Derive a class from KRecord, declare its fields, and use keyword args in __init__\n\n def MyClass(KRecord):\n cost: float\n names: List[String]\n\n def __init__(cost, names):\n super().__init__(cost=cost, names=names)\n\n And now you have a nice little record class.\n\n Construct a MyClass:\n a = MyClass(1.3, [\"fred\", \"conor\", \"una\"])\n\n Compare two MyClasses\n if a == b: ...\n \n Etc\n \"\"\"\n\n def __init__(self, **args):\n for (nt, v) in args.items():\n # assert nt in self.__annotations__ # <- This check will fail for chains of derived classes -- only the deepest has __annotations__ ready yet.\n setattr(self, nt, v)\n\n def __eq__(self, that):\n if type(self) != type(that):\n return False\n\n for nt in self.__annotations__:\n if getattr(self, nt) != getattr(that, nt):\n return False\n return True\n\n\ndef ensure_list_of_lists(l):\n \"\"\"return input, wrapped in a singleton list if its first element is not a list\n\n ensure_list_of_lists([]) = []\n ensure_list_of_lists([1]) = [[1]]\n ensure_list_of_lists([[1]]) = [[1]]\n ensure_list_of_lists([[1,2]]) = [[1, 2]]\n ensure_list_of_lists([[1,2], [3,4]]) = [[1, 2], [3, 4]]\n \"\"\"\n\n if not isinstance(l, list):\n raise ValueError(\"Expect a list\")\n if len(l) < 1: # Empty list is empty list\n return l\n if not isinstance(l[0], list):\n return [l]\n else:\n return l\n\n\ndef single_elem(l):\n assert 
len(l) == 1\n return l[0]\n\n\ndef paren(s):\n return \"(\" + s + \")\"\n\n\nPYTHON_MODULE_NAME = \"ks_mod\"\n\n\ndef import_module_from_path(module_name, path):\n # These three lines are for loading a module from a file in Python 3.5+\n # https://bugs.python.org/issue21436\n spec = importlib.util.spec_from_file_location(module_name, path)\n py_out = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(py_out)\n return py_out\n\n\ndef translate_and_import(source_file_name, *args):\n from ksc.translate import translate\n\n py_out = translate(*args, source_file_name, with_main=False)\n with NamedTemporaryFile(mode=\"w\", suffix=\".py\", delete=False) as f:\n f.write(f\"# AUTOGEN from {source_file_name} via ksc.utils.translate_and_import\")\n f.write(py_out)\n\n print(f.name)\n return import_module_from_path(PYTHON_MODULE_NAME, f.name)\n\n\ndef get_ksc_dir():\n if \"KSC_RUNTIME_DIR\" in os.environ:\n ksc_runtime_dir = os.environ[\"KSC_RUNTIME_DIR\"]\n ksc_src = os.path.dirname(ksc_runtime_dir)\n return os.path.dirname(ksc_src)\n\n d = os.path.dirname(__file__) # src/python/ksc\n d = os.path.dirname(d) # src/python\n d = os.path.dirname(d) # src\n return os.path.dirname(d)\n\n\ndef get_ksc_build_dir():\n return get_ksc_dir() + \"/build\"\n\n\ndef get_ksc_paths():\n if \"KSC_RUNTIME_DIR\" in os.environ:\n ksc_runtime_dir = os.environ[\"KSC_RUNTIME_DIR\"]\n else:\n ksc_runtime_dir = get_ksc_dir() + \"/src/runtime\"\n\n if \"KSC_PATH\" in os.environ: # TODO: We should deprecate this\n ksc_path = os.environ[\"KSC_PATH\"]\n else:\n ksc_path = get_ksc_build_dir() + \"/bin/ksc\"\n\n return ksc_path, ksc_runtime_dir\n\n\ndef encode_name(s: str) -> str:\n # TODO: this could be faster\n return (\n s.replace(\"@\", \"$a\")\n .replace(\",\", \"$_\")\n .replace(\".\", \"$o\")\n .replace(\"[\", \"$6\")\n .replace(\"]\", \"$9\")\n .replace(\"<\", \"$d\")\n .replace(\">\", \"$b\")\n .replace(\"*\", \"$x\")\n .replace(\":\", \"$8\")\n )\n\n\ndef ndgrid_inds(sz):\n \"\"\"\n Return a sequnce of tuples of indices as if generated by nested comprehensions.\n Example:\n ndgrid_inds((ni,nj))\n Returns the same sequence as\n [(i,j) for i in range(ni) for j in range(nj)]\n\n The iterates are always tuples so\n ndgrid_inds(4)\n returns\n [(0,), (1,), (2,), (3,)] \n\n \"\"\"\n\n return itertools.product(*map(range, sz))\n\n\ndef singleton(cls):\n \"\"\" Simple decorator that makes a single instance of a class.\n @singleton\n class Foo:\n def do_foo(self):\n .....\n Foo.do_foo()\n \"\"\"\n return cls()\n\n\n# https://stackoverflow.com/a/41904558/35544\n# submodule_search_locations doesn't work for this\n@contextmanager\ndef add_to_path(p):\n import sys\n\n old_path = sys.path\n old_modules = sys.modules\n sys.modules = old_modules.copy()\n sys.path = sys.path[:]\n sys.path.insert(0, p)\n try:\n yield\n finally:\n sys.path = old_path\n sys.modules = old_modules\n\n\nimport os.path\n\n\ndef write_file_if_different(to_write, filename, verbose):\n \"\"\"\n Write LINES to FILENAME unless they are identical to the current contents\n If VERBOSE, print info to stdout.\n \"\"\"\n if os.path.isfile(filename):\n # Read from file\n with open(filename, \"r\") as f:\n existing_contents = f.read()\n\n # Compare to new\n if existing_contents == to_write:\n if verbose:\n print(f\"ksc.utils: File not changed: {filename}\")\n return\n\n if verbose:\n print(f\"ksc.utils: File changed, overwriting {filename}\")\n\n else:\n if verbose:\n print(f\"ksc.utils: New file {filename}\")\n\n # And overwrite if different\n with 
open(filename, \"w\") as f:\n f.write(to_write)\n","repo_name":"microsoft/knossos-ksc","sub_path":"src/python/ksc/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"} +{"seq_id":"17692389062","text":"import matplotlib.pyplot as plt\n\nk_list = []\ntrain_acc = []\ntest_acc = []\n\nwith open('knn_tuning.txt') as f:\n k_line = f.readline()\n while 1:\n k_list.append([num for num in k_line.split() if num.isdigit()][0])\n train_acc.append(float(f.readline().split(':')[1]))\n test_acc.append(float(f.readline().split(':')[1]))\n k_line = f.readline()\n if (k_line == ''):\n break\n\n\nplt.plot(k_list[:11], train_acc[:11], label = 'train accuracy')\nplt.plot(k_list[:11], test_acc[:11], label = 'test accuracy')\nplt.legend()\nplt.savefig('bestk1.png')\nplt.show()\nplt.clf()\n\nplt.plot(k_list[12:], train_acc[12:], label = 'train accuracy')\nplt.plot(k_list[12:], test_acc[12:], label = 'test accuracy')\nplt.legend()\nplt.savefig('bestk2.png')\nplt.legend()\nplt.show()","repo_name":"HuyNguyen-hust/ml_proj_text_classification","sub_path":"result/visualize_knn.py","file_name":"visualize_knn.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"201454919","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 25 17:36:11 2022\n\n@author: CoraAnn\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom astropy.io import fits\nfrom scipy.signal import find_peaks\nfrom scipy.optimize import curve_fit\n#from scipy.fft import fft, fftfreq\n\n# From PRH\n# sflux3=np.interp(wavelength2,wavelength,sflux,left=0,right=0)\n# serror3=np.interp(wavelength2,wavelength,serror,left=0,right=0)\n\n#%% Useful functions\n\ndef align_data(data_wave, data_flux, data_err, data_names=None, wave_min=1000.,\\\n wave_max=3000., get_res=True, save=False, save_dir=None, res=None,\\\n interp_wave=0):\n \"\"\"\n Parameters\n ----------\n data_wave : arr\n DESCRIPTION.\n data_flux : arr\n DESCRIPTION.\n data_err : arr\n DESCRIPTION.\n data_names : arr, optional\n Names of the spectra to use when saving. Generally of the form\n 'spec-plate-mjd-fiber'\n wave_min : float, optional\n DESCRIPTION. The default is 1000.\n wave_max : float, optional\n DESCRIPTION. The default is 3000.\n get_res : bool, optional\n DESCRIPTION. The default is True, in which case the resolution will be \n calculated for each spectrum, and the minimum will be used.\n If False, the default resolution of 1 wavelength bin per Angstrom\n will be used. The user can set a resolution with kwarg 'res'.\n save : bool, optional\n DESCRIPTION. The default is False.\n save_dir : str, optional\n DESCRIPTION. The default is None.\n res : float, optional\n Resolution to use. 
'get_res' must be set to 'False'.\n    interp_wave : arr, optional\n        If provided, use this wavelength array for all spectra instead of\n        calculating the resolution.\n\n    Returns\n    -------\n    interp_wave : arr\n        Aligned wavebins.\n    interp_fluxes : arr\n        Fluxes at aligned wavebins (dimension = numspecs).\n    interp_errs : arr\n        Errors at aligned wavebins (dimension = numspecs).\n\n    \"\"\"\n    \n    numspecs = len(data_wave)\n    \n    if get_res: #calculate the minimum resolution of the sample\n        lengths = [] #number of data points in each spectrum\n        coverage = [] #wavelength coverage of each spectrum\n        resolutions = [] #resolution of each spectrum\n        for i in data_wave: #calculate the wavelength coverage of each spec\n            length=len(i)\n            lengths.append(length)\n            delta = max(i) - min(i)\n            coverage.append(delta)\n        for idx, cov in enumerate(coverage): #calculate the resolution\n            res = lengths[idx]/cov\n            resolutions.append(res)\n        res=min(resolutions)\n        print('Default resolution (1 wavebin per A) reset to', res)\n    elif res:\n        res = res\n    else:\n        res=1\n    \n    if interp_wave:\n        length = len(interp_wave)\n        delta = max(interp_wave) - min(interp_wave)\n        res = length / delta\n        print('Resolution of provided wave arr is :', np.round(res,2), 'wavebins/Ang.')\n    else:\n        num_pts = res * (wave_max - wave_min)\n        num_pts = int(np.round(num_pts, 0))\n        interp_wave = np.linspace(wave_min, wave_max, num_pts)\n    \n    interp_fluxes = []\n    interp_errs = []\n    \n    for i in range(0, numspecs): \n        \n        aligned_flux = np.interp(interp_wave, data_wave[i], data_flux[i], left=0, right=0)\n        aligned_err = np.interp(interp_wave, data_wave[i], data_err[i], left=0, right=0)\n        \n        interp_fluxes.append(aligned_flux)\n        interp_errs.append(aligned_err)\n        \n        if save:\n            \n            save_name = str(data_names[i]) + str('_aligned.dat')\n            \n            save_data = np.array((interp_wave, aligned_flux, aligned_err)).T\n            header = 'interp_wave, aligned_flux, aligned_err'\n            \n            np.savetxt((save_dir + save_name), save_data, header=header, fmt='%s')\n    \n    interp_fluxes = np.asarray(interp_fluxes, dtype='float')\n    interp_errs = np.asarray(interp_errs, dtype='float')\n    \n    return(interp_wave, interp_fluxes, interp_errs)\n\ndef plot_sdss_lines(wave_min, wave_max):\n    \"\"\"\n    Adds vertical lines marking some common SDSS spectral lines\n    (Ly-a, NV, SiIV+OIV, CIV, CIII, MGII). Only plots within the given \n    wavelength range.\n    \n    Labels are included. 
To display, please add 'plt.legend()' to your script.\n\n    Parameters\n    ----------\n    wave_min : float\n        Minimum wavelength for plotting.\n    wave_max : float\n        Maximum wavelength for plotting.\n\n    Returns\n    -------\n    None.\n\n    \"\"\"\n    \n    waves = np.array([1033, 1215, 1240.81, 1399.8, 1549, 1908, 2799])\n    species = ['OVI', r'Ly$\alpha$', 'NV', 'SiIV+OIV', 'CIV', 'CIII', 'MgII']\n    \n    wl_range_mask = np.where((wave_min < waves) & (wave_max > waves))\n    \n    for i in wl_range_mask[0]:\n        plt.axvline(waves[i], label=species[i], ls='--', color=('C'+str(i)))\n\ndef divide_specs(spec1, spec2):\n    \"\"\"\n    Divide aligned spectral fluxes.\n    \n    Both flux arrays must have the same dimension (1D array of length n).\n\n    Parameters\n    ----------\n    spec1 : arr\n        Flux values of spectrum 1.\n    spec2 : arr\n        Flux values of spectrum 2.\n\n    Returns\n    -------\n    Divided flux array (spec1 / spec2).\n    If both have a value of 0 at a point, returns 1.\n    If one has a value of 0 at a point, 0 flux is reset to a small number.\n\n    \"\"\"\n    \n    div_flux_list = []\n    \n    for i,flux1 in enumerate(spec1):\n        \n        flux2 = spec2[i]\n        \n        div_val = 1000\n        \n        if ((flux1==0) and (flux2==0)):\n            div_val = 1\n        elif ((flux1==0) and (flux2!=0)):\n            flux1 = 0.00000000000001\n            #div_val = 0\n            div_val = flux1/flux2\n        elif ((flux1!=0) and (flux2==0)):\n            flux2 = 0.00000000000001\n            #div_val = 0\n            div_val = flux1/flux2\n        else:\n            div_val = flux1/flux2\n        \n        div_flux_list.append(div_val)\n    \n    return(div_flux_list)\n\ndef get_avg_flux_val(wave, flux, center, width):\n    \"\"\"\n    Get the average flux value centered at some wavelength.\n\n    Parameters\n    ----------\n    wave : arr\n        Wavelength array for spectrum.\n    flux : arr\n        Flux array for spectrum.\n    center : float\n        Wavelength for which you want to calculate the average flux.\n        Must be in same units as wave array.\n    width : float\n        Delta wavelength units over which to compute the average.\n\n    Returns\n    -------\n    Average flux value within range.\n\n    \"\"\"\n    \n    avg_flux_val = 1\n    \n    wave_min = center - width\n    wave_max = center + width\n    \n    wave_mask = np.where((wave >= wave_min) & (wave <= wave_max))\n    \n    avg_flux_val = np.median(flux[wave_mask])\n    \n    return(avg_flux_val)\n\ndef norm_specs(waves, fluxes, norm_point=1300, width=1):\n    \"\"\"\n    Normalize spectra based on flux value at a certain wavelength.\n\n    Parameters\n    ----------\n    waves : arr\n        Wavelength arrays (1 for each spec).\n    fluxes : arr\n        Flux arrays.\n    norm_point : float, optional\n        Wavelength at which to normalize the spectra. The default is 1300.\n    width : float, optional\n        Width (in wavelength units) over which to average the flux values\n        around the norm point. The default is 1.\n\n    Returns\n    -------\n    wavelength arrays (unchanged), normalized fluxes.\n\n    \"\"\"\n    \n    norm_fluxes = []\n    \n    for i, flux in enumerate(fluxes):\n        \n        norm = get_avg_flux_val(waves[i], flux, center=norm_point, width=width)\n        norm_flux = flux / norm\n        norm_fluxes.append(norm_flux)\n    \n    return(waves, norm_fluxes)\n\ndef get_line_mins(wave, norm_flux, wave_min=900, wave_max=1216, \\\n                  distance=3, width=2, perc_cut=1.0, plot=True):\n    \"\"\"\n    Get minimum abs line values in Ly-a region.\n\n    Parameters\n    ----------\n    wave : arr\n        Wavelength values.\n    norm_flux : arr\n        Normalized flux values.\n    wave_min : float, optional\n        Minimum wavelength for finding lines. The default is 900.\n    wave_max : float, optional\n        Maximum wavelength for finding lines. The default is 1216.\n    distance : float, optional\n        Minimum distance in wavelength bins between potential abs lines. 
The default is 3.\n    width : float, optional\n        Minimum width of abs lines in wavelength bins. The default is 2.\n    perc_cut : float, optional\n        Between 0 and 1. Percentage of normalized flux abs must exceed. The default is 1.0.\n    plot : bool, optional\n        If true, plot the Ly-a region and mark the found line mins. The default is True.\n\n    Returns\n    -------\n    Wavelength of line mins, fluxes of line mins.\n\n    \"\"\"\n    \n    wave_mask = np.where((wave >= wave_min) & (wave <= wave_max))\n    \n    #x = -1*norm_flux[wave_mask]\n    x = -1*norm_flux\n    \n    border = -perc_cut*np.ones(x.size)\n    peaks, properties = find_peaks(x, height=(border, 0), distance=distance, width=width)\n    \n    line_mask = np.where((wave[peaks] >= wave_min) & (wave[peaks] <= wave_max))\n    \n    line_mins_wave = wave[peaks][line_mask]\n    line_mins_flux = -1*x[peaks][line_mask]\n    line_mins_idxs = peaks[line_mask]\n    \n    if plot:\n        \n        \n        plt.plot(wave, norm_flux, lw=1, ds='steps-mid')\n        \n        plt.plot(wave, -border, \"--\", color=\"gray\")\n        #plt.plot(border, \":\", color=\"gray\")\n        plt.plot(wave[peaks], -x[peaks], \"x\")\n        plt.xlim(wave_min, wave_max)\n        plt.ylim(-0.1, max(norm_flux[wave_mask]))\n        \n        # plt.hlines(y=-1*properties[\"width_heights\"], xmin=properties[\"left_ips\"]+idx_min,\n        #            xmax=properties[\"right_ips\"]+idx_min, color = \"C1\")\n        \n        \n        plt.show()\n        plt.clf()\n    \n    return(line_mins_wave, line_mins_flux, line_mins_idxs)\n\n#%% Compare specs - single object\n\nspec_dir = 'specs/J085825/'\n# spec_dir = 'specs/J014548/'\n\nz = 2.863\n# z = 2.8\n\nobj_specs = []\nobj_names = []\nfor spec in os.listdir(spec_dir):\n    data = np.loadtxt((spec_dir+spec)).T\n    obj_specs.append(data)\n    obj_names.append(spec)\n    \nspec0 = obj_specs[0]\nspec1 = obj_specs[1]\n\n# Overplot\n\n# Ok cool...\n# Lots of little lines are in the same place on both :)\n# IDK how to get gaussians there, but let me put some vertical\n# lines over the places where they match.\n\nplt.figure(dpi=200)\n\n# for i,spec in enumerate(obj_specs):\n    \n#     plt.plot(spec[0]/(1 + z), spec[1], lw=1, alpha=0.75, label=obj_names[i], ds='steps-mid')\n\n\nplt.plot(spec0[0]/(1 + z), spec0[1], lw=1, alpha=0.7, \\\n         label='J085825 MJD: 51912', ds='steps-mid', color='blue')\nplt.plot(spec1[0]/(1 + z), spec1[1], lw=1, alpha=0.7, \\\n         label='J085825 MJD: 55537', ds='steps-mid', color='red')\n#plt.plot(spec[0]/(1 + z), spec[0], lw=1, alpha=0.75, label=obj_names[i], ds='steps-mid')\n\nplt.xlabel('Rest Frame Wavelength (A)')\nplt.ylabel(r'Flux ($10^{-17}$ erg/cm$^2$/s/A)')\n\nplt.xlim(1060, 1200)\nplt.ylim(bottom=-0.1, top=25)\n\n#plt.axvline(4033, alpha=0.4, color='black')\n#plt.axvline(4150, alpha=0.4, color='black')\n\n\nplt.legend(fontsize=8) \nplt.show()\nplt.clf()\n\n\n\n#%% Presentation plots\n\nplt.figure(dpi=200)\nspec = obj_specs[1]\nplt.plot(spec[0]/(1 + z), spec[1], lw=1, label='J085825 MJD: 55537',\\\n         ds='steps-mid', color='black')\nplot_sdss_lines(900, 1700)\n\nplt.axvspan(ymin=-0.1, ymax=32, xmin=950, xmax=1216, color='blue', alpha=0.2)\n\nplt.xlim(950, 1700)\nplt.ylim(bottom=-0.1, top=32)\n\nplt.xlabel('Rest Frame Wavelength (A)')\nplt.ylabel(r'Flux ($10^{-17}$ erg/cm$^2$/s/A)')\n\nplt.legend(fontsize=8, loc='upper right') \nplt.show()\nplt.clf()\n\n#%% Divide specs\n\n# First align\n\ndata_waves = [spec0[0]/(1 + z), spec1[0]/(1 + z)]\ndata_fluxes = [spec0[1], spec1[1]]\ndata_errs = [spec0[2], spec1[2]]\ndata_names = ['J085825 MJD: 51912', 'J085825 MJD: 55537']\n\nalign_wave, align_fluxes, align_errs = align_data(data_waves, data_fluxes, data_errs, \\\n                                                  data_names=data_names, wave_min=800, \\\n                                                  
wave_max=2300, get_res=True, save=False)\n\n# Check\n\nfor i, flux in enumerate(align_fluxes):\n plt.plot(align_wave, flux, label=data_names[i], lw=1, ds='steps-mid')\n\n \nplt.xlim(1060, 1200)\nplt.ylim(bottom=-0.1, top=25)\nplt.legend()\nplt.title('Aligned Specs')\nplt.show()\nplt.clf()\n\n# Divide\n\ndiv_flux = divide_specs(align_fluxes[1], align_fluxes[0])\n\nplt.plot(align_wave, div_flux, 'r-', ms=1.5)\nplt.axhline(1)\n \nplt.xlim(1060, 1200)\nplt.ylim(bottom=0, top=2)\nplt.legend()\nplt.title('Divided Flux')\nplt.show()\nplt.clf()\n\n#%% Crude normalization\n\nfor i, flux in enumerate(align_fluxes):\n plt.plot(align_wave, flux, label=data_names[i], lw=1, ds='steps-mid')\n\n \n#plt.xlim(1060, 1200)\nplt.ylim(bottom=-0.1, top=25)\nplt.legend()\nplt.title('Aligned Specs')\nplt.show()\nplt.clf()\n\nnorm_wave, norm_flux = norm_specs((align_wave, align_wave), align_fluxes)\n \nfor i, flux in enumerate(norm_flux):\n plt.plot(norm_wave[i], flux, label=data_names[i], lw=1, ds='steps-mid')\n\n \n#plt.xlim(1060, 1200)\nplt.ylim(bottom=-0.1, top=4)\nplt.legend()\nplt.title('Normed Specs')\nplt.show()\nplt.clf()\n\n#%% Find some peaks!\n\nline_waves, line_fluxes, line_idxs = get_line_mins(norm_wave[1], norm_flux[1], \\\n perc_cut=0.75, wave_min=1060, wave_max=1200, width=2)\n\n#%% Generalize to all lines\n\ndef gauss(wave, *p):\n \n norm, mu, sigma = p\n \n curve = 1+norm*np.exp(-(wave-mu)**2/(2.*sigma**2))\n \n return(curve)\n\ndelta = 4\n\nfit_waves = []\nfit_fluxes = []\nerr_waves = []\nerr_fluxes = []\n\nfor i, line_idx in enumerate(line_idxs):\n \n line_wave = line_waves[i]\n \n xdata = norm_wave[1][line_idx-delta:line_idx+delta]\n ydata = norm_flux[1][line_idx-delta:line_idx+delta]\n \n p0 = [-1, line_wave, 0.5]\n try:\n popt, pcov = curve_fit(gauss, xdata, ydata, p0=p0)\n fit_flux = gauss(xdata, *popt)\n \n fit_waves.append(xdata)\n fit_fluxes.append(fit_flux)\n except RuntimeError:\n print('RuntimeError for line', line_wave)\n err_waves.append(line_wave)\n err_fluxes.append(norm_flux[1][line_idx])\n \nerr_waves = np.asarray(err_waves) \n \n \nplt.figure(dpi=200)\n\nplt.plot(norm_wave[0], norm_flux[0], alpha=0.6, color='green', ds='steps-mid')\nplt.plot(norm_wave[1], norm_flux[1], alpha=0.6, color='blue', ds='steps-mid')\nplt.plot(line_waves, line_fluxes, 'r*', label='Abs Mins')\nplt.axhline(1, color='black', alpha=0.5, ls='--', label='Norm')\nplt.axhline(0.75, color='red', alpha=0.5, ls='--', label='Percent Cutoff')\nplt.plot(err_waves, err_fluxes, 'gx', label='Fit Error')\n\nfor i, flux in enumerate(fit_fluxes):\n \n plt.plot(fit_waves[i], flux, 'r-.', alpha=0.8, color='red')\n \nplt.legend()\n\nplt.xlim(1060, 1200)\nplt.ylim(-0.1, 2)\n\nplt.show()\nplt.clf()\n\n#%% Similarity Metric\n\nplt.figure(dpi=100)\n\nsim_cut = 0.025\n\nsim_metric = (abs((norm_flux[0] - norm_flux[1]))**2)\nsim_mask = np.where(sim_metric < sim_cut)\n\nplt.axhline(sim_cut, color='red', alpha=0.5, label='Sim Cutoff')\n\nplt.plot(norm_wave[0], sim_metric, color='black', ds='steps-mid', alpha=0.5, label='Sim Metric')\nplt.plot(norm_wave[0], norm_flux[0], alpha=0.4, color='green', ds='steps-mid')\nplt.plot(norm_wave[1], norm_flux[1], alpha=0.4, color='blue', ds='steps-mid')\n\n# plt.plot(norm_wave[0][sim_mask], norm_flux[0][sim_mask], alpha=0.6, label='Similar', ds='steps-mid', color='red')\n# plt.plot(norm_wave[1][sim_mask], norm_flux[1][sim_mask], alpha=0.6, color='red')\n\nplt.legend()\nplt.xlim(1060, 1200)\nplt.ylim(-0.1, 2)\n\nplt.show()\nplt.clf()\n\n\n\n\n#%% Play with FFT\n\n# yf0 = fft(-1*norm_flux[0])\n# yf1 
= fft(-1*norm_flux[1])\n\n# ms_diff = np.abs(((yf0 - yf1)/2) ** 2)\n\n# #plt.plot(norm_wave[0], norm_flux[0], alpha=0.6, color='green', ds='steps-mid')\n# plt.plot(norm_wave[0], yf0/20, ds='steps-mid')\n# plt.plot(norm_wave[0], yf1/20, ds='steps-mid')\n# #plt.plot(norm_wave[0], ms_diff/20, ds='steps-mid', color='black', ls='', marker='o')\n\n# plt.ylim(-2, 2)\n# plt.xlim(1060, 1200)\n\n# plt.show()\n# plt.clf()\n\n# yshift0 = np.fft.fftshift(yf0)\n\n# plt.plot(norm_wave[0], yshift0)\n# plt.ylim(-20, 20)\n# plt.xlim(1060, 1200)","repo_name":"CoraDeFrancesco/identify_qso_ly_a","sub_path":"identify_lya.py","file_name":"identify_lya.py","file_ext":"py","file_size_in_byte":16127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"28590109057","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @FileName :WebTools.py\n# @Time :2021/4/24 14:15\n# @Author :Amundsen Severus Rubeus Bjaaland\n\n\nimport AWord\n\nimport requests\n\n\ndef GetSentence(URL: str = AWord.URL.Chinese, SentenceMode: str = AWord.SentenceMode.All, MinLength: int = 0,\n MaxLength: int = 30):\n Params = CreateParams(SentenceMode, MinLength, MaxLength)\n Response = requests.get(URL, params=Params).json()\n OneSentence = AWord.Sentence()\n OneSentence.Id = Response[\"id\"]\n OneSentence.Uuid = Response[\"uuid\"]\n OneSentence.Hitokoto = Response[\"hitokoto\"]\n OneSentence.Text = Response[\"hitokoto\"]\n OneSentence.Type = Response[\"type\"]\n OneSentence.From = Response[\"from\"]\n OneSentence.Creator = Response[\"creator\"]\n OneSentence.CreatorId = Response[\"creator_uid\"]\n OneSentence.CommitFrom = Response[\"commit_from\"]\n OneSentence.CreateTime = float(Response[\"created_at\"])\n OneSentence.Length = Response[\"length\"]\n return OneSentence\n\n\ndef CreateParams(SentenceMode: str = AWord.SentenceMode.All, MinLength: int = 0, MaxLength: int = 30) -> dict:\n Params = dict()\n if SentenceMode == AWord.SentenceMode.All:\n pass\n else:\n Params[\"c\"] = SentenceMode\n Params[\"charset\"] = \"utf-8\"\n Params[\"encode\"] = \"json\"\n Params[\"min_length\"] = MinLength\n Params[\"max_length\"] = MaxLength\n return Params\n","repo_name":"ChineseWriter/AWord","sub_path":"AWord/WebTools.py","file_name":"WebTools.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10476660591","text":"\"\"\"Base URLs for H1DS project.\n\nThis module provides a small number of urls, and forwards app-specific\nURLS to the relevant apps.\n\nURLS provided by this module:\n\n* '^robots\\.txt$' -- tell web-crawlers (Google et al.) which which areas to leave alone.\n* '^admin/doc/' -- docs for admin interface\n* '^admin/' -- admin interface\n* '^openid/' -- OpenID authentication\n\nURLS passed to other H1DS modules.\n\n* '' -- all queries are first checked against h1ds for a match.\n\nFor the optional submodules (i.e. h1ds_mdsplus, h1ds_summary,\nh1ds_configdb), settings.INSTALLED_APPS is checked to see if the\nsubmodule is installed. If it is, then the root url is read from the\nconfiguration settings:\n\n.. 
topic:: H1DS module root URL configuration\n\n The root URLS for the H1DS modules can be configured individually.\n\n ============= ========================== ==================\n module name setting default\n ============= ========================== ==================\n h1ds_mdsplus ``H1DS_MDSPLUS_ROOT_URL`` ``mdsplus``\n h1ds_summary ``H1DS_SUMMARY_ROOT_URL`` ``summary``\n h1ds_configdb ``H1DS_CONFIGDB_ROOT_URL`` ``configurations``\n ============= ========================== ==================\n\n The URL regular expression used is ``'^config_value/'``, where\n ``config_value`` is specified by the relevant setting in the above\n table.\n\nIn the development environment (i.e. settings.DEBUG==True), the media\nfiles (settings.MEDIA_ROOT) are served under '^media/'\n\n\"\"\"\n\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.views.generic import TemplateView\n\nfrom h1ds import AVAILABLE_H1DS_MODULES\nfrom h1ds.views import TextTemplateView\nfrom h1ds.views import homepage, logout_view, edit_profile\nfrom h1ds.views import UserMainView, WorksheetView\n\nfrom h1ds.views import ApplyFilterView, UpdateFilterView, RemoveFilterView\nfrom h1ds.views import UserSignalCreateView, UserSignalDeleteView\nfrom h1ds.views import UserSignalUpdateView, ShotStreamView\nfrom h1ds.views import AJAXShotRequestURL, AJAXLatestShotView, NodeView\nfrom h1ds.views import RequestShotView, request_url, ShotDetailView\nfrom h1ds.views import DeviceListView, DeviceDetailView, ShotListView, TreeDetailView\n\n\nadmin.autodiscover()\n\n\ndef module_urlpattern(module_name):\n mod = __import__(module_name)\n mod_url_re = r'^{}/'.format(mod.MODULE_ROOT_URL)\n mod_url_target = include('{}.urls'.format(module_name))\n return patterns('', (mod_url_re, mod_url_target))\n\n\nh1ds_mods = [m for m in AVAILABLE_H1DS_MODULES if m in settings.INSTALLED_APPS]\n\nif hasattr(settings, \"H1DS_DATA_PREFIX\"):\n DATA_PREFIX = settings.H1DS_DATA_PREFIX\nelse:\n DATA_PREFIX = \"data\"\n\ncore_urlpatterns = patterns('',\n url(r'^$', homepage, name=\"h1ds-core-homepage\"),\n url(r'^user/settings/(?P\\w+)/$', edit_profile, name=\"h1ds-core-edit-settings\"),\n url(r'^logout/$', logout_view, name=\"h1ds-logout\"),\n)\n\nuser_patterns = patterns('',\n url(r'^(?P[-\\w]+)/$',\n UserMainView.as_view(),\n name=\"h1ds-user-main-page\"),\n url(r'^(?P[-\\w]+)/(?P[-\\w]+)/$',\n WorksheetView.as_view(),\n name=\"h1ds-user-worksheet\"),\n)\n\n\n# Internal use\n\nfilter_patterns = patterns('',\n url(r'^apply/$', ApplyFilterView.as_view(), name=\"apply-filter\"),\n url(r'^update/$', UpdateFilterView.as_view(), name=\"update-filter\"),\n url(r'^remove/$', RemoveFilterView.as_view(), name=\"remove-filter\"),\n)\n\nusersignal_patterns = patterns('',\n url(r'^create/$', UserSignalCreateView.as_view(), name=\"h1ds-create-user-signal\"),\n url(r'^delete/(?P\\d+)$', UserSignalDeleteView.as_view(),\n name=\"h1ds-delete-user-signal\"),\n url(r'^update/(?P\\d+)$', UserSignalUpdateView.as_view(),\n name=\"h1ds-update-user-signal\"),\n)\n\ninternal_patterns = patterns('',\n url(r'^filter/', include(filter_patterns)),\n url(r'^usersignal/', include(usersignal_patterns)),\n url(r'^shot_stream/$', ShotStreamView.as_view(), name=\"h1ds-shot-stream\"),\n url(r'^request_shot/$', RequestShotView.as_view(), name=\"h1ds-request-shot\"),\n url(r'^url_for_shot/$', AJAXShotRequestURL.as_view(), name=\"h1ds-shot-request-url\"),\n # TODO: should not have separate AJAX views. e.g. 
call with ?format=json\n url(r'^(?P[-\\w]+)/latest_shot/$', AJAXLatestShotView.as_view(),\n name=\"h1ds-latest-shot\"),\n url(r'^latest_shot/$', AJAXLatestShotView.as_view(),\n name=\"h1ds-latest-shot-for-default-device\"),\n url(r'^request_url/$', request_url, name=\"h1ds-request-url\"),\n)\n\n## Data modules\ndata_patterns = patterns('',\n url(r'^$', DeviceListView.as_view(), name=\"device-list\"),\n url(r'^(?P[-\\w]+)/$', DeviceDetailView.as_view(), name=\"device-detail\"),\n url(r'^(?P[-\\w]+)/shots/$', ShotListView.as_view(), name=\"device-shot-list\"),\n url(r'^(?P[-\\w]+)/shot_stream/$', ShotStreamView.as_view(), name=\"h1ds-shot-stream\"),\n url(r'^(?P[-\\w]+)/(?P\\d+|latest)/$', ShotDetailView.as_view(),\n name=\"shot-detail\"),\n url(r'^(?P[-\\w]+)/(?P\\d+|latest)/(?P[-\\w]+)/$', TreeDetailView.as_view(),\n name=\"tree-detail\"),\n\n url(r'^(?P[-\\w]+)/(?P\\d+|latest)/(?P[-\\w]+)/(?P.+)/$', NodeView.as_view(),\n name=\"node-detail\"),\n)\n\ncore_urlpatterns += patterns('',\n url(r'^_/', include(internal_patterns)),\n url(r'^u/', include(user_patterns)),\n url(r'^{}/'.format(DATA_PREFIX), include(data_patterns)),\n)\n\nurlpatterns = patterns('',\n (r'^robots\\.txt$',\n TextTemplateView.as_view(template_name='robots.txt')\n ),\n (r'', include(core_urlpatterns)),\n (r'^admin/doc/',\n include('django.contrib.admindocs.urls')),\n (r'^admin/', include(admin.site.urls)),\n (r'^openid/', include('django_openid_auth.urls')),\n)\n\nfor mod_name in h1ds_mods:\n urlpatterns += module_urlpattern(mod_name)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n url(r'^media/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }\n ),\n )\n\n \nhandler403 = 'h1ds.views.http403'\nhandler404 = 'h1ds.views.http404'\nhandler500 = 'h1ds.views.http500'\n","repo_name":"h1ds/h1ds","sub_path":"h1ds/h1ds/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":7493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29719704711","text":"def main():\r\n entrada = input('Digite a palavra a ser expressa: ')\r\n coluna = 0\r\n resultado = vertical(coluna, entrada)\r\n print(resultado) \r\n\r\ndef vertical(coluna, texto):\r\n espacos = ' ' * coluna\r\n for caractere in texto:\r\n if len(texto) > 20:\r\n print('O texto deve conter no máximo 20 letras')\r\n else: \r\n print(espacos + caractere)\r\n espacos += ' '\r\n\r\nmain()","repo_name":"thiag0vaz/Lista-Strings","sub_path":"16.question.py","file_name":"16.question.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12638379559","text":"# https://leetcode.com/problems/running-sum-of-1d-array/\n# 1480. Running Sum of 1d Array\n# Easy\n# Arrays\n# A\n\nimport sys\nfrom typing import List\n\nclass Solution:\n def runningSum(self, nums: List[int]) -> List[int]:\n if(not nums or len(nums) == 0):\n return nums\n\n res = []\n res.append(nums[0])\n\n for i in range(1, len(nums)):\n res.append(nums[i] + res [i-1])\n\n return res\n\n\nsolution = Solution()\n\nres = solution.runningSum([1,2,3,4])\nprint (res)\n\nres = solution.runningSum([1,1,1,1,1])\nprint (res)\n\n","repo_name":"segios/problems","sub_path":"python-problems/Arrays/1480. Running Sum of 1d Array.py","file_name":"1480. 
Running Sum of 1d Array.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22527968092","text":"import pyautogui as pag # 마우스를 조작할 수 있게 하는 패키지\nimport random, time, subprocess\n\nsummons_button = {\n 'top_left': {\n 'x': 1593,\n 'y': 302\n },\n 'bottom_right': {\n 'x': 1680,\n 'y': 430\n }\n}\n\n\n\n\n\n\n# pyautogui.moveTo() : duration 동안 (x,y) 좌표로 이동\n# random.uniform(a,b) : a 보다 크고 b 보다 작은 랜덤한 숫자 생성\n# while True:\n# duration = random.uniform(0.5, 1.5)\n# pag.moveTo(\n# x=random.uniform(summons_button['top_left']['x'], summons_button['bottom_right']['x']),\n# y=random.uniform(summons_button['top_left']['y'], summons_button['bottom_right']['y']),\n# duration=duration\n# )\n\n# # 마우스 버튼을 누른다\n# pag.mouseDown()\n#\n# # t초 만큼 딜레이를 준다.\n# time.sleep(random.uniform(0.15001, 0.3001))\n#\n# # 마우스 버튼을 놓는다\n# pag.mouseUp()\n#\n# time.sleep(random.uniform(0.32001, 0.41001))\n\n\n\n\n\n# while True:\n# x, y = pag.position() # 현재 마우스 커서의 좌표 받아오기\n# print('x: %s, y: %s' % (x,y))","repo_name":"GiSeok-Hong/TIL","sub_path":"Python/make_auto_click/auto_click.py","file_name":"auto_click.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2662533391","text":"__author__ = '璐'\n\nfrom functools import reduce\nfrom itertools import combinations\ndef break_rings(rings):\n # number of rings\n n_rings = max(reduce(set.union, rings))\n def trial(n):\n # rings for remove\n combi = combinations(range(1,n_rings+1), n)\n for c in combi:\n # count saved rings after removing rings\n rings_ = map(lambda x: len(x - set(c)), rings)\n if max(rings_) == 1:\n return True\n return False\n\n # brute force search\n for n in range(1, n_rings+1):\n if trial(n):\n return n\n return 0\n\n\n# 递归\ndef break_rings_recurse(connections):\n if len(connections) == 0:\n return 0\n r1, r2 = connections[0]\n return min(\n break_rings_recurse([c for c in connections if r1 not in c]) + 1,\n break_rings_recurse([c for c in connections if r2 not in c]) + 1)\n\nif __name__ == '__main__':\n # These \"asserts\" using only for self-checking and not necessary for\n # auto-testing\n assert break_rings(\n ({1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {4, 6})) == 3, \"example\"\n assert break_rings(\n ({1, 2}, {1, 3}, {1, 4}, {2, 3}, {2, 4}, {3, 4})) == 3, \"All to all\"\n assert break_rings(({5, 6}, {4, 5}, {3, 4}, {3, 5}, {3, 6})) == 2, \"Chain\"\n assert break_rings(({8, 9}, {1, 9}, {1, 2}, {2, 3}, {3, 4}, {\n 4, 5}, {5, 6}, {6, 7}, {8, 7})) == 5, \"Long chain\"\n\n\n\n\n\nif __name__ == '__main__':\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert break_rings(({1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {4, 6})) == 3, \"example\"\n assert break_rings(({1, 2}, {1, 3}, {1, 4}, {2, 3}, {2, 4}, {3, 4})) == 3, \"All to all\"\n assert break_rings(({5, 6}, {4, 5}, {3, 4}, {3, 2}, {2, 1}, {1, 6})) == 3, \"Chain\"\n assert break_rings(({8, 9}, {1, 9}, {1, 2}, {2, 3}, {3, 4}, {4, 5}, {5, 6}, {6, 7}, {8, 7})) == 5, \"Long chain\"\n assert break_rings(({3,4},{5,6},{2,7},{1,5},{2,6},{8,4},{1,7},{4,5},{9,5},{2,3},{8,2},{2,4},{9,6},{5,7},{3,6},{1,3},)) == 5,\"long chain2\"\n assert break_rings(({1,2},{2,3},{3,4},{4,5},{5,2},{1,6},{6,7},{7,8},{8,9},{9,6},{1,10},{10,11},{11,12},{12,13},{13,10},{1,14},{14,15},{15,16},{16,17},{17,14},)) == 
8,\"13rd\"\n\n","repo_name":"AbnerZheng/checkio","sub_path":"home/break_rings.py","file_name":"break_rings.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74220029548","text":"from cremi.io import CremiFile\nfrom cremi.evaluation import (\n NeuronIds,\n Clefts,\n SynapticPartners,\n SynapticPartnersMultRec,\n SynapticPartnersMultRecGt,\n)\nimport logging\nimport sys\nimport os\nfrom CNNectome.utils import config_loader\n\n\ndef evaluate(test, truth):\n synaptic_partners_eval = SynapticPartners()\n fscore, precision, recall, fp, fn, filtered_matches = synaptic_partners_eval.fscore(\n test.read_annotations(),\n truth.read_annotations(),\n truth.read_neuron_ids(),\n all_stats=True,\n )\n logging.info(\"\\tfscore: \" + str(fscore))\n logging.info(\"\\tprecision: \" + str(precision))\n logging.info(\"\\trecall: \" + str(recall))\n logging.info(\"\\tfp: \" + str(fp))\n logging.info(\"\\tfn: \" + str(fn))\n\n return fscore\n\n\ndef evaluate_multrec(test, truth):\n synaptic_partners_eval = SynapticPartnersMultRec()\n fscore, precision, recall, fp, fn, filtered_matches = synaptic_partners_eval.fscore(\n test.read_annotations(),\n truth.read_annotations(),\n truth.read_neuron_ids(),\n all_stats=True,\n )\n logging.info(\"\\tfscore: \" + str(fscore))\n logging.info(\"\\tprecision: \" + str(precision))\n logging.info(\"\\trecall: \" + str(recall))\n logging.info(\"\\tfp: \" + str(fp))\n logging.info(\"\\tfn: \" + str(fn))\n\n return fscore\n\n\ndef evaluate_multrecgt(test, truth, add_in_file=False):\n synaptic_partners_eval = SynapticPartnersMultRecGt()\n if add_in_file:\n (\n fscore,\n precision,\n recall,\n fp,\n fn,\n filtered_matches,\n annot,\n ) = synaptic_partners_eval.fscore(\n test.read_annotations(),\n truth.read_annotations(),\n truth.read_neuron_ids(),\n all_stats=True,\n add_in_file=add_in_file,\n )\n\n test.write_annotations(annot)\n else:\n (\n fscore,\n precision,\n recall,\n fp,\n fn,\n filtered_matches,\n ) = synaptic_partners_eval.fscore(\n test.read_annotations(),\n truth.read_annotations(),\n truth.read_neuron_ids(),\n all_stats=True,\n add_in_file=add_in_file,\n )\n logging.info(\"\\tfscore: \" + str(fscore))\n logging.info(\"\\tprecision: \" + str(precision))\n logging.info(\"\\trecall: \" + str(recall))\n logging.info(\"\\tfp: \" + str(fp))\n logging.info(\"\\tfn: \" + str(fn))\n\n return fscore\n\n\ndef main(s, mode=0, data=None):\n # samples = ['A','B', 'C']\n samples = [(s.split(\"/\")[-1]).split(\"_\")[0]]\n for sample in samples:\n logging.info(\"evaluating synapse predictions for sample {0:}\".format(sample))\n truth_fn = os.path.join(\n config_loader.get_config()[\"synapses\"][\"cremi17_data_path\"],\n \"sample_{0:}_padded_20170424.aligned.hdf\".format(sample),\n )\n if data is not None:\n logging.info(\n \"sample {0:} in mode {1:} using {2:}\".format(sample, mode, data)\n )\n if (\n data == \"val\"\n or data == \"validation\"\n or data == \"VAL\"\n or data == \"VALIDATION\"\n ):\n assert s.endswith(\".hdf\")\n test = CremiFile(s.replace(\".hdf\", \".validation.hdf\"), \"a\")\n truth = CremiFile(truth_fn.replace(\".hdf\", \".validation.hdf\"), \"a\")\n elif (\n data == \"train\"\n or data == \"training\"\n or data == \"TRAIN\"\n or data == \"TRAINING\"\n ):\n assert s.endswith(\".hdf\")\n test = CremiFile(s.replace(\".hdf\", \".training.hdf\"), \"a\")\n truth = CremiFile(truth_fn.replace(\".hdf\", \".training.hdf\"), \"a\")\n else:\n test = CremiFile(s, \"a\")\n 
truth = CremiFile(truth_fn, \"a\")\n\n if mode == 0:\n evaluate(test, truth)\n elif mode == 1:\n evaluate_multrecgt(test, truth, add_in_file=True)\n elif mode == 2:\n evaluate_multrecgt(test, truth)\n\n\ndef main_all(s):\n # samples = ['A','B', 'C']\n main(s, 0, \"VAL\")\n main(s, 2, \"VAL\")\n main(s, 0, \"TRAIN\")\n main(s, 2, \"TRAIN\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n s = sys.argv[1]\n # m = int(sys.argv[2])\n # d = sys.argv[3]\n # main(s, m, d)\n main_all(s)\n","repo_name":"saalfeldlab/CNNectome","sub_path":"CNNectome/validation/synapses/evaluate_synaptic_partners.py","file_name":"evaluate_synaptic_partners.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"10004019359","text":"import cv2\r\nimport pyautogui\r\nfrom THE_First import myPose\r\nclass MyGame():\r\n def __init__(self):\r\n self.pose = myPose()\r\n self.game_started = False\r\n self.x_positions =1 # 0 : Left, 1: Center, 2: Right\r\n self.y_positions = 1 # 0: Down, 1:Stand, 2: jump\r\n self.clap_duration = 0 # so frame ma nguoi dung vo taay\r\n\r\n def move_LRC(self,LRC):\r\n if LRC==\"L\":\r\n for _ in range(self.x_positions):\r\n pyautogui.press(\"left\")\r\n self.x_positions= 0\r\n elif LRC==\"R\":\r\n for _ in range(2,self.x_positions,-1):\r\n pyautogui.press(\"right\")\r\n self.x_positions=2\r\n else:\r\n if self.x_positions == 0 :\r\n pyautogui.press(\"right\")\r\n elif self.x_positions == 2:\r\n pyautogui.press(\"left\")\r\n\r\n self.x_positions =1\r\n return\r\n def move_JSD(self,JSD):\r\n if (JSD==\"J\") & (self.y_positions==1):\r\n pyautogui.press('up')\r\n self.y_positions=2\r\n elif (JSD==\"D\") and (self.y_positions==1):\r\n pyautogui.press('down')\r\n self.y_positions=0\r\n elif (JSD==\"S\") and (self.y_positions!=1):\r\n self.y_positions=1\r\n return\r\n def play_game(self):\r\n #khoi tao ca mera\r\n cap = cv2.VideoCapture(0)\r\n cap.set(3,1280)\r\n cap.set(4,960)\r\n\r\n while True:\r\n ret,image = cap.read()\r\n if ret:\r\n\r\n image= cv2.flip(image,1)\r\n image_hight,image_width, _ = image.shape\r\n image, results =self.pose.detectPose(image)\r\n\r\n if results.pose_landmarks:\r\n #kiem tra game da chay chua\r\n if self.game_started:\r\n image, LRC = self.pose.checkPose_LRC(image, results)\r\n self.move_LRC(LRC)\r\n\r\n #kiem tra len xuong\r\n image, JSD = self.pose.checkPose_JSD(image, results)\r\n self.move_JSD(JSD)\r\n else:\r\n cv2.putText(image, \"Clap your hand to play\",(5,image_hight-10),cv2.FONT_HERSHEY_PLAIN,2,(255,255,255),3)\r\n image, CLAP =self.pose.checkPose_Clap(image,results)\r\n if CLAP == 'C':\r\n self.clap_duration +=1\r\n if self.clap_duration == 10:\r\n if self.game_started:\r\n #reset\r\n self.x_positions = 1\r\n self.y_positions =1\r\n self.pose.shoudler_line_y(image, results)\r\n pyautogui.press('space')\r\n else:\r\n self.game_started = True\r\n self.pose.save_Shoulder_line_y(image,results)\r\n pyautogui.click(x=720,y= 560, button =\"left\" )\r\n self.clap_duration =0\r\n else:\r\n self.clap_duration = 0\r\n cv2.imshow(\"Game\",image)\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n cap.release()\r\n cv2.destroyAllWindows()\r\nmygame= MyGame()\r\nmygame.play_game()","repo_name":"trungnguyen-web/ControlGameByHand","sub_path":"MyGame.py","file_name":"MyGame.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28264799221","text":"from 
aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup\nfrom handlers.call_back.callback import goods\n\nkb_inline = InlineKeyboardMarkup(row_width=2)\n\nbtn_add = InlineKeyboardButton(text='Добавить', callback_data='ADD')\nbtn_remove = InlineKeyboardButton(text='Купить', callback_data=goods.new(items='Товары', count='100'))\nbtn_print = InlineKeyboardButton(text='Печать', callback_data='PRINT_P')\nbtn_exit = InlineKeyboardButton(text='Выход', callback_data='EXIT')\n\nkb_inline.row(btn_add, btn_remove)\nkb_inline.add(btn_print)\nkb_inline.add(btn_exit)\n","repo_name":"7Arkadius7/My_Bot","sub_path":"keybords/inline/main_inline.py","file_name":"main_inline.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8272447687","text":"from django import template\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef strip_error_msg(arg):\n if arg:\n return arg[0][0]\n else:\n return \"\"\n\n\n@register.simple_tag\ndef show_error_msg(arg):\n if arg:\n # ValidationError object does not support indexing,需要转一下格式\n error_msg = tuple(arg[0])[0]\n result = '
{}
'.format(error_msg)\n return mark_safe(result)\n else:\n return \"\"\n","repo_name":"xartisan/django_forum","sub_path":"forum/templatetags/error_msg.py","file_name":"error_msg.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2118244010","text":"num = 600851475143 # the number we want to find the factors of\np = 3 # start with 3 because 1 is not a prime number and 2 is not a factor since num is odd\nfactors = [] # our list of factors\nwhile num > 1: # it will continuously decrease for prime factorization\n if num >= p:\n if num % p == 0: # if p is a multiple of num then add it to our list\n factors.append(p)\n num /= p # divide by p, this is part of prime factorization\n else: # if it is not divisible by p then add 1 to p until the next term is found\n p += 1\nprint(max(factors))\n","repo_name":"trev-rock/Project-Euler_Exercises","sub_path":"level_3.py","file_name":"level_3.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12096669886","text":"from datetime import timedelta\nfrom flask import Blueprint, request, jsonify\nfrom flask_jwt_extended import create_access_token, create_refresh_token\nfrom .serializer import UserSchema\nfrom .models.tables import User\n\n\nbp_login = Blueprint('login', __name__)\n\n\n@bp_login.route('/login', methods=['POST'])\ndef login():\n result = {}\n user, error = UserSchema().load(request.json)\n\n if error:\n result = {'error': {'reason': error}}\n return jsonify(result), 401\n\n user = User.query.filter_by(username=user.username).first()\n\n if user and user.verify_token(request.json['password']):\n access_token = create_access_token(\n identity=user.user_id,\n expires_delta=timedelta(seconds=60)\n )\n refresh_token = create_refresh_token(identity=user.user_id)\n result = {\n 'access_token': access_token,\n 'refresh_token': refresh_token,\n 'message': 'success'\n }\n return jsonify(result), 200\n\n result = {'error': {'reason': 'credenciais inválidas'}}\n return jsonify(result), 401\n","repo_name":"Tilzen/cpf-status-api","sub_path":"app/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"73965447467","text":"'''Given two stings ransomNote and magazine, return true if ransomNote can be constructed from magazine and false otherwise.\r\n\r\nEach letter in magazine can only be used once in ransomNote.\r\n\r\nInput Format\r\n\r\nThe input format is string The first line input is ransomeNote The second line input is magazine\r\n\r\nConstraints\r\n\r\n1 <= ransomNote.length, magazine.length <= 105 ransomNote and magazine consist of lowercase English letters.\r\n\r\nOutput Format\r\n\r\nThe output is boolean\r\n\r\nSample Input 0\r\n\r\na\r\nb\r\nSample Output 0\r\n\r\nfalse'''\r\n\r\nransomeNote = input()\r\nmagazine = input()\r\noutput = ''\r\nfor i in magazine:\r\n if i in ransomeNote:\r\n output += i\r\nif ransomeNote == output:\r\n print(\"true\")\r\nelse:\r\n print(\"false\")\r\n","repo_name":"anandakumar0962/Hackerrank","sub_path":"Ransome note.py","file_name":"Ransome note.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34445956699","text":"from functools import reduce\nfrom itertools import count\nimport 
warnings\n\nimport tensorflow as tf\n\nfrom .domain import Domain\nfrom parot.utils import graph, util\n\nrelu = tf.nn.relu\n\n\nclass HZ(Domain):\n \"\"\"\n A hybrid zonotope on a shape `p = (p1,p2,...)` is a set characterised\n by the tensors `c`, `b` and `E`. The set `Z` is characterised as\n `Z := {c + b^T \\beta + E e | \\beta \\in [-1,1]^p, e \\in [-1,1]^m}`.\n\n ### Implementation notes:\n\n A batch dimension should always be provided, even if only set to one.\n The first dimension in the shape is assumed to be the batch dimension.\n\n To account for batch sizes greater than one and to keep some methods\n efficient, the `e` dimension in `E` is folded in to the batch\n dimension. This means that `c`,`b`,`E` all have the same rank. To get\n a version of `E` with the batch and `e` dimensions unflattened call\n `get_E_unflattened`.\n\n Attributes:\n id (int): count of the instance of HZ for the purpose of variable\n naming\n b (tf.Tensor): the on-axis uncorrelated errors\n c (tf.Tensor): center of the object\n E (tf.Tensor): zonotope errors\n \"\"\"\n _ids = count(0)\n\n def __init__(self, c, b, E):\n # we store the error dimension flattened along with batch-size to make\n # the non-activation transormations simpler.\n self.id = next(self._ids)\n self.c = tf.identity(c, name='hz_{}_c'.format(self.id))\n self.b = tf.identity(b, name='hz_{}_b'.format(self.id))\n self.E = tf.identity(E, name='hz_{}_E'.format(self.id))\n if self.E.shape[0] is None:\n raise ValueError('E should have a defined shape')\n\n if len(self.E.shape) != len(self.c.shape):\n raise ValueError('E and c have different shapes')\n\n def get_center(self): return self.c\n\n def get_error_dim(self): return self.E.shape[0] // self.c.shape[0]\n\n def get_batch_dim(self): return self.c.shape[0]\n\n def get_space_dims(self):\n \"\"\"\n Return the shape of `self.c` without the batch dimension.\n\n Returns:\n tf.Tensor: space dimensions\n \"\"\"\n return self.c.shape[1:]\n\n def has_E_matrix(self):\n \"\"\"Returns True if the HZ has an E matrix\n\n Returns:\n bool: True if the instance has an E matrix\n \"\"\"\n return self.E.shape[0] != 0\n\n def get_E_unflattened(self):\n \"\"\"\n E is stored with the error dimension reshaped in to the batch\n dimension to make some calculations simpler.\n `self.E : (error_dim * batch_dim, ..rest)`.\n `self.get_E_unflattened() : (error_dim, batch_dim, ..rest)`\n\n Returns:\n tf.Tensor: unflattened E matrix\n \"\"\"\n # this operation will fail if any dimensions is unknown\n if any([dim._value is None for dim in self.c.shape]):\n raise ValueError(\n 'The implementation of HZ requires all dimensions of ' +\n 'all tensors to be known. 
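# (the reshape below needs every dimension of self.c to be static)\n                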
If the undefined dimension is ' +\n 'due to a fixed batch size, consider reshaping your input ' +\n 'to include that batch size')\n\n return tf.reshape(self.E, [-1, *self.c.shape])\n\n def get_errors(self):\n \"\"\"\n Return the zonotope error vector\n\n Returns:\n tf.Tensor: vector of errors\n \"\"\"\n E = self.get_E_unflattened()\n return self.b + tf.reduce_sum(abs(E), axis=0)\n\n def get_parameters(self):\n \"\"\"\n Return the zonotope parameters\n\n Returns:\n tf.Tensor: parameters\n \"\"\"\n p = self.get_error_dim()\n b = self.get_batch_dim()\n rest = self.get_space_dims()\n return ([b] + list(rest), [p, b])\n\n def get_adversary(self, y_true, n_classes):\n \"\"\"\n Draws an arbitrary box around the domain element and then finds the\n point in this box which is furthest from the tensor 'y_true'\n\n Args:\n y_true (tf.Tensor): the true output\n n_classes (int): number of classes in the output\n\n Returns:\n tf.Tensor: the furthest tensor from `y_true` in the box\n \"\"\"\n c = self.get_center()\n s = self.get_errors()\n g = (tf.cast(tf.one_hot(y_true, depth=n_classes), tf.float32) * 2 - 1)\n return c - s * g\n\n def evaluate(self, p):\n \"\"\"Evaluate the zonotope at p\n\n Args:\n p (tf.Tensor): input tensor representing a point to be evaluated\n\n Returns:\n tf.Tensor: zonotope at p\n \"\"\"\n (beta, e) = p\n E = self.get_E_unflattened()\n [p, b, *rest] = E.shape\n beta = tf.clip_by_value(beta, -1, 1)\n e = tf.clip_by_value(e, -1, 1)\n e = tf.reshape(e, [p, b] + [1 for r in rest])\n E = E * e\n E = tf.reduce_sum(E, axis=0)\n return self.c + self.b * beta + E\n\n def is_inside(self, x):\n \"\"\"\n Check if the point `x` is inside the zonotope.\n Showing that x lies within a zonotope is a linear programming problem.\n For our purposes we can just overapproximate to a box and check if\n it lies in the box.\n\n Args:\n x (tf.Tensor): tensor to check\n\n Returns:\n bool: whether x is inside the zonotope\n \"\"\"\n return tf.reduce_all(self.get_errors() >= abs(x - self.c))\n\n def box_relu(self):\n \"\"\"\n Box version of relu transformer for comparison with `self.relu()`.\n\n Returns:\n HZ: a transformed zonotope\n \"\"\"\n c = self.c\n s = self.get_errors()\n return HZ.of_minmax(relu(c + s), relu(c - s))\n\n def box_sigmoid(self): return self.box_monotone(tf.math.sigmoid)\n\n def sigmoid(self):\n \"\"\"\n Computes the transformation of a zonotope through a sigmoid\n activation function. It works by drawing the smallest\n parallelogram about the sigmoid graph.\n\n Returns:\n HZ: a transformed zonotope\n \"\"\"\n def sigma(x): return tf.math.sigmoid(x)\n c = self.c\n s = self.get_errors()\n up = c + s\n lo = c - s\n meu = tf.div_no_nan(sigma(up) - sigma(lo), s) / 2\n\n # to find extrema we wish to solve `\\delta_x \\sigma(x) = \\meu`\n # Let `Y = exp(-x)`. 
Then `\\delta_x Y = - Y`, `\\sigma(x) = 1 / (1 + Y)`\n # and `\\delta_x \\sigma(x) = Y / (1 + Y)^2`\n # So we need `\\meu + (2\\meu - 1)Y + \\meu Y^2 = 0`.\n # Which happens when `Y = b +- sqrt(b^2 - 1)`\n # Where `b := (\\meu^{-1} - 2) / 2`.\n\n b = tf.div_no_nan(1.0, meu + 0.01) / 2 - 1\n # INF \\geq b \\geq 1 avoids nans in gradient\n\n b = tf.maximum(b, 1.00)\n delta = tf.sqrt(b * b - 1)\n\n # x1, x2 are the input points at which the error is maximised.\n x1 = - tf.log(b + delta)\n x1 = tf.where(tf.is_finite(x1), x1, tf.zeros_like(x1))\n x2 = - tf.log(b - delta)\n x2 = tf.where(tf.is_finite(x2), x2, tf.zeros_like(x2))\n x1 = tf.maximum(tf.minimum(x1, up), lo)\n x2 = tf.maximum(tf.minimum(x2, up), lo)\n eps1 = sigma(x1) - sigma(lo) - meu * (x1 - lo)\n eps2 = sigma(x2) - sigma(lo) - meu * (x2 - lo)\n eps_mx = relu(tf.maximum(eps1, eps2))\n eps_mn = relu(tf.maximum(-eps1, -eps2))\n epsilon = (eps_mx + eps_mn) / 2\n c = ((sigma(up) + eps_mx) + (sigma(lo) - eps_mn)) / 2\n b = self.b * meu + epsilon\n E = self.get_E_unflattened() * meu\n E = tf.reshape(E, tf.shape(self.E))\n return HZ(c, b, E)\n\n def __add__(self, x2):\n \"\"\"\n Add a zonotope or a tensor or a float\n\n Args:\n x2 (HZ or tf.Tensor or float)\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n x1 = self\n if isinstance(x2, tf.Tensor) or isinstance(x2, float):\n return HZ(x1.c + x2, x1.b, x1.E)\n elif isinstance(x2, HZ) and (x1.get_error_dim() == x2.get_error_dim()):\n\n print(\"[WARNING]: adding two hybrid zonotopes\", x1.c.name, \",\",\n x2.c.name,\n \". Assuming that the E-matrix error terms are correlated.\")\n return HZ(x1.c + x2.c, x1.b + x2.b, x1.E + x2.E)\n else:\n return NotImplemented\n __radd__ = __add__\n\n def __sub__(self, x2):\n \"\"\"\n Subtracts a zonotope or a tensor or a float\n\n Args:\n x2 (HZ or tf.Tensor or float)\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n x1 = self\n if isinstance(x2, tf.Tensor) or isinstance(x2, float):\n return HZ(x1.c - x2, x1.b, x1.E)\n elif isinstance(x2, HZ):\n return x1 + (- x2)\n else:\n return NotImplemented\n\n def __neg__(self): return HZ(- self.c, self.b, self.E)\n\n def __mul__(self, x2):\n \"\"\"\n Multiplication with a zonotope or a tensor or a float\n\n Args:\n x2 (HZ or tf.Tensor)\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n x1 = self\n if isinstance(x2, tf.Tensor) or isinstance(x2, float):\n return HZ(x1.c * x2, x1.b * abs(x2), x1.E * x2)\n if isinstance(x2, HZ):\n return x1.mul_same_error(x2)\n return NotImplemented\n __rmul__ = __mul__\n\n def __truediv__(self, x2):\n \"\"\"\n Division with a zonotope or a tensor\n\n Args:\n x2 (HZ or tf.Tensor)\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n x1 = self\n if isinstance(x2, tf.Tensor):\n return HZ(x1.c / x2, x1.b / abs(x2), x1.E / x2)\n elif isinstance(x2, HZ):\n return x1.div_same_error(x2)\n else:\n return NotImplemented\n\n def minimum(self, x2):\n \"\"\"\n Minimum of the HZ instance and x2\n\n Args:\n x2 (HZ or tf.Tensor or float)\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n x1 = self\n if isinstance(x2, tf.Tensor) or isinstance(x2, float):\n return - (- x1 + x2).relu() + x2\n elif isinstance(x2, HZ):\n return - (-x1).max_same_error(-x2)\n else:\n return NotImplemented\n\n def maximum(self, x2):\n \"\"\"\n Maximum of the HZ instance and x2\n\n Args:\n x2 (HZ or tf.Tensor or float)\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n x1 = self\n if isinstance(x2, tf.Tensor) or isinstance(x2, float):\n return (x1 - x2).relu() + x2\n elif isinstance(x2, HZ):\n return x1.max_same_error(x2)\n else:\n return 
NotImplemented\n\n def mul_same_error(self, y):\n \"\"\"\n Multiply the HZ's (the HZ instance and y), while maintaing the same\n error.\n\n Args:\n y (HZ): input HZ to multiply by\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n x = self\n\n xc, xs = x.get_center_errors()\n yc, ys = y.get_center_errors()\n\n xu, xl = (xc + xs), (xc - xs)\n yu, yl = (yc + ys), (yc - ys)\n bds = tf.stack([xu * yu, xl * yu, xu * yl, xl * yl], axis=-1)\n up = tf.reduce_max(bds, axis=-1)\n lo = tf.reduce_min(bds, axis=-1)\n\n return HZ((up + lo) / 2, abs(up - lo) / 2, tf.zeros_like(x.E))\n\n def div_same_error(self, y):\n \"\"\"\n Divide the HZ's (the HZ instance and y), while maintaing the same\n error.\n\n Args:\n y (HZ): input HZ to divide by\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n x = self\n\n xc, xs = x.get_center_errors()\n yc, ys = y.get_center_errors()\n\n up = (xc + xs) / relu(yc - ys)\n lo = (xc - xs) / (yc + ys)\n c = (up + lo) / 2\n\n return HZ(c=c, b=abs(up - lo) / 2, E=tf.zeros_like(x.E))\n\n def reciprocal(self):\n \"\"\"\n Return the reciprocal of the HZ instance\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n c = self.get_center()\n s = self.get_errors()\n up = c + s\n lo = c - s\n\n meu = tf.div_no_nan(\n tf.math.reciprocal(up) - tf.math.reciprocal(lo),\n 2.0 * s)\n x = tf.rsqrt(-meu) * tf.sign(c)\n epsilon = util.linterp(\n lo, up,\n tf.math.reciprocal(lo),\n tf.math.reciprocal(up), x) - tf.math.reciprocal(x)\n\n c = (tf.math.reciprocal(up) + tf.math.reciprocal(lo) - epsilon) / 2\n b = self.b * abs(meu) + abs(epsilon)\n E = self.get_E_unflattened() * meu\n E = tf.reshape(E, tf.shape(self.E))\n return HZ(c, b, E)\n\n def transform_convex(self, f, extremum_fn):\n \"\"\"\n Idea; the `extremum_fn` takes the gradient `\\meu` of a line\n between two points `l,u` on `f` and gives back an `x` such that\n `df/dx - \\meu = 0`.\n This is then used to compute a bounding zonotope for the function.\n All of this works because we know that `f` is either convex or concave.\n\n Args:\n f (Callable): function to transform\n extremum_fn (function): extremum function\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n c, s = self.get_center_errors()\n up = c + s\n lo = c - s\n\n meu = tf.div_no_nan(f(up) - f(lo), 2.0 * s)\n x = extremum_fn(meu)\n x = tf.clip_by_value(x, lo, up)\n epsilon = util.linterp(lo, up, f(lo), f(up), x) - f(x)\n\n c = (f(up) + f(lo) - epsilon) / 2\n b = self.b * meu + (abs(epsilon) / 2.0)\n E = self.get_E_unflattened() * meu\n E = tf.reshape(E, tf.shape(self.E))\n return HZ(c, b, E)\n\n def relu(self):\n \"\"\"\n Pass the zonotope through relu activation.\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n if self.has_E_matrix():\n return self.box_relu()\n else:\n return self.transform_convex(relu, tf.zeros_like)\n\n def abs(self): return self.transform_convex(tf.abs, tf.zeros_like)\n\n def exp(self): return self.transform_convex(tf.exp, tf.log)\n\n def log(self): return self.transform_convex(tf.log, tf.reciprocal)\n\n def log1p(self):\n \"\"\"\n Element-wise natural logarithm of (1+x)\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n return self.transform_convex(tf.log1p, lambda meu: tf.reciprocal(\n meu) - 1)\n\n def reduce(self, op, axis=-1):\n \"\"\"\n Apply the original operation while reducing an axis.\n\n Args:\n op (tf.Operation): operation to reduce\n axis (int, optional): axis specifier\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n if (axis == 0):\n raise NotImplementedError(\n \"axis=0 not implemented for reduction operation.\")\n return HZ(\n c=op(self.c, axis=axis),\n 
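# assumes op is linear (sum, mean, expand_dims) so c, b and E can be reduced independently\n            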
b=op(self.b, axis=axis),\n E=op(self.E, axis=axis)\n )\n\n def reduce_sum(self, axis=-1): return self.reduce(tf.reduce_sum, axis)\n\n def reduce_mean(self, axis=-1): return self.reduce(tf.reduce_mean, axis)\n\n def expand_dims(self, axis=-1): return self.reduce(tf.expand_dims, axis)\n\n def box_max(self, y):\n \"\"\"\n A simple Box maxing procedure. Using `max_same_error` should give\n smaller errors.\n\n Args:\n y (HZ): other HZ to max in terms of box\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n x = self\n xs = x.get_errors()\n ys = y.get_errors()\n up = tf.maximum(x.c + xs, y.c + ys)\n lo = tf.maximum(x.c - xs, y.c - ys)\n return HZ.of_minmax(up, lo)\n\n def op_box_monotone(self, op):\n \"\"\"\n Assuming the given `op` is monotone, will compute the axis-aligned\n bounding box transformer for `op`.\n\n Args:\n op (tf.Operation): tensor describing an operation\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n s = self.get_errors()\n c = self.c\n return HZ.of_minmax(\n graph.clone_op(op, [c + s], suffix=\"_u\")[0],\n graph.clone_op(op, [c - s], suffix=\"_l\")[0]\n )\n\n def box_monotone(self, f):\n \"\"\"\n Apply a monotone function f to the HZ.\n\n Args:\n f (Callable): Description\n\n Returns:\n HZ: Description\n \"\"\"\n c, s = self.get_center_errors()\n return HZ.of_minmax(f(c + s), f(c - s))\n\n def box_softmax(self):\n \"\"\"\n Apply softmax to the zonotope.\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n c, b = self.to_box().softmax().get_center_errors()\n return HZ(c, b, tf.zeros_like(self.E))\n\n def max_same_error(self, y):\n \"\"\"\n Finds the max of two zonotopes assuming that the E matrices are\n using the same error terms.\n\n Args:\n y (HZ): other HZ to max\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n x = self\n\n if x.get_error_dim() != y.get_error_dim():\n raise ValueError(\n 'the dimensions of the error term in the two HZs should ' +\n 'be equal')\n\n Ex = x.get_E_unflattened() # : (e,b,h,w,c)\n Ey = y.get_E_unflattened()\n sx = x.b + tf.reduce_sum(abs(Ex), axis=0) # (b,h,w,c)\n sy = y.b + tf.reduce_sum(abs(Ey), axis=0)\n ux = x.c + sx\n lx = x.c - sx\n uy = y.c + sy\n ly = y.c - sy\n up = tf.maximum(ux, uy)\n lo = tf.maximum(lx, ly)\n\n meux = tf.div_no_nan(up - tf.maximum(lx, uy), sx) / 2\n meuy = tf.div_no_nan(up - tf.maximum(ly, ux), sy) / 2\n epsilon = (up - 2 * meux * sx - 2 * meuy * sy - lo) / 2\n\n c = (up + lo) / 2\n b = meux * x.b + meuy * y.b + epsilon\n E = meux * Ex + meuy * Ey # : (e, b,h,w,c)\n E = tf.reshape(E, tf.shape(x.E))\n return HZ(c, b, E)\n\n def __matmul__(self, w):\n \"\"\"\n Matrix multiplication\n\n Args:\n w (tf.Tensor): matrix to multiply by in tensor form\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n return HZ(\n c=self.c @ w,\n b=self.b @ abs(w),\n E=self.E @ w\n )\n\n def __rmatmul__(self, l):\n \"\"\"\n Right-matrix multiplication\n\n Args:\n l (tf.Tensor): matrix to multiply by in tensor form\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n return HZ(\n c=l @ self.c,\n b=abs(l) @ self.b,\n E=l @ self.E\n )\n\n def __getitem__(self, key):\n \"\"\"\n Get a new HZ instance by slicing the individual components of\n the given instance.\n\n Args:\n key (Slice): key to get in the individual components of HZ\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n if (key[0] != slice(None)):\n raise NotImplementedError(\n \"Can't do anything except [:] on the batch dimension.\")\n return HZ(self.c.__getitem__(key), self.b.__getitem__(key),\n self.E.__getitem__(key))\n\n def max_pool_2x2(self):\n \"\"\"\n Perform a non-overlapping 2x2 maxpool. 
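It is built from two max_same_error passes (width first, then height), so the shared zonotope error terms are kept instead of falling back to a box. 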
Assumes that both height and\n width are an even number.\n\n Returns:\n HZ: max pooled HZ\n \"\"\"\n x = self\n x = x[:, :, ::2, :].max_same_error(x[:, :, 1::2, :])\n x = x[:, ::2, :, :].max_same_error(x[:, 1::2, :, :])\n return x\n\n def concat(self, other, axis):\n \"\"\"\n Concatenate `other` with the given instance of HZ.\n\n Args:\n other (HZ or tf.Tensor): either an instance of a HZ or a tensor\n which can be used to generate an instance\n axis (int): axis in which the concatenation occurs\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n if isinstance(other, HZ):\n c = tf.concat([self.c, other.c], axis=axis)\n b = tf.concat([self.b, other.b], axis=axis)\n E1 = self.get_E_unflattened()\n E2 = other.get_E_unflattened()\n [e1, batch_size, *_] = E1.shape\n [e2, _, *_] = E2.shape\n EE1 = tf.concat([E1, tf.zeros_like(E2)], axis=0)\n EE2 = tf.concat([tf.zeros_like(E1), E2], axis=0)\n E = tf.concat([EE1, EE2], axis=axis + 1)\n [_, *cshape] = c.shape\n new_shape = [(e1 + e2) * batch_size, *cshape]\n E = tf.reshape(E, new_shape)\n return HZ(c, b, E)\n else:\n return HZ(\n c=tf.concat([self.c, other], axis=axis),\n b=tf.concat([self.b, tf.zeros_like(other)], axis=axis),\n E=tf.concat([self.E, tf.zeros_like(other)], axis=axis),\n )\n\n @staticmethod\n def transform_op(op, inputs):\n \"\"\"\n Transform operation for a given set of inputs\n\n Args:\n op (tf.Operation): operation to transform\n inputs (List[tf.Tensor]): list of inputs\n\n Returns:\n HZ: resulting zonotope\n \"\"\"\n def input_conv(n):\n return [getattr(i, n) if isinstance(i, HZ) else i for i in inputs]\n\n op_type = op.type\n\n if op_type in [\"Shape\", \"ZerosLike\", \"OnesLike\", \"GreaterEqual\"]:\n return [None]\n\n if op_type == \"ConcatV2\":\n [z1, z2, axis] = inputs\n if not isinstance(z1, HZ) or isinstance(axis, HZ):\n raise NotImplementedError(\n 'graph includes a non-supported ConcatV2 ' +\n 'due to its first or second input type (should ' +\n 'be HZ and tf.Tensor, respectively)')\n\n return [z1.concat(z2, axis)]\n\n if op_type == \"Reshape\":\n input, _ = inputs\n\n if not isinstance(input, HZ):\n raise NotImplementedError(\n 'the input to the Reshape operation is not a HZ, which ' +\n 'is not supported in this version of parot')\n\n output = op.outputs[0]\n Cs = output.shape[1:]\n return [HZ(\n c=tf.reshape(input.c, [-1, *Cs]),\n b=tf.reshape(input.b, [-1, *Cs]),\n E=tf.reshape(input.E, [-1, *Cs]),\n )]\n\n if op_type in [\"Transpose\", \"StridedSlice\"]:\n # perform the operation component-wise\n ocs = graph.clone_op(op, input_conv(\"c\"), suffix=\"_c\")\n obs = graph.clone_op(op, input_conv(\"b\"), suffix=\"_b\")\n oEs = graph.clone_op(op, input_conv(\"E\"), suffix=\"_E\")\n return [HZ(c, b, E) for c, b, E in zip(ocs, obs, oEs)]\n\n if op_type == \"Select\":\n cond, y, z = inputs\n\n if isinstance(cond, HZ):\n raise NotImplementedError(\n 'the first input to the Select operation is a HZ, ' +\n 'which is not supported in this version of parot')\n\n return [HZ(\n c=graph.clone_op(op, input_conv(\"c\"), suffix=\"_c\")[0],\n b=graph.clone_op(op, input_conv(\"b\"), suffix=\"_b\")[0],\n E=graph.clone_op(op, input_conv(\"E\"), suffix=\"_E\")[0]\n )]\n\n if op_type in [\"Sum\", \"Mean\"]:\n [z, reduction_indices] = inputs\n if len(list(reduction_indices.shape)) == 0:\n # dealing with reductions where `axis` might be (),\n # which means that it should reduce to a scalar.\n e = z.get_error_dim()\n E = tf.reshape(z.E, [e, -1])\n if op_type == \"Sum\":\n E = tf.reduce_sum(E, axis=1)\n elif op_type == \"Mean\":\n E = 
tf.reduce_mean(E, axis=1)\n else:\n [E] = graph.clone_op(op, [z.E, reduction_indices], suffix=\"_E\")\n\n [c] = graph.clone_op(op, input_conv(\"c\"), suffix=\"_c\")\n [b] = graph.clone_op(op, input_conv(\"b\"), suffix=\"_b\")\n return [HZ(c, b, E)]\n\n if op_type == \"MatMul\":\n x, w = inputs\n if isinstance(w, HZ):\n raise NotImplementedError(\n 'the second input to the MatMul operation is a HZ, ' +\n 'which is not supported in this version of parot')\n\n b = x.b\n HW = b.shape[1:]\n b = tf.einsum(\"...ij,jk->...ijk\", b, w)\n perm = [\n *(i + 1 for i in range(len(HW))),\n 0,\n *(len(HW) + 1 + i for i in range(len(w.shape) - 1))]\n b = tf.transpose(b, perm=perm)\n b = tf.reshape(b, [-1, *w.shape[1:]])\n c = x.c @ w\n E = x.E @ w\n E = tf.concat([E, b], axis=0)\n return [HZ(c, tf.zeros_like(c), E)]\n\n if op_type == \"Conv2D\":\n x, w = inputs\n\n if isinstance(w, HZ):\n return NotImplementedError(\n 'the filter must be a tf.Tensor in this version of parot')\n\n [c] = graph.clone_op(op, [x.c, w], suffix=\"_c\")\n [b] = graph.clone_op(op, [x.b, abs(w)], suffix=\"_b\")\n [E] = graph.clone_op(op, [x.E, w], suffix=\"_E\")\n return [HZ(c, b, E)]\n\n if op_type in [\"MaxPool\"]:\n warnings.warn(\"MaxPool is only implemented \" +\n \"for `keras.MaxPool2D(2)` at the moment\")\n [x] = inputs\n y = x.max_pool_2x2()\n return [y]\n\n if op_type in [\"Add\", \"BiasAdd\"]:\n [a, b] = inputs\n return [a + b]\n\n if op_type in [\"Mul\"]:\n [a, b] = inputs\n return [a * b]\n\n if op_type in [\"Sub\"]:\n [a, b] = inputs\n if not isinstance(a, HZ):\n return [(-b) + a]\n else:\n return [a - b]\n\n if op_type in [\"RealDiv\"]:\n [a, b] = inputs\n return [a / b]\n\n if op_type in [\"Maximum\"]:\n [a, b] = inputs\n c = a.maximum(b)\n return [b.maximum(a) if c is NotImplemented else c]\n\n if op_type in [\"Minimum\"]:\n [a, b] = inputs\n c = a.minimum(b)\n return [b.minimum(a) if c is NotImplemented else c]\n\n if op_type in [\"Relu\"]:\n return [inputs[0].relu()]\n\n if op_type in [\"Abs\"]:\n return [inputs[0].abs()]\n\n if op_type in [\"Log\"]:\n return [inputs[0].log()]\n\n if op_type in [\"Log1p\"]:\n return [inputs[0].log1p()]\n\n if op_type in [\"Exp\"]:\n return [inputs[0].exp()]\n\n if op_type in [\"Sigmoid\"]:\n return [inputs[0].box_sigmoid()]\n\n if op_type in [\"Softmax\"]:\n return [inputs[0].box_softmax()]\n\n if op_type in [\"Neg\"]:\n return [- inputs[0]]\n\n raise NotImplementedError(\n ('the {} operation is not implemented by default in HZ for this ' +\n 'version of parot').format(op_type))\n\n def promote_all(self):\n \"\"\"\n Takes the largest components of the b vector and converts\n them to E columns. 
That is, `E` maps to `concat(E,diag(b))`\n and `b` maps to zero.\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n c = self.c\n (B, *Cs) = c.shape\n HWC = reduce(lambda x, y: x * y, Cs, 1)\n b = self.b\n E = tf.reshape(b, [-1, HWC])\n E = tf.linalg.diag(E)\n E = tf.transpose(E, [2, 0, 1])\n E = tf.reshape(E, [-1, *Cs])\n E = tf.concat([self.E, E], axis=0)\n return HZ(c, tf.zeros_like(c), E)\n\n def to_box(self):\n \"\"\"\n Return a box corresponding to the hybrid zonotope\n\n Returns:\n Box: a box\n \"\"\"\n from .box import Box\n\n return Box(self.c, self.get_errors())\n\n @staticmethod\n def of_domain(self, domain):\n \"\"\"\n Create an HZ using the `get_center_errors` method on the domain.\n\n Args:\n domain (Domain): domain to start from\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n c, e = domain.get_center_errors()\n return HZ.of_ball_promoted(c, e)\n\n @staticmethod\n def of_minmax(x1, x2):\n \"\"\"\n HZ from the difference between two tensors.\n\n Args:\n x1 (tf.Tensor): maximum/minimum tensor\n x2 (tf.Tensor): maximum/minimum tensor\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n return HZ((x1 + x2) / 2, abs(x1 - x2) / 2, tf.zeros_like(x2))\n\n @staticmethod\n def of_E_columns(x, cols):\n \"\"\"\n Given an input tensor, get an HZ instance with a computed E matrix.\n\n Args:\n x (tf.Tensor): Description\n cols (List[Int]): Description\n\n Returns:\n HZ: resultant zonotope\n \"\"\"\n [B, *S] = x.shape\n E = tf.constant(cols)\n E = tf.expand_dims(E, axis=1)\n E = tf.tile(E, multiples=[1, B, *[1 for _ in S]])\n E = tf.reshape(E, [len(cols) * B, *S])\n return HZ(x, tf.zeros_like(x), E)\n","repo_name":"fiveai/parot","sub_path":"parot/domains/hz.py","file_name":"hz.py","file_ext":"py","file_size_in_byte":29024,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"33096467089","text":"#!/usr/bin/python3\n\nimport argparse\nimport requests.exceptions \nfrom resources.agentk import run\nfrom resources.webagent import run_main\nfrom sys import argv as argu\nimport resources.enumsub as subdom\nimport os\n\n\n\nG = '\\033[92m' \nY = '\\033[93m' \nB = '\\033[36m' \nR = '\\033[91m' \nW = '\\033[0m'\nL = \"\\033[90m\"\n\ndef parse_arg():\n parser = argparse.ArgumentParser(epilog='\\tExample: \\r\\npython3 ' + argu[0] + \" \")\n parser._optionals.title = \"OPTIONS\"\n parser.add_argument(\"-m\",'--module',help=\"Para especificar o módulo a ser usado \",required=True)\n parser.add_argument(\"-n\",'--name' ,help=\"Nome de usuário ou nome de domínio \",required=True)\n parser.add_argument(\"-o\",'--output' ,help=\"Nome do arquivo de saída \",default=None)\n parser.add_argument(\"-t\",'--threads',help=\"Para definir o valor do fio \",type=int,default=50)\n parser.add_argument(\"-l\",'--limit',help=\"To set the max limit for web crawling\",default=30)\n parser.add_argument(\"-v\",\"--verbose\",help=\"Para habilitar verboso\",nargs='?',default=False)\n parser.add_argument(\"-p\",'--ports',help=\"Para definir as portas para enumeração de subdomínio{separar usando vírgulas eg: 80,443}\")\n return parser.parse_args()\n\n#````````````````````````````````````````````````````````````````````````````````````````\n\n#````````````````````````````````````````````````````````````````````````````````````````\ndef main():\n agrument = parse_arg()\n \n module=agrument.module\n name = agrument.name\n output = agrument.output\n thread = agrument.threads\n limit = agrument.limit\n port = agrument.ports\n verbose = agrument.verbose\n\n if (module == \"find\"):\n 
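# 'find': delegate to the agentk runner with the requested username\n        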
run(username=name,output=output,threads=thread)\n elif (module == \"crawl\" or module == \"scrap\" or module == \"web\"):\n run_main(x=name,max=limit,filename=output)\n elif (module == \"enum\" or module == \"sub-domain\"):\n subdom.main(domain=name,threads=thread,savefile=output,ports=port,verbose=verbose)\n else:\n print(f\"\\n {R} Module não Encontrado \")\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(f\"{Y}Interrupção de teclado encontrada Saindo do programa :) {W}\")\n exit(0)\n except requests.exceptions.MissingSchema:\n print(f\"{R}\\n Esquema ausente. Talvez você sinta falta 'http://' \")\n","repo_name":"gilmarScript/PyosintV1","sub_path":"pyosint.py","file_name":"pyosint.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32654620977","text":"from nonebot.adapters.onebot.v11 import Bot\n\nfrom typing import List\n\n\nasync def get_bot_friend_list(bot: Bot) -> List[int]:\n friend_list = await bot.get_friend_list()\n return [i[\"user_id\"] for i in friend_list]\n\n\nasync def get_bot_group_list(bot: Bot) -> List[int]:\n group_list = await bot.get_group_list()\n return [i[\"group_id\"] for i in group_list]\n\n\nasync def get_bot_guild_channel_list(bot: Bot, guild_id: str=None) -> List[str]:\n if guild_id is None:\n guild_list = await bot.get_guild_list()\n return [i[\"guild_id\"] for i in guild_list]\n else:\n guild_list = await bot.get_guild_list()\n if guild_id in [i[\"guild_id\"] for i in guild_list]:\n channel_list = await bot.get_guild_channel_list(guild_id=guild_id)\n return [i[\"channel_id\"] for i in channel_list]\n else:\n return []\n","repo_name":"mobyw/nonebot-general-rss","sub_path":"src/plugins/nonebot-general-rss/bot_info.py","file_name":"bot_info.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"29183832342","text":"#! 
/usr/bin/enc python\n# -*- coding: utf-8 -*-\n# author: Irving He \n# email: 1910646@tongji.edu.cn\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# plt.style.use(['science'])\n\nDataDir = \"results/\"\n\nBehaviorData = \"behavioral_Hopper-v3_0\"\nBuffer_PerformanceData = \"buffer_performance_Hopper-v3_0\"\n\ndata1 = np.load(DataDir+BehaviorData+\".npy\")\ndata2 = np.load(DataDir+Buffer_PerformanceData+\".npy\")\nprint(data1.shape)\nprint(data2.shape)\n\nplt.plot(data1)\nplt.plot(data2)\nplt.show()","repo_name":"HzcIrving/DLRL-PlayGround","sub_path":"Offline RL/BCQ/DataProcess.py","file_name":"DataProcess.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"37"} +{"seq_id":"34291523642","text":"#!/usr/bin/env python3\n\n\"\"\"convert a range of different audio files to mp3 files\nusing multiple parallel processes\"\"\"\n\nimport argparse\nimport concurrent.futures\nimport logging\nimport multiprocessing as mp\nimport os\nimport pathlib\nimport pprint\nimport random\nimport sys\nfrom collections import defaultdict\n\nimport cv2\nimport numpy as np\nimport tqdm\n\nfrom gate.utils.logging_helpers import get_logging\nfrom tali.datasets.utils.audio import load_to_tensor\n\n\ndef get_base_arguments():\n parser = argparse.ArgumentParser()\n # data and I/O\n parser.add_argument(\"--source_filepath\", type=str, default=\"data/\")\n parser.add_argument(\"--num_processes\", type=int, default=mp.cpu_count())\n\n return parser.parse_args()\n\n\ndef delete_file_if_exists(path: pathlib.Path):\n logging.error(f\"Deleting {path}\")\n if path.exists():\n path.unlink()\n\n\ndef verify_video(path: pathlib.Path):\n video_filepath = os.fspath(path.resolve())\n vid_capture = cv2.VideoCapture(video_filepath)\n try:\n total_frames = vid_capture.get(cv2.CAP_PROP_FRAME_COUNT)\n fps = vid_capture.get(cv2.CAP_PROP_FPS)\n duration_in_seconds = total_frames / fps\n vid_capture.release()\n result = True\n\n except Exception:\n video_path = pathlib.Path(video_filepath)\n audio_path = video_path.with_suffix(\".aac\")\n vid_capture.release()\n delete_file_if_exists(video_path)\n delete_file_if_exists(audio_path)\n result = False\n\n return video_filepath, result\n\n\ndef verify_audio(path: pathlib.Path):\n audio_filepath = os.fspath(path.resolve())\n try:\n load_to_tensor(\n filename=audio_filepath,\n start_point_in_seconds=1,\n duration_in_seconds=7,\n sample_rate=44100,\n mono=False,\n normalize=False,\n in_type=np.float32,\n out_type=np.float32,\n log_time=False,\n video_frame_idx_list=None,\n )\n result = True\n\n except Exception:\n result = False\n\n if not result:\n delete_file_if_exists(path)\n\n return audio_filepath, result\n\n\ndef verify_pairs(path: pathlib.Path):\n if \".mp4\" in path.suffixes:\n pair_path = path.with_suffix(\".aac\")\n elif \".aac\" in path.suffixes:\n pair_path = path.with_suffix(\".mp4\")\n else:\n return path, False\n\n if pair_path.exists() and path.exists():\n return path, True\n\n delete_file_if_exists(path)\n delete_file_if_exists(pair_path)\n return path, False\n\n\nif __name__ == \"__main__\":\n logging = get_logging(\"NOTSET\")\n args = get_base_arguments()\n\n if not os.path.exists(args.source_filepath):\n logging.error(f\"Source path {args.source_filepath} not found\")\n\n target_file_types = (\".mp4\", \".aac\")\n\n failed_jobs = []\n matching_files = defaultdict(list)\n # get all of the source audio filenames\n logging.info(f\"Current working directory is {args.source_filepath}\")\n 
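# first pass: glob the tree once per target extension and bucket the matches\n    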
with tqdm.tqdm() as pbar:\n for file_type in target_file_types:\n for file in pathlib.Path(args.source_filepath).glob(f\"**/*{file_type}\"):\n matching_files[file_type].append(file)\n pbar.update(1)\n\n for file_type in target_file_types:\n random.shuffle(matching_files[file_type])\n\n for file_type in target_file_types:\n num_samples = len(matching_files[file_type])\n logging.info(f\"Checking {num_samples} {file_type} files\")\n target_func = verify_video if file_type == \".mp4\" else verify_audio\n with concurrent.futures.ProcessPoolExecutor(\n max_workers=args.num_processes\n ) as executor:\n with tqdm.tqdm(total=num_samples, smoothing=0.0) as pbar:\n for job_idx, (file_path, result) in enumerate(\n executor.map(target_func, matching_files[file_type]),\n start=1,\n ):\n pbar.update(1)\n\n for file_type in target_file_types:\n num_samples = len(matching_files[file_type])\n logging.info(f\"Checking {num_samples} {file_type} files\")\n with concurrent.futures.ProcessPoolExecutor(\n max_workers=args.num_processes\n ) as executor:\n with tqdm.tqdm(total=num_samples, smoothing=0.0) as pbar:\n for job_idx, (file_path, result) in enumerate(\n executor.map(verify_pairs, matching_files[file_type]),\n start=1,\n ):\n pbar.update(1)\n\n logging.info(\"Done\")\n logging.error(f\"Jobs failed {pprint.pformat(failed_jobs)}\")\n\n sys.exit(0)\n","repo_name":"AntreasAntoniou/TALI-v1-0-legacy","sub_path":"preprocessing_scripts/verify_and_clean.py","file_name":"verify_and_clean.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33432665882","text":"\"\"\"\nTransition\n----------\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport matplotlib.collections as mcoll\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom datatype.builder import Base, Plot\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n import numpy.typing as npt\n\n from typing_extensions import Any, Self\n\n\nclass Builder(Base):\n def ax(self) -> Self:\n figsize = self.settings.figure.get('figsize')\n figure, ax = plt.subplots(figsize=figsize)\n\n ax.axis('off')\n\n self.component['ax'] = ax\n self.component['figure'] = figure\n\n return self\n\n def _collection(\n self,\n x: npt.NDArray,\n y: npt.NDArray,\n array: npt.NDArray = None\n ) -> Self:\n ax = self.component.get('ax')\n\n alpha = self.settings.colorline.get('alpha')\n cmap = self.settings.colorline.get('cmap')\n linewidth = self.settings.colorline.get('linewidth')\n norm = self.settings.colorline.get('norm')\n\n # Default colors equally spaced on [0, 1]:\n if array is None:\n array = np.linspace(\n 0.0,\n 1.0,\n len(x)\n )\n\n if not hasattr(array, '__iter__'):\n array = np.array([array])\n\n array = np.asarray(array)\n\n line = [x, y]\n points = np.array(line).T.reshape(-1, 1, 2)\n\n segment = np.concatenate(\n [\n points[:-1],\n points[1:]\n ],\n axis=1\n )\n\n collection = mcoll.LineCollection(\n segment,\n alpha=alpha,\n array=array,\n cmap=cmap,\n linewidth=linewidth,\n norm=norm\n )\n\n ax.add_collection(collection)\n self.component['collection'] = collection\n\n return self\n\n def colorline(self) -> Self:\n for sequence in np.unique(self.sequence):\n mask = self.sequence == sequence\n embedding = self.embedding[mask]\n\n length = len(embedding)\n\n self._collection(\n embedding[:, 0],\n embedding[:, 1],\n np.linspace(0, 1, length),\n )\n\n return self\n\n def range(self) -> Self:\n ax = self.component.get('ax')\n\n xmin, xmax = np.sort(self.embedding[:, 
0])[\n np.array(\n [\n int(len(self.embedding) * 0.01),\n int(len(self.embedding) * 0.99)\n ]\n )\n ]\n\n ymin, ymax = np.sort(self.embedding[:, 1])[\n np.array(\n [\n int(len(self.embedding) * 0.01),\n int(len(self.embedding) * 0.99)\n ]\n )\n ]\n\n xmin = xmin - (xmax - xmin) * self.settings.padding\n xmax = xmax + (xmax - xmin) * self.settings.padding\n ymin = ymin - (ymax - ymin) * self.settings.padding\n ymax = ymax + (ymax - ymin) * self.settings.padding\n\n xlimit = (xmin, xmax)\n ylimit = (ymin, ymax)\n\n ax.set_xlim(xlimit)\n ax.set_ylim(ylimit)\n\n return self\n\n def title(self) -> Self:\n \"\"\"Sets the title of the plot based on settings.\n\n Args:\n None.\n\n Returns:\n The modified Builder instance.\n\n \"\"\"\n\n ax = self.component.get('ax')\n\n title = f\"Transition for {self.settings.name}\"\n\n ax.set_title(\n title,\n fontsize=18,\n pad=25\n )\n\n return self\n\n\nclass Transition(Plot):\n def build(self) -> dict[Any, Any]:\n return (\n self.builder\n .ax()\n .title()\n .colorline()\n .range()\n .get()\n )\n","repo_name":"braycarlson/warbler.py","sub_path":"warbler.py/datatype/transition.py","file_name":"transition.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25015241559","text":"#!/usr/bin/env python3\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom gazebo_msgs.srv import GetModelState\nfrom tf.transformations import euler_from_quaternion\n\nimport numpy as np\nimport json\nimport os\n\ndata_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), os.pardir)) + \"/data/\" \n\ndef main():\n rospy.init_node(\"satview_streamer\")\n \n bridge = CvBridge()\n sv_filename = data_dir + \"hk_data_sv_mean.json\"\n sv_data = {}\n with open(sv_filename, \"rb\") as fstream:\n sv_data = json.load(fstream)\n print(sv_data.keys())\n cno_max = np.max([sv_data[key][\"cno_max\"] for key in sv_data.keys()])\n gms = rospy.ServiceProxy(\"/gazebo/get_model_state\", GetModelState)\n img_pub = rospy.Publisher(\"skycam/satview\", Image, queue_size=1)\n start_time = rospy.get_rostime()\n cb_args = [bridge, sv_data, gms, img_pub, start_time, cno_max]\n img_sub = rospy.Subscriber(\"skycam/image_raw\", Image, img_sub_cb, cb_args)\n \n\ndef img_sub_cb(data, cb_args):\n bridge = cb_args[0]\n sv_data = cb_args[1]\n gms = cb_args[2]\n img_pub = cb_args[3]\n start_time = cb_args[4]\n cno_max = cb_args[5]\n\n model_pose = gms(\"laser_0\", \"world\")\n model_euler = euler_from_quaternion(\n [model_pose.pose.orientation.x,\n model_pose.pose.orientation.y,\n model_pose.pose.orientation.z,\n model_pose.pose.orientation.w,]\n )\n # ENU (gzb) to NED\n heading = np.pi/2 - model_euler[2]\n\n cv_img = bridge.imgmsg_to_cv2(data, \"bgr8\")\n cv_img = np.array(np.flip(cv_img, axis=0))\n \n img_height = cv_img.shape[0]\n img_width = cv_img.shape[1]\n img_center = np.array([img_height/2.0, img_width/2.0]) # [250 250]\n \n r_max = np.min(img_center)\n green = (0, 255, 0)\n red = (0, 0, 255)\n blue = (255, 0, 0)\n \n now = rospy.get_rostime()\n elapsed = (now-start_time).to_sec()\n \n for sv_id in sv_data.keys():\n elev = sv_data[sv_id][\"mean\"][0]\n azim = sv_data[sv_id][\"mean\"][1]\n\n index = int(elapsed*10 % len(sv_data[sv_id][\"cno\"]))\n cno = sv_data[sv_id][\"cno\"][index]\n # print(sv_id+\" cno: \", cno)\n # print(sv_id+\" color: \", int((cno)/cno_max*255), int((cno_max - cno)/cno_max*255))\n\n r = (90.0 - elev)/90.0 * r_max\n 
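# rotate the azimuth into the image frame: offset by pi/2 for the x-axis and by the vehicle heading\n        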
theta = np.deg2rad(azim) - np.pi/2 - heading\n\n        x = int(r*np.cos(theta) + img_center[0])\n        y = int(r*np.sin(theta) + img_center[1])\n\n        cv2.circle(cv_img, (x, y), 10, (0, int((cno)/cno_max*255), int((cno_max-cno)/cno_max*255)/2), -1)\n        cv2.circle(cv_img, (x, y), 11, (0, 0, 255), 2)\n        cv2.putText(cv_img, sv_id, (x-10, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.3, green, 1)\n    \n    nesw = [\"N\", \"E\", \"S\", \"W\"]\n    for i in range(4):\n        theta = i*np.pi/2 - np.pi/2 - heading\n        r = 235\n        x = int(r*np.cos(theta) + img_center[0])\n        y = int(r*np.sin(theta) + img_center[1])\n        font = cv2.FONT_HERSHEY_SIMPLEX\n        cv2.putText(cv_img, nesw[i], (x,y), font, 0.5, green, 2)\n    \n    ros_img = bridge.cv2_to_imgmsg(cv_img, \"bgr8\")\n    img_pub.publish(ros_img)\n    # cv2.imshow(\"skycam\", cv_img)\n    # k = cv2.waitKey(3) & 0xff\n\nif __name__ == \"__main__\":\n    main()\n\n    try:\n        rospy.spin()\n\n    except KeyboardInterrupt:\n        print(\"Shutting down...\")\n        cv2.destroyAllWindows()\n","repo_name":"zp-yang/multipath_sim","sub_path":"scripts/stream_skycam.py","file_name":"stream_skycam.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"4123165566","text":"from django.forms import ModelForm\nfrom django import forms\nfrom .models import *\nfrom django.shortcuts import get_object_or_404\nimport json\nfrom django.core import serializers\n\nclass EditIntentLabelsForm(forms.ModelForm):\n    class Meta:\n        model = IntentCategory\n        fields = ('slots_field',\"new_intent_label_field\")\n    slots_field_widget = forms.Textarea(attrs={'autocomplete':'off','class':'intent-sentence','placeholder':'slot names here, space separated'})\n    slots_field = forms.CharField(label='', widget=slots_field_widget)\n    new_intent_label_widget = forms.TextInput(attrs={'autocomplete':'off','placeholder':'new intent label'})\n    \n    new_intent_label_field = forms.CharField(label='', widget=new_intent_label_widget)\n    INTENT_LABELS = []\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        \n        INTENTS = IntentCategory.objects.all()\n        self.INTENT_LABELS = []\n        INTENT_SLOTS_DICT = {}\n        for i in range(len(INTENTS)):\n            self.INTENT_LABELS.append((INTENTS[i].intent_label,INTENTS[i].intent_label))\n            slots = INTENTS[i].intentslot_set.all().values('slot_name','color_hex')\n            slots = [entry for entry in slots]\n            INTENT_SLOTS_DICT[INTENTS[i].intent_label] = slots\n\n        self.intents_json = json.dumps(INTENT_SLOTS_DICT)\n        self.fields[\"intent_label_choices\"] = forms.ChoiceField(choices=self.INTENT_LABELS,widget=forms.Select(attrs={'onChange':'updateForm()'}))\n    \n\n# class CustomModelChoiceIterator(forms.models.ModelChoiceIterator):\n#     def choice(self, obj):\n#         return (self.field.prepare_value(obj),\n#                 self.field.label_from_instance(obj), obj)\n# class CustomModelChoiceField(forms.ModelChoiceField):\n#     def _get_choices(self):\n#         if hasattr(self, '_choices'):\n#             return self._choices\n#         return CustomModelChoiceIterator(self)\n#     choices = property(_get_choices, \n#                        forms.ChoiceField._set_choices)\n\n\nclass SubmitIntentsForm(forms.ModelForm):\n    class Meta:\n        model = IntentCategory\n        fields = \"__all__\"\n    seq_in_field_widget = forms.TextInput(attrs={'autocomplete':'off','class':'intent-sentence','placeholder':'enter intent here',\"onChange\":\"updateMask()\"})\n    seq_in_field = forms.CharField(label='', widget=seq_in_field_widget)\n    seq_out_field_widget = forms.TextInput(attrs={'style':\"font-size: 12px;\", \"readonly\":\"\", 
'autocomplete':'off','class':'intent-sentence','placeholder':'mask will be generated here'})\n seq_out_field = forms.CharField(label='', widget=seq_out_field_widget)\n intent_id_to_delete = forms.CharField(widget = forms.HiddenInput(), required = False)\n intent_id_to_modify = forms.CharField(widget = forms.HiddenInput(), required = False)\n\n INTENT_LABELS = []\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n INTENTS = IntentCategory.objects.all()\n self.INTENT_LABELS = [('', '----'),]\n INTENT_SLOTS_DICT = {}\n for i in range(len(INTENTS)):\n self.INTENT_LABELS.append((INTENTS[i].intent_label,INTENTS[i].intent_label))\n slots = INTENTS[i].intentslot_set.all().values('slot_name','color_hex')\n slotsDict = {}\n slots = [entry for entry in slots]\n for entry in slots:\n slotsDict[entry[\"slot_name\"]]=entry[\"color_hex\"]\n INTENT_SLOTS_DICT[INTENTS[i].intent_label] = slotsDict\n self.intents_json = json.dumps(INTENT_SLOTS_DICT)\n self.fields[\"intent_label_choices\"] = forms.ChoiceField(choices=self.INTENT_LABELS,widget=forms.Select(attrs={'onChange':'updateForm()'}))\n self.fields[\"slots_choices\"] = forms.ModelChoiceField(queryset=self.instance.intentslot_set.values('slot_name','color_hex'),empty_label=None, widget=forms.RadioSelect(attrs={}))\n existing_intens = IntentInstance.objects.all().filter(label=self.instance.intent_label).values('id','label','seq_in','seq_out')\n existing_intens = [intent for intent in existing_intens]\n self.existing_intents_json = json.dumps(existing_intens)\n\n","repo_name":"ahadsuleymanli/goo_slot_labeller","sub_path":"mysite/submit_intents/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27438186431","text":"length = int(input())\npattern = []\nans = list(input())\ni = 0\nname = [[ 'Adrian', 0], ['Bruno', 0], [ 'Goran', 0]]\n# while i < length:\n# pattern.append(input())\n# i += 1\n\n\ndef andiran(index):\n if (index % 3 == 0):\n return 'A'\n elif (index % 3 == 1):\n return ('B')\n else:\n return 'C'\n\ndef bruno(index):\n if (index % 2 == 0):\n return 'B'\n elif (index % 4 == 1):\n return 'A'\n elif (index % 4 == 3):\n return 'C'\n\n\ndef goran(index):\n if (index % 6 == 0 or index % 6 == 1):\n return 'C'\n elif (index % 6 == 2 or index % 6 == 3):\n return 'A'\n elif (index % 6 == 4 or index % 6 == 5):\n return 'B'\n\n\n\n\nfor i in range(0,len(ans)):\n\n if(ans[i] == andiran(i)):\n name[0][1] += 1\n if (ans[i] == bruno(i)):\n name[1][1] += 1\n if (ans[i] == goran(i)):\n name[2][1] += 1\n\n\n\n\nmaxScore = max(map(lambda x: x[1], name))\nprint(maxScore)\nfor j in range(3):\n if (name[j][1] == maxScore):\n print(name[j][0])\n\n\n\n\n\n\n\n\n\n","repo_name":"Ekkawin/algorithm","sub_path":"src/Ptic.py","file_name":"Ptic.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26613932773","text":"n, m = map(int, input().split())\n\n\nclass UnionFind:\n def __init__(self, n) -> None:\n self.n = n\n self.par = [-1] * n\n\n def find(self, x):\n if self.par[x] < 0:\n return x\n else:\n self.par[x] = self.find(self.par[x])\n return self.par[x]\n\n def union(self, x, y):\n x = self.find(x)\n y = self.find(y)\n\n if x == y:\n return True\n\n if self.par[x] > self.par[y]:\n x, y = y, x\n self.par[x] += self.par[y]\n self.par[y] = x\n return False\n\n def roots(self):\n return [i for i, x in 
enumerate(self.par) if x < 0]\n\n    def group_count(self):\n        return len(self.roots())\n\n\nuf = UnionFind(n)\nx, y = 0, 0\nfor _ in range(m):\n    a, b, c, d = input().split()\n    a = int(a)\n    c = int(c)\n    a -= 1\n    c -= 1\n\n    if uf.union(a, c):\n        y += 1\n\nx = uf.group_count() - y\nprint(y, x)\n","repo_name":"mei28/Competitive-programing","sub_path":"ABC-293/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18704246168","text":"# Linked list\n# Add two numbers represented as linked lists\n# Approaches:\n# 1. Write it out directly\n# 2. Recursion\n# 3. Two pointers\n# Lessons learned:\n# 1. the dummy = ListNode(), p = dummy, return dummy.next pattern keeps a handle on the head\n# 2. carry = sum//10; val = sum %10\n\n# Someone else's two-pointer solution (much simpler than mine)\nclass Solution:\n    def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n        dummy = ListNode()\n        p = dummy\n        p1 = l1\n        p2 = l2\n        carry = 0\n        while p1 or p2:\n            first, second = 0, 0\n            if p1:\n                first = p1.val\n                p1 = p1.next\n            if p2:\n                second = p2.val\n                p2 = p2.next\n            sums = first + second + carry\n            carry = sums // 10\n            val = sums % 10\n            p.next = ListNode(val, None)\n            p = p.next\n\n        if carry:\n            p.next = ListNode(carry, None)\n        return dummy.next\n\n\n# Someone else's recursive solution: worth rewriting yourself, hard to come up with on your own\nclass Solution:\n    def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n        # Recursion\n        if not l1:\n            return l2\n        if not l2:\n            return l1\n\n        l1.val = l1.val + l2.val\n        if l1.val > 9:\n            # // is floor division\n            l1.next = self.addTwoNumbers(ListNode(l1.val // 10), l1.next)\n            # % takes the remainder\n            l1.val = l1.val % 10\n\n        l1.next = self.addTwoNumbers(l1.next, l2.next)\n\n        return l1\n\n\n# My own clunky version: written out directly\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n        if not l1:\n            return l2\n        if not l2:\n            return l1\n\n        memo = 0\n        res = l1\n        flag_2 = True\n        flag_3 = True\n        while flag_2 and flag_3:\n            l1_digit = l1.val\n            l2_digit = l2.val\n            # carry from the previous digit\n            add_digit = l1_digit + l2_digit + memo\n            # carry for this digit\n            if add_digit > 9:\n                add_digit = add_digit - 10\n                memo = 1\n            else:\n                memo = 0\n\n            # record\n            l1.val = add_digit\n            if l1.next is None:\n                flag_2 = False\n\n            if l2.next is None:\n                flag_3 = False\n\n            # new\n            if flag_2:\n                l1 = l1.next\n            if flag_3:\n                l2 = l2.next\n\n        if not flag_3 and not flag_2:\n            if memo == 1:\n                l1.next = ListNode(1, None)\n\n        elif not flag_2:\n            # copy until the end\n            while l2:\n                # carry from the previous digit\n                l2.val = l2.val + memo\n                # carry for this digit\n                if l2.val > 9:\n                    l2.val = l2.val - 10\n                    memo = 1\n                else:\n                    memo = 0\n\n                l1.next = ListNode(l2.val, None)\n                l1 = l1.next\n                l2 = l2.next\n            if memo == 1:\n                l1.next = ListNode(1, None)\n\n        else:\n            # not l2:\n            # copy until the end\n            flag = True\n            while flag:\n                # carry from the previous digit\n                l1.val = l1.val + memo\n                # carry for this digit\n                if l1.val > 9:\n                    l1.val = l1.val - 10\n                    memo = 1\n                else:\n                    memo = 0\n                if l1.next is None:\n                    flag = False\n                    break\n                l1 = l1.next\n            if memo == 1:\n                l1.next = ListNode(1, None)\n\n        return res\n\n","repo_name":"RoryDoctective/LeetCode","sub_path":"leetcode得题/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27751068555","text":"__author__ = \"Kyle Vitatus Lopin\"\n\n\nfrom matplotlib.patches import ConnectionPatch\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.gridspec import GridSpec\nimport matplotlib.pyplot as plt\n\nimport numpy 
as np\n\n# local files\nimport get_data\n\nBACKGROUND = [687.65, 9453.7, 23218.35, 9845.05, 15496.7,\n 18118.55, 7023.8, 7834.1, 28505.9, 4040.9,\n 5182.3, 1282.55, 2098.85, 1176.1, 994.45,\n 496.45, 377.55, 389.75]\n\nplt.style.use('dark_background')\n\n\n# first add spectrum to model\nfig = plt.figure(figsize=(11, 9))\nfig.suptitle(\"Convert sensor data to physical properties\",\n size=20)\n\ngs = GridSpec(4, 3, figure=fig)\n\nax1, ax2, ax3, ax4 = 0, 0, 0, 0\nax3 = fig.add_subplot(gs[0, 2])\nfor key, spine in ax3.spines.items():\n spine.set_visible(False)\nax3.get_xaxis().set_visible(False)\nax3.get_yaxis().set_visible(False)\n\ndef make_axis():\n global ax1, ax2, ax3, ax4\n ax1 = fig.add_subplot(gs[:2, :2], zorder=3)\n ax1.set_title(\"Spectrum data\", size=20, y=.85)\n ax1.spines['top'].set_visible(False)\n ax1.spines['right'].set_visible(False)\n\n ax2 = fig.add_subplot(gs[1:, 2])\n ax2.set_title(\"Chlorophyll levels\", size=16)\n ax2.set_xlabel(\"Leaf number\")\n ax2.yaxis.tick_right()\n ax2.yaxis.set_label_position(\"right\")\n ax2.set_ylabel(\"Chlorophyll (µg/cm2)\")\n\n ax3 = fig.add_subplot(gs[0, 2])\n for key, spine in ax3.spines.items():\n spine.set_visible(False)\n ax3.get_xaxis().set_visible(False)\n ax3.get_yaxis().set_visible(False)\n\n ax4 = fig.add_subplot(gs[2:, :2], zorder=1)\n for key, spine in ax4.spines.items():\n spine.set_visible(False)\n ax4.get_xaxis().set_visible(False)\n ax4.get_yaxis().set_visible(False)\n\n ax1.set_ylabel(\"Fraction of reflectance\", size=12)\n ax1.set_xlabel(\"Wavelength\", size=12)\n\n plt.tight_layout()\n\nx, y = get_data.get_data(\"mango\", \"as7265x\", int_time=150,\n position=2, led=\"b'White'\",\n led_current=\"25 mA\")\nprint(y.columns)\ny = y['Avg Total Chlorophyll (µg/cm2)']\n\nprint(x)\n\n\ndef runner(i):\n j = i - 20\n print('i = ', i, j)\n\n\n if j == 0:\n make_axis()\n\n if j == -10:\n ax3.text(0.2, 0.2, \"Record spectrum from\\n\"\n \"many different products\\n\"\n \"and actual chemical levels\",\n fontsize=15)\n\n if 0 <= j <= 99:\n # print(x.iloc[i])\n ax1.plot(x.iloc[j]/BACKGROUND, color='limegreen')\n if j == 0:\n ax1.set_xticklabels(x.columns, rotation=60)\n ax2.scatter(j, y.iloc[j], color='limegreen')\n\n if j == 20:\n con1 = ConnectionPatch(xyA=(.9, .6), xyB=(.2, .7),\n arrowstyle=\"simple\",\n coordsA=\"axes fraction\",\n coordsB=\"axes fraction\",\n axesA=ax1, axesB=ax2,\n shrinkB=1, shrinkA=1,\n fc='navy', lw=4,\n edgecolor='lightskyblue',\n mutation_scale=50,\n zorder=10)\n con1.set_in_layout(False)\n ax1.add_artist(con1)\n\n if j == 50:\n con2 = ConnectionPatch(xyA=(.5, .1), xyB=(.5, .65),\n arrowstyle=\"simple\",\n coordsA=\"axes fraction\",\n coordsB=\"axes fraction\",\n axesA=ax1, axesB=ax4,\n mutation_scale=40,\n lw=4)\n con2.set_in_layout(False)\n ax1.add_artist(con2)\n props = dict(boxstyle=\"round\",\n facecolor='wheat', )\n ax4.text(0.1, 0.4, \"Machine Learning (ML)\\nStatistical Learning methods\",\n bbox=props, fontsize=25,\n color='black')\n if j == 70:\n con3 = ConnectionPatch(xyA=(.9, .5), xyB=(.2, .4),\n arrowstyle=\"simple\",\n coordsA=\"axes fraction\",\n coordsB=\"axes fraction\",\n axesA=ax4, axesB=ax2,\n mutation_scale=40,\n lw=4)\n con3.set_in_layout(False)\n ax1.add_artist(con3)\n fig.savefig(\"final1.png\")\n\n\n\ntime = range(120)\nani = FuncAnimation(fig, runner, blit=False,\n frames=time, repeat=False)\n# ani.save('Spectrum_ML_init2.gif', 
writer='imagemagick')\nplt.show()\n\n\n","repo_name":"KyleLopin/sklearn_scripts","sub_path":"animation_model.py","file_name":"animation_model.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12044365530","text":"from prepare_features import get_data\nfrom sklearn import model_selection\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import RandomizedSearchCV, GridSearchCV\n\nimport pandas as pd\n\n# training data\ndata = get_data()\nX, y = data.get_training_data()\nX_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, shuffle=True, random_state=1)\n\n\nif __name__ == '__main__':\n model = RandomForestRegressor(criterion='mae')\n parameters = {\n 'n_estimators': [95, 97, 100, 105, 110],\n 'max_depth': [8, 9, 10, 11, 12],\n 'min_samples_split': [7, 8, 9, 10, 11],\n 'min_samples_leaf': [1],\n 'min_impurity_decrease': [0.0005],\n 'oob_score': [True],\n }\n\n grid = GridSearchCV(model,parameters,cv=5,scoring='neg_mean_absolute_error', n_jobs = 6,verbose=True)\n\n grid.fit(X, y)\n #predictions = xgb_grid.predict(X)\n #test_predict = xgb_grid.predict(X_test)\n\n print(grid.best_estimator_)\n print(grid.best_score_)\n\n\n# test data\n# X_test = data.get_test_data()\n\n","repo_name":"Brendonap/Z1nd1-Traffic","sub_path":"rf_gridsearch.py","file_name":"rf_gridsearch.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"181688867","text":"from sqlalchemy import or_\nfrom werkzeug.exceptions import NotFound\n\nfrom geonature.utils.env import DB\nfrom geonature.core.gn_meta.models import TDatasets\n\nfrom geonature.utils.utilssqlalchemy import testDataType\nfrom geonature.utils.errors import GeonatureApiError\nfrom .utils import get_nomenclature_filters\n\nfrom .models import (\n TRelevesOccurrence,\n TOccurrencesOccurrence,\n CorCountingOccurrence,\n corRoleRelevesOccurrence,\n)\nfrom geonature.core.gn_meta.models import TDatasets, CorDatasetActor\n\n\nclass ReleveRepository():\n \"\"\"\n Repository: classe permettant l'acces au données\n d'un modèle de type 'releve'\n \"\"\"\n\n def __init__(self, model):\n self.model = model\n\n def get_one(self, id_releve, info_user):\n \"\"\"Return one releve\n params:\n - id_releve: integer\n - info_user: TRole object model\n \"\"\"\n try:\n releve = DB.session.query(self.model).get(id_releve)\n except NotFound:\n raise NotFound(\n 'The releve \"{}\" does not exist'.format(id_releve)\n )\n return releve.get_releve_if_allowed(info_user)\n\n def update(self, releve, info_user, geom):\n \"\"\" Update the current releve if allowed\n params:\n - releve: a Releve object model\n - info_user: Trole object model\n \"\"\"\n releve = releve.get_releve_if_allowed(info_user)\n DB.session.merge(releve)\n DB.session.commit()\n return releve\n\n def delete(self, id_releve, info_user):\n \"\"\"Delete a releve\n params:\n - id_releve: integer\n - info_user: TRole object model\"\"\"\n\n releve = DB.session.query(self.model).get(id_releve)\n if releve:\n releve = releve.get_releve_if_allowed(info_user)\n DB.session.delete(releve)\n DB.session.commit()\n return releve\n raise NotFound('The releve \"{}\" does not exist'.format(id_releve))\n\n def filter_query_with_autorization(self, user):\n q = DB.session.query(self.model)\n if user.tag_object_code == '2':\n 
allowed_datasets = TDatasets.get_user_datasets(user)\n q = q.filter(\n or_(\n self.model.id_dataset.in_(tuple(allowed_datasets)),\n self.model.observers.any(id_role=user.id_role),\n self.model.id_digitiser == user.id_role\n )\n )\n elif user.tag_object_code == '1':\n q = q.filter(\n or_(\n self.model.observers.any(id_role=user.id_role),\n self.model.id_digitiser == user.id_role\n )\n )\n return q\n\n def filter_query_generic_table(self, user):\n \"\"\"\n Return a prepared query filter with cruved authorization\n from a generic_table (a view)\n \"\"\"\n q = DB.session.query(self.model.tableDef)\n if user.tag_object_code in ('1', '2'):\n q = q.outerjoin(corRoleRelevesOccurrence, self.model.tableDef.columns.id_releve_occtax ==\n corRoleRelevesOccurrence.columns.id_releve_occtax)\n if user.tag_object_code == '2':\n allowed_datasets = TDatasets.get_user_datasets(user)\n q = q.filter(\n or_(\n self.model.tableDef.columns.id_dataset.in_(tuple(allowed_datasets)),\n corRoleRelevesOccurrence.columns.id_role == user.id_role,\n self.model.tableDef.columns.id_digitiser == user.id_role\n )\n )\n elif user.tag_object_code == '1':\n q = q.filter(\n or_(\n corRoleRelevesOccurrence.columns.id_role == user.id_role,\n self.model.tableDef.columns.id_digitiser == user.id_role\n )\n )\n return q\n\n def get_all(self, info_user):\n \"\"\"\n Return all the data from Releve model filtered with\n the cruved authorization\n \"\"\"\n q = self.filter_query_with_autorization(info_user)\n data = q.all()\n if data:\n return data\n raise NotFound('No releve found')\n\n def get_filtered_query(self, info_user, from_generic_table=False):\n \"\"\"\n Return a query object already filtered with\n the cruved authorization\n \"\"\"\n if not from_generic_table:\n return self.filter_query_with_autorization(info_user)\n else:\n return self.filter_query_generic_table(info_user)\n\n\ndef get_query_occtax_filters(args, mappedView, q, from_generic_table=False):\n if from_generic_table:\n mappedView = mappedView.tableDef.columns\n params = args.to_dict()\n testT = None\n if 'cd_nom' in params:\n testT = testDataType(params.get('cd_nom'), DB.Integer, 'cd_nom')\n if testT:\n raise GeonatureApiError(message=testT)\n q = q.join(\n TOccurrencesOccurrence,\n TOccurrencesOccurrence.id_releve_occtax ==\n mappedView.id_releve_occtax\n ).filter(\n TOccurrencesOccurrence.cd_nom == int(params.pop('cd_nom'))\n )\n if 'observers' in params:\n q = q.join(\n corRoleRelevesOccurrence,\n corRoleRelevesOccurrence.columns.id_releve_occtax ==\n mappedView.id_releve_occtax\n ).filter(\n corRoleRelevesOccurrence.columns.id_role.in_(\n args.getlist('observers')\n )\n )\n params.pop('observers')\n\n if 'date_up' in params:\n testT = testDataType(params.get('date_up'), DB.DateTime, 'date_up')\n if testT:\n raise GeonatureApiError(message=testT)\n q = q.filter(mappedView.date_max <= params.pop('date_up'))\n if 'date_low' in params:\n testT = testDataType(\n params.get('date_low'),\n DB.DateTime,\n 'date_low'\n )\n if testT:\n raise GeonatureApiError(message=testT)\n q = q.filter(mappedView.date_min >= params.pop('date_low'))\n if 'date_eq' in params:\n testT = testDataType(\n params.get('date_eq'),\n DB.DateTime,\n 'date_eq'\n )\n if testT:\n raise GeonatureApiError(message=testT)\n q = q.filter(mappedView.date_min == params.pop('date_eq'))\n if 'altitude_max' in params:\n testT = testDataType(\n params.get('altitude_max'),\n DB.Integer,\n 'altitude_max'\n )\n if testT:\n raise GeonatureApiError(message=testT)\n q = q.filter(mappedView.altitude_max <= 
params.pop('altitude_max'))\n\n if 'altitude_min' in params:\n testT = testDataType(\n params.get('altitude_min'),\n DB.Integer,\n 'altitude_min'\n )\n if testT:\n raise GeonatureApiError(message=testT)\n q = q.filter(mappedView.altitude_min >= params.pop('altitude_min'))\n\n if 'organism' in params:\n q = q.join(\n CorDatasetActor,\n CorDatasetActor.id_dataset == mappedView.id_dataset\n ).filter(\n CorDatasetActor.id_actor == int(params.pop('organism'))\n )\n\n if 'observateurs' in params:\n observers_query = \"%{}%\".format(params.pop('observateurs'))\n q = q.filter(mappedView.observateurs.ilike(observers_query))\n\n if from_generic_table:\n table_columns = mappedView\n else:\n table_columns = mappedView.__table__.columns\n\n # Generic Filters\n for param in params:\n if param in table_columns:\n col = getattr(table_columns, param)\n testT = testDataType(params[param], col.type, param)\n if testT:\n raise GeonatureApiError(message=testT)\n q = q.filter(col == params[param])\n\n releve_filters, occurrence_filters, counting_filters = get_nomenclature_filters(params)\n if len(releve_filters) > 0:\n q = q.join(\n TRelevesOccurrence,\n mappedView.id_releve_occtax ==\n TRelevesOccurrence.id_releve_occtax\n )\n for nomenclature in releve_filters:\n col = getattr(TRelevesOccurrence.__table__.columns, nomenclature)\n q = q.filter(col == params.pop(nomenclature))\n\n if len(occurrence_filters) > 0:\n q = q.join(\n TOccurrencesOccurrence,\n mappedView.id_releve_occtax ==\n TOccurrencesOccurrence.id_releve_occtax\n )\n for nomenclature in occurrence_filters:\n col = getattr(TOccurrencesOccurrence.__table__.columns, nomenclature)\n q = q.filter(col == params.pop(nomenclature))\n\n if len(counting_filters) > 0:\n if len(occurrence_filters) > 0:\n q = q.join(\n CorCountingOccurrence,\n TOccurrencesOccurrence.id_occurrence_occtax ==\n CorCountingOccurrence.id_occurrence_occtax\n )\n else:\n q = q.join(\n TOccurrencesOccurrence,\n TOccurrencesOccurrence.id_releve_occtax ==\n mappedView.id_releve_occtax\n ).join(\n CorCountingOccurrence,\n TOccurrencesOccurrence.id_occurrence_occtax ==\n CorCountingOccurrence.id_occurrence_occtax\n\n )\n for nomenclature in counting_filters:\n col = getattr(CorCountingOccurrence.__table__.columns, nomenclature)\n q = q.filter(col == params.pop(nomenclature))\n\n # Order by\n if 'orderby' in params:\n if params.get('orderby') in mappedView.__table__.columns:\n orderCol = getattr(\n mappedView.__table__.columns,\n params['orderby']\n )\n # else:\n # orderCol = getattr(\n # mappedView.__table__.columns,\n # 'occ_meta_create_date'\n # )\n\n if 'order' in params:\n if (params['order'] == 'desc'):\n orderCol = orderCol.desc()\n\n q = q.order_by(orderCol)\n\n return q\n","repo_name":"cdcvidal/GeoNature","sub_path":"contrib/occtax/backend/repositories.py","file_name":"repositories.py","file_ext":"py","file_size_in_byte":10167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"7218946756","text":"import numpy as np\r\n\r\ndef ChooseTriclinic(mins,maxes):\r\n latticeparm=np.zeros([6])\r\n latticeparm[0]=np.random.rand()*(maxes[0]-mins[0])+mins[0]\r\n latticeparm[1]=np.random.rand()*(maxes[1]-mins[1])+mins[1]\r\n latticeparm[2]=np.random.rand()*(maxes[2]-mins[2])+mins[2]\r\n latticeparm[3]=np.random.rand()*(maxes[3]-mins[3])+mins[3]\r\n latticeparm[4]=np.random.rand()*(maxes[4]-mins[4])+mins[4]\r\n latticeparm[5]=np.random.rand()*(maxes[5]-mins[5])+mins[5]\r\n return latticeparm\r\n\r\ndef ChooseMonoclinic(mins,maxes):\r\n 
latticeparm=np.zeros([6])\r\n latticeparm[0]=np.random.rand()*(maxes[0]-mins[0])+mins[0]\r\n latticeparm[1]=np.random.rand()*(maxes[1]-mins[1])+mins[1]\r\n latticeparm[2]=np.random.rand()*(maxes[2]-mins[2])+mins[2]\r\n latticeparm[3]=np.pi/2\r\n latticeparm[4]=np.random.rand()*(maxes[4]-mins[4])+mins[4]\r\n latticeparm[5]=np.pi/2\r\n return latticeparm\r\n\r\ndef ChooseOrthorhombic(mins,maxes):\r\n latticeparm=np.zeros([6])\r\n latticeparm[0]=np.random.rand()*(maxes[0]-mins[0])+mins[0]\r\n latticeparm[1]=np.random.rand()*(maxes[1]-mins[1])+mins[1]\r\n latticeparm[2]=np.random.rand()*(maxes[2]-mins[2])+mins[2]\r\n latticeparm[3]=np.pi/2\r\n latticeparm[4]=np.pi/2\r\n latticeparm[5]=np.pi/2\r\n return latticeparm\r\n \r\ndef ChooseTetragonal(mins,maxes):\r\n latticeparm=np.zeros([6])\r\n t1=max(mins[0],mins[1])\r\n t2=min(maxes[0],maxes[1])\r\n latticeparm[0]=np.random.rand()*(t2-t1)+t1\r\n latticeparm[1]=latticeparm[0]\r\n latticeparm[2]=np.random.rand()*(maxes[2]-mins[2])+mins[2]\r\n latticeparm[3]=np.pi/2\r\n latticeparm[4]=np.pi/2\r\n latticeparm[5]=np.pi/2\r\n return latticeparm\r\n\r\ndef ChooseTrigonal(mins,maxes):\r\n latticeparm=np.zeros([6])\r\n t1=max(mins[0],mins[1])\r\n t2=min(maxes[0],maxes[1])\r\n latticeparm[0]=np.random.rand()*(t2-t1)+t1\r\n latticeparm[1]=latticeparm[0]\r\n latticeparm[2]=np.random.rand()*(maxes[2]-mins[2])+mins[2]\r\n latticeparm[3]=np.pi/2\r\n latticeparm[4]=np.pi/2\r\n latticeparm[5]=2*np.pi/3\r\n return latticeparm\r\n\r\ndef ChooseHexagonal(mins,maxes):\r\n latticeparm=np.zeros([6])\r\n t1=max(mins[0],mins[1])\r\n t2=min(maxes[0],maxes[1])\r\n latticeparm[0]=np.random.rand()*(t2-t1)+t1\r\n latticeparm[1]=latticeparm[0]\r\n latticeparm[2]=np.random.rand()*(maxes[2]-mins[2])+mins[2]\r\n latticeparm[3]=np.pi/2\r\n latticeparm[4]=np.pi/2\r\n latticeparm[5]=2*np.pi/3 \r\n return latticeparm\r\n\r\ndef ChooseCubic(mins,maxes):\r\n latticeparm=np.zeros([6])\r\n t1=max(mins[0],mins[1],mins[2])\r\n t2=min(maxes[0],maxes[1],maxes[2])\r\n latticeparm[0]=np.random.rand()*(t2-t1)+t1\r\n latticeparm[1]=latticeparm[0]\r\n latticeparm[2]=latticeparm[0]\r\n latticeparm[3]=np.pi/2\r\n latticeparm[4]=np.pi/2\r\n latticeparm[5]=np.pi/2\r\n return latticeparm\r\n\r\ndef GetLatticeParm(spg,latticeMins,latticeMaxes):\r\n if spg>=1 and spg<=2:\r\n latticeparm=ChooseTriclinic(latticeMins,latticeMaxes)\r\n if spg>=3 and spg<=15:\r\n latticeparm=ChooseMonoclinic(latticeMins,latticeMaxes)\r\n if spg>=16 and spg<=74:\r\n latticeparm=ChooseOrthorhombic(latticeMins,latticeMaxes)\r\n if spg>=75 and spg<=142:\r\n latticeparm=ChooseTetragonal(latticeMins,latticeMaxes)\r\n if spg>=143 and spg<=167:\r\n latticeparm=ChooseTrigonal(latticeMins,latticeMaxes)\r\n if spg>=168 and spg<=194:\r\n latticeparm=ChooseHexagonal(latticeMins,latticeMaxes)\r\n if spg>=195 and spg<=230:\r\n latticeparm=ChooseCubic(latticeMins,latticeMaxes)\r\n ax=latticeparm[0]\r\n bx=latticeparm[1]*np.cos(latticeparm[5])\r\n by=latticeparm[1]*np.sin(latticeparm[5])\r\n cx=latticeparm[2]*np.cos(latticeparm[4])\r\n cy=(latticeparm[2]*latticeparm[1]*np.cos(latticeparm[3])-cx*bx)/by\r\n cz=np.sqrt(latticeparm[2]**2-cx**2-cy**2)\r\n M=np.array([[ax,bx,cx],[0,by,cy],[0,0,cz]])\r\n return M\r\n \r\n#M=GetLatticeParm(ChooseCubic,[0,0,0,np.pi/3,np.pi/3,np.pi/3],[5,5,5,2*np.pi/3,2*np.pi/3,2*np.pi/3])","repo_name":"bigd4/generate","sub_path":"ChooseLattice.py","file_name":"ChooseLattice.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"71994608108","text":"from django.urls import reverse\nfrom django.shortcuts import get_object_or_404\nfrom django_app import models\nfrom django.contrib.auth.hashers import make_password\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpRequest, HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\n\nfrom django_app.models import PostComments\n\n\ndef home(request):\n return render(request, 'home.html')\n@login_required\ndef list_suggests(request):\n select_orm = models.Suggests.objects.all()\n return render(request, 'list_suggests.html', context={\"select_orm\": select_orm})\n@login_required\ndef suggest_create(request: HttpRequest) -> HttpResponse:\n if request.method == \"GET\":\n return render(request, \"send_suggests.html\")\n elif request.method == \"POST\":\n\n title = str(request.POST[\"title\"])\n description = str(request.POST[\"description\"])\n date = str(request.POST[\"date\"])\n image = request.FILES[\"image\"]\n\n models.Suggests.objects.create(\n author=request.user,\n title=title,\n description=description,\n date=date,\n image=image,\n )\n return redirect(list_suggests)\n\n@login_required\ndef post_detail(request: HttpRequest, pk: str) -> HttpResponse:\n post_obj = models.Suggests.objects.get(id=int(pk))\n post_comments_objs = models.PostComments.objects.all()\n\n\n return render(request, \"detail.html\",{\"post\": post_obj, \"comments\": post_comments_objs})\n\n@login_required\ndef post_comment_create(request: HttpRequest, pk: str) -> HttpResponseRedirect:\n if request.method == \"POST\":\n post_obj = models.Suggests.objects.get(id=int(pk))\n author = request.user\n text = request.POST.get('text')\n models.PostComments.objects.create(post_id=post_obj.id, author=author, text=text)\n return redirect(reverse('post_detail', args=[pk]))\n else:\n # Возбуждение исключения в случае неправильного метода\n raise Exception(\"Method not allowed!\")\n\n\n\n\n\n@login_required\ndef post_delete(request: HttpRequest, pk: str) -> HttpResponseRedirect:\n if request.method == \"GET\":\n post = models.Suggests.objects.get(id=int(pk))\n post.delete()\n return redirect(list_suggests)\n else:\n raise Exception(\"Method not allowed!\")\n\n\n\n\n@login_required\ndef vote_comment(request, pk, vote):\n comment = get_object_or_404(PostComments, id=pk)\n\n if vote == 'up':\n comment.rating += 1\n elif vote == 'down':\n comment.rating -= 1\n\n comment.save()\n return redirect('post_detail', pk=comment.post.id)\n\n\n\n\n\n\n\n\n\n\n\n#TODO аутинтефикация\n@login_required\ndef logout_f(request):\n logout(request)\n return redirect(register_f)\ndef login_f(request):\n if request.method == 'GET':\n return render(request, 'login.html')\n elif request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(request, username=username, password=password) #пытается взять имя и пароль из базы данных\n if user is not None:\n login(request, user)\n return redirect(home)\n else:\n raise Exception(\"Данные для входа неправильные!\")\n else:\n raise Exception(\"Method not supported\")\ndef register_f(request):\n\n if request.method == \"GET\":\n return render(request, \"register.html\")\n elif request.method == \"POST\":\n username = request.POST.get('username')\n name = request.POST.get('name')\n surname = request.POST.get('surname')\n email = 
request.POST.get('email')\n        password = request.POST.get('password')\n        User.objects.create(\n            username=username,\n            password=make_password(password),\n            first_name=name,\n            last_name=surname,\n            email=email\n        )\n        return redirect(login_f)\n    else:\n        raise Exception(\"Method not allowed!\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Abuka13/django_server","sub_path":"django_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"897651291","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreate a dictionary that maps the abbreviation of every subject teacher of class M5a to their full name\n(e.g. 'loe' becomes 'Roland Lötscher').\nThen build from it a list of the abbreviations of all subject teachers whose names contain an umlaut.\n\"\"\"\n\nname = {\n    'loe':'Roland Lötscher',\n    'lep': 'Marco Lepori',\n    'leu': 'Barbara Leuenberger'\n}\n\nlist = [k for k in name.keys() if \"ö\" in name[k] or \"ü\" in name[k] or \"ä\" in name[k]]","repo_name":"KS-Limmattal/infos-and-snippets","sub_path":"solutions/uebung-1.4/main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"de","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"70692276906","text":"#!/usr/bin/env python3\n\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\n\nimport helper\n\n\ndef convert(path_in, path_out, mod, style='default'):\n    from_mat = cv2.imread(path_in)\n    width = float(from_mat.shape[1])\n    height = float(from_mat.shape[0])\n    new_width = 0\n    new_height = 0\n    if (width > height):\n        from_mat = cv2.resize(from_mat, (512, int(512 / width * height)), interpolation=cv2.INTER_AREA)\n        new_width = 512\n        new_height = int(512 / width * height)\n    else:\n        from_mat = cv2.resize(from_mat, (int(512 / height * width), 512), interpolation=cv2.INTER_AREA)\n        new_width = int(512 / height * width)\n        new_height = 512\n    from_mat = from_mat.transpose((2, 0, 1))\n    light_map = np.zeros(from_mat.shape, dtype=np.float64)\n    for channel in range(3):\n        light_map[channel] = helper.get_light_map_single(from_mat[channel])\n    light_map = helper.normalize_pic(light_map)\n    light_map = helper.resize_img_512_3d(light_map)\n\n    line_mat = mod.predict(light_map, batch_size=1)\n    line_mat = line_mat.transpose((3, 1, 2, 0))[0]\n    line_mat = line_mat[0:int(new_height), 0:int(new_width), :]\n    line_mat = np.amax(line_mat, 2)\n    line_mat = cv2.resize(line_mat, (int(width), int(height)), interpolation=cv2.INTER_AREA)\n\n    save_func = {\n        'default': helper.show_active_img_and_save_denoise,\n        'pured': helper.show_active_img_and_save_denoise_filter,\n        'enhanced': helper.show_active_img_and_save_denoise_filter2,\n    }[style]\n    save_func('_', line_mat, path_out)\n\n\ndef main(_):\n    FLAGS = flags.FLAGS\n\n    in_image = FLAGS.in_image\n    out_image = FLAGS.out_image\n    model_path = FLAGS.model_path\n\n    logging.info('Extract like art for images in %s, saving to %s' % (in_image, out_image))\n    mod = tf.keras.models.load_model(model_path)\n    os.makedirs(os.path.dirname(out_image), exist_ok=True)\n\n    convert(in_image, out_image, mod)\n\n\nif __name__ == \"__main__\":\n    flags.DEFINE_string('in_image', '', '')\n    flags.DEFINE_string('out_image', '', '')\n    flags.DEFINE_string('model_path', f'{os.path.dirname(__file__)}/mod.h5', '')\n    
app.run(main)","repo_name":"rois-codh/arc-ukiyoe-faces","sub_path":"sketchkerras/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"37"} +{"seq_id":"71337221226","text":"import os\n\nfrom django.shortcuts import render,HttpResponse,redirect\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom app import forms,models\nfrom blog import settings\nfrom django.views.decorators.csrf import csrf_exempt\nfrom bs4 import BeautifulSoup\nfrom django.core.paginator import Paginator\nfrom django.db.models import Count\n\n# Create your views here.\ndef test(request):\n return HttpResponse('test')\n\n\ndef index(request):\n list = models.Article.objects.filter(hot=True).all()\n link_list=models.Link.objects.all()\n\n current_page = request.GET.get('page')\n paginator = Paginator(list, 7)\n\n try:\n article_list = paginator.get_page(current_page)\n except:\n article_list = paginator.get_page(1)\n\n blog = models.Blog.objects.first()\n if not blog:\n blog = models.Blog.objects.create(title='呆马蓝的天空')\n user=models.User.objects.first()\n\n return render(request,'index.html',locals())\n\n\ndef login(request):\n\n if not request.user.is_authenticated:\n if request.method=='POST':\n username = request.POST.get('user')\n password = request.POST.get('pwd')\n\n user=auth.authenticate(username=username,password=password)\n if user:\n auth.login(request,user)\n return redirect('/manage/')\n\n return render(request, 'manage/login.html', locals())\n\n return redirect('/manage/')\n\n\ndef logout(request):\n auth.logout(request)\n return redirect('/login/')\n\n\n\n@login_required(login_url='/login/')\ndef manage(request):\n return render(request,'manage/index.html',locals())\n\n\n@login_required()\ndef userinfo(request):\n return render(request,'manage/userinfo.html',locals())\n\n@login_required()\ndef chpwd(request):\n\n if request.method=='GET':\n return render(request,'manage/chpwd.html',locals())\n else:\n ret = {}\n pwd=request.POST.get('pwd')\n newpwd=request.POST.get('newpwd')\n renewpwd=request.POST.get('renewpwd')\n\n user = auth.authenticate(username=request.user, password=pwd)\n if user:\n if newpwd==renewpwd:\n user.set_password(newpwd)\n user.save()\n ret['status']=0\n else:\n ret['error'] = {\n 'newpwd': '新密码输入2次不一样'\n }\n else:\n ret['status']=1\n ret['error']={\n 'pwd':'密码错误'\n }\n\n return JsonResponse(ret)\n\n#######################################友情链接###################################\n@login_required()\ndef link(request):\n list = models.Link.objects.all()\n return render(request, 'manage/link.html', locals())\n\n@login_required()\n@csrf_exempt\ndef add_link(request):\n title=request.POST.get('title')\n url=request.POST.get('url')\n models.Link.objects.create(title=title,url=url)\n ret={}\n ret['status']=0\n return JsonResponse(ret)\n\n@login_required()\n@csrf_exempt\ndef edit_link(request,id):\n ret={}\n\n title = request.POST.get('title')\n url = request.POST.get('url')\n models.Link.objects.filter(pk=id).update(title=title,url=url)\n ret['status']=0\n\n return JsonResponse(ret)\n\n@login_required()\n@csrf_exempt\ndef del_link(request,id):\n ret={}\n models.Link.objects.filter(pk=id).delete()\n ret['status']=0\n return JsonResponse(ret)\n\n\n@login_required()\ndef user(request):\n ret={}\n\n if request.is_ajax():\n userForm = forms.UserForm(request.POST)\n # username=request.POST.get('username')\n # 
telphone=request.POST.get('telphone')\n # email=request.POST.get('email')\n # avatar=request.FILES.get('avatar')\n\n if userForm.is_valid():\n\n username=userForm.cleaned_data.get('username')\n\n # extra = {}\n # if avatar_obj:\n # extra['avatar'] = avatar_obj\n #models.User.objects.filter(username=username).update(**userForm.cleaned_data,**extra)\n ##更新数据\n models.User.objects.filter(username=username).update(**userForm.cleaned_data)\n avatar_obj = request.FILES.get('avatar')\n if avatar_obj:\n user_obj = models.User.objects.filter(username=username).first()\n user_obj.avatar=avatar_obj\n user_obj.save()\n\n ret['status'] = 0\n else:\n ##错误信息\n errors=userForm.errors.get_json_data()\n error={}\n for key, message_dicts in errors.items():\n messages = []\n for message in message_dicts:\n messages.append(message['message'])\n error[key] = messages\n\n ret['status'] = 1\n ret['error'] = error\n\n return JsonResponse(ret)\n\n@login_required()\ndef blog(request):\n blog = models.Blog.objects.first()\n if request.method=='GET':\n\n if not blog:\n blog = models.Blog.objects.create(title='呆马蓝的天空')\n return render(request, 'manage/blog.html', locals())\n else:\n ret={}\n title=request.POST.get('title')\n site_name=request.POST.get('site_name')\n bgdoor_name=request.POST.get('bgdoor_name')\n\n blog.title=title\n blog.site_name=site_name\n blog.bgdoor_name=bgdoor_name\n\n pic1= request.FILES.get('pic1')\n pic2= request.FILES.get('pic2')\n pic3= request.FILES.get('pic3')\n pic4= request.FILES.get('pic4')\n if pic1:\n blog.pic1=pic1\n if pic2:\n blog.pic2 = pic2\n if pic3:\n blog.pic3 = pic3\n if pic4:\n blog.pic4 = pic4\n blog.save()\n ret['status'] = 0\n return JsonResponse(ret)\n\n@login_required()\ndef list_contact(request):\n list=models.Contact.objects.all()\n return render(request, 'manage/contact.html', locals())\n\n@login_required()\n@csrf_exempt\ndef del_contact(request,id):\n ret={}\n models.Contact.objects.filter(pk=id).delete()\n ret['status']=0\n return JsonResponse(ret)\n\n\n\n@login_required()\ndef list_article(request):\n\n tag_list = models.Tag.objects.all()\n category_list = models.Category.objects.all()\n article_list=models.Article.objects.all()\n\n return render(request,'manage/article.html',locals())\n\n\n@login_required()\ndef change_hot(request,id):\n ret={}\n article=models.Article.objects.filter(pk=id).first()\n if article.hot:\n article.hot=False\n else:\n article.hot = True\n article.save()\n ret['status']=0\n return JsonResponse(ret)\n\n\n@login_required()\ndef ajaxGetArticle(request,id):\n ret={}\n obj_article=models.Article.objects.filter(pk=id).first()\n tags=obj_article.tags.all()\n taglist=[]\n for tag in tags:\n taglist.append(tag.pk)\n\n data={\n 'id':obj_article.pk,\n 'title':obj_article.title,\n 'content':obj_article.content,\n 'category':obj_article.category_id,\n 'tags':taglist,\n }\n ret['status']=0\n ret['data']=data\n\n return JsonResponse(ret)\n\n\n\n@login_required()\n@csrf_exempt\ndef add_article(request):\n ret = {}\n\n title=request.POST.get('title')\n content=request.POST.get('content')\n category=request.POST.get('category')\n tags_list=request.POST.getlist('tag')\n\n soup=BeautifulSoup(content,'html.parser')\n #过虑\n for tag in soup.find_all():\n if tag.name=='script':\n tag.decompose()\n\n desc=soup.text[0:150]\n\n obj=models.Article.objects.create(title=title,content=str(soup),desc=desc,category_id=category,user=request.user)\n for tag in tags_list:\n obj.tags.add(tag)\n ret['status'] = 0\n\n return 
JsonResponse(ret)\n\n\n\n@login_required()\n@csrf_exempt\ndef edit_article(request,id):\n\n title = request.POST.get('title')\n content = request.POST.get('content')\n category = request.POST.get('category')\n tags_list = request.POST.getlist('tag[]')\n\n soup = BeautifulSoup(content, 'html.parser')\n # 过虑\n for tag in soup.find_all():\n if tag.name == 'script':\n tag.decompose()\n\n desc = soup.text[0:150]\n\n models.Article.objects.filter(pk=id).update(title=title,content=str(soup),desc=desc,category_id=category)\n obj=models.Article.objects.get(pk=id)\n\n obj.tags.clear()\n obj.tags.add(*tags_list)\n obj.save()\n\n ret = {}\n ret['status'] = 0\n\n return JsonResponse(ret)\n\n\n@login_required()\n@csrf_exempt\ndef del_article(request,id):\n ret={}\n models.Article.objects.filter(pk=id).delete()\n ret['status']=0\n return JsonResponse(ret)\n\n\n\n#############################【标签】#####################################\n@login_required()\ndef list_tag(request):\n list = models.Tag.objects.annotate(c=Count('article__title')).all()\n return render(request,'manage/tag.html',locals())\n\n@login_required()\n@csrf_exempt\ndef add_tag(request):\n ret={}\n form = forms.TagForm(request.POST)\n if form.is_valid():\n models.Tag.objects.create(**form.cleaned_data)\n ret['status']=0\n else:\n\n error=form.getError()\n\n ret['status'] = 1\n ret['error'] = error\n return JsonResponse(ret)\n\n@login_required()\n@csrf_exempt\ndef edit_tag(request,id):\n ret={}\n form = forms.TagForm(request.POST)\n if form.is_valid():\n models.Tag.objects.filter(nid=id).update(**form.cleaned_data)\n ret['status'] = 0\n else:\n\n error=form.getError()\n ret['status'] = 1\n ret['error'] = error\n\n return JsonResponse(ret)\n\n@login_required()\n@csrf_exempt\ndef del_tag(request,id):\n ret={}\n models.Tag.objects.filter(pk=id).delete()\n ret['status']=0\n return JsonResponse(ret)\n\n\n\n##############################【分类】############################################\n\n@login_required()\ndef list_category(request):\n list = models.Category.objects.annotate(c=Count('article__title')).all()\n return render(request,'manage/category.html',locals())\n\n@login_required()\n@csrf_exempt\ndef add_category(request):\n ret={}\n form = forms.CategoryForm(request.POST)\n if form.is_valid():\n models.Category.objects.create(**form.cleaned_data)\n ret['status']=0\n else:\n\n error=form.getError()\n\n ret['status'] = 1\n ret['error'] = error\n return JsonResponse(ret)\n\n@login_required()\n@csrf_exempt\ndef edit_category(request,id):\n ret={}\n form = forms.CategoryForm(request.POST)\n if form.is_valid():\n models.Category.objects.filter(nid=id).update(**form.cleaned_data)\n ret['status'] = 0\n else:\n\n error=form.getError()\n ret['status'] = 1\n ret['error'] = error\n\n return JsonResponse(ret)\n\n@login_required()\n@csrf_exempt\ndef del_category(request,id):\n ret={}\n models.Category.objects.filter(pk=id).delete()\n ret['status']=0\n return JsonResponse(ret)\n\n\ndef upload(request):\n\n\n img=request.FILES.get('upload_img')\n path=os.path.join(settings.MEDIA_ROOT,'img/article/',img.name)\n #TODO 没创建文件夹会出错\n with open(path,'wb') as f:\n for line in img:\n f.write(line)\n #print(request.FILES)\n #print('img:',img.name)\n result={\n 'error':0,\n 'url':'/media/img/article/%s'%img.name\n }\n\n return JsonResponse(result)\n\n\ndef about(request):\n return render(request,'about.html',locals())\n\ndef article(request):\n list = models.Article.objects.all()\n\n current_page=request.GET.get('page')\n paginator=Paginator(list,7)\n\n try:\n 
article_list=paginator.get_page(current_page)\n except:\n article_list=paginator.get_page(1)\n\n return render(request,'article.html',locals())\n\ndef articleinfo(request,id):\n article = models.Article.objects.filter(pk=id).first()\n return render(request,'articleinfo.html',locals())\n\ndef tag(request):\n list = models.Tag.objects.annotate(c=Count('article__title')).all()\n return render(request, 'tag.html', locals())\n\ndef taginfo(request,id):\n tag = models.Tag.objects.filter(pk=id).first()\n list =models.Article.objects.filter(tags=tag).all()\n\n current_page = request.GET.get('page')\n paginator = Paginator(list, 7)\n\n try:\n article_list = paginator.get_page(current_page)\n except:\n article_list = paginator.get_page(1)\n return render(request,'taginfo.html',locals())\n\ndef category(request):\n list = models.Category.objects.annotate(c=Count('article__title')).all()\n return render(request, 'category.html', locals())\n\ndef categoryinfo(request,id):\n category = models.Category.objects.filter(pk=id).first()\n list = models.Article.objects.filter(category=category).all()\n\n current_page = request.GET.get('page')\n paginator = Paginator(list, 7)\n\n try:\n article_list = paginator.get_page(current_page)\n except:\n article_list = paginator.get_page(1)\n return render(request,'categoryinfo.html',locals())\n\n\n\n###########################################【留言】#######################################\n\ndef contact(request):\n list=models.Contact.objects.all()\n return render(request,'contact.html',locals())\n\ndef msg(request):\n ret = {}\n name=request.POST.get('name')\n content=request.POST.get('message')\n soup = BeautifulSoup(content, 'html.parser')\n # 过虑\n for tag in soup.find_all():\n if tag.name == 'script':\n tag.decompose()\n models.Contact.objects.create(name=name,content=str(soup))\n\n ret['status']=0\n return JsonResponse(ret)\n\ndef remsg(request):\n ret = {}\n name=request.POST.get('name')\n content=request.POST.get('message')\n id=request.POST.get('replyid')\n soup = BeautifulSoup(content, 'html.parser')\n # 过虑\n for tag in soup.find_all():\n if tag.name == 'script':\n tag.decompose()\n models.Contact.objects.create(name=name,content=str(soup),parent_comment_id=id)\n\n ret['status']=0\n return JsonResponse(ret)\n\ndef remsgTree(request):\n\n comment_list=list(models.Contact.objects.order_by(\"pk\").values(\"pk\",\"name\",\"content\",\"parent_comment_id\"))\n\n return JsonResponse(comment_list,safe=False)\n\n\n##################################错误页面处理#######################################\n\ndef page_not_found(request, exception):\n return render(request,'404.html',locals())\n\ndef page_error(request):\n return render(request,'500.html',locals())","repo_name":"leemamas/dmblog","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33027333930","text":"from Player import Player\nimport random\nimport math\nimport copy\nimport threading\nfrom collections import OrderedDict, defaultdict\nfrom AI import *\n\n\nclass OnlineGame:\n\n def __init__(self, maxPlayers, complete_list):\n #If lobby fills up, game closes, so no one else can join midway\n self.game_closed = False\n\n self.complete_list = complete_list\n\n self.rounds = 0\n\n self.max_players = maxPlayers\n # Dict, key is the client's connection, value is the clients username\n self.players = {}\n #Dicts containing each player's conn as keys, and the value is the Player 
object, containing the player's Swarm, player's name, etc.\n self.player_swarms = OrderedDict()\n #Player conn as keys, value is True or False depending on whether or not the players are connected.\n self.connected = {}\n\n self.game_over = False\n\n self.key_lock = threading.Lock()\n\n #Dict, one key will be a string with the entire round summary, and the other key will be a dict of the stats of all Nanovor on the field after the round\n #That inner dict will have a key that is the name of each player, and the value being the string with their nanovor's updated stats after the round\n #Names as keys might not actually work, in the event that multiple users have the same name. Maybe have index positions as keys? How would I get indexes though\n #Or maybe have a key called \"Players\", which holds list of 2-tuples, first element player's name, second their nanovors updated stats\n self.round_summary = {}\n\n #Counts how many players sent in their decisions for the round. Must be equal to the number of players in game. Increments by 1, resets to 0 when all info is received.\n self.player_info_received = 0\n\n self.round_over = False\n\n def add_player(self, player):\n with self.key_lock:\n self.players[player[0]] = player[1]\n self.connected[player[0]] = True\n\n def remove_player(self, player):\n #with self.key_lock:\n if player in self.players.keys():\n del self.players[player]\n if player in self.player_swarms.keys():\n del self.player_swarms[player]\n if player in self.connected.keys():\n del self.connected[player]\n\n def get_players(self):\n with self.key_lock:\n #Returning only the players' sockets\n return self.players\n\n def game_size(self):\n with self.key_lock:\n return self.max_players\n\n def full(self):\n with self.key_lock:\n if len(self.players) == self.max_players or self.game_closed:\n return True\n\n def set_swarm(self, player, swarm):\n with self.key_lock:\n swarm_tracker = []\n name_list = [nano.get_name() for nano in self.complete_list]\n for nano in swarm:\n i = name_list.index(nano)\n swarm_tracker.append(copy.deepcopy(self.complete_list[i]))\n self.player_swarms[player] = Player(self.players[player],swarm_tracker)\n\n #TODO Here we handle the case that the player chose to play single-player. If that's the case, assign a swarm to the AI.\n if self.max_players == 1:\n swarm_tracker = []\n chosen_swarm = pick_swarm()\n for vor in chosen_swarm:\n j = name_list.index(vor)\n swarm_tracker.append(copy.deepcopy(self.complete_list[j]))\n self.players[\"AI\"] = \"Spydran Agent\"\n self.player_swarms[\"AI\"] = Player(\"Spydran Agent\", swarm_tracker)\n self.connected[\"AI\"] = True\n\n def ready(self):\n #with self.key_lock:\n #Players are not added to the dict until after their swarm is set, if not all players have been added, game cannot start\n\n #TODO, creating another exception here for single-player matches\n if len(self.player_swarms) != self.max_players and self.max_players > 1:\n return False\n self.game_closed = True\n return True\n\n def gameOver(self,conn):\n if self.game_over:\n #TODO: added this to account for single-player\n if self.max_players == 1 and \"AI\" in self.players.keys():\n del self.players[\"AI\"]\n if len(self.player_swarms_copy) == 0:\n return f\"Game Over: DRAW!\\n\\nMatch Round Total: {self.rounds}\"\n else:\n winner = list(self.player_swarms_copy.keys())[0]\n result = {\"Results\":\"you Won! Congrats!\" if conn == winner else f\"you Lost! 
Better luck next time!\\n\\nWinner: {self.players_copy[winner]}\"}\n result[\"Results\"] += f\"\\n\\nMatch Round Total: {self.rounds}\"\n result.update({self.player_swarms_copy[winner].get_swarm().index(nanovor): nanovor.display_stats() for nanovor in self.player_swarms_copy[winner].get_swarm()})\n return result\n #If game isn't over, check if the client is even still in the game (they won't be if they aren't in the swarm dict, because they would've\n #been deleted during the round wrap up method execution)\n elif conn not in self.player_swarms.keys():\n del self.players[conn]\n return \"You were eliminated!\"\n #If not over, it'll return False (conditional branch didn't execute)\n return self.game_over\n\n #This function will return all data necessary for the players to make their roundly decisions. Data will be sent in form of strings,\n #Not class objects, because nothing will change while the players are deciding their next move (data changes in the server side every round,\n #and the updated data is sent back to each client at the end of each round).\n #What to return to each client: Their swarm, their current Nanovor, their override/EN, and the same for every other opponent\n def gameInformation(self, client):\n #Ensures only one client at a time can access this function, that way a data race doesn't occur and break the system\n with self.key_lock:\n #Accesses the player object to reduce the wording below\n player = self.player_swarms[client]\n\n #Player will not have a current nanovor if it is the first round or if their current Nanovor was splatted\n if player.get_current_nanovor() != '':\n active_nanovor = {\"Stats\": player.get_current_nanovor().display_stats() + (f\"\\nSwap Blocked for: {player.get_swap_block()} turn(s).\" if player.get_swap_block() else ''),\n \"ActiveIDX\":player.get_swarm().index(player.get_current_nanovor())}\n else:\n active_nanovor = {\"Stats\":'', \"ActiveIDX\":None}\n\n #Players swarm keys will be each Nanovors index position inside of the player's swarm list, and the values will be a string of that nanovor's current stats. 
Entire dict has info for entire swarm.\n #Key is always ordered; the client can just return the index of the nanovor that they chose to swap into, as their next active, etc\n #{Index:String, ...}\n player_swarm = {player.get_swarm().index(nanovor): nanovor.display_stats() for nanovor in player.get_swarm()}\n #Opponents is a list of Player objects\n opponents = [competitor for conn,competitor in self.player_swarms.items() if conn != client]\n #Dict containing the index of the opponent in the list of opponents as keys, then list with a string saying their name, and their active nano stats as elements\n #{Index: [String Username, String Active Nano Stats]}\n #Making the index key +1 because each individual client will be the 0 index, this will make iterating through the dict to make the interface much easier on the client side.\n opponent_active = {opponents.index(opponent) + 1: [opponent.get_name(), opponent.get_current_nanovor().display_stats() + (f\"\\nSwap Blocked for: {opponent.get_swap_block()} turn(s).\" if opponent.get_swap_block() else '') if opponent.get_current_nanovor() != '' else \"\\n\\n\\nUnknown\\n\\n\\n\"] for opponent in opponents}\n\n #Contains strings of every attack possessed by every Nanovor in the clients Swarm, so they can access this info on their side easier with back buttons\n #Key is the Nanovors index position inside of the player's swarm, value is another dict, where the key there is the attack name with EN cost and damage, and value the attack description\n #{Index:{String AttackName: String AttackDesc}}\n player_attacks = defaultdict(list)\n for nano in player.get_swarm():\n for attack in nano.get_attacks():\n player_attacks[player.get_swarm().index(nano)].append((f\"{attack.get_name()} {attack.get_cost()} EN{self.display_damage(nano, attack)}\",attack.get_attack_summary()))\n\n #OP nested dict comprehension with values as a dict comprehension\n #player_attacks = {player.get_swarm().index(nano): [f\"{attack.get_name()} {attack.get_cost()} EN{self.display_damage(nano,attack)}\", attack.get_attack_summary() for attack in nano.get_attacks()] for nano in player.get_swarm()}\n\n #{IDX:[Str AttackName..], stores index position of every nanovor in the player's swarm, and the names of the attacks that are pure overrides (empty if none).\n pure_overrides = {player.get_swarm().index(nano): [attack.get_name() for attack in nano.get_attacks() if attack.pure_override()] for nano in player.get_swarm()}\n #Dict of the EN and Overrides for each player, including the client. Client will be the first index, 0. All others will follow\n energy_override_info = {0: {\"EN\": player.get_energy(), \"Override\": self.decode_override(player.get_current_override()), \"Pure\": pure_overrides}}\n energy_override_info.update({opponents.index(opponent) + 1: {\"EN\": opponent.get_energy(), \"Override\": self.decode_override(opponent.get_current_override())} for opponent in opponents})\n\n #HP and name of every Nanovor in that opponent's swarm if the Nanovor has been revealed, else it is unknown. 
Player can always see their own swarm HP/names\n #The first element, 0, is actually the client requesting the information\n #The client can be grouped here into the same dictionary as the opponents rather than being added on the client side, because the client does not need to access\n #the HP summary and Name of the nanovor in their header beyond just having them displayed for viewing (they do however, access their own attacks and EN/Overrides)\n all_swarm_info = {0:[f\"{nano.get_name()}\\nHP: {nano.get_health()}/{nano.get_max_hp()}\" for nano in player.get_swarm()]}\n\n #Again, making the index +1 here so that the client on their side can be index 0 and this dict is just added on to the end of that\n #Sorted here should send all the unknown Nanovor (so, all the \"?\") to the front, so that in the client side when they hover over, all the ? will be left-most.\n #This is for better visibility when the information pops out. Ex. If a ? is between two revealed Nanovor, it might be hard to hover over it since it would not expand much.\n all_swarm_info.update({opponents.index(opponent) + 1: sorted([f\"{nano.get_name()}\\nHP: {nano.get_health()}/{nano.get_max_hp()}\" if nano.check_reveal() else \"?\" for nano in opponent.get_swarm()]) for opponent in opponents})\n\n #Dict with the keys being the index of the opponent in the opponent list and the value being a list of tuples with first element being the attack name and\n #the second element being the attack description for each attack their ACTIVE nanovor has. Plus 1 for keys for same reasons as above\n opponent_attack_info = defaultdict(list)\n for i,opponent in enumerate(opponents):\n if opponent.get_current_nanovor() != '':\n for attack in opponent.get_current_nanovor().get_attacks():\n if opponent.get_current_nanovor().check_reveal():\n opponent_attack_info[i + 1].append((f\"{attack.get_name()} {attack.get_cost()} EN{self.display_damage(opponent.get_current_nanovor(), attack)}\", attack.get_description()))\n else:\n opponent_attack_info[i + 1].append((\"?\", \"?\"))\n #If there is no active Nanovor (such as in the first round), give 3 artificial attacks to fill in the space.\n else:\n opponent_attack_info[i + 1].extend([(\"?\", \"?\"), (\"?\", \"?\"), (\"?\",\"?\")])\n\n return {\"Active Nanovor\":active_nanovor, \"Player Swarm\":player_swarm, \"Player Attacks\":player_attacks, \"Opponent Active\":opponent_active,\n \"Energy & Overrides\":energy_override_info, \"All Swarms\":all_swarm_info, \"Opponent Attacks\":opponent_attack_info}\n\n def decode_override(self, override):\n translation = []\n for type,info in override.items():\n if type == \"SPIKE\":\n if info == \"Spike\":\n translation.append(\"Omni Spike\")\n else:\n translation.append(f\"{info} Spike\")\n elif type in \"STR SPD ARM\":\n translation.append(f\"+{info} {type}\")\n elif type == \"DODGE\":\n translation.append(f\"Dodge: {info}% Chance\")\n elif \"EN\" in type:\n if \"HEX\" in type:\n translation.append(f\"+{info} EN (Hexites)\")\n elif \"MAG\" in type:\n translation.append(f\"+{info} EN (Magnamods)\")\n else:\n translation.append(f\"+{info} EN (All Nanovor)\")\n return \", \".join(translation)\n\n def display_damage(self, nanovor, attack):\n separator = \" | \"\n calculations = []\n STR_MULTIPLIER = nanovor.get_strength()/100\n\n if attack.get_damage():\n calculations.append(\"{} HP\".format(self.round(attack.get_damage()[0] * STR_MULTIPLIER)))\n if len(attack.get_damage()) > 1:\n calculations.append(\"-{} HP\".format(attack.get_damage()[1]))\n\n if 
attack.get_spike_combo():\n combos = attack.get_spike_combo().keys()\n if \"DMGSET\" in combos:\n calculations.append(\"{} HP*\".format(self.round(attack.get_spike_combo()[\"DMGSET\"] * STR_MULTIPLIER)))\n elif \"DMGDOUBLE\" in combos:\n calculations.append(\"{} HP*\".format(self.round(attack.get_damage()[0] * 2 * STR_MULTIPLIER)))\n elif \"PIERCE\" in combos:\n if type(attack.get_spike_combo()[\"PIERCE\"]) == dict:\n if \"PART\" in attack.get_spike_combo()[\"PIERCE\"]:\n calculations.append(\"{} HP*\".format(self.round(attack.get_damage()[0] * STR_MULTIPLIER) + self.round(attack.get_spike_combo()[\"PIERCE\"][\"PART\"] * STR_MULTIPLIER)))\n\n if attack.get_special_condition():\n conditions = attack.get_special_condition().keys()\n if \"DMG-CLASS\" in conditions:\n if \"Magnamod\" in attack.get_special_condition()[\"DMG-CLASS\"]:\n calculations.append(\"{} HP*\".format(self.round(attack.get_special_condition()[\"DMG-CLASS\"][\"Magnamod\"] * STR_MULTIPLIER)))\n elif \">STR\" in conditions:\n if \"120\" in attack.get_special_condition()[\">STR\"]:\n if \"DMGSET\" in attack.get_special_condition()[\">STR\"][\"120\"]:\n calculations.append(\"{} HP*\".format(self.round(attack.get_special_condition()[\">STR\"][\"120\"][\"DMGSET\"] * STR_MULTIPLIER)))\n elif \"CHANCE-DMG-50\" in conditions:\n if \"XTRANONPIERCE\" in attack.get_special_condition()[\"CHANCE-DMG-50\"]:\n calculations.append(\"{} HP*\".format(self.round(attack.get_special_condition()[\"CHANCE-DMG-50\"][\"XTRANONPIERCE\"] * STR_MULTIPLIER)))\n elif \"XTRAPIERCE\" in attack.get_special_condition()[\"CHANCE-DMG-50\"]:\n calculations.append(\"{} HP*\".format(self.round(attack.get_special_condition()[\"CHANCE-DMG-50\"][\"XTRAPIERCE\"] * STR_MULTIPLIER)))\n\n return f\"\\n{separator.join(calculations)}\" if calculations else ''\n\n def control_center(self, conn, decisions):\n #Prevents counter and other stuff from getting wonky if users send info at the same time\n with self.key_lock:\n #The first person continued from previous round summary, and sent in new info, so it's now a new round. Reset to false\n self.round_over = False\n #decisions = {\"Active\":self.active, \"Next\":self.next, \"Attack\":self.attack, \"Target\":self.target}\n #where active is the index of the active nano, next is the index of the next nano, attack is a string of the attack name, target is index of target in opponent list\n #player is a Player class object\n player = self.player_swarms[conn]\n player.set_current_nanovor(player.get_swarm()[decisions[\"Active\"]])\n player.set_next_nanovor(player.get_swarm()[decisions[\"Next\"]])\n\n #If opponent PASSED, set their attack to empty string, start_round will handle passing. Else, find the attack object and set that as active.\n for attack in player.get_current_nanovor().get_attacks():\n if attack.get_name() == decisions[\"Attack\"]:\n player.set_selected_attack(attack)\n break\n else:\n player.set_selected_attack('')\n\n opponents = [competitor for sock,competitor in self.player_swarms.items() if sock != conn]\n # Subtract 1 because the curr_list contains the client at the beginning, whereas onlineBattle checks the opponents which excludes the client\n # So, opponents will always be 1 element shorter. 
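Recall that gameInformation keys opponents starting at 1 (the client itself is index 0), while the server-side opponents list here is 0-based.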
So we subtract 1 to adjust for that.\n # Also check to see if there are even any opponents left (in the event all but 1 players quit), and that the target is not None (Pass or pure override).\n # If not, set the target to an empty string (won't be checked anyways).\n player.set_targets(opponents[decisions[\"Target\"] - 1] if len(opponents) > 0 and decisions[\"Target\"] is not None else '')\n self.player_info_received += 1\n\n #TODO this section is in the case of single-player matches. Once the player sends in their decision, simply make the decisions for the AI as well.\n if self.max_players == 1:\n computer = self.player_swarms[\"AI\"]\n if computer.get_current_nanovor() == '' or computer.get_current_nanovor().get_health() <= 0:\n computer.remove_swap_block(computer.get_swap_block())\n computer.set_current_nanovor(computer.get_swarm()[0])\n\n computer.set_next_nanovor(computer.get_current_nanovor())\n computer.set_targets(player)\n matrix = fill_matrix(player, computer)\n move = [attack for attack in computer.get_current_nanovor().get_attacks()][decide(matrix)]\n computer.set_selected_attack(move)\n\n #TODO: added the OR exception so the round can get started in single-player. This is why I didn't want to make the AI here intiailly, so many small nuances.\n # Later on I'll have to optimize the code so that this isn't so annoying to deal with.\n if self.player_info_received == len(self.player_swarms) or self.max_players == 1:\n #Start the round, all info received, and reset the counter for next round. Also reset the summary from previous round\n self.round_summary = ''\n self.player_info_received = 0\n self.start_round()\n self.round_carnage_report()\n return\n\n def handle_quitters(self, conn=False):\n if conn and conn in self.connected.keys():\n self.connected[conn] = False\n self.control_center(conn, {\"Active\":0, \"Next\":0, \"Attack\":\"PASS\", \"Target\":1})\n else:\n for conn,on in self.connected.copy().items():\n if not on:\n self.round_summary[\"Round Summary\"] += f\"\\n{self.players[conn]} quit the game!\"\n self.remove_player(conn)\n\n def get_round_summary(self):\n if self.round_over:\n return self.round_summary\n return \"Waiting\"\n\n # BRING IN ALL THE FUNCTIONS THAT DO THE BEHIND-THE-SCENES WORK (LIKELY POST THEM AT THE VERY BOTTOM?)\n def start_round(self):\n # NOTE: THIS FUNCTION IS A COMBINATION OF START_ROUND AND SIMULATE_MOVE FROM THE COMBATRULES\n\n # Keep an eye on this, trying to figure out the issue of override buffs stacking multiple times\n # If the active nanovor has been buffed already, remove the buff so that it can be reapplied before the speed raffle and\n # Attack, that way buffs don't stack and a nanovor that hasn't received the buff won't get its stats depleted\n for homie in self.player_swarms.values():\n self.remove_override_buffs(homie)\n\n # Removed buffs from previous turn, Applying all buffs to all current nanovor before we even get to attack\n for plyr in self.player_swarms.values():\n self.apply_override_buffs(plyr)\n\n # After all players select their attacks, we determine who gets to go first, and if they can even attack, based on speed\n # and how much energy each player has\n pecking_order = self.determine_order(list(self.player_swarms.values()))\n\n # Round summary title, before the speed ranking is added\n self.round_summary += (\"***** ROUND {} CARNAGE REPORT *****\\n\\n\".format(self.rounds + 1))\n self.round_summary += \"ORDER OF ATTACK\\n\"\n\n for i,homie in enumerate(pecking_order):\n # Set all the currently active nanovor to 
revealed, opponents can now see their stats\n homie.get_current_nanovor().reveal()\n\n # Instead of printing to the console, add to the self string variable to paste a text box message at the end of the round\n self.round_summary += (\n \"({}) {}\\'s {} ==> Speed: {} \\n\".format(i+1,homie.get_name(), homie.get_current_nanovor().get_name(),\n homie.get_current_nanovor().get_speed()))\n self.round_summary += \"\\nMOVES MADE\\n\"\n\n for player in pecking_order:\n # Check to see if player chose to attack or pass\n if player.get_selected_attack() != '':\n # Check if the player's current nanovor is even alive for the attack\n if player.get_current_nanovor().get_health() <= 0:\n continue\n # Check if nanovor is stunned before attacking\n elif player.get_current_nanovor().check_stun_length() > 0:\n\n self.round_summary += (\"{}\\'s {} is Stunned!\\n\".format(player.get_name(), player.get_current_nanovor().get_name()))\n\n player.get_current_nanovor().change_length_stun(-1)\n\n self.round_summary += (\"Turns until stun wears off: {}\\n\".format(player.get_current_nanovor().check_stun_length()))\n # If player is missing energy for the attack, nanovor fizzled\n elif player.get_energy() < player.get_selected_attack().get_cost():\n self.round_summary += (\"{}\\'s {} Fizzled!\\n\".format(player.get_name(), player.get_current_nanovor().get_name()))\n # If the player's nanovor is alive, isn't stunned, and player has enough energy for the attack, execute it.\n else:\n self.play(player, player.get_targets())\n # Remove energy for a successful attack\n player.remove_energy(player.get_selected_attack().get_cost())\n # If their selected attack is empty, they chose to pass!\n else:\n if player.get_current_nanovor().get_health() > 0:\n self.round_summary += (\"{}\\'s {} passed.\\n\".format(player.get_name(), player.get_current_nanovor().get_name()))\n player.get_current_nanovor().change_length_stun(-1)\n\n # This function called for each players attack. Apply buffs to those involved, then remove them. When its the next player's turn,\n # those buffs, if active, will be applied again. This is to prevent buffs adding additional stats every round they are active.\n def play(self, attacker, defenders):\n # Call it once in the beginning to save us the heartache of calling it multiple times over and also to save a lot of space.\n attack = attacker.get_selected_attack()\n # If the attack is simply an override, then Dodge, target being splatted, and anything else does not matter! Set the override and skip the rest.\n if attack.pure_override():\n self.round_summary += f\"{attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()} used {attack.get_name()}! ({self.decode_override(attack.get_override())} Override)\\n\"\n self.remove_override_buffs(attacker)\n attacker.set_override(attack.get_override())\n return\n\n # Check to see if the defender is already dead (in a 3+ player match, someone might've beat you to the punch)\n # If you were beaten to the punch, you are returned the energy that you would have spent\n # I returned the energy here because in the other function call in start_round, the function removes energy\n # regardless of whether or not the attack was successful. By adding it here, it offsets that reduction.\n if defenders.get_current_nanovor().get_health() <= 0:\n self.round_summary += (\"{}\\'s {} fizzled! 
It\\'s target was already splatted!\\n\".format(attacker.get_name(),\n attacker.get_current_nanovor().get_name()))\n attacker.add_energy(attack.get_cost())\n return\n\n #Replace placeholder with the actual damage dealt later on.\n self.round_summary += (\n \"{}\\'s {} used {} on {}\\'s {}! (DMGPLACEHOLDER)\\n\".format(attacker.get_name(), attacker.get_current_nanovor().get_name(),\n attack.get_name(), defenders.get_name(),\n defenders.get_current_nanovor().get_name()))\n\n # Check to see if the opponent has a dodge override active. If so, run it to see if the nanovor dodged.\n # If the nanovor successfully dodged, the opponent still loses the energy and override for the attack, but\n # The function will exit before any hacks or damage or spike combos are put in play against the defender\n if self.determine_dodge(defenders):\n # No damage was dealt, so replace the placeholder with empty string.\n self.round_summary = self.round_summary.replace(\"(DMGPLACEHOLDER)\", '')\n self.round_summary += \"{}\\'s {} dodged the attack!\\n\".format(defenders.get_name(),defenders.get_current_nanovor().get_name())\n\n # Apply any overrides that come with the attack regardless of whether or not the defender dodged\n if attack.get_override():\n self.remove_override_buffs(attacker)\n attacker.set_override(attack.get_override())\n\n # effects to self still happen regardless if the attack landed on the defender\n self.apply_self_hacks(attacker)\n self.apply_self_spike_combos(attacker)\n\n if attack.get_consumes():\n if \"SPIKE\" in attacker.get_current_override().keys():\n if attacker.get_current_override()[\"SPIKE\"] in attack.get_description():\n attacker.remove_override()\n\n # Applying recoil damage, even if defender dodged.\n if attack.get_damage():\n if len(attack.get_damage()) > 1:\n attacker.get_current_nanovor().remove_health(attack.get_damage()[1])\n if attacker.get_current_nanovor().get_health() <= 0:\n\n self.round_summary += (\"{}\\'s {} splatted itself!\\n\".format(attacker.get_name(),\n attacker.get_current_nanovor().get_name()))\n\n attacker.remove_nanovor(attacker.get_current_nanovor())\n\n return\n\n attack_damage = 0\n recoil_damage = 0\n final_piercing = False\n extra_pierce_damage = 0\n STR_MULTIPLIER = attacker.get_current_nanovor().get_strength() / 100\n\n # Returns true if the attack does damage, returns False if nothing is returned. So, if this statement goes off, we know it does damage\n if attack.get_damage():\n attack_damage = attack.get_damage()[0]\n if len(attack.get_damage()) > 1:\n recoil_damage = attack.get_damage()[1]\n\n # Check special conditions, if any. 
If there are, apply them.\n special_conds = self.handle_special_conditions(attacker, defenders)\n if len(special_conds.keys()) > 0:\n for condition in special_conds.keys():\n if condition == \"DMGSET\":\n attack_damage = special_conds[condition]\n elif condition == \"PIERCE\":\n if special_conds[condition] == \"ALL\":\n final_piercing = True\n #STRMULT will be False if it is a key, so set the multiplier to 1 (will have no effect on damage output).\n elif condition == \"STRMULT\":\n STR_MULTIPLIER = 1\n\n set_up = self.apply_self_spike_combos(attacker)\n if len(set_up) > 0:\n for effect in set_up:\n # Checking for armor piercing effects\n if effect == \"ALL\":\n final_piercing = True\n elif effect == \"PART\":\n # Watch out for attacks that are armor piercing and have additional pierce damage\n # NOTE: additional pierce damage is BASE DAMAGE, so you apply STR\n extra_pierce_damage += attack.get_spike_combo()[\"PIERCE\"][\"PART\"]\n extra_pierce_damage = self.round(extra_pierce_damage * STR_MULTIPLIER)\n # Doubling damage if that is the effect\n elif effect == \"DMGDOUBLE\":\n attack_damage *= 2\n # DmgSET, an int element lets me know that the effect is setting the damage\n elif type(effect) == int:\n attack_damage = effect\n\n # Applying strength to the attack damage to get the total damage\n attack_damage = self.round(attack_damage * STR_MULTIPLIER)\n\n # Would have to apply armor piercing last. Damage would vary depending on the opponent's armor\n if attack.get_armorpiercing() or final_piercing:\n attack_damage += defenders.get_current_nanovor().get_armor()\n\n # Applying damage to the defender, while taking into account the armor of the defender, if any. Apply extra pierce damage afterwards, since it ignores armor\n defenders.get_current_nanovor().remove_health(attack_damage - defenders.get_current_nanovor().get_armor() + extra_pierce_damage)\n # Applying recoil damage\n attacker.get_current_nanovor().remove_health(recoil_damage)\n if recoil_damage > 0:\n self.round_summary += f\"{attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()} dealt {recoil_damage} damage to itself!\\n\"\n #Replacing placeholder with actual damage done.\n self.round_summary = self.round_summary.replace(\"(DMGPLACEHOLDER)\", f\" (-{attack_damage - defenders.get_current_nanovor().get_armor() + extra_pierce_damage} HP)\" if attack_damage > 0 else '')\n\n # Mostly applying battle kraken's hack zap\n self.handle_aoe_effects(attacker, list(self.player_swarms.values()))\n\n # Apply the hacks after dealing any damage\n self.apply_hacks(attacker, defenders)\n self.apply_self_hacks(attacker)\n\n # Apply effects from the combos onto enemy nanovor after dealing any damage\n self.apply_spike_combos(attacker, defenders)\n\n # Remove the override if it was consumes, after applying damage/effects/hacks\n if attack.get_consumes():\n if \"SPIKE\" in attacker.get_current_override().keys():\n if attacker.get_current_override()[\"SPIKE\"] in attack.get_description():\n attacker.remove_override()\n\n # Keep eye on this, may have to move it to after the attack damage is applied\n # This depends on if there are any attacks that set an override while ALSO dealing damage,\n # Without the need of a spike combo. 
This would only give issues if the current override is also a buff.\n if attack.get_override():\n # remove buffs so that the stats do not remain inflated if you place a different override.\n # apply override buffs so that newly placed overrides are taken into account when a player gets attacked on the same turn\n self.remove_override_buffs(attacker)\n attacker.set_override(attack.get_override())\n self.apply_override_buffs(attacker)\n\n # Checking if defender nanovor dies from the attack\n if defenders.get_current_nanovor().get_health() <= 0:\n self.round_summary += (\n \"{}\\'s {} splatted {}\\'s {}!\\n\".format(attacker.get_name(), attacker.get_current_nanovor().get_name(),\n defenders.get_name(),\n defenders.get_current_nanovor().get_name()))\n\n defenders.remove_nanovor(defenders.get_current_nanovor())\n\n # Checking if attacking nanovor dies from recoil\n if attacker.get_current_nanovor().get_health() <= 0:\n\n self.round_summary += (\n \"{}\\'s {} splatted itself!\\n\".format(attacker.get_name(), attacker.get_current_nanovor().get_name()))\n\n attacker.remove_nanovor(attacker.get_current_nanovor())\n\n\n '''\n \n \n MAIN ENGINE FUNCTIONS THAT SUPPORT THE START_ROUND & PLAY FUNCTIONS, THESE FUNCTIONS DO ALL THE BEHIND-THE-SCENES WORK!\n \n \n '''\n\n\n # Made my own adaptation of the round function to solve rounding issues\n def round(self, num):\n if type(num) == float:\n if int(\"{:.2f}\".format(num).split(\".\")[1]) >= 50:\n return math.ceil(num)\n else:\n return round(num)\n else:\n return num\n\n def apply_override_buffs(self, player):\n current = player.get_current_override()\n for nano in player.get_swarm():\n if not (nano.check_buffs()):\n if \"STR\" in current.keys():\n nano.add_strength(current[\"STR\"])\n nano.change_buffed_status(True)\n if \"ARM\" in current.keys():\n nano.add_armor(current[\"ARM\"])\n nano.change_buffed_status(True)\n if \"SPD\" in current.keys():\n nano.add_speed(current[\"SPD\"])\n nano.change_buffed_status(True)\n\n def remove_override_buffs(self, player):\n current = player.get_current_override()\n for nano in player.get_swarm():\n if nano.check_buffs():\n if \"STR\" in current.keys():\n nano.remove_strength(current[\"STR\"])\n if \"ARM\" in current.keys():\n nano.remove_armor(current[\"ARM\"])\n if \"SPD\" in current.keys():\n nano.remove_speed(current[\"SPD\"])\n nano.change_buffed_status(False)\n\n def apply_energy_override(self, player):\n current = player.get_current_override()\n if \"EN-ALL\" in current.keys():\n player.add_energy(current[\"EN-ALL\"])\n elif \"EN-MAG\" in current.keys():\n if player.get_current_nanovor().get_class() == \"Magnamod\":\n player.add_energy(current[\"EN-MAG\"])\n elif \"EN-VEL\" in current.keys():\n if player.get_current_nanovor().get_class() == \"Velocitron\":\n player.add_energy(current[\"EN-VEL\"])\n elif \"EN-HEX\" in current.keys():\n if player.get_current_nanovor().get_class() == \"Hexite\":\n player.add_energy(current[\"EN-HEX\"])\n\n def determine_dodge(self, player):\n current = player.get_current_override()\n if \"DODGE\" in current.keys():\n luck = random.randint(1, 100)\n if luck % (100 // current[\"DODGE\"]) == 0:\n return True\n return False\n\n def determine_order(self, player_list):\n order = []\n copy_list = player_list[:]\n\n # This loop removes the next player in line from the copy list until there is a single player left: the one who goes last.\n # As a result, total_speed decreases each time, because a player was removed.\n while len(order) != len(player_list):\n total_speed = 
sum([player.get_current_nanovor().get_speed() for player in copy_list])\n\n # Handles scenario where all active nanovor combined have 0 speed, randomly chooses which one goes next\n if total_speed == 0:\n next_in_line = random.randint(0, len(copy_list) - 1)\n order.append(copy_list[next_in_line])\n copy_list.pop(next_in_line)\n continue\n\n player_odds = {}\n odds_range = {}\n\n for player in copy_list:\n player_odds[player] = player.get_current_nanovor().get_speed() / total_speed * 100\n\n current = 1\n for player, odds in player_odds.items():\n\n # new way I'm testing, just simply using the odds themselves. Hesitated at first because rounding could result in total odds > 100\n odds_range[player] = [current, current + odds]\n current += odds\n\n # Note: Used to be 1 to total_speed. Changed to 100 bc it's standardized.\n # Idea: change it to current, to account for the 101's\n # determine = random.randint(1,current)\n determine = random.uniform(1, 100)\n\n for player, odds in odds_range.items():\n\n if odds[0] <= determine < odds[1]:\n order.append(player)\n copy_list.remove(player)\n\n return order\n\n # Apply hack effects to the defender(s)\n def apply_hacks(self, attacker, defenders):\n attack = attacker.get_selected_attack()\n\n # All Hacks: Stun, Swap Block, Obliterate\n if attack.get_hack():\n for hack in attack.get_hack().keys():\n if hack == \"SWAP\":\n defenders.add_swap_block(attack.get_hack()[hack])\n # Don't need to set defenders next Nanovor to the current one, command center simply doesn't swap, skips to attack selection.\n # Sets the defenders most recent hack to Swap\n defenders.set_recent_hack(\"SWAP\")\n self.round_summary += f\"{defenders.get_name()} is swap-blocked! Blocked for the next {defenders.get_swap_block()} turn(s)!\\n\"\n elif hack == \"OBLIT\":\n self.remove_override_buffs(defenders)\n defenders.remove_override()\n self.round_summary += f\"{defenders.get_name()}\\'s Override was erased!\\n\"\n elif hack == \"STUN\":\n defenders.get_current_nanovor().change_length_stun(attack.get_hack()[hack])\n defenders.set_recent_hack(\"STUN\")\n self.round_summary += f\"{defenders.get_name()}\\'s {defenders.get_current_nanovor().get_name()} got stunned!\\n\"\n elif hack == \"ENSAP\":\n defenders.remove_energy(attack.get_hack()[hack])\n self.round_summary += f\"{defenders.get_name()} lost {attack.get_hack()[hack]} EN!\\n\"\n elif hack == \"STR\":\n defenders.get_current_nanovor().remove_strength(attack.get_hack()[hack])\n self.round_summary += f\"-{attack.get_hack()[hack]} STR for {defenders.get_name()}\\'s {defenders.get_current_nanovor().get_name()}!\\n\"\n elif hack == \"SPD\":\n defenders.get_current_nanovor().remove_speed(attack.get_hack()[hack])\n self.round_summary += f\"-{attack.get_hack()[hack]} SPD for {defenders.get_name()}\\'s {defenders.get_current_nanovor().get_name()}!\\n\"\n elif hack == \"ARM\":\n defenders.get_current_nanovor().remove_armor(attack.get_hack()[hack])\n self.round_summary += f\"-{attack.get_hack()[hack]} ARM for {defenders.get_name()}\\'s {defenders.get_current_nanovor().get_name()}!\\n\"\n\n # Apply hack effects to the attacker, if any.\n def apply_self_hacks(self, attacker):\n attack = attacker.get_selected_attack()\n\n if attack.get_hack():\n for hack in attack.get_hack().keys():\n if hack == \"SELFSTUN\":\n attacker.get_current_nanovor().change_length_stun(attack.get_hack()[hack])\n attacker.set_recent_hack(\"STUN\")\n self.round_summary += f\"{attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()} stunned itself!\\n\"\n elif 
hack == \"DECSELFSTR\":\n attacker.get_current_nanovor().remove_strength(attack.get_hack()[hack])\n self.round_summary += f\"-{attack.get_hack()[hack]} STR for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n elif hack == \"DECSELFSPD\":\n attacker.get_current_nanovor().remove_speed(attack.get_hack()[hack])\n self.round_summary += f\"-{attack.get_hack()[hack]} SPD for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n elif hack == \"DECSELFARM\":\n attacker.get_current_nanovor().remove_armor(attack.get_hack()[hack])\n self.round_summary += f\"-{attack.get_hack()[hack]} ARM for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n elif hack == \"INCSELFSTR\":\n attacker.get_current_nanovor().add_strength(attack.get_hack()[hack])\n self.round_summary += f\"+{attack.get_hack()[hack]} STR for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n elif hack == \"INCSELFSPD\":\n attacker.get_current_nanovor().add_speed(attack.get_hack()[hack])\n self.round_summary += f\"+{attack.get_hack()[hack]} SPD for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n elif hack == \"INCSELFARM\":\n attacker.get_current_nanovor().add_armor(attack.get_hack()[hack])\n self.round_summary += f\"+{attack.get_hack()[hack]} ARM for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n\n # Apply Spike Combo effects to the defender(s)\n def apply_spike_combos(self, attacker, defenders):\n attack = attacker.get_selected_attack()\n\n # So many different spike combos, this is going to look terrible if done with nothing but if statements, but that may be the only way\n # So Far: DMGDOUBLE, DMGSET, PIERCE (either all or part), SETNEW\n if attack.get_spike_combo():\n if \"SPIKE\" in attacker.get_current_override().keys():\n if attacker.get_current_override()[\"SPIKE\"] in attack.get_description():\n for effect in attack.get_spike_combo().keys():\n if effect == \"SWAP\":\n # change this later to include all defenders (though currently only 1 AOE attack exists) Need for loop. Maybe add above\n defenders.add_swap_block(attack.get_spike_combo()[effect])\n # Don't need to set defenders next Nanovor to the current one, command center simply doesn't swap, skips to attack selection.\n # Set the defenders recent hack to Swap\n defenders.set_recent_hack(\"SWAP\")\n self.round_summary += \"{} is swap-blocked! 
Blocked for the next {} turn(s)!\\n\".format(\n defenders.get_name(), defenders.get_swap_block())\n\n elif effect == \"OBLIT\":\n self.remove_override_buffs(defenders)\n defenders.remove_override()\n self.round_summary += f\"{defenders.get_name()}\\'s Override was erased!\\n\"\n elif effect == \"STUN\":\n defenders.get_current_nanovor().change_length_stun(attack.get_spike_combo()[effect])\n defenders.set_recent_hack(\"STUN\")\n self.round_summary += f\"{defenders.get_name()}\\'s {defenders.get_current_nanovor().get_name()} got stunned!\\n\"\n elif effect == \"ENSAP\":\n defenders.remove_energy(attack.get_spike_combo()[effect])\n self.round_summary += f\"{defenders.get_name()} lost {attack.get_spike_combo()[effect]} EN!\\n\"\n elif effect == \"STR\":\n defenders.get_current_nanovor().remove_strength(attack.get_spike_combo()[effect])\n self.round_summary += f\"-{attack.get_spike_combo()[effect]} STR for {defenders.get_name()}\\'s {defenders.get_current_nanovor().get_name()}!\\n\"\n elif effect == \"SPD\":\n defenders.get_current_nanovor().remove_speed(attack.get_spike_combo()[effect])\n self.round_summary += f\"-{attack.get_spike_combo()[effect]} SPD for {defenders.get_name()}\\'s {defenders.get_current_nanovor().get_name()}!\\n\"\n elif effect == \"ARM\":\n defenders.get_current_nanovor().remove_armor(attack.get_spike_combo()[effect])\n self.round_summary += f\"-{attack.get_spike_combo()[effect]} ARM for {defenders.get_name()}\\'s {defenders.get_current_nanovor().get_name()}!\\n\"\n\n # Maybe should be in self_spike_combos, but the order of functions in play would make\n # it so that a STR increase or decrease is applied BEFORE the attack damage is calculated.\n # Leaving these here for now to prevent errors. If I rearrange the play function later,\n # Which I certainly will, I will move these where they should be.\n elif effect == \"DECSELFSTR\":\n attacker.get_current_nanovor().remove_strength(attack.get_spike_combo()[effect])\n self.round_summary += f\"-{attack.get_spike_combo()[effect]} STR for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n elif effect == \"DECSELFSPD\":\n attacker.get_current_nanovor().remove_speed(attack.get_spike_combo()[effect])\n self.round_summary += f\"-{attack.get_spike_combo()[effect]} SPD for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n elif effect == \"DECSELFARM\":\n attacker.get_current_nanovor().remove_armor(attack.get_spike_combo()[effect])\n self.round_summary += f\"-{attack.get_spike_combo()[effect]} ARM for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n elif effect == \"INCSELFSTR\":\n attacker.get_current_nanovor().add_strength(attack.get_spike_combo()[effect])\n self.round_summary += f\"+{attack.get_spike_combo()[effect]} STR for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n elif effect == \"INCSELFSPD\":\n attacker.get_current_nanovor().add_speed(attack.get_spike_combo()[effect])\n self.round_summary += f\"+{attack.get_spike_combo()[effect]} SPD for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n elif effect == \"INCSELFARM\":\n attacker.get_current_nanovor().add_armor(attack.get_spike_combo()[effect])\n self.round_summary += f\"+{attack.get_spike_combo()[effect]} ARM for {attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()}!\\n\"\n\n # Apply spike combo effects to the attacker, if any.\n def apply_self_spike_combos(self, attacker):\n\n attack = attacker.get_selected_attack()\n\n 
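# Collect the damage-modifying spike effects (PIERCE type, DMGDOUBLE, DMGSET value) that play() folds into its damage calculation.\n        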
deliverable = []\n\n
        if attack.get_spike_combo():\n            if \"SPIKE\" in attacker.get_current_override().keys():\n                if attacker.get_current_override()[\"SPIKE\"] in attack.get_description():\n                    for effect in attack.get_spike_combo().keys():\n                        if effect == \"SELFSTUN\":\n                            attacker.get_current_nanovor().change_length_stun(attack.get_spike_combo()[effect])\n                            attacker.set_recent_hack(\"STUN\")\n                            self.round_summary += f\"{attacker.get_name()}\\'s {attacker.get_current_nanovor().get_name()} stunned itself!\\n\"\n                        elif effect == \"SETNEW\":\n                            self.remove_override_buffs(attacker)\n                            attacker.remove_override()\n                            attacker.set_override(attack.get_spike_combo()[effect])\n                        elif effect == \"ENADD\":\n                            attacker.add_energy(attack.get_spike_combo()[effect])\n                            self.round_summary += f\"{attacker.get_name()} gained {attack.get_spike_combo()[effect]} EN!\\n\"\n                        elif effect == \"PIERCE\":\n                            if type(attack.get_spike_combo()[effect]) == dict:\n                                deliverable.append(\"PART\")\n                            else:\n                                # If it's not Partial Pierce, it's fully piercing\n                                deliverable.append(attack.get_spike_combo()[effect])\n                        elif effect == \"DMGDOUBLE\":\n                            deliverable.append(\"DMGDOUBLE\")\n                        elif effect == \"DMGSET\":\n                            deliverable.append(attack.get_spike_combo()[effect])\n\n
        return deliverable\n\n
    def handle_special_conditions(self, attacker, defenders):\n        attack = attacker.get_selected_attack()\n\n
        if attack.get_special_condition():\n            for condition in attack.get_special_condition().keys():\n                for details in attack.get_special_condition()[condition].keys():\n                    if condition == \"DMG-CLASS\":\n                        if details == defenders.get_current_nanovor().get_class():\n                            return {\"DMGSET\": attack.get_special_condition()[condition][details]}\n\n
                    elif condition == \"PIERCE-CLASS\":\n                        if details == defenders.get_current_nanovor().get_class():\n                            return {\"PIERCE\": attack.get_special_condition()[condition][details]}\n\n
                    # Experimental, this is for attacks that do damage based on enemy energy\n                    # The condition would look like this: {\"EN-DMG\":{\"PIERCE\":10}} with 10 being the multiplier per energy\n                    # Send in the STRMULT key to let the play function know NOT to apply the STR multiplier to this damage (it is straight damage).\n                    elif condition == \"EN-DMG\":\n                        if details == \"PIERCE\":\n                            return {\"DMGSET\":attack.get_special_condition()[condition][details] * defenders.get_energy(), \"PIERCE\":\"ALL\", \"STRMULT\":False}\n
                    # Experimental, if the defender's strength is above a certain threshold,\n                    # apply the effect. If it's pierce all, return it as a dict to play function.\n                    # Used only for Giga Siren's Incinerate Attack\n                    elif condition == \"STR\":\n                        if defenders.get_current_nanovor().get_strength() > int(details):\n                            if \"DMGSET\" in attack.get_special_condition()[condition][details].keys():\n                                return {\"DMGSET\": attack.get_special_condition()[condition][details][\"DMGSET\"]}\n\n
                    # Experimental, tackles the attacks that have a 50% chance of doing x amount of damage, or x amount more.\n                    # Looks like: {\"CHANCE-DMG-50\": {\"PIERCE\":100}} with 100 being the damage it can do.\n                    elif condition == \"CHANCE-DMG-50\":\n                        risk = random.randint(1, 100)\n                        self.round_summary = self.round_summary.replace(\"(DMGPLACEHOLDER)\", '')\n                        if risk > 50:\n                            self.round_summary += \"{} was successful! (DMGPLACEHOLDER)\\n\".format(attack.get_name())\n                            if details == \"PIERCE\":\n                                # If piercing, remove health w/out taking into account enemy armor. This is for fixed damage. 
(Ie, Plasma Lash 3.0 Solid Strike).\n return {\"DMGSET\": attack.get_special_condition()[condition][details], \"PIERCE\":\"ALL\", \"STRMULT\":False}\n elif details == \"XTRANONPIERCE\":\n # Thunderpoid 3.0, chance of doing double damage, or regular damage. Also used for Spike Hornet's Wisecrack\n return {\"DMGSET\": attack.get_special_condition()[condition][details]}\n elif details == \"XTRAPIERCE\":\n # Cyber Slicer 1.0, chance of doing 30 base damage, or 50 BASE damage that IGNORES armor\n return {\"DMGSET\": attack.get_special_condition()[condition][details], \"PIERCE\": \"ALL\"}\n '''\n elif details == \"NONPIERCE\":\n # If non-piercing, subtract opponent armor from damage output. This is for fixed damage, though there isn't an attack like this yet\n defenders.get_current_nanovor().remove_health(attack.get_special_condition()[condition][details] - defenders.get_current_nanovor().get_armor())\n '''\n else:\n self.round_summary += \"{} failed! (DMGPLACEHOLDER)\\n\".format(attack.get_name())\n return {}\n\n def handle_aoe_effects(self, attacker, player_list):\n # Keep an eye here, it is applying aoe effects to all players before attacking.\n # Does not include any damage or stat decrease which is handled later\n if attacker.get_selected_attack().get_aoe_effect():\n # If clearhacks isn't even in the aoe effect, the second part doesn't even go off because of python's and rule, where if the left statement is false,\n # the right side of the statement isn't even checked. Short-circuit eval.\n if \"CLEARHACKS\" in attacker.get_selected_attack().get_aoe_effect().keys() and (\n attacker.get_selected_attack().get_aoe_effect()[\"CLEARHACKS\"] == \"RECENT\"):\n for homie in player_list:\n for nanovor in homie.get_swarm():\n if nanovor != homie.get_current_nanovor():\n nanovor.change_length_stun(nanovor.check_stun_length() * -1)\n else:\n if homie.get_recent_hack() == \"SWAP\":\n homie.remove_swap_block(homie.get_swap_block())\n elif homie.get_recent_hack() == \"STUN\":\n homie.get_current_nanovor().change_length_stun(\n homie.get_current_nanovor().check_stun_length() * -1)\n self.round_summary += \"Most recent Stun or Swap condition removed from all Nanovor in all Swarms!\\n\"\n\n\n '''\n \n \n MAIN ENGINE FUNCTIONS END HERE\n \n \n '''\n\n\n # Displays a summary of everything that happened the previous round on the screen so users can keep track of the events.\n def round_carnage_report(self):\n\n self.round_summary += \"\\nSWAPS MADE\\n\"\n\n for plyr in list(self.player_swarms.values()):\n # Present the Nanovor that were switched in on the Carnage Report, if any switched in. Also note if someone tried to switch but was swap blocked\n if plyr.get_swap_block() == 0 and plyr.get_current_nanovor().get_health() > 0:\n if plyr.get_next_nanovor() != plyr.get_current_nanovor():\n self.round_summary += \"{} swapped out their {} & swapped in their {}!\\n\".format(plyr.get_name(),\n plyr.get_current_nanovor().get_name(),\n plyr.get_next_nanovor().get_name())\n else:\n if plyr.get_current_nanovor().get_health() > 0 and (\n plyr.get_next_nanovor() != plyr.get_current_nanovor()):\n self.round_summary += \"{} tried to swap out their {}, but was swap-blocked!\\n\".format(plyr.get_name(), plyr.get_current_nanovor().get_name())\n\n # Apply Buffs as soon as turn is over so the players can notice the immediate effects and not be confused as to why buffs weren't applied\n # These buffs are later removed before the round starts. 
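The buffed-status flag on each Nanovor is what lets apply_override_buffs skip anyone who already received the bonus.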
Then, they are added back again (prevents buffs from stacking when they shouldn't)\n
        # apply buffs is called twice in a row, BUT, the function checks to see if the nanos are already buffed, which also prevents multiple stacking\n        for plyr in list(self.player_swarms.values()):\n            self.apply_override_buffs(plyr)\n\n
        #Turn the entire string into a value in a dict, so that you can add the player's resulting active nanovor stats in another key\n        self.round_summary = {\"Round Summary\":self.round_summary, \"Players\":[]}\n\n
        for conn, plyr in self.player_swarms.items():\n            curr_vor = plyr.get_current_nanovor().display_stats()\n            if plyr.get_swap_block() > 0:\n                curr_vor += \"\\nSwap Blocked for: {} turn(s).\".format(plyr.get_swap_block())\n\n
            #Adds 2-tuple to list, 1st element being the username, 2nd the stats\n            self.round_summary[\"Players\"].append((f\"{self.players[conn]}\\'s Nanovor\", curr_vor))\n\n
        self.round_wrapup()\n\n\n
    def round_wrapup(self):\n        self.rounds += 1\n\n        players_alive = self.player_swarms.copy()\n\n
        for conn,player in players_alive.items():\n            # If a player is eliminated (i.e., no remaining nanovor)\n            if len(player.get_swarm()) == 0:\n                if player in self.player_swarms.values():\n                    del self.player_swarms[conn]\n                    self.round_summary[\"Round Summary\"] += f\"\\n{player.get_name()} was eliminated!\"\n\n
        #Remove players who left the game, the subsequent if statement then checks to see if that leaves the game with 1 player\n        #If so, game will be over. Otherwise, game will continue, just without that additional player. Think this also erases the\n        #need for an after-screen message telling the remaining player that their opponent quit (instead, it will be in the Carnage Report)\n        self.handle_quitters()\n\n
        if len(self.player_swarms) <= 1:\n            #Make a copy so that players can access the final screen at their own pace and game can be erased from games w/out worry\n            self.players_copy = self.players.copy()\n            self.player_swarms_copy = self.player_swarms.copy()\n\n
            #Set to True to deliver the final Carnage Report\n            self.round_over = True\n            self.game_over = True\n\n
        else:\n            # Before anyone gets to swap or choose their next nanovor, the energy and energy overrides must be applied.\n            for conn,player in players_alive.items():\n                player.add_energy(2)\n                self.apply_energy_override(player)\n\n
                # If the player isn't swap blocked and if their nanovor isn't dead (meaning they won't be redirected to a new nanovor selection), then swap their nanovor.\n                # If either of these is False, the following loop will handle both of those cases.\n                if player.get_swap_block() == 0 and player.get_current_nanovor().get_health() > 0:\n                    player.set_current_nanovor(player.get_next_nanovor())\n                    # Reveal the new active nanovor so the opponents can see its attacks and stats\n                    player.get_current_nanovor().reveal()\n\n
                # Remove a swap block turn from everyone AFTER checking to see if they had any blocks. 
If they didn't, the function is set so that negative blocks don't exist\n player.remove_swap_block(1)\n\n # If a player's nanovor died, allow them to choose a new one\n if player.get_current_nanovor().get_health() <= 0:\n\n # If the active nanovor died, remove any swap block hacks active on the player so that they do not carry over.\n player.remove_swap_block(player.get_swap_block())\n\n #If the active nanovor died, set the player's nanovor to an empty string, so that the client side knows\n #whether or not it needs to prompt the user to select a new active nanovor\n player.set_current_nanovor('')\n\n self.round_over = True\n","repo_name":"EJar74/SiliconianShowdown","sub_path":"SiliconShowdown/OnlineGame.py","file_name":"OnlineGame.py","file_ext":"py","file_size_in_byte":62705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70982731949","text":"import torch\nimport numpy as np\nimport pathlib\nimport attr\nfrom collections.abc import Iterable\nimport argparse\n\ndef load_tensor(file, device=None):\n t = torch.load(file)\n if device is not None:\n t = t.to(device)\n return t\n\n \ndef to_dict_with_sorted_values(d, key=None):\n return {k: sorted(v, key=key) for k, v in d.items()}\n\n\ndef to_dict_with_set_values(d):\n result = {}\n for k, v in d.items():\n hashable_v = []\n for v_elem in v:\n if isinstance(v_elem, list):\n hashable_v.append(tuple(v_elem))\n else:\n hashable_v.append(v_elem)\n result[k] = set(hashable_v)\n return result\n\ndef save_tensor(tensor, file):\n pathlib.Path(file).parent.mkdir(parents=True, exist_ok=True)\n torch.save(tensor, file)\n\n\ndef toJSON(obj):\n ''' \n Calls this instance in case of serialization failure.\n Assumes the object is attr \n '''\n if attr.has(obj):\n return attr.asdict(obj)\n elif isinstance(obj, np.int64):\n return int(obj)\n else:\n raise NotImplementedError(\n \"serialization obj not attr but {}\".format(type(obj)))\n\n\ndef tuplify(dictionary):\n if dictionary is None:\n return tuple()\n assert isinstance(dictionary, dict)\n def value(x): return dictionary[x]\n return tuple(key for key in sorted(dictionary, key=value))\n\n\ndef dictify(iterable):\n assert isinstance(iterable, Iterable)\n return {v: i for i, v in enumerate(iterable)}\n\ndef dash_separated_ints(value):\n vals = value.split(\"-\")\n for val in vals:\n try:\n int(val)\n except ValueError:\n raise argparse.ArgumentTypeError(\n \"%s is not a valid dash separated list of ints\" % value\n )\n\n return value\n\n\ndef dash_separated_floats(value):\n vals = value.split(\"-\")\n for val in vals:\n try:\n float(val)\n except ValueError:\n raise argparse.ArgumentTypeError(\n \"%s is not a valid dash separated list of floats\" % value\n )\n\n return value\n\n","repo_name":"vkkhare/RecoEdge","sub_path":"fedrec/utilities/serialization.py","file_name":"serialization.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39687087982","text":"import media\r\nimport fresh_tomatoes\r\n\r\n\r\n# define instances\r\nvenom = media.Movie(\r\n \"Venom\",\r\n \"https://upload.wikimedia.org/wikipedia/en/0/05/Venom_poster.jpg\",\r\n \"https://www.youtube.com/watch?reload=9&v=u9Mv98Gr5pY\"\r\n )\r\n\r\n \r\ncaptain_marvel = media.Movie(\r\n \"Captain Marvel\",\r\n \"https://upload.wikimedia.org/wikipedia/en/8/85/Captain_Marvel_poster.jpg\",\r\n \"https://www.youtube.com/watch?v=Z1BCujX3pw8\"\r\n )\r\n\r\n\r\naquaman = media.Movie(\r\n 
\"Aquaman\",\r\n \"https://upload.wikimedia.org/wikipedia/en/3/3a/Aquaman_poster.jpg\",\r\n \"https://www.youtube.com/watch?v=VMzYtgzYO7U\"\r\n )\r\n\r\n\r\nshazam = media.Movie(\r\n \"Shazam!\",\r\n \"https://upload.wikimedia.org/wikipedia/en/7/74/Shazam_film_poster.jpg\",\r\n \"https://www.youtube.com/watch?v=-oD7B7oiBtw\"\r\n )\r\n\r\n\r\nantman_and_the_wasp = media.Movie(\r\n \"Ant-Man and the Wasp\",\r\n \"https://upload.wikimedia.org/wikipedia/en/2/2c/Ant-Man_and_the_Wasp_poster.jpg\",\r\n \"https://www.youtube.com/watch?v=8_rTIAOohas\"\r\n )\r\n\r\n\r\navengers_infinity_war = media.Movie(\r\n \"Avengers infinity war\",\r\n \"https://upload.wikimedia.org/wikipedia/en/4/4d/Avengers_Infinity_War_poster.jpg\",\r\n \"https://www.youtube.com/watch?v=6ZfuNTqbHE8\"\r\n )\r\n\r\n\r\n# movies list to store instances\r\nmovies = [\r\n venom,\r\n captain_marvel,\r\n aquaman, shazam,\r\n antman_and_the_wasp,\r\n avengers_infinity_war\r\n ]\r\n\r\n\r\ndef main():\r\n # insert movies as parameter\r\n fresh_tomatoes.open_movies_page(movies)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"swatisuman0690/my-movie-trailer-website","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29323287323","text":"from typing import Literal\nfrom pyts.classification import TimeSeriesForest\nfrom sklearn.svm import SVC\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom abc import ABC, abstractmethod\nimport pandas as pd\n\n\nclass Strategy(ABC):\n @abstractmethod\n def execute(self, input_data: dict, stock: str):\n raise NotImplementedError\n\n\nclass EMACrossoverStrategy(Strategy):\n def __init__(self, data: dict):\n self.ema_crossed = False\n self.is_above = {}\n for key, val in data.items():\n self.is_above[key] = (\n 1\n if val[0][\"input_data\"][\"ema10\"]\n > val[0][\"input_data\"][\"ema50\"]\n else 0\n )\n\n def buy_condition(self, value, stock: str) -> Literal[\"str\", None]:\n value = value[\"input_data\"]\n if value[\"ema10\"] > value[\"ema50\"] and not self.is_above[stock]:\n self.ema_crossed = True\n self.is_above[stock] = 1\n return \"buy\"\n\n def sell_condition(self, value, stock: str) -> Literal[\"str\", None]:\n value = value[\"input_data\"]\n if value[\"ema10\"] < value[\"ema50\"] and self.is_above[stock]:\n self.ema_crossed = True\n self.is_above[stock] = 0\n return \"sell\"\n\n def execute(self, input_data: dict, stock: str) -> Literal[\"str\", None]:\n return self.buy_condition(input_data, stock) or self.sell_condition(\n input_data, stock\n )\n\n\nclass SVCStrategy(Strategy):\n def __init__(self, data: dict):\n self.models = {}\n for key, val in data.items():\n try:\n self.models[key] = SVC(random_state=123).fit(\n val.get(\"X_train\"), val.get(\"y_train\")\n )\n except ValueError:\n print(val.get(\"X_train\"))\n\n def execute(self, input_data: dict, stock: str) -> str:\n prediction = self.models[stock].predict([input_data.get(\"input_data\")])\n if bool(prediction):\n return \"buy\"\n return \"sell\"\n\n\nclass TSRFStrategy(Strategy):\n def __init__(self, data: dict):\n self.models = {}\n for key, val in data.items():\n self.models[key] = TimeSeriesForest(random_state=123).fit(\n val.get(\"X_train\"), val.get(\"y_train\")\n )\n\n def execute(self, input_data: dict, stock: str) -> str:\n prediction = self.models[stock].predict([input_data.get(\"input_data\")])\n if bool(prediction):\n return 
\"buy\"\n return \"sell\"\n\n\n\nclass ARIMAStrategy(Strategy):\n def __init__(self, data: dict):\n self.models = {}\n self.trend = {}\n self.data = {}\n for key, val in data.items():\n self.models[key] = ARIMA(list(val.get(\"prices\")), order=(0, 1, 2)).fit()\n self.data[key] = list(val.get(\"prices\"))\n\n def execute(self, input_data: dict, stock: str) -> str:\n output = self.models[stock].forecast()[0]\n self.data[stock].append(input_data.get(\"price\"))\n self.models[stock] = ARIMA(self.data[stock], order=(0, 1, 2)).fit()\n if output > input_data.get(\"price\"):\n return \"buy\"\n return \"sell\"\n","repo_name":"Magda-Rubaj/StockMarketAgents","sub_path":"src/strategies.py","file_name":"strategies.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16862838789","text":"import nltk\nimport os\nimport re\nimport sys\nimport pandas as pd\n\n\ndef helloworld():\n print('Hello World. Today is a good day to code.')\n'''Note: These are the cleaning modules that were created to clean the text of obvious errors or words that will likely \n not be material to our ultimate analysis.\n These functions constitute the underlying code for the subsequent text-cleaning pipeline program that is used \n in the ultimate code (see end of document)'''\n\ndef clean_text_4_classification_remove_backslashes(Text_file):\n '''The purpose of this function is to clean the text files of numerous instances of backslashes \n in order to prepare them for the regex expression search. \n Input = Single text file \n Output = Single text file cleaned \n '''\n \n # Convert text to lowercase\n Text_file_lower = Text_file.lower()\n \n # Split any values in the text on the backslash. The Text_split_slash should return a list. \n Text_split_slash = Text_file_lower.split('\\\\')\n \n # Return the list to a text. \n Text_rejoined = ' '.join(Text_split_slash)\n \n # Return a list of the cleaned text docs. \n return Text_rejoined\n\n\ndef clean_text_4_classification_remove_nABC(Text_file):\n '''The purpose of this function is to remove the 'n' that appears before words that begin with an upper case letter. \n Input = Single txt file\n Output = Clean list of tokens from original txt file\n '''\n # Define the regex expression that you want to search for. \n Regex_exp = re.compile('n[A-Z*]')\n \n # Create a list to capture the tokens once they are cleaned \n Text_tokenized_cleaned = []\n \n # Tokenize the given text\n Text_tokenized = nltk.word_tokenize(Text_file)\n \n # Run for loop over tokens for a given text. \n for token in Text_tokenized:\n\n # Search for the regex expression\n Regex_search = re.search(Regex_exp, token)\n \n # Test if there was match (None = no match)\n if Regex_search != None:\n \n # If there was a match, take all letters after the 'n'. \n token_cleaned = token[1:]\n \n Text_tokenized_cleaned.append(token_cleaned)\n \n # If the Regex_search returned None, return the token back to the Text_tokenized_cleaned list\n else:\n Text_tokenized_cleaned.append(token)\n \n # Return a list of clean tokens\n return Text_tokenized_cleaned\n\n\n\ndef create_dict_punct():\n '''The purpose of this function is to simply create a dictionary of punctuation symbols to use\n in other functions\n Input = None\n Output = Dict whose keys are the distinct punctuation marks. 
\n    '''\n    import string\n    Dict = {}\n    Punct = string.punctuation\n    for x in Punct:\n        Dict[x] = ''\n    return Dict \n\n
def strip_punctuation(Token_list):\n    '''The purpose of this function is to strip the punctuation from a list of tokens. \n    Input = List of tokens\n    Output = List of tokens absent punctuation. \n    '''\n
    # Import punctuation dictionary\n    Dict_punct = create_dict_punct()\n\n
    # Create a list to capture the cleaned tokens\n    Clean_token_list = [] \n    \n
    # Iterate over the tokens in the txt file\n    for x in Token_list:\n        if x not in Dict_punct:\n            # Append tokens to clean token list\n            Clean_token_list.append(x)\n    \n
    # Return a list of cleaned text\n    return Clean_token_list\n\n
def strip_two_letter_words(Token_list):\n    '''The purpose of this function is to remove any tokens of two letters or fewer from a list of tokens.\n    Input = List of tokens\n    Output = List of tokens absent words of two letters or fewer'''\n    \n
    List = [x for x in Token_list if len(x) > 2]\n    \n    return List\n\n
def create_dict_stopwords():\n    '''The purpose of this code is to create a dictionary of stop words. \n    Input = None\n    Output = Dictionary of stop words'''\n    \n
    from nltk.corpus import stopwords\n    Stopwords = stopwords.words('english') \n    Dict = {}\n    for x in Stopwords:\n        Dict[x] = ''\n    return Dict\n\n
def strip_stop_words(Token_list):\n    ''' The purpose of this code is to strip the stop words from a given text\n    Input = List of tokens \n    Output = List of tokens clean of stop words'''\n    \n
    stop_words = create_dict_stopwords()\n    List = []\n    for x in Token_list:\n        if x not in stop_words:\n            List.append(x)\n    return List\n\n
def create_Concatenated_text_file(Dir_list, New_file_name): \n    # Create new write file\n    New_File = open(str(New_file_name) + '.txt','w')\n    \n
    # Identify text files to retrieve\n    Text_files = (file for file in Dir_list if '.txt' in file) # attempt to use a generator. \n\n
    # Create Loop Through List of Directories\n    for x in Text_files:\n        File = open(x, 'rb')\n        Text = File.read()\n        File.close()\n        \n
        # Write files to new file\n        # NOTE: str() on bytes keeps the b'...' wrapper and literal \\n escapes in the output;\n        # the cleaning functions above exist to strip exactly those artifacts.\n        New_File.write(str(Text))\n        New_File.write('\\n')\n    # Close File\n    New_File.close()\n\n
def write_to_text_file(Text_2_write, File_name2_use):\n    file = open(str(File_name2_use) + '.txt', 'w') \n    file.write(Text_2_write) \n    file.close()\n\n\n
def create_Wordnet_set():\n    '''The purpose of this function is to create a set of all words from the wordnet dictionary.\n    Input = None\n    Output = Set object of all words. \n    '''\n
    # Import words from wordnet\n    from nltk.corpus import wordnet as wn\n    Words = wn.words()\n\n
    # Create List to capture words \n    List_dict_words = [x for x in Words]\n    \n
    # Create Set\n    Set_dict_words = set(List_dict_words)\n    \n    # Return Set\n    return Set_dict_words\n\n
def get_set_from_text(Dir_list):\n    '''The purpose of this code is to create a set of unique tokens from a text file as a string object. \n    Input = Text file as a string object \n    Output = Set of unique tokens. \n    '''\n
    # Define Set Object\n    Create_set = ''\n    \n
    # Obtain Your Target File\n    Target_file = (file for file in Dir_list if 'Cleaned' in file)\n    \n
    # Loop over Target_file since it is a generator object. \n    Concat_file = next(Target_file)\n    File = open(Concat_file)\n    Text = File.read()\n    # Tokenize Text\n    Text_tokenized = nltk.word_tokenize(Text)\n    # Create Set\n    Create_set = set(Text_tokenized)\n    # Return Set\n    return Create_set\n\n\n
def correct_tokens_nABC_using_wordnet_dict(Token_list):\n    '''The purpose of this code is to correct tokens that begin with a stray 'n' by checking whether\n    the remainder is a valid word in the WordNet dictionary.\n    Input = List of tokens\n    Output = List of corrected tokens'''\n    \n
    # Create a clean list of tokens to return to the user. 
\n    Token_list_cleaned = []\n\n
    # Build the WordNet set of valid words that candidate corrections are checked against\n    Wordnet_set = create_Wordnet_set()\n    \n
    # Convert tokens to lowercase\n    Token_list_lower = [x.lower() for x in Token_list]\n    \n
    # Loop over the list of tokens\n    for token in Token_list_lower:\n        # Find the tokens that start with an 'n'\n        if token[0] == 'n':\n            # See if the token is in the WordNet Dict when the 'n' is dropped\n            if token[1:] in Wordnet_set:\n                # If the token is in the dictionary, append the token without the 'n'\n                Token_list_cleaned.append(token[1:])\n            else:\n                # If not, then just append the token as there was no matching word. \n                Token_list_cleaned.append(token)\n        \n
        # If the token does not start with an 'n', then this code does not apply and append back to the list. \n        else:\n            Token_list_cleaned.append(token)\n    \n
    return Token_list_cleaned\n\n\n\n
def text_clearning_pipeline_Input_4_Error_Checker_Function(Text_file):\n    \n
    '''This pipeline will be placed inside a larger function that loops over the Target Directory, identifies the text files,\n    opens them, etc, and also captures the target file, tokenized text and statistics. We'll need to create these \n    variables within the master function. \n    ''' \n    \n
    '''The purpose of this function is to prepare text for use with the Error Checker Program\n    Input = Single text file\n    Output = Cleaned text rejoined as a single string. \n    '''\n
    # Run Cleaning Pipeline (these functions are defined above)\n    txt_strip_backslashes = clean_text_4_classification_remove_backslashes(Text_file)\n    txt_strip_nABC = clean_text_4_classification_remove_nABC(txt_strip_backslashes)\n    txt_strip_punct = strip_punctuation(txt_strip_nABC)\n    txt_strip_2_letter_words = strip_two_letter_words(txt_strip_punct)\n    txt_strip_stop_words = strip_stop_words(txt_strip_2_letter_words)\n    txt_correct_nABC_using_wordnet = correct_tokens_nABC_using_wordnet_dict(txt_strip_stop_words)\n    \n
    # Rejoin the tokens into a text so that we can write the text to a file. This way we don't need to run this \n    # code every time we want to work with the cleaned text. \n    Text_rejoined = ' '.join(txt_correct_nABC_using_wordnet)\n    \n
    # Return the cleaned text as a single string\n    return Text_rejoined\n    \n\n
def get_cleaned_concatenated_text_file(Dir_list):\n    '''\n    Input = List of files in the directory\n    Output = Cleaned text \n    \n    '''\n
    # Note, the author assumes there is only one Concat file in the dir. Since the order of the files in the dir\n    # can change, the better approach is to identify it using a list comprehension with an if statement. 
\n Dirty_text_loc = (file for file in Dir_list if 'Concatenated' in file)\n \n Concat_file = next(Dirty_text_loc) \n File = open(Concat_file)\n # Read in dirty text\n Text_dirty = File.read()\n # Run cleaning pipeline\n Clean_text = text_clearning_pipeline_Input_4_Error_Checker_Function(Text_dirty)\n \n # Return cleaned text\n return Clean_text","repo_name":"ccirelli2/Bros-Coding","sub_path":"Module_Part_I_Legal_Doc_Classification.py","file_name":"Module_Part_I_Legal_Doc_Classification.py","file_ext":"py","file_size_in_byte":9691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38692005872","text":"from django.db import models\n\n\n# null=true if no validation for input empty text\nclass Customer(models.Model):\n fname=models.CharField(max_length=50)\n lname=models.CharField(max_length=50)\n phone=models.CharField(max_length=50,default=\"\")\n email=models.EmailField()\n password=models.CharField(max_length=500)\n\n def register(self):\n self.save()\n\n\n def isExits(self):\n # Checking email and self email is current email in input text comparing with already email in db\n if Customer.objects.filter(email=self.email):\n return True\n else:\n return False\n\n def isExitsPhone(self):\n # Checking email and self email is current email in input text comparing with already email in db\n if Customer.objects.filter(phone=self.phone):\n return True\n else:\n return False\n\n @staticmethod\n def getCustomerEmail(email):\n try:\n # get is used to get single record\n return Customer.objects.get(email=email)\n except:\n return False\n\n\n","repo_name":"AkhileshNegi1710/ImageHub","sub_path":"store/models/customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5551641712","text":"import os\nfrom PIL import Image\nimport random\nfrom torchvision import transforms\nimport torch\nfrom torch.utils.data import Dataset\nimport random\nfrom torchvision import transforms\nfrom torchvision.transforms import functional as F\nimport numbers \nimport PIL.ImageOps as ImageOps\nimport PIL.ImageChops as ImageChops\nimport numpy as np \nimport time\n\n# My random crop for multiple images \nclass RandomCrop(object):\n\tdef __init__(self, size, padding=0):\n\t\tif isinstance(size, numbers.Number):\n\t\t\tself.size = (int(size), int(size))\n\t\telse:\n\t\t\tself.size = size\n\t\tself.padding = padding\n\n\t@staticmethod\n\tdef get_params(img, output_size):\n\t\tw, h = img.size\n\t\tth, tw = output_size\n\t\tif w == tw and h == th:\n\t\t\treturn 0, 0, h, w\n\n\t\ti = random.randint(0, h - th)\n\t\tj = random.randint(0, w - tw)\n\t\treturn i, j, th, tw\n\n\tdef __call__(self, img1,img2):\n\n\t\tif self.padding > 0:\n\t\t\timg1 = F.pad(img1, self.padding)\n\t\t\timg2 = F.pad(img2, self.padding)\n\n\t\tassert(img1.size == img2.size)\n\n\t\ti, j, h, w = self.get_params(img1, self.size)\n\n\t\treturn F.crop(img1, i, j, h, w), F.crop(img2, i, j, h, w)\n\n\tdef __repr__(self):\n\t\treturn self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)\n\nresize = transforms.Resize(300, interpolation=2)\nrandom_crop = RandomCrop(256)\nto_tensor = transforms.ToTensor()\ncenter_crop = transforms.CenterCrop(256)\nnormalize = transforms.Normalize([0.5,0.5,0.5],[0.5,0.5,0.5])\n\n#########################################\n# Masking the input image with segmentation mask\n#########################################\ndef 
mask_image(img_input,img_mask):\n\n\timg_mask = np.array(img_mask)\n\timg_mask = np.concatenate((np.expand_dims(img_mask,axis=2),np.expand_dims(img_mask,axis=2),np.expand_dims(img_mask,axis=2)),axis=2)\n\timg_mask = Image.fromarray(img_mask,mode='RGB')\n\n\treturn Image.composite(img_input,img_mask,img_mask.convert('1'))\n\n\n\nclass imageandlabel(Dataset):\n\n\tdef __init__ (self,root_dir,training):\n\t\tself.root_dir = root_dir\n\t\tself.training = training\n\t\tself.files = [fn for fn in os.listdir(root_dir) if (('target' in fn) and fn.endswith('.png'))]\n\n\tdef __len__(self):\n\t\treturn len(self.files)\n\n\tdef __getitem__(self,idx):\n\t\timgname = os.path.join(self.root_dir,self.files[idx])\n\t\timage_input = Image.open(imgname)\n\n\t\tif self.training == True:\n\t\t\tr = random.random()\n\n\t\t\t####################################################\n\t\t\t# I generate three kinds of masking procedures:\n\t\t\t# 1) Segmentation mask of the image - used to help the model learn\n\t\t\t# 2) Mask for hairs (occlusion) on the images\n\t\t\t# 3) Randomly generated mask \n\t\t\t####################################################\n\t\t\tif r < 0.3:\n\t\t\t\ttry:\n\t\t\t\t\timage_seg_target = Image.open(imgname.split('_target.png')[0] + '_segmentation.png')\n\t\t\t\t\timage_seg_target = image_seg_target.convert('1')\n\t\t\t\t\timage_seg_target = ImageChops.invert(image_seg_target)\n\t\t\t\texcept:\n\t\t\t\t\timage_seg_target = Image.open(imgname.split('_target.png')[0] + '_mask.png')\n\t\t\t\t\timage_seg_target = image_seg_target.convert('1')\n\t\t\telif r > 0.7:\n\t\t\t\timage_seg_target = torch.randn(1,32,32)\n\t\t\t\timage_seg_target = torch.nn.functional.dropout(image_seg_target,p=0.6,training=True)\n\t\t\t\timage_seg_target[image_seg_target != 0] = 1\n\t\t\t\tto_pil = transforms.ToPILImage()\n\t\t\t\timage_seg_target = to_pil(image_seg_target).convert('1')\n\t\t\telse:\n\t\t\t\timage_seg_target = Image.open(imgname.split('_target.png')[0] + '_mask.png')\n\t\t\t\timage_seg_target = image_seg_target.convert('1')\n\t\telse:\n\t\t\timage_seg_target = Image.open(imgname.split('_target.png')[0] + '_mask.png')\n\t\t\timage_seg_target = image_seg_target.convert('1')\n\t\t\t\n\t\timage_input = resize(image_input)\n\t\timage_seg_target = resize(image_seg_target)\n\n\t\t###################################\n\t\t# Data Augmentation Blocks - you can add more if you want to \n\t\t###################################\n\t\tif self.training == True:\n\t\t\tif random.random() < 0.5:\n\t\t\t\timage_input = image_input.transpose(Image.FLIP_TOP_BOTTOM)\n\t\t\t\timage_seg_target = image_seg_target.transpose(Image.FLIP_TOP_BOTTOM)\n\n\t\t\tif random.random() < 0.5:\n\t\t\t\timage_input = image_input.transpose(Image.FLIP_LEFT_RIGHT)\n\t\t\t\timage_seg_target = image_seg_target.transpose(Image.FLIP_LEFT_RIGHT)\n\n\t\t\timage_input, image_seg_target = random_crop(image_input,image_seg_target) \n\t\telse:\n\t\t\timage_input = center_crop(image_input)\n\t\t\timage_seg_target = center_crop(image_seg_target)\n\n\t\t#image_target = mask_image(image_input.copy(), image_seg_target.copy())\n\t\timage_seg_target = ImageChops.invert(image_seg_target)\n\t\timage_missed_input = mask_image(image_input, image_seg_target)\n\t\timage_missed_input = np.array(image_missed_input)\n\n\t\t##########################################\n\t\t# Change masked region to white. This helps the model learn better.\n\t\t#########################################
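\n\t\t# A vectorized alternative to the loop below (sketch, assuming an RGB uint8 numpy array):\n\t\t# image_missed_input[(image_missed_input == 0).all(axis=2)] = 255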
\n\t\tfor i in range(image_missed_input.shape[0]):\n\t\t\tfor j in range(image_missed_input.shape[1]):\n\t\t\t\tif (image_missed_input[i,j,:] == [0,0,0]).all():\n\t\t\t\t\timage_missed_input[i,j,:] = [255,255,255]\n\n\t\timage_missed_input = Image.fromarray(image_missed_input)\n\n\t\t##############################################\n\t\t# Converting to tensor and normalization\n\t\t###############################################\n\t\timage_seg_target = to_tensor(image_seg_target)\n\t\timage_missed_input = to_tensor(image_missed_input)\n\t\timage_missed_input = normalize(image_missed_input)\n\n\t\timage_input = to_tensor(image_input)\n\t\timage_input = normalize(image_input)\n\n\t\timage_seg_target = image_seg_target.expand_as(image_missed_input)\n\t\t\n\t\tsample = {'input': image_missed_input, 'mask':image_seg_target, 'target':image_input, 'name':self.files[idx]}\n\t\treturn sample\n\n\ndef main():\n\tm = imageandlabel('data/val/',1)\n\tdataloader = torch.utils.data.DataLoader(m, batch_size=1, num_workers=0, shuffle=False)\n\n\tfor i in dataloader:\n\t\tdata = i['mask']\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"devansh20la/Image_inpainting","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":5807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"33905688938","text":"from marvin.trainers.IL import ILTrainer\nfrom marvin.trainers.RL import RLTrainer\nfrom marvin.utils.trainer_parameters import parser\n\nif __name__ == \"__main__\":\n    # args are the input hyperparameters and details that the user sets\n    args = parser.parse_args()\n    if args.rl:\n        t = RLTrainer(args)\n    else:\n        t = ILTrainer(args)\n\n    t.train()","repo_name":"mw9385/Hierarchical_Coverage_Path_Planning","sub_path":"mtsp/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
{"seq_id":"402037619","text":"from selenium import webdriver\r\nfrom selenium.webdriver.support.select import Select\r\nimport time\r\n\r\n### If the driver is in the current path ###\r\ndriver = webdriver.Chrome()\r\ndriver.maximize_window()\r\n\r\n# Open a URL\r\ndriver.get('https://www.toolsqa.com/automation-practice-form/')\r\ntime.sleep(5)\r\n\r\ncontinents = Select(driver.find_element_by_id('continents'))\r\n\r\n# To select a value based on text\r\ncontinents.select_by_visible_text('Europe')\r\ntime.sleep(10)\r\n\r\n# To select a value based on value\r\n#continents.select_by_value('Europe')\r\n\r\n# To select a value based on index\r\ncontinents.select_by_index(3)\r\n\r\n#time.sleep(100)\r\n#driver.close()\r\n'''\r\n1.UnexpectedTagNameException::\r\n\r\nselenium.common.exceptions.UnexpectedTagNameException: Message: Select only work\r\ns on '\n             ''\n             '(Blank form = logout)'\n             '')\n    \n  USERID_REGEX = re.compile('[a-z0-9_@+.-]*$')\n  \n  @require_admin  \n  def post(self):\n    \"\"\"HTTP post method.\"\"\"\n    try:\n      userid = utils.get_verified_arg(self.USERID_REGEX, self.request, \n                                      'userid')\n    except utils.InvalidValue:\n      self.error(400)\n      self.response.out.write('invalid userid, must be ^%s' %\n                              self.USERID_REGEX.pattern)\n      return\n\n    self.response.headers.add_header('Set-Cookie',\n                                     'footprinttest=%s;path=/' % userid)\n    self.response.out.write('You are logged ')\n    if userid:\n      self.response.out.write('in!')\n    else:\n      self.response.out.write('out!')\n    
self.response.out.write('
Continue' % self.request.url)\n\n\nclass TestModerator(webapp.RequestHandler):\n \"\"\"test moderation functionality.\"\"\"\n def get(self):\n \"\"\"HTTP get method.\"\"\"\n user = userinfo.get_user(self.request)\n if not user:\n self.response.out.write('Not logged in.')\n return\n\n self.response.out.write('Moderator Request
    ')\n\n if user.get_user_info().moderator:\n self.response.out.write('
  • You are already a moderator.')\n\n if user.get_user_info().moderator_request_email:\n # TODO: This is very vulnerable to html injection.\n self.response.out.write('
  • We have received your request'\n '
  • Your email: %s'\n '
  • Your comments: %s' %\n (cgi.escape(user.get_user_info().moderator_request_email),\n cgi.escape(user.get_user_info().moderator_request_desc)))\n\n self.response.out.write('
')\n self.response.out.write(\n '
'\n 'Your email address:
'\n 'Why you want to be a moderator:
'\n '
'\n '
')\n\n def post(self):\n \"\"\"HTTP post method.\"\"\"\n # todo: xsrf protection\n user = userinfo.get_user(self.request)\n if not user:\n self.response.out.write('Not logged in.')\n return\n\n try:\n # This regex is a bit sloppy but good enough.\n email = utils.get_verified_arg(re.compile('[a-z0-9_+.-]+@[a-z0-9.-]+$'),\n self.request, 'email')\n desc = self.request.get('desc')\n except utils.InvalidValue:\n self.error(400)\n self.response.out.write('
' +\n 'Valid email address required.
')\n      return  \n\n    user_info = user.get_user_info()\n    user_info.moderator_request_email = self.request.get('email')\n    user_info.moderator_request_desc = self.request.get('desc')\n    if not user_info.moderator_request_admin_notes:\n      user_info.moderator_request_admin_notes = ''\n    user_info.moderator_request_admin_notes += (\n        '%s: Requested.\\n' %\n        datetime.datetime.isoformat(datetime.datetime.now()))\n    user_info.put()\n\n    return self.get()\n\n\nAPP = webapp.WSGIApplication([\n    ('/test/login', TestLogin),\n    ('/test/moderator', TestModerator),\n    ], debug=True)\n\ndef main():\n  \"\"\"main() for standalone execution.\"\"\"\n  run_wsgi_app(APP)\n\nif __name__ == '__main__':\n  main()\n","repo_name":"thegooglecodearchive/allforgood","sub_path":"frontend/testpages.py","file_name":"testpages.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"13217482758","text":"##\n##HOMEWORK # 1\n##\n## NAME  = CHARLES STEVENSON\n## DATE  = AUGUST 25, 2016\n## CLASS = Artificial Intelligence TR 8 - 9:20\n##\n##\n## Description:\n##  This is the first homework assignment\n##  where I read a file into a priority queue\n##  and then output to the console by sorting\n##  the integers using the priority queue\n##\n##\n\nfrom Queue import PriorityQueue\n\ndef readLines(f, q):\n    #Extract name\n    name = f.readline()\n    name = name[:-1]#get rid of the trailing newline character\n    for line in f:\n        num = int(line[:-1])\n        q.put(num)#put number into priority queue\n    return name\n\ndef main():\n    #get && open file\n    fileName = \"priority.txt\"\n    f = open(fileName, 'r+')\n    #create queue\n    q = PriorityQueue()\n    #Load Queue\n    name = readLines(f, q)\n    #Output Queue && Name\n    print(\"Hello \" + name + \"!\")\n    while not q.empty():\n        print(q.get())\n\n#execute\nmain()\n","repo_name":"Inkozi/School","sub_path":"AI/HelloWorld.py","file_name":"HelloWorld.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"35874416639","text":"def remains():\n    from datetime import date\n    seg = date.today() #today's date\n    segg = seg.year + 1 #compute the coming year\n    newyear = date(segg, 1, 1) # January 1 of the next year\n    ostalos = newyear - seg #compute the difference\n    return ostalos.days #return the difference as a number of whole days\n\nprint('There are', remains(), 'days left until the New Year')\n\n","repo_name":"MNaugolnov/hw","sub_path":"hw3-1.py","file_name":"hw3-1.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"31089509034","text":"from django.urls import path\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n    path('', views.home, name=\"home\"),\r\n    path('delete/', views.delete, name=\"delete\"),\r\n    path('cross_off/', views.cross_off, name=\"cross_off\"),\r\n    path('uncross/', views.uncross, name=\"uncross\"),\r\n    path('edit/', views.ListUpdateView.as_view(), name=\"edit\"),  \r\n]\r\n","repo_name":"Shreybanugariya/To-Do-List-Django","sub_path":"todo_list/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"5542716298","text":"#Imports All Variables and Functions From The Assets Module\nfrom assets import *\n#Imports String \nimport string\n\n\n#Score Count For If User Is Playing More Than One Game.\npscore = 0 #Player's Total Score\naiscore = 0 #AI's Total Score\n\n#Function That Prints Out A Scoreboard With The Current Scores Updated\ndef scoreboard():\n    return f\"\"\"WORDS LEFT IN WORDLIST: {wordcount()}\n    =======================\n    SCORES: \n    Player: {pscore}  \n    AI: {aiscore}   \n    =======================\n    \"\"\"\n\n#Calls/Executes \"scoreboard\" Function\nprint(scoreboard())\n\n#Function Used To Play The Game\ndef playgame():\n    #How Many Letters You Can Guess Incorrectly. The \"lives\" Variable Is Used To Display Hangman Picture Stages\n    lives = 6\n\n    #Assigns An Empty Variable Named \"word\" That Will Be Filled In Later\n    word = \"\"\n\n    #Used For When You Play Multiple Games. If Words Are Still In List Then You Can Play Again. If Not, You Can't Play Again\n    if len(wordlist) > 0:\n        word = getword() #Executes \"getword\" Function From assets.py, To Randomly Select A Word From \"wordlist\" For You To Solve \n        wordlist.remove(word) #Removes Selected Word From List, So You Won't Have Same Word Again If You Try Again\n    else:\n        #If No More Words Are In \"wordlist\" Variable Then You Can't Play Again and Are Prompted That You Are Out Of Words.\n        print(\"Out Of Words!!!\")\n        exit()\n    #This Is The Variable That Shows How Many Characters Are In Word and Is Filled In When Guessed Correctly\n    toguess = \"\" #For Example: The Word Is \"Mouse\", This Displays _ _ _ _ _ . You Guess \"u\" Then This Displays _ _ U _ _\n\n    #Sets Up Board If Words Have Hyphens(-), Apostrophes('), Commas(,), Periods(.) 
and/or Spaces\n for i in word:\n if i == \" \":\n toguess += \" \"\n elif i == \"-\":\n toguess += \"-\"\n elif i == \"'\":\n toguess += \"'\"\n elif i == \",\":\n toguess += \",\" \n elif i == \".\":\n toguess += \".\" \n else:\n toguess += \"_\"\n\n #Converts Board Word To A List For Comparisons\n toguess = list(toguess)\n\n print(\"Word:\",\" \".join(toguess)) #This Prints To Console, The Above Display(\"toguess\" Variable)\n\n #This Prints To Console Hangman Picture At It's Default State(No Body Parts or Losses Yet)\n print(hangman[6])\n\n #This Variable \"notguessed\" Makes A List With The Letters Of The Alphabet That Haven't Been Used Yet\n notguessed = [i for i in string.ascii_uppercase]\n\n # #This Breaks Word Into A List Of Characters In Word For Comparison Purposes\n # word = list(word)\n\n #This While Loop Does Most Of The Work Allowing You To Guess Letters Until You Are Correct or You Lose\n while lives > 0: #Loop Continues Untils Lives Are Used Up\n\n #Prompts User For Input To Guess A Letter\n letter = str(input(\"Guess A Letter: \")).upper()\n\n #If Letter Is Not In Alphabet List Then It's Been Used Already Or Is Invalid\n if letter not in notguessed:\n print(letter, \"has been used already\")\n\n #If Letter Is In Alphabet List Then It Hasn't Been Used Yet and Will Be Removed From List \n elif letter in notguessed:\n notguessed.remove(letter)\n\n #If Letter Is Not In Word, Then You Lose A Life\n if letter not in word:\n lives -= 1\n\n #For Loop That Compares If Letter Guessed Is In Word and If It Is, Then Assigns It To \"toguess\" Variable\n for i in range(len(word)):\n if word[i] == letter:\n toguess[i] = letter\n\n #Prints To Console The \"toguess\" Variable and Hangman Picture That Determines How Many Lives/Chances Are Left If Any\n print(f\"Word: {' '.join(toguess)}\\n{hangman[lives]}\")\n print(f\"Letters Left: {notguessed}\")\n\n\n #Compares If \"toguess\" Variable Is Equal To Word and Prints \"You Win!!!\" if Comparison Proves True As Well As Breaks Loop\n if \"\".join(word) == \"\".join(toguess):\n global pscore\n print(\"You Win!!!\")\n pscore += 1\n break\n\n #If You Run Out Of Lives/Chances and Get Full Hangman Body, Console Then Prints The Word As Well As You Lose\n if lives == 0:\n global aiscore\n print(\"Word Is:\", \"\".join(word))\n print(\"You Lose!!!\")\n aiscore += 1\n\n #Console Prompts User To See If They Want To Play Again. If \"Y\" Then \"playgame\" Function Is Called and You Play Again, Else You Don't\n print(scoreboard())\n retry = str(input(\"Play Again(Y/N): \")).upper()\n\n if retry == \"Y\":\n playgame()\n else:\n if pscore > aiscore:\n print(\"Player Wins The Game!!!\")\n elif aiscore > pscore:\n print(\"AI Wins The Game!!!\")\n else: \n print(\"It's A Tie!!!\")\n print(\"Have A Good Day!!!\")\n\n \nplaygame() #Calls/Executes \"playgame\" Function To Start Game\n\n \n","repo_name":"leisenhour2/Github-Portfolio","sub_path":"Python/Games/Hangman/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73952122986","text":"import unittest\n\nimport k2\nimport torch\n\n\nclass TestCtcTopo(unittest.TestCase):\n\n @staticmethod\n def visualize_ctc_topo():\n '''This function shows how to visualize\n standard/modified ctc topologies. 
It's for\n demonstration only, not for testing.\n '''\n max_token = 2\n labels_sym = k2.SymbolTable.from_str('''\n 0\n z 1\n o 2\n ''')\n aux_labels_sym = k2.SymbolTable.from_str('''\n z 1\n o 2\n ''')\n\n word_sym = k2.SymbolTable.from_str('''\n zoo 1\n ''')\n\n standard = k2.ctc_topo(max_token, modified=False)\n modified = k2.ctc_topo(max_token, modified=True)\n standard.labels_sym = labels_sym\n standard.aux_labels_sym = aux_labels_sym\n\n modified.labels_sym = labels_sym\n modified.aux_labels_sym = aux_labels_sym\n\n standard.draw('standard_topo.svg', title='standard CTC topo')\n modified.draw('modified_topo.svg', title='modified CTC topo')\n fsa = k2.linear_fst([1, 2, 2], [1, 0, 0])\n fsa.labels_sym = labels_sym\n fsa.aux_labels_sym = word_sym\n fsa.draw('transcript.svg', title='transcript')\n\n standard_graph = k2.compose(standard, fsa)\n modified_graph = k2.compose(modified, fsa)\n standard_graph.draw('standard_graph.svg', title='standard graph')\n modified_graph.draw('modified_graph.svg', title='modified graph')\n\n # z z o o o \n inputs = k2.linear_fsa([1, 1, 0, 0, 2, 2, 0, 2, 0])\n inputs.labels_sym = labels_sym\n inputs.draw('inputs.svg', title='inputs')\n standard_lattice = k2.intersect(standard_graph,\n inputs,\n treat_epsilons_specially=False)\n standard_lattice.draw('standard_lattice.svg', title='standard lattice')\n\n modified_lattice = k2.intersect(modified_graph,\n inputs,\n treat_epsilons_specially=False)\n modified_lattice = k2.connect(modified_lattice)\n modified_lattice.draw('modified_lattice.svg', title='modified lattice')\n\n # z z o o o \n inputs2 = k2.linear_fsa([1, 1, 0, 0, 2, 2, 2, 0])\n inputs2.labels_sym = labels_sym\n inputs2.draw('inputs2.svg', title='inputs2')\n standard_lattice2 = k2.intersect(standard_graph,\n inputs2,\n treat_epsilons_specially=False)\n standard_lattice2 = k2.connect(standard_lattice2)\n # It's empty since the topo requires that there must be a blank\n # between the two o's in zoo\n assert standard_lattice2.num_arcs == 0\n standard_lattice2.draw('standard_lattice2.svg',\n title='standard lattice2')\n\n modified_lattice2 = k2.intersect(modified_graph,\n inputs2,\n treat_epsilons_specially=False)\n modified_lattice2 = k2.connect(modified_lattice2)\n modified_lattice2.draw('modified_lattice2.svg',\n title='modified lattice2')\n\n def test_no_repeated(self):\n # standard ctc topo and modified ctc topo\n # should be equivalent if there are no\n # repeated neighboring symbols in the transcript\n max_token = 3\n standard = k2.ctc_topo(max_token, modified=False)\n modified = k2.ctc_topo(max_token, modified=True)\n transcript = k2.linear_fsa([1, 2, 3])\n standard_graph = k2.compose(standard, transcript)\n modified_graph = k2.compose(modified, transcript)\n\n input1 = k2.linear_fsa([1, 1, 1, 0, 0, 2, 2, 3, 3])\n input2 = k2.linear_fsa([1, 1, 0, 0, 2, 2, 0, 3, 3])\n inputs = [input1, input2]\n for i in inputs:\n lattice1 = k2.intersect(standard_graph,\n i,\n treat_epsilons_specially=False)\n lattice2 = k2.intersect(modified_graph,\n i,\n treat_epsilons_specially=False)\n lattice1 = k2.connect(lattice1)\n lattice2 = k2.connect(lattice2)\n\n aux_labels1 = lattice1.aux_labels[lattice1.aux_labels != 0]\n aux_labels2 = lattice2.aux_labels[lattice2.aux_labels != 0]\n aux_labels1 = aux_labels1[:-1] # remove -1\n aux_labels2 = aux_labels2[:-1]\n assert torch.all(torch.eq(aux_labels1, aux_labels2))\n assert torch.all(torch.eq(aux_labels2, torch.tensor([1, 2, 3])))\n\n def test_with_repeated(self):\n max_token = 2\n standard = k2.ctc_topo(max_token, 
modified=False)\n modified = k2.ctc_topo(max_token, modified=True)\n transcript = k2.linear_fsa([1, 2, 2])\n standard_graph = k2.compose(standard, transcript)\n modified_graph = k2.compose(modified, transcript)\n\n # There is a blank separating 2 in the input\n # so standard and modified ctc topo should be equivalent\n input = k2.linear_fsa([1, 1, 2, 2, 0, 2, 2, 0, 0])\n lattice1 = k2.intersect(standard_graph,\n input,\n treat_epsilons_specially=False)\n lattice2 = k2.intersect(modified_graph,\n input,\n treat_epsilons_specially=False)\n lattice1 = k2.connect(lattice1)\n lattice2 = k2.connect(lattice2)\n\n aux_labels1 = lattice1.aux_labels[lattice1.aux_labels != 0]\n aux_labels2 = lattice2.aux_labels[lattice2.aux_labels != 0]\n aux_labels1 = aux_labels1[:-1] # remove -1\n aux_labels2 = aux_labels2[:-1]\n assert torch.all(torch.eq(aux_labels1, aux_labels2))\n assert torch.all(torch.eq(aux_labels1, torch.tensor([1, 2, 2])))\n\n # There are no blanks separating 2 in the input.\n # The standard ctc topo requires that there must be a blank\n # separating 2, so lattice1 in the following is empty\n input = k2.linear_fsa([1, 1, 2, 2, 0, 0])\n lattice1 = k2.intersect(standard_graph,\n input,\n treat_epsilons_specially=False)\n lattice2 = k2.intersect(modified_graph,\n input,\n treat_epsilons_specially=False)\n lattice1 = k2.connect(lattice1)\n lattice2 = k2.connect(lattice2)\n assert lattice1.num_arcs == 0\n\n # Since there are two 2s in the input and there are also two 2s\n # in the transcript, the final output contains only one path.\n # If there were more than two 2s in the input, the output\n # would contain more than one path\n aux_labels2 = lattice2.aux_labels[lattice2.aux_labels != 0]\n aux_labels2 = aux_labels2[:-1]\n assert torch.all(torch.eq(aux_labels1, torch.tensor([1, 2, 2])))\n\n\n# TODO(fangjun): Add test for CUDA.\n\nif __name__ == '__main__':\n # TestCtcTopo.visualize_ctc_topo()\n unittest.main()\n","repo_name":"k2-fsa/k2","sub_path":"k2/python/tests/ctc_topo_test.py","file_name":"ctc_topo_test.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","stars":973,"dataset":"github-code","pt":"37"} +{"seq_id":"32435205909","text":"import json\nimport paho.mqtt.client as mqtt\nimport requests\n\n# Toggle for print statements\nenable_print_statements = True\n\n# Define the UPRN parameter\nuprn = \"100081226975\"\n\n# Define the JSON payloads\njson_payloads = [\n {\n \"jsonrpc\": \"2.0\",\n \"id\": \"1\",\n \"method\": \"goss.echo.westberks.forms.getNextRubbishCollectionDate\",\n \"params\": {\n \"uprn\": uprn\n }\n },\n {\n \"jsonrpc\": \"2.0\",\n \"id\": \"2\",\n \"method\": \"goss.echo.westberks.forms.getNextRecyclingCollectionDate\",\n \"params\": {\n \"uprn\": uprn\n }\n },\n {\n \"jsonrpc\": \"2.0\",\n \"id\": \"3\",\n \"method\": \"goss.echo.westberks.forms.getNextFoodWasteCollectionDate\",\n \"params\": {\n \"uprn\": uprn\n }\n }\n]\n\n# URL for the POST request\nurl = \"https://www.westberks.gov.uk/apiserver/ajaxlibrary\"\n\n# Set the request headers\nheaders = {\n \"Content-Type\": \"application/json; charset=UTF-8\",\n \"User-Agent\": \"PostmanRuntime/7.32.2\",\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Connection\": \"keep-alive\"\n}\n\n# MQTT broker details\nmqtt_broker = \"192.168.1.55\" # <--- make this you own! 
use: https://uprn.uk/postcode/\nmqtt_port = 1883\nmqtt_topic_base = \"bin_days/\"\nmqtt_topics = [\n \"rubbishCollectionDate\",\n \"recyclingCollectionDate\",\n \"foodWasteCollectionDate\"\n]\n\n# MQTT client setup\nclient = mqtt.Client()\nclient.connect(mqtt_broker, mqtt_port)\n\n# Send the POST requests and publish responses to MQTT\nfor i, payload in enumerate(json_payloads):\n # Convert the JSON payload to a string\n json_string = json.dumps(payload)\n\n # Send the POST request\n if enable_print_statements:\n print(\"Sending POST request\", i+1, \"to:\", url)\n response = requests.post(url, data=json_string, headers=headers)\n\n # Check the response status code\n if enable_print_statements:\n print(\"Response status code:\", response.status_code)\n\n # Publish the response to the MQTT topic\n mqtt_topic = mqtt_topic_base + mqtt_topics[i]\n mqtt_payload = response.text\n if enable_print_statements:\n print(\"Publishing to MQTT topic:\", mqtt_topic)\n client.publish(mqtt_topic, mqtt_payload)\n\n# Disconnect from the MQTT broker\nclient.disconnect()\n\nif enable_print_statements:\n print(\"Script execution complete.\")\n","repo_name":"merlinmb/WestBerkshireBinDays","sub_path":"WestBerksBinDays.py","file_name":"WestBerksBinDays.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10636193569","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Alexandre Sac--Morane\nalexandre.sac-morane@uclouvain.be\n\nThis is the file where the user can change the different parameters for the simulation.\n\"\"\"\n\n#-------------------------------------------------------------------------------\n#Librairy\n#-------------------------------------------------------------------------------\n\nimport math\nimport numpy as np\n\n#-------------------------------------------------------------------------------\n#User\n#-------------------------------------------------------------------------------\n\ndef All_parameters():\n '''this function is called in main.py to have all the parameters needed in the simulation'''\n\n #---------------------------------------------------------------------------\n #Geometric parameters\n\n N_grain_disk = 300 #number of grains\n R_mean = 350 #µm radius to compute the grain distribution. 
Then recomputed\n    L_R = [1.2*R_mean, 1.1*R_mean, 0.9*R_mean, 0.8*R_mean] #from larger to smaller\n    L_percentage_R = [1/6, 1/3, 1/3, 1/6] #distribution of the different radii\n    #Recompute the mean radius\n    R_mean = 0\n    for i in range(len(L_R)):\n        R_mean = R_mean + L_R[i]*L_percentage_R[i]\n\n    #write dict\n    dict_geometry = {\n    'N_grain_disk' : N_grain_disk,\n    'R_mean' : R_mean,\n    'L_R' : L_R,\n    'L_percentage_R' : L_percentage_R,\n    }\n\n    #---------------------------------------------------------------------------\n    #Material parameters\n\n    Y = 70*(10**9)*(10**6)*(10**(-12)) #Young's Modulus µN/µm2\n    nu = 0.3 #Poisson's ratio\n    rho = 2500*10**(-6*3) #density kg/µm3\n    mu_friction_gg = 0.5 #grain-grain\n    mu_friction_gw = 0 #grain-wall\n    coeff_restitution = 0.2 #1 is perfectly elastic\n\n    #write dict\n    dict_material = {\n    'Y' : Y,\n    'nu' : nu,\n    'rho' : rho,\n    'mu_friction_gg' : mu_friction_gg,\n    'mu_friction_gw' : mu_friction_gw,\n    'coeff_restitution' : coeff_restitution\n    }\n\n    #---------------------------------------------------------------------------\n\n    #Box definition\n    x_box_min = 0 #µm\n    x_box_max = 2*R_mean*math.sqrt(N_grain_disk/0.6) #µm 0.6 from Santamarina, 2014 to avoid boundary effects\n    y_box_min = 0 #µm\n\n    #write dict\n    dict_sample = {\n    'x_box_min' : x_box_min,\n    'x_box_max' : x_box_max,\n    'y_box_min' : y_box_min\n    }\n\n    #---------------------------------------------------------------------------\n    #External sollicitations\n    \n    #Confinement\n    Vertical_Confinement_Linear_Force = Y*2*R_mean/1000 #µN/µm used to compute the Vertical_Confinement_Force\n    Vertical_Confinement_Force = Vertical_Confinement_Linear_Force*(x_box_max-x_box_min) #µN\n    gravity = 0 #µm/s2\n    \n    #Dissolution\n    frac_dissolved = 0.15 #Percentage of grains dissolved\n    frac_Rmean0 = 0.05\n    DR_dissolution = frac_Rmean0*R_mean #Reduction of the grain radius at each iteration\n\n    #write dict\n    dict_sollicitations = {\n    'Vertical_Confinement_Force' : Vertical_Confinement_Force,\n    'gravity' : gravity,\n    'frac_dissolved' : frac_dissolved,\n    'DR_dissolution' : DR_dissolution\n    }\n\n    #---------------------------------------------------------------------------\n    #Algorithm parameters\n\n    #DEM parameters\n    dt_DEM_crit = math.pi*min(L_R)/(0.16*nu+0.88)*math.sqrt(rho*(2+2*nu)/Y) #s critical time step from O'Sullivan 2011\n    dt_DEM = dt_DEM_crit/8 #s time step during DEM simulation\n    Spring_type = 'Ponctual' #Kind of contact\n    factor_neighborhood = 1.5 #margin to detect a grain inside a neighborhood\n    i_update_neighborhoods = 200 #the frequency of the update of the neighborhood of the grains and the walls\n    \n    #Stop criteria of the DEM\n    i_DEM_stop = 3000 #maximum iteration for one DEM simulation\n    Ecin_ratio = 0.0002\n    n_window_stop = 50\n    dk0_stop = 0.03\n    dy_box_max_stop = 0.5\n\n    #PF-DEM\n    n_t_PFDEM = 20 #number of PF-DEM cycles\n\n    #Debugging\n    i_print_plot = 100 #frequency of the print and plot (if Debug_DEM) in the DEM step\n    Debug = True #plot configuration before and after DEM simulation\n    Debug_DEM = False #plot configuration inside DEM\n    SaveData = True #save simulation\n    main_folder_name = 'Data_RTS' #where data are saved\n    template_simulation_name = 'f_'+str(int(1000*frac_Rmean0))+'_Run_' #template of the simulation name\n\n    #write dict\n    dict_algorithm = {\n    'dt_DEM_crit' : dt_DEM_crit,\n    'dt_DEM' : dt_DEM,\n    'i_update_neighborhoods': i_update_neighborhoods,\n    'i_DEM_stop' : i_DEM_stop,\n    'Ecin_ratio' : Ecin_ratio,\n    'n_window_stop' : n_window_stop,\n    'dk0_stop' : dk0_stop,\n    'dy_box_max_stop' : dy_box_max_stop,\n    
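# number of PF-DEM coupling cycles:\n    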
'n_t_PFDEM' : n_t_PFDEM,\n 'Spring_type' : Spring_type,\n 'Debug' : Debug,\n 'Debug_DEM' : Debug_DEM,\n 'SaveData' : SaveData,\n 'main_folder_name' : main_folder_name,\n 'template_simulation_name' : template_simulation_name,\n 'i_print_plot' : i_print_plot,\n 'factor_neighborhood' : factor_neighborhood\n }\n\n #---------------------------------------------------------------------------\n #Initial condition parameters\n\n #Generation of grains\n N_test_max = 5000 # maximum number of tries to generate a grain without overlap\n factor_ymax_box = 2.5 #margin to generate grains\n n_generation = 2 #number of grains generation\n #/!\\ Work only for 2 /!\\\n \n #DEM\n dt_DEM_IC = dt_DEM_crit/5 #s time step during IC\n factor_neighborhood_IC = 1.5 #margin to detect a grain into a neighborhood\n i_update_neighborhoods_gen = 5 #the frequency of the update of the neighborhood of the grains and the walls during IC generations\n i_update_neighborhoods_com = 100 #the frequency of the update of the neighborhood of the grains and the walls during IC combination\n #Stop DEM\n i_DEM_stop_IC = 3000 #stop criteria for DEM during IC\n Ecin_ratio_IC = 0.0005\n \n #Plot\n Debug_DEM_IC = False #plot configuration inside DEM during IC\n i_print_plot_IC = 100 #frequency of the print and plot (if Debug_DEM_IC) for IC\n \n #write dict\n dict_ic = {\n 'n_generation' : n_generation,\n 'i_update_neighborhoods_gen': i_update_neighborhoods_gen,\n 'i_update_neighborhoods_com': i_update_neighborhoods_com,\n 'factor_ymax_box' : factor_ymax_box,\n 'i_DEM_stop_IC' : i_DEM_stop_IC,\n 'Debug_DEM' : Debug_DEM_IC,\n 'dt_DEM_IC' : dt_DEM_IC,\n 'Ecin_ratio_IC' : Ecin_ratio_IC,\n 'i_print_plot_IC' : i_print_plot_IC,\n 'factor_neighborhood_IC' : factor_neighborhood_IC,\n 'N_test_max' : N_test_max\n }\n\n #---------------------------------------------------------------------------\n\n return dict_algorithm, dict_geometry, dict_ic, dict_material, dict_sample, dict_sollicitations\n\n#-------------------------------------------------------------------------------\n\ndef Criteria_StopSimulation(dict_algorithm):\n '''Criteria to stop simulation (PF and DEM)'''\n Criteria_Verified = False\n if dict_algorithm['i_PF'] >= dict_algorithm['n_t_PFDEM']:\n Criteria_Verified = True\n return Criteria_Verified\n","repo_name":"AlexSacMorane/DEM_LookREVandRTS","sub_path":"User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":7013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7833738280","text":"\"\"\"\nDefinition of ListNode\nclass ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param: head: The head of linked list.\n @return: You should return the head of the sorted linked list, using constant space complexity.\n \"\"\"\n def mergeLists(self, l1, l2):\n dummy = ListNode(-1)\n cur = dummy\n while l1 and l2:\n if l1.val < l2.val:\n cur.next = l1\n l1 = l1.next\n else:\n cur.next = l2\n l2 = l2.next\n cur = cur.next\n \n if l1: cur.next = l1\n else: cur.next = l2\n return dummy.next\n \n def sortList(self, head):\n # write your code here\n if not head or not head.next: return head\n \n slow, fast = head, head.next\n while fast.next and fast.next.next:\n slow, fast = slow.next, fast.next.next\n \n l1 = head\n l2 = slow.next\n slow.next = None\n \n left = self.sortList(l1)\n right = self.sortList(l2)\n \n return self.mergeLists(left, right)\n 
\n","repo_name":"yihanc/LC","sub_path":"LINTCODE/98_sort_list.py","file_name":"98_sort_list.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70221929066","text":"# Python Modules\nimport json\nimport os\nimport datetime\n\n# Custom Modules\n# Exceptions\nfrom outsystems.exceptions.no_deployments import NoDeploymentsError\nfrom outsystems.exceptions.invalid_parameters import InvalidParametersError\nfrom outsystems.exceptions.not_enough_permissions import NotEnoughPermissionsError\nfrom outsystems.exceptions.server_error import ServerError\nfrom outsystems.exceptions.environment_not_found import EnvironmentNotFoundError\nfrom outsystems.exceptions.impossible_action_deployment import ImpossibleApplyActionDeploymentError\n# Functions\nfrom outsystems.lifetime.lifetime_base import send_get_request, send_post_request, send_delete_request\nfrom outsystems.lifetime.lifetime_environments import get_environment_key\nfrom outsystems.file_helpers.file import store_data\n# Variables\nfrom outsystems.vars.lifetime_vars import DEPLOYMENTS_ENDPOINT, DEPLOYMENT_STATUS_ENDPOINT, \\\n DEPLOYMENT_START_ENDPOINT, DEPLOYMENT_CONTINUE_ENDPOINT, DEPLOYMENTS_SUCCESS_CODE, DEPLOYMENTS_EMPTY_CODE, \\\n DEPLOYMENTS_INVALID_CODE, DEPLOYMENTS_NO_PERMISSION_CODE, DEPLOYMENTS_FAILED_CODE, DEPLOYMENT_GET_SUCCESS_CODE, \\\n DEPLOYMENT_GET_NO_PERMISSION_CODE, DEPLOYMENT_GET_NO_DEPLOYMENT_CODE, DEPLOYMENT_GET_FAILED_CODE, \\\n DEPLOYMENT_STATUS_SUCCESS_CODE, DEPLOYMENT_STATUS_NO_PERMISSION_CODE, DEPLOYMENT_STATUS_NO_DEPLOYMENT_CODE, DEPLOYMENT_STATUS_FAILED_CODE, \\\n DEPLOYMENT_SUCCESS_CODE, DEPLOYMENT_INVALID_CODE, DEPLOYMENT_NO_PERMISSION_CODE, DEPLOYMENT_NO_ENVIRONMENT_CODE, DEPLOYMENT_FAILED_CODE, \\\n DEPLOYMENT_DELETE_SUCCESS_CODE, DEPLOYMENT_DELETE_IMPOSSIBLE_CODE, DEPLOYMENT_DELETE_NO_PERMISSION_CODE, DEPLOYMENT_DELETE_NO_DEPLOYMENT_CODE, \\\n DEPLOYMENT_DELETE_FAILED_CODE, DEPLOYMENT_ACTION_SUCCESS_CODE, DEPLOYMENT_ACTION_IMPOSSIBLE_CODE, DEPLOYMENT_ACTION_NO_PERMISSION_CODE, \\\n DEPLOYMENT_ACTION_NO_DEPLOYMENT_CODE, DEPLOYMENT_ACTION_FAILED_CODE, DEPLOYMENT_PLAN_V1_API_OPS, DEPLOYMENT_PLAN_V2_API_OPS\nfrom outsystems.vars.file_vars import DEPLOYMENTS_FILE, DEPLOYMENT_FILE, DEPLOYMENT_FOLDER, DEPLOYMENT_STATUS_FILE\nfrom outsystems.vars.pipeline_vars import DEPLOYMENT_STATUS_LIST, DEPLOYMENT_SAVED_STATUS\n\n\n# Returns a list of deployments ordered by creation date, from newest to oldest.\ndef get_deployments(artifact_dir: str, endpoint: str, auth_token: str, date: str):\n # Builds the parameters for the api call\n params = {\"MinDate\": date}\n # Sends the request\n response = send_get_request(\n endpoint, auth_token, DEPLOYMENTS_ENDPOINT, params)\n status_code = int(response[\"http_status\"])\n if status_code == DEPLOYMENTS_SUCCESS_CODE:\n # Stores the result\n store_data(artifact_dir, DEPLOYMENTS_FILE, response[\"response\"])\n return response[\"response\"]\n elif status_code == DEPLOYMENTS_EMPTY_CODE:\n raise NoDeploymentsError(\"There are no deployments starting on {} until now. Details: {}\".format(\n date, response[\"response\"]))\n elif status_code == DEPLOYMENTS_INVALID_CODE:\n raise InvalidParametersError(\"Invalid request starting on {} until now. Parameters: {}. Details: {}\".format(\n date, params, response[\"response\"]))\n elif status_code == DEPLOYMENTS_NO_PERMISSION_CODE:\n raise NotEnoughPermissionsError(\n \"You don't have enough permissions to see the deployment list. 
Details: {}\".format(response[\"response\"]))\n elif status_code == DEPLOYMENTS_FAILED_CODE:\n raise ServerError(\n \"Failed to list the deployments. Details {}\".format(response[\"response\"]))\n else:\n raise NotImplementedError(\n \"There was an error. Response from server: {}\".format(response))\n\n\n# Returns the details of a given deployment, validating if there are any conflicts.\n# The returned information contains the applications included in the deployment plan and\n# the possible conflicts that can arise from the deployment of the selected applications.\ndef get_deployment_info(artifact_dir: str, endpoint: str, auth_token: str, deployment_key: str):\n # Builds the API call\n query = \"{}/{}\".format(DEPLOYMENTS_ENDPOINT, deployment_key)\n # Sends the request\n response = send_get_request(endpoint, auth_token, query, None)\n status_code = int(response[\"http_status\"])\n if status_code == DEPLOYMENT_GET_SUCCESS_CODE:\n # Stores the result\n filename = \"{}{}\".format(deployment_key, DEPLOYMENT_FILE)\n filename = os.path.join(DEPLOYMENT_FOLDER, filename)\n store_data(artifact_dir, filename, response[\"response\"])\n return response[\"response\"]\n elif status_code == DEPLOYMENT_GET_NO_PERMISSION_CODE:\n raise NotEnoughPermissionsError(\n \"You don't have enough permissions to see the details of that deployment. Details: {}\".format(response[\"response\"]))\n elif status_code == DEPLOYMENT_GET_NO_DEPLOYMENT_CODE:\n raise NoDeploymentsError(\"There are no deployments with the key {}. Details: {}\".format(\n deployment_key, response[\"response\"]))\n elif status_code == DEPLOYMENT_GET_FAILED_CODE:\n raise ServerError(\"Failed to access the details of deployment with key {}. Details: {}\".format(\n deployment_key, response[\"response\"]))\n else:\n raise NotImplementedError(\n \"There was an error. Response from server: {}\".format(response))\n\n\n# Returns the details of a given deployment execution, including the deployment status and messages.\ndef get_deployment_status(artifact_dir: str, endpoint: str, auth_token: str, deployment_key: str):\n # Builds the API call\n query = \"{}/{}/{}\".format(DEPLOYMENTS_ENDPOINT,\n deployment_key, DEPLOYMENT_STATUS_ENDPOINT)\n # Sends the request\n response = send_get_request(endpoint, auth_token, query, None)\n status_code = int(response[\"http_status\"])\n if status_code == DEPLOYMENT_STATUS_SUCCESS_CODE:\n # Stores the result\n filename = \"{}{}\".format(deployment_key, DEPLOYMENT_STATUS_FILE)\n filename = os.path.join(DEPLOYMENT_FOLDER, filename)\n store_data(artifact_dir, filename, response[\"response\"])\n return response[\"response\"]\n elif status_code == DEPLOYMENT_STATUS_NO_PERMISSION_CODE:\n raise NotEnoughPermissionsError(\n \"You don't have enough permissions to see the details of that deployment. Details: {}\".format(response[\"response\"]))\n elif status_code == DEPLOYMENT_STATUS_NO_DEPLOYMENT_CODE:\n raise NoDeploymentsError(\"There are no deployments with the key {}. Details: {}\".format(\n deployment_key, response[\"response\"]))\n elif status_code == DEPLOYMENT_STATUS_FAILED_CODE:\n raise ServerError(\"Failed to get the status of deployment with key {}. Details: {}\".format(\n deployment_key, response[\"response\"]))\n else:\n raise NotImplementedError(\n \"There was an error. 
Response from server: {}\".format(response))\n\n\n# Returns the details of the running deployment plan to a specific target environment or empty if nothing is running\ndef get_running_deployment(artifact_dir: str, endpoint: str, auth_token: str, dest_env_key: str):\n    # List of running deployments\n    running_deployments = []\n    # Date 24h prior to now\n    date = datetime.datetime.now() - datetime.timedelta(days=1)\n    date = date.date()\n    try:\n        latest_deployments = get_deployments(artifact_dir, endpoint, auth_token, date)\n        for deployment in latest_deployments:\n            if deployment[\"TargetEnvironmentKey\"] == dest_env_key:\n                deployment_status = get_deployment_status(artifact_dir, endpoint, auth_token, deployment[\"Key\"])\n                if deployment_status[\"DeploymentStatus\"] in DEPLOYMENT_STATUS_LIST:\n                    running_deployments.append(deployment)\n\n        return running_deployments\n\n    except NoDeploymentsError:\n        # If there are no deployments, return empty\n        return running_deployments\n    except:\n        # Legit exception that needs to be handled -> bubble up\n        raise\n\n\n# Returns the details of the saved deployment plan to a specific target environment or None if nothing is found\ndef get_saved_deployment(artifact_dir: str, endpoint: str, auth_token: str, dest_env_key: str):\n    # Date 24h prior to now\n    date = datetime.datetime.now() - datetime.timedelta(days=1)\n    date = date.date()\n    try:\n        latest_deployments = get_deployments(artifact_dir, endpoint, auth_token, date)\n        for deployment in latest_deployments:\n            if deployment[\"TargetEnvironmentKey\"] == dest_env_key:\n                deployment_status = get_deployment_status(artifact_dir, endpoint, auth_token, deployment[\"Key\"])\n                if deployment_status[\"DeploymentStatus\"] in DEPLOYMENT_SAVED_STATUS:\n                    return deployment\n\n        return None\n\n    except NoDeploymentsError:\n        # If there are no deployments, return empty\n        return None\n    except:\n        # Legit exception that needs to be handled -> bubble up\n        raise\n\n\n# Creates a deployment to a target environment.\n# An optional list of applications to include in the deployment can be specified.\n# The input is a subset of the deployment object.\ndef send_deployment(artifact_dir: str, endpoint: str, auth_token: str, lt_api_version: int, app_keys: list, dep_note: str, source_env: str, dest_env: str):\n    # Builds the deployment plan\n    deployment_request = _create_deployment_plan(\n        artifact_dir, endpoint, lt_api_version, auth_token, app_keys, dep_note, source_env, dest_env)\n    # Sends the request\n    response = send_post_request(\n        endpoint, auth_token, DEPLOYMENTS_ENDPOINT, deployment_request)\n    status_code = int(response[\"http_status\"])\n    if status_code == DEPLOYMENT_SUCCESS_CODE:\n        return response[\"response\"]\n    elif status_code == DEPLOYMENT_INVALID_CODE:\n        raise InvalidParametersError(\"The request is invalid. Check the body of the request for errors. Body: {}. Details: {}.\".format(\n            deployment_request, response[\"response\"]))\n    elif status_code == DEPLOYMENT_NO_PERMISSION_CODE:\n        raise NotEnoughPermissionsError(\n            \"You don't have enough permissions to create the deployment. Details: {}\".format(response[\"response\"]))\n    elif status_code == DEPLOYMENT_NO_ENVIRONMENT_CODE:\n        raise EnvironmentNotFoundError(\n            \"Can't find the source or target environment. Details: {}.\".format(response[\"response\"]))\n    elif status_code == DEPLOYMENT_FAILED_CODE:\n        raise ServerError(\n            \"Failed to create the deployment. Details: {}\".format(response[\"response\"]))\n    else:\n        raise NotImplementedError(\n            \"There was an error. 
Response from server: {}\".format(response))\n\n\n# Discards a deployment, if possible. Only deployments whose state is “saved” can be deleted.\ndef delete_deployment(endpoint: str, auth_token: str, deployment_key: str):\n # Builds the API call\n query = \"{}/{}\".format(DEPLOYMENTS_ENDPOINT, deployment_key)\n # Sends the request\n response = send_delete_request(endpoint, auth_token, query)\n status_code = int(response[\"http_status\"])\n if status_code == DEPLOYMENT_DELETE_SUCCESS_CODE:\n return response[\"response\"]\n elif status_code == DEPLOYMENT_DELETE_IMPOSSIBLE_CODE:\n raise ImpossibleApplyActionDeploymentError(\n \"You can't delete the deployment with key {}. Try aborting the deployment first. Details: {}\".format(deployment_key, response[\"response\"]))\n elif status_code == DEPLOYMENT_DELETE_NO_PERMISSION_CODE:\n raise NotEnoughPermissionsError(\n \"You don't have enough permissions to delete the deployment. Details: {}\".format(response[\"response\"]))\n elif status_code == DEPLOYMENT_DELETE_NO_DEPLOYMENT_CODE:\n raise NoDeploymentsError(\"There are no deployments with the key {}. Details: {}\".format(\n deployment_key, response[\"response\"]))\n elif status_code == DEPLOYMENT_DELETE_FAILED_CODE:\n raise ServerError(\"Failed to delete the deployment with key {}. Details: {}\".format(\n deployment_key, response[\"response\"]))\n else:\n raise NotImplementedError(\n \"There was an error. Response from server: {}\".format(response))\n\n\n# Executes the start command in a specified deployment.\n# The initiation of a deployment plan will check if it's valid.\n# The applications to redeploy, if applicable, will also be included in the deployment plan.\n# If continue with errors is allowed, an additional parameter is added to the query string.\ndef start_deployment(endpoint: str, auth_token: str, deployment_key: str, **kwargs):\n redeploy = False\n if \"redeploy_outdated\" not in kwargs:\n redeploy = True\n\n # Builds the API call\n query = \"{}/{}/{}\".format(DEPLOYMENTS_ENDPOINT,\n deployment_key, DEPLOYMENT_START_ENDPOINT)\n # If the parameter to redeploy outdated has a value, that must be included in the call\n if not redeploy:\n query = \"{}?RedeployOutdated={}\".format(\n query, kwargs[\"redeploy_outdated\"])\n # If the parameter to continue with errors has a value of True, that must be included in the call\n if \"continue_with_errors\" in kwargs and kwargs[\"continue_with_errors\"]:\n query = \"{}&ContinueWithErrors={}\".format(query, True)\n\n # Sends the request\n response = send_post_request(endpoint, auth_token, query, None)\n status_code = int(response[\"http_status\"])\n if status_code == DEPLOYMENT_ACTION_SUCCESS_CODE:\n return response[\"response\"]\n elif status_code == DEPLOYMENT_ACTION_IMPOSSIBLE_CODE:\n raise ImpossibleApplyActionDeploymentError(\n \"You can't start the deployment with key {}. Details: {}\".format(deployment_key, response[\"response\"]))\n elif status_code == DEPLOYMENT_ACTION_NO_PERMISSION_CODE:\n raise NotEnoughPermissionsError(\n \"You don't have enough permissions to start the deployment. Details: {}\".format(response[\"response\"]))\n elif status_code == DEPLOYMENT_ACTION_NO_DEPLOYMENT_CODE:\n raise NoDeploymentsError(\"There are no deployments with the key {}. Details: {}\".format(\n deployment_key, response[\"response\"]))\n elif status_code == DEPLOYMENT_ACTION_FAILED_CODE:\n raise ServerError(\"Failed to start the deployment with key {}. 
Details: {}\".format(\n deployment_key, response[\"response\"]))\n else:\n raise NotImplementedError(\n \"There was an error. Response from server: {}\".format(response))\n\n\n# Executes the continue command in a specified deployment.\ndef continue_deployment(endpoint: str, auth_token: str, deployment_key: str):\n # Builds the API call\n query = \"{}/{}/{}\".format(DEPLOYMENTS_ENDPOINT,\n deployment_key, DEPLOYMENT_CONTINUE_ENDPOINT)\n # Sends the request\n response = send_post_request(endpoint, auth_token, query, None)\n status_code = int(response[\"http_status\"])\n if status_code == DEPLOYMENT_ACTION_SUCCESS_CODE:\n return response[\"response\"]\n elif status_code == DEPLOYMENT_ACTION_IMPOSSIBLE_CODE:\n raise ImpossibleApplyActionDeploymentError(\n \"You can't continue the deployment with key {}. Details: {}\".format(deployment_key, response[\"response\"]))\n elif status_code == DEPLOYMENT_ACTION_NO_PERMISSION_CODE:\n raise NotEnoughPermissionsError(\n \"You don't have enough permissions to continue the deployment. Details: {}\".format(response[\"response\"]))\n elif status_code == DEPLOYMENT_ACTION_NO_DEPLOYMENT_CODE:\n raise NoDeploymentsError(\"There are no deployments with the key {}. Details: {}\".format(\n deployment_key, response[\"response\"]))\n elif status_code == DEPLOYMENT_ACTION_FAILED_CODE:\n raise ServerError(\"Failed to continue the deployment with key {}. Details: {}\".format(\n deployment_key, response[\"response\"]))\n else:\n raise NotImplementedError(\n \"There was an error. Response from server: {}\".format(response))\n\n\n# Function to check if the deployment is with Prepared status\ndef check_deployment_two_step_deploy_status(dep_status: dict):\n return dep_status[\"Info\"] == \"deployment_prepared\"\n\n\n# ---------------------- PRIVATE METHODS ----------------------\ndef _create_deployment_plan(artifact_dir: str, endpoint: str, lt_api_version: int, auth_token: str, app_keys: str, dep_note: str, source_env: str, dest_env: str):\n if lt_api_version == 1:\n api_var_name = DEPLOYMENT_PLAN_V1_API_OPS\n elif lt_api_version == 2:\n api_var_name = DEPLOYMENT_PLAN_V2_API_OPS\n else:\n raise NotImplementedError(\n \"Unsupported API version for LifeTime: used {}\".format(lt_api_version))\n source_env_key = get_environment_key(\n artifact_dir, endpoint, auth_token, source_env)\n dest_env_key = get_environment_key(\n artifact_dir, endpoint, auth_token, dest_env)\n deployment_request = {api_var_name: app_keys, \"Notes\": dep_note,\n \"SourceEnvironmentKey\": source_env_key, \"TargetEnvironmentKey\": dest_env_key}\n return json.dumps(deployment_request)\n","repo_name":"OutSystems/outsystems-pipeline","sub_path":"outsystems/lifetime/lifetime_deployments.py","file_name":"lifetime_deployments.py","file_ext":"py","file_size_in_byte":17096,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"} +{"seq_id":"30371887138","text":"def timer(original_func):\r\n def wrapper(*args,**kwargs):\r\n import time\r\n t1 = time.time()\r\n result = original_func(*args,**kwargs)\r\n t2 = time.time() - t1\r\n print(f'{original_func.__name__} ran in {t2} seconds')\r\n return result\r\n return wrapper\r\n\r\n\r\n@timer\r\ndef display_info(name,age):\r\n import time\r\n time.sleep(1)\r\n print(f'display info ran with arguements {name} and {age}')\r\ndisplay_info('Pouya',32)\r\n\r\n","repo_name":"pouya-alipour741/Courses","sub_path":"Python_fani_institude/decorator using closure.py","file_name":"decorator using 
closure.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24318141508","text":"import scrapy\nfrom slugify import slugify\n\n\nclass FifaSpider(scrapy.Spider):\n name = \"fifastats\"\n\n # TODO - run this for extended period of time to get all players\n\n def start_requests(self):\n urls = [\n # 'https://www.fifaindex.com/players/',\n 'https://www.fifaindex.com/players/fifa17_173/',\n 'https://www.fifaindex.com/players/fifa16_73/',\n 'https://www.fifaindex.com/players/fifa15_14/',\n 'https://www.fifaindex.com/players/fifa14_13/',\n 'https://www.fifaindex.com/players/fifa13_10/',\n\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n for row in response.css('tr td'):\n link = row.css('a::attr(href)').extract()\n # print(name, link)\n if link: \n if '/player/' in link[0]:\n url = response.urljoin(link[0])\n yield scrapy.Request(url, callback=self.parse_player)\n\n next_page = response.css('li.next a::attr(href)').extract_first()\n if next_page is not None:\n next_page = response.urljoin(next_page)\n yield scrapy.Request(next_page, callback=self.parse)\n\n def parse_player(self, response):\n name = response.css('div.media-body').css('h2.media-heading::text').extract()[0]\n\n try:\n team = response.css('div.col-lg-4').css('div.panel-heading').css('a::attr(title)')[2].extract()\n except IndexError:\n team = response.css('div.col-lg-4').css('div.panel-heading').css('a::attr(title)')[0].extract()\n\n number = response.css('div.col-lg-4').css('div.panel-body').css('span.pull-right')[3].css(\n 'span::text').extract()[0]\n\n if len(number) == 4:\n number = response.css('div.col-lg-4').css('div.panel-body').css('span.pull-right')[1].css(\n 'span::text').extract()[0]\n\n position = response.css('div.col-lg-5').css('div.panel-body').css('span.label::text').extract_first()\n\n rating = response.css('div.col-lg-5').css('div.panel-heading').css('span.label')[0].css('span.label::text').extract()[0]\n\n nationality = slugify(response.css('h2.subtitle a::text').extract()[0])\n\n yield {\n 'name': slugify(name),\n 'info': {\n 'raw team': team,\n 'team': slugify(team),\n 'position': position,\n 'raw name': name,\n 'rating': int(rating),\n 'kit number': number,\n 'nationality': nationality,\n 'url': response.request.url,\n }\n }\n\n\nclass MatchSpider(scrapy.Spider):\n name = \"matchlineups\"\n\n # TODO - want the other names - not full names\n\n def start_requests(self):\n urls = [\n # 'http://www.betstudy.com/soccer-stats/c/england/premier-league/d/results/2017-2018/',\n # 'http://www.betstudy.com/soccer-stats/c/england/premier-league/d/results/2016-2017/',\n # 'http://www.betstudy.com/soccer-stats/c/england/premier-league/d/results/2015-2016/',\n # 'http://www.betstudy.com/soccer-stats/c/england/premier-league/d/results/2014-2015/',\n # 'http://www.betstudy.com/soccer-stats/c/england/premier-league/d/results/2013-2014/',\n 'http://www.betstudy.com/soccer-stats/c/france/ligue-1/d/results/2017-2018/',\n 'http://www.betstudy.com/soccer-stats/c/france/ligue-1/d/results/2016-2017/',\n 'http://www.betstudy.com/soccer-stats/c/france/ligue-1/d/results/2015-2016/',\n 'http://www.betstudy.com/soccer-stats/c/france/ligue-1/d/results/2014-2015/',\n 'http://www.betstudy.com/soccer-stats/c/france/ligue-1/d/results/2013-2014/'\n\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_fixtures_page)\n\n def parse_fixtures_page(self, response):\n 
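# follow each fixture's info button link through to its match detail page\n        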
for info_button in response.css('ul.action-list').css('a::attr(href)'):\n url = response.urljoin(info_button.extract())\n yield scrapy.Request(url, callback=self.parse_match_page)\n\n def parse_match_page(self, response):\n \n home_team, away_team = response.css('div.player h2 a::text').extract()\n\n date = response.css('em.date').css('span.timestamp::text').extract_first()\n\n url = response.request.url\n\n match_number = response.request.url.split('-')[-1].split('/')[0]\n\n home_goals, away_goals = response.css('div.info strong.score::text').extract_first().split('-')\n \n for table in response.css('div.table-holder'):\n if table.css('h2::text').extract_first() == 'Lineups and subsitutes':\n lineups = table\n \n home_lineup_css = lineups.css('table.info-table')[0]\n away_lineup_css = lineups.css('table.info-table')[1]\n\n home_lineup_raw = [slugify(x) for x in home_lineup_css.css('tr td.left-align').css('a::attr(title)').extract()]\n away_lineup_raw = [slugify(x) for x in away_lineup_css.css('tr td.left-align').css('a::attr(title)').extract()]\n\n home_lineup = [slugify(x) for x in home_lineup_css.css('tr td.left-align').css('a::text').extract()]\n away_lineup = [slugify(x) for x in away_lineup_css.css('tr td.left-align').css('a::text').extract()]\n\n home_lineup_number = [int(x) for x in home_lineup_css.css('tr td.size23 strong::text').extract()]\n away_lineup_number = [int(x) for x in away_lineup_css.css('tr td.size23 strong::text').extract()]\n\n home_lineup_nationality = [int(x) for x in home_lineup_css.css('tr td.left-align::attr(alt)').extract()]\n away_lineup_nationality = [int(x) for x in away_lineup_css.css('tr td.left-align::attr(alt)').extract()]\n\n home_lineup_nationality = [slugify(x) for x in\n home_lineup_css.css('tr td.left-align').css('img.flag-ico::attr(alt)').extract()]\n away_lineup_nationality = [slugify(x) for x in\n away_lineup_css.css('tr td.left-align').css('img.flag-ico::attr(alt)').extract()]\n\n yield {\n 'match number': int(match_number),\n 'info': {\n 'date': date,\n 'home team': slugify(home_team),\n 'away team': slugify(away_team),\n 'home goals': int(home_goals),\n 'away goals': int(away_goals),\n 'home lineup raw names': home_lineup_raw,\n 'away lineup raw names': away_lineup_raw,\n 'home lineup names': home_lineup,\n 'away lineup names': away_lineup,\n 'home lineup numbers': home_lineup_number,\n 'away lineup numbers': away_lineup_number,\n 'home lineup nationalities': home_lineup_nationality,\n 'away lineup nationalities': away_lineup_nationality,\n 'url': url,\n }\n }\n\n\nclass FifaIndexTeamScraper(scrapy.Spider):\n name = \"fifa-index-team\"\n\n # TODO - run this for extended period of time to get all players\n\n def start_requests(self):\n urls = [\n 'https://www.fifaindex.com/teams/',\n 'https://www.fifaindex.com/teams/fifa17_173/',\n 'https://www.fifaindex.com/teams/fifa16_73/',\n 'https://www.fifaindex.com/teams/fifa15_14/',\n 'https://www.fifaindex.com/teams/fifa14_13/',\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n links = [a.extract() for a in response.css('td a::attr(href)')]\n for link in links:\n if '/team/' in link:\n url = response.urljoin(link)\n yield scrapy.Request(url, callback=self.parse_team)\n\n next_page = response.css('li.next a::attr(href)').extract_first()\n if next_page is not None and int(next_page.split('/')[-2]) < 10:\n next_page = response.urljoin(next_page)\n yield scrapy.Request(next_page, callback=self.parse)\n\n def parse_team(self, response):\n team = 
slugify(response.css('.media-heading::text').extract_first())\n\n for i in range(1, len(response.css('tr'))):\n name = slugify(response.css('.table > tbody:nth-child(2) > tr:nth-child({}) > td:nth-child(6) > a:nth-child(1)::attr(title)'.format(i)).extract_first())\n number = int(response.css('.table > tbody:nth-child(2) > tr:nth-child({}) > td:nth-child(1)::text'.format(i)).extract_first())\n nationality = slugify(response.css('.table > tbody:nth-child(2) > tr:nth-child({}) > td:nth-child(4) > a:nth-child(1) > img:nth-child(1)::attr(title)'.format(i)).extract_first())\n position = response.css('.table > tbody:nth-child(2) > tr:nth-child({}) > td:nth-child(7) > a:nth-child(1) > span:nth-child(1)::text'.format(i)).extract_first()\n rating = response.css('table > tbody:nth-child(2) > tr:nth-child({}) > td:nth-child(5) > span:nth-child(1)::text'.format(i)).extract_first()\n\n\n yield {\n 'name': slugify(name),\n 'team': team,\n 'position': position,\n 'rating': int(rating),\n 'number': number,\n 'nationality': nationality,\n 'url': response.request.url\n }\n\n\nclass FixturesSpider(scrapy.Spider):\n name = \"fixtures\"\n\n # TODO - want the other names - not full names\n\n def start_requests(self):\n urls = [\n 'http://www.betstudy.com/soccer-stats/c/england/premier-league/d/fixtures/'\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_fixtures)\n\n def parse_fixtures(self, response):\n for fixture in response.css('tr')[1:]:\n home_team = fixture.css('td.right-align a::text').extract_first()\n away_team = fixture.css('td.left-align a::text').extract_first()\n date = fixture.css('td::text').extract_first()\n yield {\n 'date': date,\n 'home team': slugify(home_team),\n 'away team': slugify(away_team),\n 'url': response.request.url\n }\n\n","repo_name":"prosperityai/EPL-Predicions","sub_path":"crawler/crawler/spiders/fifa_spider.py","file_name":"fifa_spider.py","file_ext":"py","file_size_in_byte":10247,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"29302598684","text":"import pytest\r\n\r\n\r\n@pytest.mark.smoke\r\n@pytest.mark.skip\r\ndef test_first_program(setup):\r\n print('Hello')\r\n msg = \"Hello\"\r\n assert msg == \"Hi\", \"Test failed, because strings do not match! 
\"\r\n\r\n\r\ndef test_second_program(setup):\r\n a = 4\r\n b = 6\r\n assert a + 2 == 6, \"Addition is right\"\r\n print(a + 2)\r\n\r\n\r\n","repo_name":"Gvlmihre/Pytest-With-Python","sub_path":"pytestsDemo/test_demo2.py","file_name":"test_demo2.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29567950953","text":"import sys\nsys.path.append(\"../\")\nimport pony.orm as pny\nimport manager.workflow as workflow\n\nimport Database\n\n\nDatabase.initialiseDatabase()\n\nwith pny.db_session:\n incidents=Database.Incident.select(lambda i: i.status == \"ACTIVE\" or i.status == \"PENDING\")[:]\n \n\n ids=[]\n for incident in incidents:\n ids.append(incident.uuid)\n print(ids)\n\nif len(ids) > 0:\n workflow.OpenConnection()\n for id in ids:\n workflow.Cancel(id,reason=\"Manualy killed\")\n workflow.CloseConnection()\nelse:\n print(\"No active incidents to kill\")","repo_name":"VESTEC-EU/vestec-system","sub_path":"WorkflowManager/killworkflows.py","file_name":"killworkflows.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29315058678","text":"from pymongo import MongoClient\n\nclient = MongoClient('localhost', 27017)\ndb = client.irish\nprueba = db.pruebaColection\npost_data = {\n 'title': 'Python and MongoDB',\n 'content': 'PyMongo is fun, you guys',\n 'author': 'Scott'\n}\nresult = prueba.insert_one(post_data)\nprint('One post: {0}'.format(result.inserted_id))","repo_name":"paisap/chat-online","sub_path":"web_scraping-python/connection_database.py","file_name":"connection_database.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1967628282","text":"# Author: Leonardo Rossi Leao\n# Description: thread that observe variables by serial communication and \n# executes a callback function when a value changes\n# Created at: 2023-10-27\n# Last update: 2023-10-28\n\nfrom time import sleep\nfrom datetime import datetime\nfrom PyQt5.QtCore import QThread, pyqtSignal\n\nclass Observer(QThread):\n\n onValueChange = pyqtSignal(str, float)\n\n def __init__(self, get_function, delay: int = 0.1):\n super(Observer, self).__init__()\n\n self.get_function = get_function\n self.save_path = None\n self.delay = delay\n\n self.variables = {}\n self.is_running = True\n\n\n def appendVariable(self, name: str, command: str, callback):\n\n \"\"\"\n Appends a variable to be observed.\n\n Params\n ------\n name (str): name of the variable.\n command (str): command to be sent to the device.\n callback (function): function to be executed when the value changes.\n \"\"\"\n\n self.variables[name] = {\n \"command\": command,\n \"value\": None,\n \"callback\": callback\n }\n\n\n def setSavePath(self, path: str):\n\n \"\"\"\n Sets the path where the data will be saved.\n \n Params\n ------\n path (str): path where the data will be saved.\n \"\"\"\n\n self.save_path = path\n\n\n def stop(self):\n\n \"\"\"\n Stops the variable observer.\n \"\"\"\n\n self.is_running = False\n\n\n def get(self, parameter: str):\n\n \"\"\"\n Returns the value of the variable.\n\n Params\n ------\n parameter (str): name of the variable.\n \"\"\"\n\n return self.variables[parameter][\"value\"]\n\n\n def run(self):\n\n \"\"\"\n Starts the variable observer. 
This function save the readed\n data in a file and executes a callback function when a value\n changes.\n \"\"\"\n\n while self.is_running:\n received = self.get_function()\n now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n if received is not None:\n id, value = received\n if id is not None and id.isnumeric() and value is not None:\n if self.save_path is not None:\n with open(self.save_path + f\"/{id}.txt\", \"a\") as file:\n file.write(f\"{now},{value}\\n\")\n self.variables[id][\"value\"] = value\n self.variables[id][\"callback\"](id, value)","repo_name":"leonardo-unicamp/ECU_BCS_TRICICLO_PID4","sub_path":"gui/modules/observer.py","file_name":"observer.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12349306775","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 7 23:56:14 2017\n@author: cesar\nA palindromic number reads the same both ways. The largest palindrome made \nfrom the product of two 2-digit numbers is 9009 = 91 × 99.\nFind the largest palindrome made from the product of two 3-digit numbers.\n\"\"\"\n#○import pdb\n\ndef check_pal(a):\n ori=str(a)\n end=ori[::-1]\n \n if end == ori:\n return(True)\n else:\n return(False)\n\ndef gen_pal(start, stop):\n plist=[]\n for i in range(start,stop):\n if check_pal(i) == True:\n plist.append(i)\n else:\n pass\n return(plist) \n\n\ndef search_factor(min_thr,max_thr,pal):\n '''search factor for palindrome:\n data= min and max threeshold for factor search and palindrome\n return true when found\n '''\n #get factor.\n if max_thr - int(pal**0.5) < int(pal**0.5) - min_thr: #99,10=max_val,min_val\n i=int(pal**0.5)\n while i <= max_thr: #99=max_val\n print('checking {} from {}'.format(i,pal))\n i +=1\n if pal%i == 0:\n print('found factor {} for palindrome {}'.format(i,pal))\n return(True)\n break\n \n else:\n # pdb.set_trace()\n i=int(pal**0.5)\n while i >= min_thr: #10=min_val\n print('checking {} from {}'.format(i,pal))\n if pal%i == 0:\n print('found factor {} for palindrome {}'.format(i,pal))\n return(True)\n break\n i-=1\n \n\nif __name__ == '__main__':\n plist=gen_pal(100*100,999*999)\n plist.reverse()\n for i in plist:\n res=search_factor(100,999,i)\n if res==True:\n break\n\n \n","repo_name":"cesalmo/projecteuler","sub_path":"p04.py","file_name":"p04.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6994810576","text":"\"\"\"Classes for managing sections and metadata in config files.\n\nThese classes parse sections and metadata found in the comments of sxhkdrc\nfiles, in which different styles of formatting are handled specially.\n\nAs for sections, they may be \"simple\" in which there are no subsections, or\nrepresent a \"stack\" in which there *are* subsections, which require that they\nare completely enclosed by their parent sections. 
There may also be *no*\nsections below the root, in which no comments create new sections.\n\nWith regard to metadata, there may only be descriptions on single lines\n(\"simple\") or they may be key-value pairs across multiple lines (\"key-value\").\n\nMany of them will also take regular expressions to configure the recognition of\nsections and metadata.\n\nSection classes:\n SectionHandler: abstract base class.\n RootSectionHandler: no new sections and places all keybinds in the root section.\n SimpleSectionHandler: flat sections.\n StackSectionHandler: subsections.\n\nMetadata classes:\n MetadataParser: abstract base class.\n NullMetadataParser: no-op for all operations.\n SimpleDescriptionParser: description line immediately above the keybind.\n KeyValueMetadataParser: key-value pairs above the keybind.\n\"\"\"\nfrom __future__ import annotations\n\nimport re\nfrom abc import ABC, abstractmethod, abstractproperty\nfrom dataclasses import dataclass, field\nfrom typing import (\n Any,\n Callable,\n Dict,\n Iterable,\n List,\n Optional,\n Pattern,\n Tuple,\n Union,\n)\n\nfrom .errors import MetadataParserError, SectionEOFError, SectionPushError\nfrom .parser import Keybind\nfrom .seq import SpanTree, expand_sequences\n\n__all__ = [\n # General.\n \"SectionTreeNode\",\n \"find_enclosing_section\",\n # Section handlers.\n \"SectionHandler\",\n \"RootSectionHandler\",\n \"SimpleSectionHandler\",\n \"StackSectionHandler\",\n # Metadata parsers.\n \"MetadataParser\",\n \"NullMetadataParser\",\n \"SimpleDescriptionParser\",\n \"KeyValueMetadataParser\",\n]\n\n\n# TODO: should sections have metadata too?\n# Invariants:\n# - each section completely encloses all subsections recursively\n@dataclass\nclass SectionTreeNode:\n \"\"\"Node representing a section in the sxhkdrc.\n\n Note that only the root section will have `name` be `None`.\n \"\"\"\n\n name: Optional[str]\n start: int\n end: Optional[int]\n children: List[SectionTreeNode]\n keybind_children: List[Keybind]\n\n def __init__(self, name: Optional[str], start: int, end: Optional[int]):\n self.name = name\n self.start = start\n # initially None when created\n self.end = end\n self.children = []\n self.keybind_children = []\n\n def add_child(\n self, name: str, start: int, end: Optional[int]\n ) -> SectionTreeNode:\n \"\"\"Add a subsection with the given name and line range.\"\"\"\n child = SectionTreeNode(name, start, end)\n self.children.append(child)\n return child\n\n def add_keybind(self, keybind: Keybind) -> None:\n \"\"\"Add `keybind` to this section as a direct child.\"\"\"\n if keybind.line is not None:\n assert keybind.line >= self.start\n if self.end is not None:\n assert keybind.line <= self.end\n self.keybind_children.append(keybind)\n\n @staticmethod\n def _get_level_prefix(level: int) -> str:\n return f\"{' ' * (level-1)}└{'─' * (level-1)}\"\n\n @staticmethod\n def _default_keybind_child_callback(keybind: Keybind, level: int) -> None:\n msg = f\"{SectionTreeNode._get_level_prefix(level)}\"\n if \"description\" in keybind.metadata:\n desc = keybind.metadata[\"description\"]\n # TODO: handle exceptions\n expanded = expand_sequences(desc)\n if isinstance(expanded, str):\n assert desc == expanded\n # No permutations, so just print it plainly.\n msg = f\"{msg} {keybind.hotkey.raw!r} (line {keybind.line})\"\n if \"mode\" in keybind.metadata:\n msg = f\"{msg} (mode: {keybind.metadata['mode']})\"\n msg = f\"{msg}: {desc}\"\n print(msg)\n return\n else:\n assert isinstance(expanded, SpanTree)\n # Try to print each permutation 
separately.\n permutations = expanded.generate_permutations()\n if len(permutations) == len(keybind.hotkey.permutations):\n prefix = msg\n for hotkey_perm, desc_perm in zip(\n keybind.hotkey.permutations, permutations\n ):\n msg = f\"{prefix} {str(hotkey_perm)!r} (line {keybind.line})\"\n if \"mode\" in keybind.metadata:\n msg = f\"{msg} (mode: {keybind.metadata['mode']})\"\n msg = f\"{msg}: {desc_perm}\"\n print(msg)\n else:\n if isinstance(keybind.hotkey.raw, str):\n msg = f\"{msg} {keybind.hotkey.raw!r} (line {keybind.line})\"\n else:\n msg = f\"{msg} {' '.join(keybind.hotkey.raw)!r} (line {keybind.line})\"\n if \"mode\" in keybind.metadata:\n msg = f\"{msg} (mode: {keybind.metadata['mode']})\"\n msg = f\"{msg}: {desc}\"\n print(msg)\n return\n\n else:\n if isinstance(keybind.hotkey.raw, str):\n msg = f\"{msg} {keybind.hotkey.raw!r} (line {keybind.line})\"\n else:\n msg = f\"{msg} {' '.join(keybind.hotkey.raw)!r} (line {keybind.line})\"\n if \"mode\" in keybind.metadata:\n msg = f\"{msg} (mode: {keybind.metadata['mode']})\"\n print(msg)\n\n def _print_tree_rec(\n self,\n level: int,\n keybind_child_callback: Callable[[Keybind, int], None],\n ) -> None:\n assert level >= 0\n pos = (self.start, self.end)\n if level == 0:\n print(f\"{self.name} {pos}\")\n else:\n print(f\"{' ' * (level-1)}└{'─' * (level-1)} {self.name} {pos}\")\n # Print this section's keybinds under this node.\n for keybind in self.keybind_children:\n keybind_child_callback(keybind, level + 1)\n # And now the descendants.\n for child in self.children:\n child._print_tree_rec(level + 1, keybind_child_callback)\n\n def print_tree(\n self,\n keybind_child_callback: Optional[\n Callable[[Keybind, int], None]\n ] = None,\n ) -> None:\n \"\"\"Print the section tree rooted at this node, with keybinds.\"\"\"\n if not keybind_child_callback:\n keybind_child_callback = (\n SectionTreeNode._default_keybind_child_callback\n )\n self._print_tree_rec(0, keybind_child_callback)\n\n @classmethod\n def build_root(cls) -> SectionTreeNode:\n \"\"\"Return a new root node.\"\"\"\n return cls(None, 1, None)\n\n\n# recursive and works because:\n# - base case: empty list\n# - few cases above base: child_gaps is a flat list of (node,gap) pairs\ndef _find_enclosing_section_rec(\n node: SectionTreeNode, keybind: Keybind\n) -> List[Tuple[SectionTreeNode, int]]:\n assert node.start is not None, node\n assert node.end is not None, node\n assert keybind.line is not None, keybind\n if not (node.start <= keybind.line <= node.end):\n return []\n gap = abs(keybind.line - node.start) + abs(node.end - keybind.line)\n child_gaps = []\n for child in node.children:\n child_gaps.extend(_find_enclosing_section_rec(child, keybind))\n return [(node, gap)] + child_gaps\n\n\ndef find_enclosing_section(\n node: SectionTreeNode, keybind: Keybind\n) -> Optional[SectionTreeNode]:\n \"\"\"Find smallest enclosing section that is a descendant of `node` for `keybind`.\n\n Assumes that `keybind` has a non-None line number.\n \"\"\"\n assert keybind.line is not None, keybind\n gaps = []\n for child in node.children:\n gaps.extend(_find_enclosing_section_rec(child, keybind))\n # sort by gap size\n gaps.sort(key=lambda x: x[1])\n if gaps:\n # smallest gap -> smallest section that contains the keybind\n section, gap = gaps[0]\n return section\n else:\n return None\n\n\nclass SectionHandler(ABC):\n \"\"\"Abstract base class for managing sxhkdrc sections.\n\n Abstract methods/properties:\n - reset\n - clone_config\n - push\n - root\n - current_section\n \"\"\"\n\n @abstractmethod\n 
def reset(self) -> None:\n \"\"\"Reset the section handler.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def clone_config(self) -> SectionHandler:\n \"\"\"Create an empty instance with the same configuration.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def push(self, text: str, line: int) -> bool:\n \"\"\"Return whether `text` can be parsed as a section header or footer.\n\n This should be used for delimiting sections and metadata comments.\n \"\"\"\n raise NotImplementedError\n\n def push_eof(self, last_line: int) -> None:\n \"\"\"Do clean-up actions after the input ends.\n\n This should be called by overriding subclasses so that the root section\n gets its `end` attribute defined.\n \"\"\"\n self.root.end = last_line\n\n @abstractproperty\n def root(self) -> SectionTreeNode:\n \"\"\"Return the root section.\"\"\"\n raise NotImplementedError\n\n @abstractproperty\n def current_section(self) -> SectionTreeNode:\n \"\"\"Return the current section.\"\"\"\n raise NotImplementedError\n\n\n@dataclass\nclass RootSectionHandler(SectionHandler):\n \"\"\"Handler for a single-section sxhkdrc, where all keybinds are children of the root.\"\"\"\n\n _section: SectionTreeNode\n\n def __init__(self) -> None:\n self.reset()\n\n def reset(self) -> None:\n \"\"\"Reset the section handler.\"\"\"\n self._section = SectionTreeNode.build_root()\n\n def clone_config(self) -> RootSectionHandler:\n \"\"\"Create an empty instance with the same configuration.\"\"\"\n return RootSectionHandler()\n\n def push(self, text: str, line: int) -> bool:\n \"\"\"Reject the input.\n\n No new sections will be defined.\n \"\"\"\n return False\n\n @property\n def root(self) -> SectionTreeNode:\n \"\"\"Return the root section (which is the only node).\"\"\"\n return self._section\n\n @property\n def current_section(self) -> SectionTreeNode:\n \"\"\"Return the current section (which is always the root).\"\"\"\n return self._section\n\n\n@dataclass\nclass SimpleSectionHandler(SectionHandler):\n \"\"\"Handler for sections one level under the root section.\n\n Each new section is a direct child of the root and each such section ends\n when a new section begins or EOF.\n\n Instance variables:\n section_header_re: the pattern to match and parse out section headers.\n sections: the sections of the config in the order they were defined.\n \"\"\"\n\n section_header_re: Pattern[str]\n sections: List[SectionTreeNode]\n _root: SectionTreeNode = field(repr=False)\n\n def __init__(self, section_header_re: Union[str, re.Pattern[str]]):\n \"\"\"Create an instance with a regex for section headers.\n\n `section_header_re` must have a named group 'name'.\n \"\"\"\n if isinstance(section_header_re, re.Pattern):\n self.section_header_re = section_header_re\n else:\n self.section_header_re = re.compile(section_header_re)\n if \"name\" not in self.section_header_re.groupindex:\n raise ValueError(\n \"section header regex must have the named group 'name'\"\n )\n self.reset()\n\n def reset(self) -> None:\n \"\"\"Reset the section handler.\"\"\"\n self._root = SectionTreeNode.build_root()\n self.sections = [self._root]\n\n def clone_config(self) -> SimpleSectionHandler:\n \"\"\"Create an empty instance with the same configuration.\"\"\"\n return SimpleSectionHandler(self.section_header_re)\n\n def push(self, text: str, line: int) -> bool:\n \"\"\"Return whether `text` can be parsed as a section header.\n\n If `section_header_re` matches `text`, it is a section header: a new\n section is added and the previous section ended.\n \"\"\"\n 
m = self.section_header_re.search(text)\n if m:\n # starting a new section ends the previous one\n if self.sections[-1] is not self._root:\n self.sections[-1].end = line - 1\n # self.sections[self._curr_section_name] = (self._curr_section_start, end)\n self.sections.append(\n self._root.add_child(\n m.group(\"name\"),\n line,\n None,\n )\n )\n return True\n else:\n return False\n\n @property\n def root(self) -> SectionTreeNode:\n \"\"\"Return the root section.\"\"\"\n return self._root\n\n @property\n def current_section(self) -> SectionTreeNode:\n \"\"\"Return the latest section.\"\"\"\n return self.sections[-1]\n\n\n@dataclass\nclass StackSectionHandler(SectionHandler):\n \"\"\"Handler for recursive sections and subsections.\n\n This uses a stack to keep track of sections, and requires that inner\n sections be merged before their parent sections.\n\n Instance variables:\n section_header_re: the pattern to match and parse out section headers.\n section_footer_re: the pattern to match section footers.\n \"\"\"\n\n section_header_re: Pattern[str]\n section_footer_re: Pattern[str]\n _section_tree: SectionTreeNode = field(repr=False)\n _section_stack: List[SectionTreeNode] = field(repr=False)\n\n def __init__(\n self,\n section_header_re: Union[str, re.Pattern[str]],\n section_footer_re: Union[str, re.Pattern[str]],\n ):\n \"\"\"Create an instance with regexes for section headers and footers.\n\n `section_header_re` must have a named group 'name'.\n `section_footer_re` doesn't need any named groups.\n \"\"\"\n if isinstance(section_header_re, re.Pattern):\n self.section_header_re = section_header_re\n else:\n self.section_header_re = re.compile(section_header_re)\n if isinstance(section_footer_re, re.Pattern):\n self.section_footer_re = section_footer_re\n else:\n self.section_footer_re = re.compile(section_footer_re)\n if \"name\" not in self.section_header_re.groupindex:\n raise ValueError(\n \"section header regex must have the named group 'name'\"\n )\n self.reset()\n\n def reset(self) -> None:\n \"\"\"Reset the section handler.\"\"\"\n self._section_tree = SectionTreeNode.build_root()\n self._section_stack = [self._section_tree]\n\n def clone_config(self) -> StackSectionHandler:\n \"\"\"Create an empty instance with the same configuration.\"\"\"\n return StackSectionHandler(\n self.section_header_re, self.section_footer_re\n )\n\n def push(self, text: str, line: int) -> bool:\n \"\"\"Return whether `text` can be parsed as a section header or footer.\n\n If `section_header_re` matches `text`, it is a section header: a new\n section is created and added to the stack.\n\n If `section_footer_re` matches `text`, it is a section footer: the\n current section's ending is defined and it is popped from the stack.\n\n NOTE: ending a section before any have been defined raises SectionPushError.\n \"\"\"\n m = self.section_header_re.search(text)\n if m:\n name = m.group(\"name\")\n node = self._section_stack[-1].add_child(name, line, None)\n self._section_stack.append(node)\n return True\n else:\n m = self.section_footer_re.search(text)\n if m:\n # first *must* always be root\n if len(self._section_stack) == 1:\n raise SectionPushError(\n \"Ended a section without opening one first\",\n line=line,\n )\n node = self._section_stack.pop()\n node.end = line\n return True\n else:\n return False\n\n def push_eof(self, last_line: int) -> None:\n \"\"\"Ensure no sections have been left unclosed.\n\n If any *have* been left open, SectionEOFError is raised.\n Otherwise, defines the ending line number for the 
root section.\n \"\"\"\n if len(self._section_stack) > 1:\n raise SectionEOFError(\n f\"Got EOF while reading section '{self._section_stack[-1].name}'\",\n last_line=last_line,\n sections=self._section_stack[1:],\n )\n super().push_eof(last_line)\n\n @property\n def root(self) -> SectionTreeNode:\n \"\"\"Return the root section.\"\"\"\n return self._section_tree\n\n @property\n def current_section(self) -> SectionTreeNode:\n \"\"\"Return the section at the top of the stack.\"\"\"\n return self._section_stack[-1]\n\n\nclass MetadataParser(ABC):\n \"\"\"Abstract base class for parsing metadata comments for a keybind.\n\n Abstract methods:\n - parse\n \"\"\"\n\n @abstractmethod\n def parse(self, lines: Iterable[str], start_line: int) -> Dict[str, Any]:\n \"\"\"Parse metadata from the comments immediately preceding a keybind.\"\"\"\n raise NotImplementedError\n\n\n@dataclass\nclass NullMetadataParser(MetadataParser):\n \"\"\"Parser that always returns an empty mapping when parsing.\"\"\"\n\n def parse(self, lines: Iterable[str], start_line: int) -> Dict[str, Any]:\n \"\"\"Return an empty dict.\"\"\"\n return {}\n\n\n@dataclass\nclass SimpleDescriptionParser(MetadataParser):\n \"\"\"Parser for the description line immediately above a keybind.\n\n Instance variables:\n description_re: the pattern to parse out the description from the comment.\n \"\"\"\n\n description_re: Pattern[str]\n\n def __init__(self, description_re: str):\n \"\"\"Create an instance with a regex for description lines.\n\n `description_re` must have a named group 'description'.\n \"\"\"\n self.description_re = re.compile(description_re)\n if \"description\" not in self.description_re.groupindex:\n raise ValueError(\n \"description regex must have the named group 'description'\"\n )\n\n def parse(self, lines: Iterable[str], start_line: int) -> Dict[str, Any]:\n \"\"\"Parse a description from the last element of `lines`.\n\n If the regex match with `description_re` succeeds, a dict with the\n key-value pair \"description\" and the 'description' match group is\n returned. 
Otherwise, an empty dict is returned instead.\n \"\"\"\n comments = list(lines)\n assert len(comments) > 0\n maybe_description = comments[-1]\n m = self.description_re.search(maybe_description)\n if m:\n return {\"description\": m.group(\"description\")}\n else:\n return {}\n\n\n@dataclass\nclass KeyValueMetadataParser(MetadataParser):\n \"\"\"Parser for key-value pairs in metadata.\n\n Instance variables:\n pair_re: the pattern to parse out key-value pairs.\n empty_re: the pattern to match empty metadata lines.\n \"\"\"\n\n pair_re: Pattern[str]\n empty_re: Pattern[str] # part of description but no pair\n\n def __init__(self, pair_re: str, empty_re: str):\n \"\"\"Create an instance with regexes for pairs and empty metadata lines.\n\n `pair_re` must have the named groups 'key' and 'value'.\n `empty_re` doesn't need any named groups.\n \"\"\"\n self.pair_re = re.compile(pair_re)\n if (\n \"key\" not in self.pair_re.groupindex\n or \"value\" not in self.pair_re.groupindex\n ):\n raise ValueError(\n \"pair regex must have named groups 'key' and 'value'\"\n )\n self.empty_re = re.compile(empty_re)\n\n def parse(self, lines: Iterable[str], start_line: int) -> Dict[str, Any]:\n \"\"\"Parse metadata comments for key-value pairs.\n\n They are parsed in the reverse order of `lines` so that the contiguous\n block of metadata comments immediately preceding a keybind isn't\n interrupted by non-metadata comments.\n\n `empty_re` is used to match comments that are part of that contiguous\n block, but don't define key-value pairs. Useful for \"blank\" lines\n between key-value pairs or for comments about them in a separate line.\n\n NOTE: duplicate keys cause MetadataParserError to be raised.\n \"\"\"\n comments = list(lines)\n # Each dict value stores the line number of the pair that is latest in\n # the file, so that errors on duplicates show the actual duplicate\n # rather than the first occurrence.\n metadata: Dict[str, Tuple[int, Any]] = {}\n i = len(comments) - 1\n while i >= 0:\n comment = comments[i]\n m = self.pair_re.search(comment)\n if m:\n key = m.group(\"key\")\n value = m.group(\"value\")\n if key in metadata:\n dup_line, _ = metadata[key]\n raise MetadataParserError(\n f\"Duplicate key '{key}'\",\n key=key,\n value=value,\n line=dup_line,\n )\n metadata[key] = (start_line + i, value)\n i -= 1\n elif self.empty_re.search(comment):\n # metadata continues\n i -= 1\n continue\n else:\n break\n return {key: val for key, (_, val) in metadata.items()}\n","repo_name":"Hoboneer/sxhkd-parser","sub_path":"sxhkd_parser/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":21963,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"4954598837","text":"from datetime import datetime, timedelta\nfrom typing import TypeVar, Callable, Generic\n\nfrom pycontrolflow.flow_value import wrap_input\nfrom pycontrolflow.nodes.FlowSingleOutputNode import FlowSingleOutputNode\nfrom pycontrolflow.type_utils import is_same_type\nfrom pycontrolflow.types import TNodeInput, TComparable\n\nTValue = TypeVar(\"TValue\")\n\n\nclass Comparer(FlowSingleOutputNode[bool], Generic[TValue]):\n def __init__(self,\n input1: TNodeInput[TValue],\n input2: TNodeInput[TValue],\n invert: bool,\n op: Callable[[TValue, TValue], bool]) -> None:\n input1_wrap = wrap_input(input1)\n input2_wrap = wrap_input(input2)\n\n super().__init__([input1_wrap, input2_wrap])\n\n input1_type = input1_wrap.get_type()\n input2_type = input2_wrap.get_type()\n\n if not 
is_same_type(input1_type, input2_type):\n raise TypeError(f\"Comparer types mismatch, got: {input1_type} and {input2_type}\")\n\n self._input1 = input1_wrap\n self._input2 = input2_wrap\n self._invert = invert\n self._op = op\n\n def process(self, cur_date: datetime, delta: timedelta) -> None:\n super().process(cur_date, delta)\n value1 = self._input1.get_notnull()\n value2 = self._input2.get_notnull()\n\n state = self._op(value1, value2)\n\n if self._invert:\n state = not state\n\n self.set_output(state)\n\n\nclass CompareGreaterThan(Comparer[TComparable], Generic[TComparable]):\n def __init__(self, input1: TNodeInput[TComparable], input2: TNodeInput[TComparable], invert: bool = False) -> None:\n super().__init__(input1, input2, invert, lambda a, b: a > b)\n\n\nclass CompareGreaterEqualTo(Comparer[TComparable], Generic[TComparable]):\n def __init__(self, input1: TNodeInput[TComparable], input2: TNodeInput[TComparable], invert: bool = False) -> None:\n super().__init__(input1, input2, invert, lambda a, b: not a < b)\n\n\nclass CompareLessThan(Comparer[TComparable], Generic[TComparable]):\n def __init__(self, input1: TNodeInput[TComparable], input2: TNodeInput[TComparable], invert: bool = False) -> None:\n super().__init__(input1, input2, invert, lambda a, b: a < b)\n\n\nclass CompareLessEqualTo(Comparer[TComparable], Generic[TComparable]):\n def __init__(self, input1: TNodeInput[TComparable], input2: TNodeInput[TComparable], invert: bool = False) -> None:\n super().__init__(input1, input2, invert, lambda a, b: not a > b)\n","repo_name":"KrystianD/pycontrolflow","sub_path":"pycontrolflow/nodes/comparators/Compare.py","file_name":"Compare.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19471194663","text":"\"\"\"\r\nScreen Resolution: 1920 x 1080\r\nBrowser: Chrome Maximized. Toolbars enabled. 
Down arrow 5 times.\r\nName: Ryan Kardas\r\nGame: https://www.miniclip.com/games/sushi-go-round/en/\r\n\r\nPLAY AREA:\r\nX1: 341\r\nY1: 216\r\n\r\nX2: 1140\r\nY2: 815\r\n\"\"\"\r\nfrom PIL import ImageGrab\r\nimport os\r\nimport time\r\nimport win32api, win32con\r\nfrom PIL import ImageOps\r\nfrom numpy import *\r\n\r\n# Globals\r\n# ---------------------\r\nx_pad = 340 # X1 - 1\r\ny_pad = 215 # Y1 - 1\r\n\r\nclass Cord:\r\n # 'f_' prefix means it's referring to the food locations.\r\n\r\n f_shrimp = (43, 410)\r\n f_rice = (122, 413)\r\n f_nori = (37, 492)\r\n f_roe = (106, 479)\r\n f_salmon = (40, 564)\r\n f_unagi = (132, 548)\r\n\r\n # 't_' prefix means it's referring to the phone menu locations.\r\n\r\n phone = (716, 452)\r\n\r\n menu_toppings = (683, 339)\r\n\r\n t_shrimp = (655, 270) # Greyed out: (127, 102, 90)\r\n t_nori = 649, 330 # Greyed out: (51, 127, 70)\r\n t_roe = 694, 340 # Greyed out: (225, 181, 105)\r\n t_salmon = (583, 411) # Greyed out: (127, 71, 47)\r\n t_unagi =(684, 270) # Greyed out: (94, 49, 8)\r\n t_exit = (742, 412)\r\n\r\n menu_rice = (698, 364)\r\n buy_rice = 673, 346 # Greyed out: (127, 127, 127)\r\n\r\n delivery_norm = (614, 364)\r\n\r\n\r\nclass Blank:\r\n seat_1 = 2978\r\n seat_2 = 3220\r\n seat_3 = 5403\r\n seat_4 = 4898\r\n seat_5 = 5689\r\n seat_6 = 4580\r\n\r\n\r\n# Dictionary used to store how many ingredients are available\r\nfoodOnHand = {'shrimp': 5,\r\n 'rice': 10,\r\n 'nori': 10,\r\n 'roe': 10,\r\n 'salmon': 5,\r\n 'unagi': 5}\r\n\r\n# Dictionary used to store possible outcomes of each food\r\nsushiTypes = {\r\n 2852: 'onigiri',\r\n 2916: 'onigiri',\r\n 2980: 'onigiri',\r\n 3516: 'caliroll',\r\n 3389: 'caliroll',\r\n 3050: 'gunkan',\r\n 2796: 'gunkan',\r\n 2923: 'gunkan',\r\n}\r\n\r\n\r\ndef screenGrab():\r\n box = (x_pad + 1, y_pad + 1, x_pad + 799, y_pad + 599)\r\n im = ImageGrab.grab(box)\r\n\r\n ##im.save(os.getcwd() + '\\\\full_snap__' + str(int(time.time())) + '.png', 'PNG')\r\n return im\r\n\r\n\r\ndef grab():\r\n box = (x_pad + 1, y_pad + 1, x_pad + 799, y_pad + 599)\r\n im = ImageOps.grayscale(ImageGrab.grab(box))\r\n a = array(im.getcolors())\r\n a = a.sum()\r\n print(a)\r\n return a\r\n\r\n\r\ndef leftClick():\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\r\n time.sleep(.1)\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\r\n print(\"Click.\") # Debugging\r\n\r\n\r\ndef leftDown():\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\r\n time.sleep(.1)\r\n print(\"Left Down\")\r\n\r\n\r\ndef leftUp():\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\r\n time.sleep(.1)\r\n print(\"Left Up\")\r\n\r\n\r\ndef startGame():\r\n # location of first menu (Play Button)\r\n mousePos((458, 254))\r\n leftClick()\r\n time.sleep(.1)\r\n\r\n # location of second menu\r\n mousePos((380, 483))\r\n leftClick()\r\n time.sleep(.1)\r\n\r\n # location of third menu\r\n mousePos((735, 565))\r\n leftClick()\r\n time.sleep(.1)\r\n\r\n # location of fourth menu\r\n mousePos((479, 458))\r\n leftClick()\r\n time.sleep(.1)\r\n\r\n\r\ndef mousePos(cord):\r\n win32api.SetCursorPos((x_pad + cord[0], y_pad + cord[1]))\r\n\r\n\r\ndef get_cords():\r\n x, y = win32api.GetCursorPos()\r\n x = x - x_pad\r\n y = y - y_pad\r\n print(x, y)\r\n\r\n\r\ndef get_seat_one():\r\n box = (34 + x_pad, 68 + y_pad, 108 + x_pad, 88 + y_pad)\r\n im = ImageOps.grayscale(ImageGrab.grab(box))\r\n a = array(im.getcolors())\r\n a = a.sum()\r\n print(\"Seat 1\")\r\n print(a)\r\n im.save(os.getcwd() + '\\\\seat_one__' + str(int(time.time())) + '.png', 
'PNG')\r\n return a\r\n\r\n\r\ndef get_seat_two():\r\n box = (160 + x_pad, 68 + y_pad, 234 + x_pad, 88 + y_pad)\r\n im = ImageOps.grayscale(ImageGrab.grab(box))\r\n a = array(im.getcolors())\r\n a = a.sum()\r\n print(\"Seat 2\")\r\n print(a)\r\n im.save(os.getcwd() + '\\\\seat_two__' + str(int(time.time())) + '.png', 'PNG')\r\n return a\r\n\r\n\r\ndef get_seat_three():\r\n box = (286 + x_pad, 68 + y_pad, 360 + x_pad, 88 + y_pad)\r\n im = ImageOps.grayscale(ImageGrab.grab(box))\r\n a = array(im.getcolors())\r\n a = a.sum()\r\n print(\"Seat 3\")\r\n print(a)\r\n im.save(os.getcwd() + '\\\\seat_three__' + str(int(time.time())) + '.png', 'PNG')\r\n return a\r\n\r\n\r\ndef get_seat_four():\r\n box = (413 + x_pad, 68 + y_pad, 487 + x_pad, 88 + y_pad)\r\n im = ImageOps.grayscale(ImageGrab.grab(box))\r\n a = array(im.getcolors())\r\n a = a.sum()\r\n print(\"Seat 4\")\r\n print(a)\r\n im.save(os.getcwd() + '\\\\seat_four__' + str(int(time.time())) + '.png', 'PNG')\r\n return a\r\n\r\n\r\ndef get_seat_five():\r\n box = (539 + x_pad, 68 + y_pad, 613 + x_pad, 88 + y_pad)\r\n im = ImageOps.grayscale(ImageGrab.grab(box))\r\n a = array(im.getcolors())\r\n a = a.sum()\r\n print(\"Seat 5\")\r\n print(a)\r\n im.save(os.getcwd() + '\\\\seat_five__' + str(int(time.time())) + '.png', 'PNG')\r\n return a\r\n\r\n\r\ndef get_seat_six():\r\n box = (665 + x_pad, 68 + y_pad, 739 + x_pad, 88 + y_pad)\r\n im = ImageOps.grayscale(ImageGrab.grab(box))\r\n a = array(im.getcolors())\r\n a = a.sum()\r\n print(\"Seat 6\")\r\n print(a)\r\n im.save(os.getcwd() + '\\\\seat_six__' + str(int(time.time())) + '.png', 'PNG')\r\n return a\r\n\r\n\r\ndef get_all_seats():\r\n get_seat_one()\r\n get_seat_two()\r\n get_seat_three()\r\n get_seat_four()\r\n get_seat_five()\r\n get_seat_six()\r\n\r\n\r\ndef clear_tables():\r\n mousePos((112, 255))\r\n leftClick()\r\n\r\n mousePos((244, 255))\r\n leftClick()\r\n\r\n mousePos((353, 255))\r\n leftClick()\r\n\r\n mousePos((496, 255))\r\n leftClick()\r\n\r\n mousePos((612, 255))\r\n leftClick()\r\n\r\n mousePos((750, 255))\r\n leftClick()\r\n time.sleep(1)\r\n \"\"\"\r\n Plate cords:\r\n\r\n 112, 255\r\n 244, 255\r\n 353, 255\r\n 496, 255\r\n 612, 255\r\n 750, 255\r\n \"\"\"\r\n\r\n\r\ndef foldMat():\r\n mousePos((250, 474))\r\n leftClick()\r\n time.sleep(.1)\r\n\r\n\r\ndef makeFood(food):\r\n if food == 'caliroll':\r\n print(\"Making a caliroll\")\r\n foodOnHand['rice'] -= 1 # Subtracts 1 from amount of Rice\r\n foodOnHand['nori'] -= 1 # Subtracts 1 from amount of Nori\r\n foodOnHand['roe'] -= 1 # Subtracts 1 from amount of Roe\r\n mousePos(Cord.f_rice) # Get Rice\r\n leftClick()\r\n time.sleep(.1)\r\n mousePos(Cord.f_nori) # Get Nori\r\n leftClick()\r\n time.sleep(.1)\r\n mousePos(Cord.f_roe) # Get Roe\r\n leftClick()\r\n time.sleep(.1)\r\n foldMat() # Fold Mat\r\n time.sleep(1.5)\r\n\r\n elif food == 'onigiri':\r\n print(\"Making an Onigiri\")\r\n foodOnHand['rice'] -= 2 # Subtracts 2 from amount of Rice\r\n foodOnHand['nori'] -= 1 # Subtracts 1 from amount of Nori\r\n mousePos(Cord.f_rice) # Get Rice\r\n leftClick()\r\n time.sleep(.5)\r\n mousePos(Cord.f_rice) # Get Rice\r\n leftClick()\r\n time.sleep(.1)\r\n mousePos(Cord.f_nori) # Get Nori\r\n leftClick()\r\n time.sleep(.1)\r\n foldMat() # Fold Mat\r\n time.sleep(1.5)\r\n\r\n elif food == 'gunkan':\r\n print(\"Making a Gunkan\")\r\n foodOnHand['rice'] -= 1 # Subtracts 1 from amount of Rice\r\n print(foodOnHand['rice'])\r\n foodOnHand['nori'] -= 1 # Subtracts 1 from amount of Nori\r\n foodOnHand['roe'] -= 2 # Subtracts 2 from amount of 
Roe\r\n mousePos(Cord.f_rice) # Get Rice\r\n leftClick()\r\n time.sleep(.05)\r\n mousePos(Cord.f_nori) # Get Nori\r\n leftClick()\r\n time.sleep(.05)\r\n mousePos(Cord.f_roe) # Get Roe\r\n leftClick()\r\n time.sleep(.5)\r\n mousePos(Cord.f_roe) # Get Roe\r\n leftClick()\r\n time.sleep(.1)\r\n foldMat() # Fold Mat\r\n time.sleep(1.5)\r\n\r\n\r\ndef buyFood(food):\r\n print(\"buying food\")\r\n\r\n if food == 'rice':\r\n mousePos(Cord.phone) # Open Phone\r\n time.sleep(.1)\r\n leftClick()\r\n mousePos(Cord.menu_rice) # Open Menu Toppings\r\n time.sleep(.05)\r\n leftClick()\r\n s = screenGrab()\r\n if s.getpixel(Cord.buy_rice) != (127, 127, 127):\r\n print(\"Rice is available\")\r\n mousePos(Cord.buy_rice)\r\n time.sleep(.1)\r\n leftClick()\r\n mousePos(Cord.delivery_norm)\r\n foodOnHand['rice'] += 10 # Add 10 to amount of Rice\r\n time.sleep(.1)\r\n leftClick()\r\n time.sleep(2.5)\r\n else:\r\n print(\"Rice is NOT available\")\r\n mousePos(Cord.t_exit)\r\n leftClick()\r\n time.sleep(1)\r\n buyFood(food)\r\n\r\n\r\n if food == 'nori':\r\n mousePos(Cord.phone) # Open Phone\r\n time.sleep(.1)\r\n leftClick()\r\n mousePos(Cord.menu_toppings) # Open Menu Toppings\r\n time.sleep(.05)\r\n leftClick()\r\n s = screenGrab()\r\n print(\"Test for Nori\")\r\n time.sleep(.1)\r\n if s.getpixel(Cord.t_nori) != (109, 123, 127):\r\n print(\"Nori is available\")\r\n mousePos(Cord.t_nori) # Get Nori\r\n time.sleep(.1)\r\n leftClick()\r\n mousePos(Cord.delivery_norm) # Normal Delivery\r\n foodOnHand['nori'] += 10 # Add 10 to amount of Nori\r\n time.sleep(.1)\r\n leftClick()\r\n time.sleep(2.5)\r\n else:\r\n print(\"Nori is not available\")\r\n mousePos(Cord.t_exit)\r\n leftClick()\r\n time.sleep(1)\r\n buyFood(food)\r\n\r\n if food == 'roe':\r\n mousePos(Cord.phone) # Open Phone\r\n time.sleep(.1)\r\n leftClick()\r\n mousePos(Cord.menu_toppings) # Open Menu Toppings\r\n time.sleep(.05)\r\n leftClick()\r\n s = screenGrab()\r\n\r\n time.sleep(.1)\r\n if s.getpixel(Cord.t_roe) != (109, 123, 127):\r\n print(\"Roe is available\")\r\n mousePos(Cord.t_roe) # Get Roe\r\n time.sleep(.1)\r\n leftClick()\r\n mousePos(Cord.delivery_norm) # Normal Delivery\r\n foodOnHand['roe'] += 10\r\n time.sleep(.1)\r\n leftClick()\r\n time.sleep(2.5)\r\n\r\n else:\r\n print(\"Roe is NOT available\")\r\n mousePos(Cord.t_exit)\r\n leftClick()\r\n time.sleep(1)\r\n buyFood(food)\r\n\r\n\r\ndef checkFood():\r\n print(\"Checking Food\")\r\n for i, j in foodOnHand.items():\r\n if i == 'nori' or i == 'rice' or i == 'roe':\r\n if j <= 4:\r\n print(\"%s is low and needs to be replenished\" % i)\r\n buyFood(i)\r\n\r\n\r\ndef check_bubs():\r\n\r\n checkFood()\r\n s1 = get_seat_one() # Seat 1\r\n if s1 != Blank.seat_1:\r\n if s1 in sushiTypes:\r\n print(\"Table 1 is occupied and needs %s\" % sushiTypes[s1])\r\n makeFood(sushiTypes[s1])\r\n else:\r\n print(\"Sushi not found!\\n SushiType = %i\" % s1)\r\n else:\r\n print(\"Table 1 unoccupied\")\r\n\r\n print(\"Clearing tables\")\r\n clear_tables()\r\n checkFood()\r\n s2 = get_seat_two() # Seat 2\r\n if s2 != Blank.seat_2:\r\n if s2 in sushiTypes:\r\n print(\"Table 2 is occupied and needs %s\" % sushiTypes[s2])\r\n makeFood(sushiTypes[s2])\r\n else:\r\n print(\"Sushi not found!\\n SushiType = %i\" % s2)\r\n\r\n else:\r\n print(\"Table 2 unoccupied\")\r\n\r\n checkFood()\r\n s3 = get_seat_three() # Seat 3\r\n if s3 != Blank.seat_3:\r\n if s3 in sushiTypes:\r\n print(\"Table 3 is occupied and needs %s\" % sushiTypes[s3])\r\n makeFood(sushiTypes[s3])\r\n else:\r\n print(\"Sushi not found!\\n SushiType = 
%i\" % s3)\r\n\r\n else:\r\n print(\"Table 3 unoccupied\")\r\n\r\n checkFood()\r\n s4 = get_seat_four() # Seat 4\r\n if s4 != Blank.seat_4:\r\n if s4 in sushiTypes:\r\n print(\"Table 4 is occupied and needs %s\" % sushiTypes[s4])\r\n makeFood(sushiTypes[s4])\r\n else:\r\n print(\"Sushi not found!\\n SushiType = %i\" % s4)\r\n\r\n else:\r\n print(\"Table 4 unoccupied\")\r\n\r\n print(\"Clearing Tables\")\r\n clear_tables()\r\n checkFood()\r\n s5 = get_seat_five()\r\n if s5 != Blank.seat_5:\r\n if s5 in sushiTypes:\r\n print(\"Table 5 is occupied and needs %s\" % sushiTypes[s5])\r\n makeFood(sushiTypes[s5])\r\n else:\r\n print(\"Sushi not found!\\n SushiType = %i\" % s5)\r\n\r\n else:\r\n print(\"Table 5 unoccupied\")\r\n\r\n checkFood()\r\n s6 = get_seat_six() # Seat 6\r\n if s6 != Blank.seat_6:\r\n if s6 in sushiTypes:\r\n print(\"Table 6 is occupied and needs %s\" % sushiTypes[s6])\r\n makeFood(sushiTypes[s6])\r\n else:\r\n print(\"Sushi not found!\\n SushiType = %i\" % s6)\r\n\r\n else:\r\n print(\"Table 6 unoccupied\")\r\n\r\n print(\"Clearing Tables\")\r\n clear_tables()\r\n print(\"Sleeping\")\r\n time.sleep(10)\r\n\r\n\r\ndef main():\r\n startGame()\r\n time.sleep(10)\r\n while True:\r\n check_bubs()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"KardasR/SushiBot","sub_path":"SushiBot/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":13232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25594129620","text":"import unittest\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom src.Map2D import Map2D\nfrom src.Axis import Axis\n\nclass Map2DTest(unittest.TestCase):\n def setUp(self):\n print(\"setUp\")\n self.xy_resolution = 100\n self.angle_resolution = 500\n self.map2D = Map2D(self.xy_resolution, self.angle_resolution, 3)\n\n def test_init(self):\n print(\"test_init\")\n\n fig = plt.figure(figsize=(10, 3))\n ax = fig.subplots(1, 1)\n\n im = ax.imshow(np.sum(self.map2D._filter, axis=0), cmap='gray')\n ax.set_axis_off()\n fig.colorbar(im)\n plt.show()\n\n def test_add_data(self):\n print(\"test_add_data\")\n\n for i in range(10):\n print(i)\n angle = np.pi / 3\n self.map2D.add_data([0, 0], 0, 0.5)\n self.map2D.add_data([0, 0], angle, 0.5)\n\n for i, data in enumerate(self.map2D.data):\n if i % 100 == 0:\n angle = i / self.angle_resolution * 2 * np.pi\n\n fig = plt.figure(figsize=(10, 3))\n ax1, ax2 = fig.subplots(1, 2)\n\n im = ax1.imshow(data, cmap='gray')\n fig.colorbar(im)\n ax1.set_axis_off()\n\n ax2.imshow(self.map2D._filter[i], cmap='gray')\n plt.title(\"{}\".format(angle / np.pi * 180))\n plt.show()\n\n print('likelihood, ad_filter = self.map2D.get_Likelihood_function([0, 0])')\n likelihood, likelihood_polor, ad_filter = self.map2D.get_Likelihood_function([0, 0])\n\n fig = plt.figure(figsize=(10, 3))\n ax1, ax2 = fig.subplots(1, 2)\n\n print(likelihood)\n\n im = ax1.imshow(likelihood, cmap='gray')\n fig.colorbar(im)\n\n ax2.imshow(likelihood_polor, cmap='gray')\n plt.show()\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"RyosukeMatsushima/LidarMapCloud","sub_path":"tests/TestMap2D.py","file_name":"TestMap2D.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38868383950","text":"import xlrd\nfrom flask import Flask, request, url_for, redirect\nfrom flask import render_template\nfrom flask import jsonify\n\napp = Flask(__name__)\n\ndef calc_info(player):\n 
laps = 59\n xl = xlrd.open_workbook(r'times.xlsx')\n table = xl.sheets()[0]\n start = (player - 1) * laps + 1\n end = start + laps\n col1 = table.col_values(1, start, end)\n col2 = [round(i, 3) for i in table.col_values(4, start, end)]\n xydata = {\"xData\": col1, \"yData\": col2}\n\n movies = []\n for i in range(laps):\n name = \"rank\" + str(player) + \"_\" + str(i + 1)\n ball = {'file': name + '.mp4', 'title': \"第\" + str(i + 1) + \"圈:\\t\" + str(col2[i]) + \"s\"}\n movies.append(ball)\n\n xl = xlrd.open_workbook(r'information.xls')\n table = xl.sheets()[0]\n names = table.col_values(1)\n genders = table.col_values(2)\n fiscodes = table.col_values(3)\n ages = table.col_values(5)\n nations = table.col_values(6)\n pictures = table.col_values(7)\n info = {'name': names[player], 'gender': str(genders[player]), 'fiscode': int(fiscodes[player]),\n 'age': int(ages[player]), 'nation': str(nations[player]),\n 'picture': str(pictures[player]), \"rank\": int(player - 1)}\n return movies, xydata, info\n\n@app.route('/', methods=['GET', 'POST'])\ndef movie_list():\n movies, xydata, info = calc_info(1)\n return render_template(\"index.html\", movies=movies, xydata=xydata, info=info)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef player(player):\n movies, xydata, info = calc_info(player)\n return render_template(\"index.html\", movies=movies, xydata=xydata, info=info)\n\n\n\n\nif __name__=='__main__':\n app.run()","repo_name":"LadySundada/alpineski","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33615091017","text":"\ntable = [\n\t[\"PATH OUT\", \"\", \"\", \"\"],\n\t[\"DEPTH\", 0, \"DATA AUGMENTATION\", 1],\n\t[\"COORDCONV\", 0, \"OPTICAL FLOW\", 1],\n\t[\"BATCH SIZE\", 0, \"LEARNING RATE\", 1],\n\t[\"# EPOCHS\", 0, \"TRAIN. PARAM.\", 1],\n]\ndef print_table(table):\n\tlongest_cols = [(max([len(str(row[i])) for row in table[1:3]]) + 3) for i in range(len(table[0]))]\n\trow_format = \"\".join([\"{:>\" + str(longest_col) + \"}\" for longest_col in longest_cols])\n\tfor row in table:\n\t\tprint(row_format.format(*row))\n\n\ndef param_print(params):\n\tfor i,(p,q) in enumerate(zip(params,[1,3]*5)):\n\t\ttable[int(i/2)][q] = p\n\tprint_table(table)\n","repo_name":"juanjo3ns/SalBCE","sub_path":"src/utils/printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"34259122748","text":"from . 
import defaults\n\n\ndef inject_settings(pelican):\n '''Inject default settings'''\n for key, value in defaults.__dict__.items():\n if not key.startswith('_'):\n pelican.settings.setdefault(key, value)\n\n\ndef generate_drafts_page(generator, writer):\n '''Generate the drafts page'''\n if not generator.settings['SHOW_DRAFTS']:\n return\n filename = generator.settings['DRAFTS_SAVE_AS']\n template = generator.get_template('drafts')\n writer.write_file(filename, template, generator.context, is_drafts=True)\n","repo_name":"noirbizarre/pelican-drafts","sub_path":"drafts/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"34640336006","text":"# https://leetcode.com/problems/monotonic-array/\nfrom typing import List\n\n\nclass Solution2:\n def isMonotonic(self, A: List[int]) -> bool:\n if not A or len(A) < 3:\n return True\n\n length = len(A)\n count = 0\n for i in range(length - 1):\n if count > 0:\n if A[i + 1] < A[i]:\n return False\n elif count < 0:\n if A[i + 1] > A[i]:\n return False\n else:\n count = A[i + 1] - A[i]\n\n return True\n\n\nclass Solution:\n def isMonotonic(self, A: List[int]) -> bool:\n if not A or len(A) < 3:\n return True\n\n UP = 1\n DOWN = -1\n EQUAL = 0\n CHAOS = -2\n\n def _monotonic(nums: List[int]) -> int:\n if len(nums) == 2:\n if nums[0] < nums[1]:\n return UP\n if nums[0] > nums[1]:\n return DOWN\n return EQUAL\n\n tail_status = _monotonic(nums[1:])\n if tail_status == CHAOS:\n return CHAOS\n\n if tail_status == EQUAL:\n return _monotonic(nums[:2])\n\n if tail_status == UP:\n return CHAOS if nums[0] > nums[1] else UP\n\n if tail_status == DOWN:\n return CHAOS if nums[0] < nums[1] else DOWN\n\n return _monotonic(A) != CHAOS\n\n\ndef test_case(nums: List[int]) -> bool:\n s = Solution2()\n return s.isMonotonic(nums)\n","repo_name":"ZhangYet/vanguard","sub_path":"myrtle/befor0225/monotonic_array.py","file_name":"monotonic_array.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34268551100","text":"\"\"\"empty message\n\nRevision ID: ef808551631c\nRevises: d28e51d247e5\nCreate Date: 2020-07-12 15:06:09.584984\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ef808551631c'\ndown_revision = 'd28e51d247e5'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('DepositRequests', sa.Column('status', sa.String(), nullable=True))\n op.add_column('DepositRequests', sa.Column('userID', sa.Integer(), nullable=True))\n op.drop_column('DepositRequests', 'email')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('DepositRequests', sa.Column('email', sa.VARCHAR(), nullable=True))\n op.drop_column('DepositRequests', 'userID')\n op.drop_column('DepositRequests', 'status')\n # ### end Alembic commands ###\n","repo_name":"MohmedH/OpulentGroupDashboard","sub_path":"migrations/versions/ef808551631c_.py","file_name":"ef808551631c_.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73991955946","text":"from dataclasses import dataclass\n# 'Source': 'id+time',\ntypeTrans = {'Plain': 'text', 'At': 'target', 'Image': 'url', 'Face': 'faceId', 'File': 'id+name+size'}\n\n@dataclass\nclass MessageRecv:\n msgType: str\n msg: str\n \n@dataclass\nclass GroupMessageRecvList:\n groupId: str\n groupName: str\n senderId: str\n senderName: str\n messageChain: list\n groupType: str = 'GroupMessage'\n # messageChain: list[GroupMessageRecv]\n\n@dataclass\nclass FriendMessageRecvList:\n senderId: str\n senderName: str\n senderRemark: str\n messageChain: list\n groupType: str = 'FriendMessage'\n\n# @dataclass\n# class GroupMessageSendList:\n# groupId: str\n# groupName: str\n# msgType: str\n# msg: str\n\n# @dataclass\n# class DOSendGroupAt:\n","repo_name":"leeq1n/duduGroup","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72747147946","text":"animals = [camel, lion, deer, goose, bat, rabbit]\n\n# write your code here\nwhile True:\n selection = input(\"Please enter the number of the habitat you would like to view: \")\n if selection == \"exit\":\n break\n else:\n print(animals[int(selection)])\n print(\"See you later!\")\n","repo_name":"zeus034/zookeeper","sub_path":"zookeeper.py","file_name":"zookeeper.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4615736915","text":"import datetime\nimport re\nfrom django.db import models\n\ndt = datetime.datetime\nSCAN_FORMAT = '[0-9]{3}:[0-9]{8}'\n\nclass PersonType(models.Model):\n STAFF = 'STA'\n STUDENT = 'STU'\n FACULTY = 'FAC'\n INSTRUCTOR = 'INS'\n GUEST = 'GST'\n CONSULTANT = \"CNS\"\n AFFILIATIONS = (\n (STAFF, 'Staff'),\n (FACULTY, 'Faculty'),\n (INSTRUCTOR, 'Instructor'),\n (STUDENT, 'Student'),\n (GUEST, 'Guest'),\n (CONSULTANT, 'Consultant')\n )\n STR_CHOICES = {key: value for (key, value) in AFFILIATIONS}\n name = models.CharField(max_length=3,choices=AFFILIATIONS, unique=True)\n\n def __str__(self):\n return self.STR_CHOICES[self.name]\n\nclass Person(models.Model):\n class Meta:\n verbose_name_plural = 'People'\n first_name = models.CharField(max_length=25, blank=True)\n last_name = models.CharField(max_length=25, blank=True)\n affiliation_type = models.ManyToManyField(PersonType, blank=True)\n email = models.CharField(max_length=50, blank=True)\n department = models.CharField(max_length=200, blank=True)\n add_to_mailing_list = models.CharField(max_length=1, blank=True, choices=(('N', 'No'),\n ('Y', 'Yes'),\n ('A', 'Added')))\n rfid = models.CharField(max_length=12, blank=True)\n join_date = models.DateTimeField('date joined', default=dt.now)\n\n def get_name(self):\n return '%s %s' % (self.first_name, self.last_name)\n\n def get_events(self):\n return Event.objects.filter(checkin__visitor=self)\n\n def get_checkins(self):\n return SignIn.objects.filter(visitor=self)\n\n def get_visit_count(self):\n 
return len(SignIn.objects.filter(person=self))\n\n    def get_affiliations(self):\n        return [a.name for a in self.affiliation_type.all()]\n\n    get_affiliations.short_description = \"Affiliations\"\n\n    def is_valid(self):\n        #there must be at least an rfid or an email\n        # rfid and email not already be in the database\n        # email, rfid must be valid\n        if self.rfid:\n            rfid_matcher = re.compile(SCAN_FORMAT)\n            if re.search(rfid_matcher, self.rfid):\n                return True\n            else:\n                return False\n        elif self.email:\n            return True\n        else:\n            return False\n\n    def __str__(self):\n        return self.get_name()\n\nclass EventType(models.Model):\n    DIAGLOGUE = 'DIA'\n    WORKSHOP = 'WKS'\n    MEETUP = 'MUP'\n    SYMPOSIUM = 'SYM'\n    COLLOQUIUM = 'COL'\n    MEETING = 'MET'\n    FACULTY_MEETING = 'FME'\n    WEBINAR = 'WEB'\n    EVENT_TYPES = (\n        (DIAGLOGUE, 'Dialogue'),\n        (WORKSHOP, 'Workshop'),\n        (MEETUP, 'Meetup'),\n        (SYMPOSIUM, 'Symposium'),\n        (COLLOQUIUM, 'Colloquium'),\n        (MEETING, 'Meeting'),\n        (FACULTY_MEETING, 'Faculty Meeting'),\n        (WEBINAR, 'Webinar')\n    )\n    STR_CHOICES = {key: value for (key, value) in EVENT_TYPES}\n    name = models.CharField(max_length=3,choices=EVENT_TYPES, unique=True)\n\n    def __str__(self):\n        return self.STR_CHOICES[self.name]\n\nclass EventAudienceType(models.Model):\n    STAFF = 'STA'\n    FACULTY = 'FAC'\n    GRADUATE_STUDENTS = 'GRA'\n    AUDIENCE_TYPES = (\n        (STAFF, 'Academic Support Staff'),\n        (FACULTY, 'Faculty'),\n        (GRADUATE_STUDENTS, 'Graduate Students')\n    )\n    STR_CHOICES = {key: value for (key, value) in AUDIENCE_TYPES}\n    name = models.CharField(max_length=20,choices=AUDIENCE_TYPES, unique=True)\n\n    def __str__(self):\n        return self.STR_CHOICES[self.name]\n\nclass Event(models.Model):\n\n    title = models.CharField(max_length=200)\n    jira_key = models.CharField(max_length=10, blank=True, unique=True, null=True)\n    start = models.DateTimeField(default=dt.now)\n    end = models.DateTimeField(blank=True, null=True)\n    host = models.CharField(max_length=200)\n    description = models.TextField(blank=True)\n    event_type = models.ManyToManyField(EventType, blank=True)\n    audience_type = models.ManyToManyField(EventAudienceType, blank=True)\n    active = models.BooleanField(default=False)\n    pre_calculated_attendance = models.IntegerField(default=0, blank=True)\n\n    def is_valid(self):\n        if self.title:\n            return True\n        else:\n            return False\n\n    def attendance_count(self):\n        return len(SignIn.objects.filter(event__isnull=False, event=self)) + self.get_pre_calculated_attendance()\n\n    def __str__(self):\n        return self.title\n\n    def get_pre_calculated_attendance(self):\n        if self.pre_calculated_attendance:\n            return self.pre_calculated_attendance\n        else:\n            return 0\n\nclass Service(models.Model):\n    ETS = 'ETS'\n    TLS = 'TLS'\n    RIT = 'RIT'\n    DH = 'DH'\n    CTL = 'CTL'\n    LIB = 'LIB'\n    PARTNERS = (\n        (ETS, 'ETS'),\n        (TLS, 'TLS'),\n        (RIT, 'Research IT'),\n        (DH, 'Digital Humanities'),\n        (CTL, 'Center for Teaching and Learning'),\n        (LIB, 'Library')\n    )\n    name = models.CharField(max_length=50)\n    # host = models.CharField(max_length=3,\n    #                        choices=PARTNERS,\n    #                        default=TLS,\n    #                        blank=True)\n    description = models.TextField(null=True)\n    short_name = models.CharField(max_length=20, blank=True, null=True)\n    active = models.NullBooleanField(default=True, null=True)\n    list_priority = models.IntegerField(blank=True, null=True)\n\n    def __str__(self):\n        return self.name\n\nclass SignIn(models.Model):\n    class Meta:\n        verbose_name = \"Sign-in\"\n    timestamp = models.DateTimeField(default=dt.now)\n    person = models.ForeignKey(Person, on_delete=models.CASCADE, blank=True, null=True)\n
event = models.ForeignKey(Event, on_delete=models.CASCADE, blank=True, null=True)\n comments = models.CharField(max_length=200, blank=True)\n service = models.ForeignKey(Service, on_delete=models.CASCADE, blank=True, null=True)\n\n def get_reason(self):\n if self.event:\n return str(self.event)\n else:\n return self.service\n\n def is_valid(self):\n if self.timestamp:\n return True\n else:\n return False\n\n def __str__(self):\n return \"%s - %s here for %s\" % (self.pretty_date(), self.person, self.service)\n\n def pretty_date(self):\n return self.timestamp.strftime('%a. %m/%d %I:%M%p')\n","repo_name":"fmirdita/The_DatabAIS","sub_path":"databais/reception/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20532749295","text":"import cProfile\nfrom pstats import Stats, SortKey\n\nimport json\nimport gol\nimport utils\n\nTEST_BOARD_FILENAME = 'scenarios/testboard_100_emptyq.bmp'\ntestboard = utils.load_board(TEST_BOARD_FILENAME)\n\nprint(\nf\"\"\"\nProfile parameters: \nboard file: {TEST_BOARD_FILENAME}\nboard size: {testboard.size}\n\"\"\"\n)\n\nfor max_depth in range(3):\n game = gol.GameOfLife(\n initial_board=testboard,\n max_depth=max_depth + 1\n )\n\n print(f'# Max Depth: {max_depth + 1}')\n print('```')\n\n with cProfile.Profile() as pr:\n for i in range(1000):\n game.update_board()\n pr.print_stats()\n \n print('```')\n","repo_name":"raphaelpaiva/GameOfLife","sub_path":"profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3915990002","text":"import numpy\nfrom bitarray import bitarray\nfrom bitstream import BitStream\nimport pickle\nimport struct\nimport bitstring\nimport sys\n\ndef bubbleSort(array1,array2): #Sorting Algorithm to Descending Order\n size = len(array1)\n \n for i in range(0,size-1):\n swap = True\n for j in range(0,size-1-i):\n if array1[j] > array1[j+1]:\n \n temp1 = array1[j]\n array1[j] = array1[j+1]\n array1[j+1] = temp1\n \n temp2 = array2[j]\n array2[j] = array2[j+1]\n array2[j+1] = temp2\n \n swap = False\n\n if swap: break\n \n array1 = array1[::-1]\n array2 = array2[::-1]\n\n return array1,array2\n\ndef readFile(filename):\n file = open(filename, 'r')\n \n c = []\n p = []\n\n while True:\n char = file.read(1)\n if not char: break\n \n if char not in c:\n c.append(char)\n p.append(int(1))\n else: \n p[c.index(char)] += 1\n\n file.close()\n\n return p,c\n\ndef haufmannCompressing(filename):\n freq_list, char_list = readFile(filename)\n char_list_artif = char_list\n code_list = [''] * len(freq_list)\n \n char_list_artif, code_list = haufmannCoding(freq_list, char_list, code_list, char_list_artif)\n ''' to Control : PASSED '''\n for i in range(len(char_list_artif)):\n print(char_list_artif[i] + \" : \" + code_list[i])\n\n compressFile(filename, char_list_artif, code_list)\n\n \n\ndef haufmannCoding(freq_list, char_list, code_list, char_list_artif): \n freq_list, char_list = bubbleSort(freq_list, char_list)\n last_elem = len(freq_list) - 1\n \n if last_elem == 0:\n return char_list_artif, code_list\n \n for char in char_list[last_elem]:\n code_list[char_list_artif.index(char)] = '1' + code_list[char_list_artif.index(char)]\n \n for char in char_list[last_elem -1]:\n code_list[char_list_artif.index(char)] = '0' + code_list[char_list_artif.index(char)] \n \n ''' add last two char and freq '''\n 
freq_list[last_elem -1], char_list[last_elem -1] = freq_list[last_elem] + freq_list[last_elem -1], char_list[last_elem] + char_list[last_elem -1] \n \n ''' won't use last elements anymore '''\n freq_list, char_list = freq_list[0:last_elem], char_list[0:last_elem]\n \n return haufmannCoding(freq_list, char_list, code_list, char_list_artif)\n\ndef compressFile(filename, char_list, code_list):\n for i in range(len(code_list)):\n code_list[i] = '0b' + code_list[i]\n \n file = open(filename, 'r')\n stream = bitstring.BitString()\n \n while True:\n char = file.read(1)\n if not char: break \n stream.insert(code_list[char_list.index(char)])\n \n \n path = filename.split('.')[0]+'.bnr'\n outputFile = open(path,\"wb\")\n stream.tofile(outputFile)\n\n outputFile.close()\n file.close()\n\n return stream\n\nif len(sys.argv) != 2:\n sys.stderr.write(\"Please enter the filename to compress!\")\n sys.exit(1)\n \nfilename = sys.argv[-1]\nprint(filename)\nhaufmannCompressing(filename)\n\n\n\n\n","repo_name":"ozctimoti/CompressionAlgorithms","sub_path":"haufmannCompressing.py","file_name":"haufmannCompressing.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18852104705","text":"# -*- coding: utf-8 -*-\nimport re\nimport json\nimport scrapy\nfrom SMOM import helper\nfrom SMOM.items import SmomItem\nfrom scrapy.http import Request\n\n# #要闻\n# 'http://c.m.163.com/dlist/article/dynamic?{}'\n# par = {\n# 'from':'T1467284926140',\n# 'offset':'0',\n# 'size':'20',\n# 'fn':'1',\n# 'LastStdTime':'0',\n# 'passport':'',\n# 'devId':'UtQj6VTqfPTNdHOhqXgx4w%3D%3D',\n# # 'lat':'nEob1URk2zlHby%2FZRQvN9A%3D%3D',\n# # 'lon':'gnbXwKYIXyBHzMQxFSRSxQ%3D%3D',\n# 'version':'54.6',\n# 'net':'wifi',\n# # 'ts':'1556091859',\n# # 'sign':'uqtTuIyP5oD9HzgvKRKuccyK81gp7LyGqaF2wqK%2F62B48ErR02zJ6%2FKXOnxX046I',\n# 'encryption':'1',\n# 'canal':'miliao_news',\n# # 'mac':'I0hRorjreoVkNP82fbwMpUn4xdWy8S3keUAmEYPgEfc%3D',\n# # 'open':'',\n# # 'openpath':''\n# }\n#\n# #推荐\n# 'http://c.m.163.com/recommend/getSubDocPic?{}'\n# par2 = {\n# # 'tid':'T1348647909107',\n# 'from':'toutiao',\n# 'offset':'0',\n# 'size':'10',\n# 'fn':'3',\n# 'LastStdTime':'0',\n# 'spestr':'reader_expert_1',\n# # 'prog':'bjrec_toutiaotoutiao-1100000423-1200000585-1110000662-1111000458-1111000774-1111000698-1111000797-1111000478-1111000673-1200000742-1200000685-1200000604-1111000619-1200000724-1111000438-1111000834-1111000592-1111000394-1111000589-1200000673-1111000545-1200000594-1111000543-1200000710-1200000677-1200000632-1111000289-1111000388-1111000242-1200000576-1200000652-1111000828-1111000729-1200000734-1111000629-1200000678-1111000626-1111000701-1111000548e',\n# 'passport':'',\n# 'devId':'UtQj6VTqfPTNdHOhqXgx4w%3D%3D',\n# # 'lat':'nEob1URk2zlHby%2FZRQvN9A%3D%3D',\n# # 'lon':'gnbXwKYIXyBHzMQxFSRSxQ%3D%3D',\n# 'version':'54.6',\n# 'net':'wifi',\n# # 'ts':'1556095657',\n# # 'sign':'QKcKkwKuzkBU7u6u%2B67TXuGGUbk990WpvAlMfaMcqUx48ErR02zJ6%2FKXOnxX046I',\n# 'encryption':'1',\n# 'canal':'miliao_news',\n# # 'mac':'I0hRorjreoVkNP82fbwMpUn4xdWy8S3keUAmEYPgEfc%3D',\n# # 'open':'',\n# # 'openpath':''\n# }\n# 不需要headers\n\n# 'http://c.m.163.com/nc/article/list/T1414142214384/{}-20.html'\n# 需要headers\n\n# 网易新闻APP\nclass AppNeteaseSpider(scrapy.Spider):\n name = 'app.netease'\n entry_point = {\n # '要闻': 
'http://c.m.163.com/dlist/article/dynamic?from=T1467284926140&offset=0&size=20&fn=1&LastStdTime=0&passport=&devId=UtQj6VTqfPTNdHOhqXgx4w%3D%3D&version=54.6&net=wifi&encryption=1&canal=miliao_news',\n # '头条': 'http://c.m.163.com/recommend/getSubDocPic?from=T1467284926140&offset=0&size=20&fn=1&LastStdTime=0&passport=&devId=UtQj6VTqfPTNdHOhqXgx4w%3D%3D&version=54.6&net=wifi&encryption=1&canal=miliao_news',\n # '财经': 'http://c.m.163.com/dlist/article/dynamic?from=T1348648756099&offset=0&size=10&fn=1&LastStdTime=0&passport=&devId=UtQj6VTqfPTNdHOhqXgx4w%3D%3D&version=55.1&net=wifi&encryption=1&canal=miliao_news&open=&openpath=',\n # '新时代': 'http://c.m.163.com/nc/article/list/T1414142214384/0-20.html'\n '汽车': 'http://c.m.163.com/nc/auto/districtcode/list/440600/{}-20.html'\n }\n\n headers = {\n 'User-Agent': 'NewsApp/54.6 Android/4.4.4 (Xiaomi/MI 3C)'\n }\n\n def start_requests(self):\n for key in self.entry_point.keys():\n for i in range(16):\n yield Request(url=self.entry_point[key].format(i*20), callback=self.parse, headers=self.headers,dont_filter=True)\n\n def parse(self, response):\n jsonbd = json.loads(response.text)\n if len(jsonbd['list']) == 0: return\n for item in jsonbd['list']:\n if 'url_3w' not in item.keys() or len(item['url_3w']) == 0: continue\n url = item['url_3w']\n like = item['votecount'] if 'votecount' in item.keys() else None\n id = item['postid'] if 'postid' in item.keys() else None\n date = item['ptime'] if 'ptime' in item.keys() else None\n source = item['source'] if 'source' in item.keys() else None\n replyCount = item['replyCount'] if 'replyCount' in item.keys() else None\n yield Request(url=url, callback=self.content_parse, encoding='utf-8',\n meta={'like': like, 'id': id, 'date': date, 'replyCount': replyCount, 'source': source})\n\n def content_parse(self, response):\n\n pipleitem = SmomItem()\n\n pipleitem['S0'] = response.meta['id']\n pipleitem['S1'] = response.url\n pipleitem['S2'] = response.meta['source']\n pipleitem['S3a'] = '文章评论类'\n pipleitem['S3d'] = helper.list2str(response.xpath('string(//div[@class=\"post_crumb\"])').extract())\n pipleitem['S4'] = response.css('title::text').extract_first()\n pipleitem['S5'] = helper.get_localtimestamp()\n pipleitem['S6'] = response.meta['date']\n pipleitem['S7'] = '网易新闻APP'\n pipleitem['S9'] = '1'\n pipleitem['S10'] = None\n pipleitem['S11'] = None\n pipleitem['S12'] = response.meta['comment_count'] if 'comment_count' in response.meta.keys() else None\n pipleitem['S13'] = response.meta['replyCount']\n pipleitem['ID'] = response.meta['id']\n pipleitem['G1'] = None\n pipleitem['Q1'] = helper.list2str(response.xpath('string(//div[@id=\"endText\"])').extract()).replace('\\t','')\n\n # pipleitem['image_urls'] = helper.list2str(response.css('#endText img::attr(src)').extract())\n # pipleitem['video_urls'] = helper.list2str(response.css('#endText source::attr(src)').extract())\n\n return pipleitem\n","repo_name":"MingCrash/SMOM","sub_path":"SMOM/spiders/app_netease.py","file_name":"app_netease.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1396506408","text":"\"\"\"make synthetic datasets: WIP!!\"\"\"\nimport networkx as nx\nimport numpy as np\n\nfrom hcga.graph import Graph, GraphCollection\nfrom hcga.io import save_dataset\n\nnp.random.seed(0)\n\n\ndef make(\n folder=\"./datasets\",\n write_to_file=True,\n graph_type=\"SBM\",\n):\n \"\"\"Make dataset.\"\"\"\n if graph_type == \"SBM\":\n graphs = make_SBM()\n else:\n 
raise Exception(\"This type of synthetic dataset is not yet implemented\")\n if write_to_file:\n save_dataset(graphs, \"SYNTH_\" + graph_type, folder=folder)\n\n\ndef make_SBM():\n \"\"\"Make SBM with 1, 2, 3 and 4 clusters.\"\"\"\n n_graphs = 10\n\n def _add_graph(label):\n for _ in range(n_graphs):\n graph = nx.stochastic_block_model(sizes, probs)\n graphs.add_graph(Graph(list(graph.nodes), list(graph.edges), label))\n\n graphs = GraphCollection()\n\n sizes = [20, 20, 20, 20]\n probs = [\n [0.5, 0.02, 0.02, 0.02],\n [0.02, 0.5, 0.02, 0.02],\n [0.02, 0.02, 0.5, 0.02],\n [0.02, 0.02, 0.02, 0.5],\n ]\n _add_graph(4)\n\n sizes = [27, 27, 26]\n probs = [[0.5, 0.02, 0.02], [0.02, 0.5, 0.02], [0.02, 0.02, 0.5]]\n _add_graph(3)\n\n sizes = [40, 40]\n probs = [[0.5, 0.02], [0.02, 0.5]]\n _add_graph(2)\n\n sizes = [80]\n probs = [[0.5]]\n _add_graph(1)\n\n return graphs\n\n\n# below are deprecated functions\ndef synthetic_data_watts_strogatz(N=1000):\n \"\"\"small world.\"\"\"\n graphs = []\n graph_labels = []\n\n p = np.linspace(0, 1, N)\n\n for i in range(N):\n G = nx.connected_watts_strogatz_graph(40, 5, p[i])\n graphs.append(G)\n graph_labels.append(p[i])\n\n return graphs, np.asarray(graph_labels)\n\n\ndef synthetic_data_powerlaw_cluster(N=1000):\n \"\"\"powerlaw.\"\"\"\n graphs = []\n graph_labels = []\n\n p = np.linspace(0, 1, N)\n\n for i in range(N):\n G = nx.powerlaw_cluster_graph(40, 5, p[i])\n graphs.append(G)\n graph_labels.append(p[i])\n\n return graphs, np.asarray(graph_labels)\n\n\ndef synthetic_data_sbm(N=1000):\n \"\"\"sbm\"\"\"\n graphs = []\n graph_labels = []\n\n import random\n\n for _ in range(int(N / 2)):\n G = nx.stochastic_block_model(\n [random.randint(10, 30), random.randint(10, 30), random.randint(10, 30)],\n [[0.6, 0.1, 0.1], [0.1, 0.6, 0.1], [0.1, 0.1, 0.6]],\n )\n graphs.append(G)\n graph_labels.append(1)\n\n for _ in range(int(N / 2)):\n G = nx.stochastic_block_model(\n [random.randint(20, 40), random.randint(20, 40)], [[0.6, 0.1], [0.1, 0.6]]\n )\n graphs.append(G)\n graph_labels.append(2)\n\n return graphs, np.asarray(graph_labels)\n","repo_name":"barahona-research-group/hcga","sub_path":"hcga/dataset_creation/synthetic_datasets.py","file_name":"synthetic_datasets.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"42576250202","text":"######################\r\n## Import Libraries ##\r\n######################\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport re\r\nfrom sklearn import metrics\r\n\r\n#################\r\n## Import Data ##\r\n#################\r\n#KKH data used for training & validation from 2007-2013\r\nos.chdir('D:\\\\Users\\\\srrzyx\\\\Desktop\\\\MRI Brain Text Classification\\\\MRI_text_classification_v2\\\\cleaned_data')\r\ndata_ACR = pd.read_csv('KKH_ACR_cleaned_v2.csv')\r\ndata_non_ACR = pd.read_csv('KKH_non_ACR_cleaned_v2.csv') \r\nlen(data_ACR)/(len(data_ACR)+len(data_non_ACR)) #Blind guess probability of following ACR (baseline accuracy) \r\n\r\nKKH_data = data_ACR[['No.','Indication for MRI', 'ACR/No ACR']].append(data_non_ACR[['No.','Indication for MRI', 'ACR/No ACR']], ignore_index=True) #Combining Dataset\r\n\r\n########################\r\n## Cleaning the texts ##\r\n########################\r\n#--------------------------#\r\n#-- Negated text removal --#\r\n#--------------------------#\r\nNegWords = [' NO ']\r\nindication_cleaned = [] #To store cleaned data\r\nremoved_text = [] 
#To store sentence containing text that were removed\r\n\r\ndef shorten(text, NegWord): #Formula for removing texts after NegWord\r\n i = text.index(NegWord)\r\n return text[:i]\r\n \r\n#Remove negation text \r\nfor i in range(0,len(KKH_data)):\r\n SentenceList = re.split(r'[.?\\-\",;:]+',KKH_data['Indication for MRI'][i].upper()) #Split by punctuation\r\n NewSentenceList = re.split(r'[.?\\-\",;:]+',KKH_data['Indication for MRI'][i].upper())\r\n for j in NewSentenceList:\r\n #Removing negated terms\r\n for k in NegWords:\r\n try :\r\n shorten(j, k)\r\n NewSentenceList[NewSentenceList.index(j)] = shorten(j, k)\r\n except :\r\n pass\r\n removed_text.append({'removed_text' : ';'.join(list(set(SentenceList) - set(NewSentenceList)))})\r\n NewSentence = ''.join(NewSentenceList)\r\n indication_cleaned.append(NewSentence)\r\n\r\nKKH_data['Indication for MRI cleaned'] = pd.DataFrame(indication_cleaned)\r\nKKH_data['removed_text'] = pd.DataFrame(removed_text)\r\n\r\n#-----------------------#\r\n#-- Normalizing texts --#\r\n#-----------------------#\r\nimport nltk\r\n#nltk.download('stopwords') #no need to run if already downloaded\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.porter import PorterStemmer\r\n\r\ndef clean_text(df_text_column, data): \r\n corpus = []\r\n for i in range(0, len(data)):\r\n text = re.sub('[^a-zA-Z]', ' ', df_text_column[i])\r\n text = text.lower()\r\n text = text.split()\r\n ps = PorterStemmer()\r\n text = [ps.stem(word) for word in text if not word in set(stopwords.words('english'))]\r\n text = ' '.join(text)\r\n corpus.append(text)\r\n return corpus\r\n\r\nKKH_data['Indication for MRI cleaned'] = clean_text(KKH_data['Indication for MRI cleaned'], KKH_data)\r\n\r\n#--------------------------------#\r\n#-- Remove empty cell (if any) --#\r\n#--------------------------------#\r\nKKH_data['Indication for MRI cleaned'].replace('\\s+', ' ',regex=True,inplace=True) #Replace cells with multiple whitespaces to single whitespace\r\nKKH_data = KKH_data[KKH_data['Indication for MRI cleaned'] != ' ']\r\nKKH_data.dropna(subset=['Indication for MRI cleaned'], how='any', inplace=True)\r\nKKH_data = KKH_data.reset_index(drop=True)\r\n\r\n######################\r\n## Split Train-Test ##\r\n######################\r\nX = KKH_data[['No.','Indication for MRI cleaned']]\r\ny = KKH_data['ACR/No ACR']\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\r\ny_train.value_counts()\r\ny_test.value_counts()\r\nX_test['y_test'] = y_test\r\n\r\n# Creating the vectorizer\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nvect = TfidfVectorizer() # Can also try ngram_range=(1,2)\r\n#from sklearn.feature_extraction.text import CountVectorizer\r\n#vect = CountVectorizer() # Can also try ngram_range=(1,2)\r\n\r\n####################\r\n## Training Model ##\r\n####################\r\n#-------------------------#\r\n#-- Logistic Regression --#\r\n#-------------------------#\r\nimport statsmodels.api as sm\r\n\r\nmax_features = []\r\n#Iterate to get the maximum number of features/words available\r\nfor i in range(1,51):\r\n try: \r\n log_vect = TfidfVectorizer(max_features=i)\r\n X_logreg = log_vect.fit_transform(KKH_data['Indication for MRI cleaned']).toarray()\r\n features = log_vect.get_feature_names()\r\n X_logreg = pd.DataFrame(X_logreg, columns = features)\r\n X_logreg_train, X_logreg_test, y_logreg_train, y_logreg_test = 
train_test_split(X_logreg, y, test_size = 0.20, random_state = 0)\r\n logreg = sm.Logit(y_logreg_train, X_logreg_train)\r\n lresult = logreg.fit(max_iter = 1)\r\n max_features.append(i)\r\n except:\r\n print(\"Failed for max features = \", i)\r\nmax_features = max(max_features)\r\n\r\nlog_vect = TfidfVectorizer(max_features=max_features)\r\nX_logreg = log_vect.fit_transform(KKH_data['Indication for MRI cleaned']).toarray()\r\nfeatures = log_vect.get_feature_names()\r\nX_logreg = pd.DataFrame(X_logreg, columns = features)\r\nX_logreg_train, X_logreg_test, y_logreg_train, y_logreg_test = train_test_split(X_logreg, y, test_size = 0.20, random_state = 0)\r\nlogreg = sm.Logit(y_logreg_train, X_logreg_train)\r\nlresult = logreg.fit(max_iter = 1)\r\n\r\ny_prob_logreg = lresult.predict(X_logreg_test)\r\ny_pred_logreg = np.where(y_prob_logreg > 0.5, 1,0)\r\nlresult.summary2() # Logistic Regression summary\r\n\r\n#Performance Metrics\r\nmetrics.accuracy_score(y_test, y_pred_logreg) #Accuracy\r\nmetrics.roc_auc_score(y_test, y_prob_logreg) #ROC-AUC score\r\ncm_logreg = metrics.confusion_matrix(y_test, y_pred_logreg); cm_logreg #Confusion Matrix\r\ncm_logreg[0,0]/(cm_logreg[0,0]+cm_logreg[0,1]) #Specificity\r\ncm_logreg[1,1]/(cm_logreg[0,1]+cm_logreg[1,1]) #Precision\r\ncm_logreg[1,1]/(cm_logreg[1,0]+cm_logreg[1,1]) #Recall\r\nmetrics.f1_score(y_test, y_pred_logreg, average='binary') #F1 Score\r\n\r\n#------------------------#\r\n#-- K-Nearest Neigbour --#\r\n#------------------------#\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nknn = KNeighborsClassifier()\r\nfrom sklearn.pipeline import make_pipeline\r\nnp.random.seed(0)\r\nknn_pipe = make_pipeline(vect, knn)\r\nknn_pipe.steps\r\nknn_pipe.fit(X_train['Indication for MRI cleaned'], y_train)\r\n\r\nfrom sklearn.model_selection import GridSearchCV\r\nparameters_knn = [{'kneighborsclassifier__n_neighbors': list(range(1,151))}]\r\ngrid_search = GridSearchCV(estimator = knn_pipe,\r\n param_grid = parameters_knn,\r\n scoring = 'roc_auc',\r\n cv = 3,\r\n verbose=2)\r\ngrid_search = grid_search.fit(X_train['Indication for MRI cleaned'], y_train)\r\ngrid_mean_scores = grid_search.cv_results_['mean_test_score']\r\nplt.plot(list(range(1,151)), grid_mean_scores)\r\nplt.xlabel('k value')\r\nplt.ylabel('Cross-Validated Accuracy')\r\nbest_accuracy_knn = grid_search.best_score_ #Best cross-validation accuracy (not validation accuracy)\r\nbest_parameters_knn = grid_search.best_params_ #Best parameters\r\nbest_parameters_value_knn = list(best_parameters_knn.values())\r\n\r\nnp.random.seed(0)\r\nknn = KNeighborsClassifier(n_neighbors = best_parameters_value_knn[0])\r\nknn_pipe = make_pipeline(vect, knn)\r\nknn_pipe.fit(X_train['Indication for MRI cleaned'], y_train)\r\n\r\ny_pred_knn = knn_pipe.predict(X_test['Indication for MRI cleaned'])\r\ny_prob_knn = knn_pipe.predict_proba(X_test['Indication for MRI cleaned'])\r\nX_test['y_pred_knn'] = y_pred_knn\r\n\r\n#Performance Metrics\r\nmetrics.accuracy_score(y_test, y_pred_knn) #Accuracy\r\nmetrics.roc_auc_score(y_test, y_prob_knn[:, 1]) #ROC-AUC score\r\ncm_knn = metrics.confusion_matrix(y_test, y_pred_knn); cm_knn #Confusion Matrix\r\ncm_knn[0,0]/(cm_knn[0,0]+cm_knn[0,1]) #Specificity\r\ncm_knn[1,1]/(cm_knn[0,1]+cm_knn[1,1]) #Precision\r\ncm_knn[1,1]/(cm_knn[1,0]+cm_knn[1,1]) #Recall\r\nmetrics.f1_score(y_test, y_pred_knn, average='binary') #F1 Score\r\n\r\n#-------------------#\r\n#-- Random Forest --#\r\n#-------------------#\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nrf = 
RandomForestClassifier()\r\nfrom sklearn.pipeline import make_pipeline\r\nnp.random.seed(0)\r\nrf_pipe = make_pipeline(vect, rf)\r\nrf_pipe.steps\r\nrf_pipe.fit(X_train['Indication for MRI cleaned'], y_train)\r\n\r\n#Grid Search\r\nfrom sklearn.model_selection import GridSearchCV\r\nparameters_rf = [{'randomforestclassifier__n_estimators': [10,50,100,300,500,1000,2000,3000,5000],\r\n 'randomforestclassifier__max_depth': [None,3,5,7,10,15,20],\r\n 'randomforestclassifier__criterion': [\"gini\", \"entropy\"],\r\n 'randomforestclassifier__max_features': [\"auto\", \"log2\", None]}]\r\n\r\ngrid_search = GridSearchCV(estimator = rf_pipe,\r\n param_grid = parameters_rf,\r\n scoring = 'roc_auc',\r\n cv = 3,\r\n verbose=2,\r\n n_jobs = -1)\r\ngrid_search = grid_search.fit(X_train['Indication for MRI cleaned'], y_train)\r\nbest_accuracy_rf = grid_search.best_score_ #Best cross-validation accuracy (not validation accuracy)\r\nbest_parameters_rf = grid_search.best_params_ #Best parameters\r\nbest_parameters_value_rf = list(best_parameters_rf.values())\r\n\r\nnp.random.seed(0)\r\nrf = RandomForestClassifier(n_estimators = best_parameters_value_rf[3],\r\n max_depth = best_parameters_value_rf[1],\r\n criterion = best_parameters_value_rf[0],\r\n max_features = best_parameters_value_rf[2])\r\nrf_pipe = make_pipeline(vect, rf)\r\nrf_pipe.fit(X_train['Indication for MRI cleaned'], y_train)\r\n\r\ny_pred_rf = rf_pipe.predict(X_test['Indication for MRI cleaned'])\r\ny_prob_rf = rf_pipe.predict_proba(X_test['Indication for MRI cleaned'])\r\nX_test['y_pred_rf'] = y_pred_rf\r\n\r\n#Performance Metrics\r\nmetrics.accuracy_score(y_test, y_pred_rf) #Accuracy\r\nmetrics.roc_auc_score(y_test, y_prob_rf[:, 1]) #ROC-AUC score\r\ncm_rf = metrics.confusion_matrix(y_test, y_pred_rf); cm_rf #Confusion Matrix\r\ncm_rf[0,0]/(cm_rf[0,0]+cm_rf[0,1]) #Specificity\r\ncm_rf[1,1]/(cm_rf[0,1]+cm_rf[1,1]) #Precision\r\ncm_rf[1,1]/(cm_rf[1,0]+cm_rf[1,1]) #Recall\r\nmetrics.f1_score(y_test, y_pred_rf, average='binary') #F1 Score\r\n\r\n#Random Forest Feature Importance\r\nmax_num_features = 15 #Number of features to display on feature importance plot\r\nrf_importance = pd.DataFrame({'importance_value':rf_pipe.steps[1][1].feature_importances_}) #Based on \"gini importance\" or \"mean decrease impurity\"\r\nrf_importance['feature_name'] = vect.get_feature_names()\r\nrf_importance = rf_importance.sort_values(by='importance_value', ascending=False)[0:max_num_features]\r\n\r\nplt.figure()\r\nplt.title('RandomForest Feature Importance Plot', fontweight='bold')\r\nplt.barh(list(rf_importance['feature_name'][0:max_num_features]), list(rf_importance['importance_value'][0:max_num_features]), color='b', align='center')\r\nplt.xlabel('Relative Importance', fontweight='bold')\r\nplt.ylabel('Features', fontweight='bold')\r\nplt.gca().invert_yaxis()\r\n\r\n #Identifying false negative cases\r\n#cm_rf[1,0] #Number of false negative cases\r\n#false_negative_list = X_test[(X_test['y_test'] == 1) & (X_test['y_pred_rf'] == 0)]['No.']\r\n#false_negative_cases = data_ACR[data_ACR['No.'].isin(list(false_negative_list))]\r\n#os.chdir('D:\\\\Users\\\\srrzyx\\\\Desktop')\r\n#false_negative_cases.to_csv('false_negative_cases.csv',index=False)\r\n\r\n#-------------#\r\n#-- XGBoost --#\r\n#-------------#\r\nfrom xgboost import XGBClassifier\r\nxgb = XGBClassifier()\r\nfrom sklearn.pipeline import make_pipeline\r\nnp.random.seed(0)\r\nxgb_pipe = make_pipeline(vect, xgb)\r\nxgb_pipe.steps\r\nxgb_pipe.fit(X_train['Indication for MRI cleaned'], y_train)\r\n\r\n#Grid 
Search\r\nfrom sklearn.model_selection import GridSearchCV\r\nparameters_xgb = [{'xgbclassifier__colsample_bytree': [0.05,0.1,0.3,0.5,1],\r\n 'xgbclassifier__max_depth': [1,3,5,10,15,20],\r\n 'xgbclassifier__n_estimators': [50, 100, 150, 300, 500, 750, 1000],\r\n 'xgbclassifier__reg_alpha': [0.01,0.05,0.1,0.2],\r\n 'xgbclassifier__gamma': [0.3,0.5,0.7,1], \r\n 'xgbclassifier__subsample': [0.5,0.7,1]},]\r\n\r\ngrid_search = GridSearchCV(estimator = xgb_pipe,\r\n param_grid = parameters_xgb,\r\n scoring = 'roc_auc',\r\n cv = 3,\r\n verbose=2,\r\n n_jobs = -1)\r\ngrid_search = grid_search.fit(X_train['Indication for MRI cleaned'], y_train)\r\nbest_accuracy_xgb = grid_search.best_score_ #Best cross-validation accuracy (not validation accuracy)\r\nbest_parameters_xgb = grid_search.best_params_ #Best parameters\r\nbest_parameters_value_xgb = list(best_parameters_xgb.values())\r\n\r\nnp.random.seed(0)\r\nxgb = XGBClassifier(n_estimators = best_parameters_value_xgb[3], \r\n max_depth = best_parameters_value_xgb[2], \r\n colsample_bytree = best_parameters_value_xgb[0], \r\n subsample = best_parameters_value_xgb[5], \r\n reg_alpha = best_parameters_value_xgb[4],\r\n gamma = best_parameters_value_xgb[1])\r\nxgb_pipe = make_pipeline(vect, xgb)\r\nxgb_pipe.fit(X_train['Indication for MRI cleaned'], y_train)\r\n\r\ny_pred_xgb = xgb_pipe.predict(X_test['Indication for MRI cleaned'])\r\ny_prob_xgb = xgb_pipe.predict_proba(X_test['Indication for MRI cleaned'])\r\nX_test['y_pred_xgb'] = y_pred_xgb\r\n\r\n#Performance Metrics\r\nmetrics.accuracy_score(y_test, y_pred_xgb) #Accuracy\r\nmetrics.roc_auc_score(y_test, y_prob_xgb[:, 1]) #ROC-AUC score\r\ncm_xgb = metrics.confusion_matrix(y_test, y_pred_xgb); cm_xgb #Confusion Matrix\r\ncm_xgb[0,0]/(cm_xgb[0,0]+cm_xgb[0,1]) #Specificity\r\ncm_xgb[1,1]/(cm_xgb[0,1]+cm_xgb[1,1]) #Precision\r\ncm_xgb[1,1]/(cm_xgb[1,0]+cm_xgb[1,1]) #Recall\r\nmetrics.f1_score(y_test, y_pred_xgb, average='binary') #F1 Score\r\n\r\n# XGB Feature Importance\r\nmax_num_features = 15 #Number of features to display on feature importance plot\r\nfrom xgboost import plot_importance\r\nplot_importance(xgb_pipe.steps[1][1], max_num_features = max_num_features, importance_type='weight') #But no feature names\r\n\r\n#Alternative feature importance plot\r\nf_score = pd.DataFrame({'keys':list(xgb.get_booster().get_fscore().keys()), 'f_score': list(xgb.get_booster().get_fscore().values())})\r\nf_score = f_score.sort_values(by=['f_score'], ascending=False).reset_index(drop=True) # Sort by f_score and reset index\r\nf_score['keys'].replace('f', '', regex=True, inplace=True) #Remove letter 'f' from the keys column\r\nf_score['feature'] = 0\r\nfor i in range(0,len(f_score)):\r\n f_score['feature'][i] = vect.get_feature_names()[int(f_score['keys'][i])]\r\n \r\n#plt.rcdefaults()\r\nfig, ax = plt.subplots()\r\nax.barh(list(f_score['feature'][0:max_num_features]), list(f_score['f_score'][0:max_num_features]), color='blue', align='center')\r\nax.invert_yaxis() # labels read top-to-bottom\r\nax.set_xlabel('F score', fontweight='bold')\r\nax.set_ylabel('Features', fontweight='bold')\r\nax.set_title('XGBoost Feature Importance Plot', fontweight='bold')\r\nfor i, v in enumerate(list(f_score['f_score'][0:max_num_features])):\r\n ax.text(v + 0.5, i + .20, str(v), color='gray', fontsize=9)\r\nplt.show()\r\n\r\n#Identifying false negative cases\r\n#cm_xgb[1,0] #Number of false negative cases\r\n#false_negative_list = X_test[(X_test['y_test'] == 1) & (X_test['y_pred_xgb'] == 0)]['No.']\r\n#false_negative_cases = 
data_ACR[data_ACR['No.'].isin(list(false_negative_list))]\r\n#os.chdir('D:\\\\Users\\\\srrzyx\\\\Desktop')\r\n#false_negative_cases.to_csv('false_negative_cases.csv',index=False)\r\n\r\n#---------------#\r\n#-- ROC Curve --#\r\n#---------------#\r\nplt.figure(0).clf()\r\n\r\nplt.title('ROC Curve')\r\nfpr, tpr, thresholds = metrics.roc_curve(y_test, y_prob_logreg) #Logistic Regression\r\nplt.plot(fpr, tpr, 'r', label = 'LR AUC = %0.2f' % metrics.roc_auc_score(y_test, y_prob_logreg))\r\nfpr, tpr, thresholds = metrics.roc_curve(y_test, y_prob_knn[:, 1]) #k-Nearest Neighbor\r\nplt.plot(fpr, tpr, 'y', label = 'KNN AUC = %0.2f' % metrics.roc_auc_score(y_test, y_prob_knn[:, 1]))\r\nfpr, tpr, thresholds = metrics.roc_curve(y_test, y_prob_rf[:, 1]) #Random Forest\r\nplt.plot(fpr, tpr, 'b', label = 'RF AUC = %0.2f' % metrics.roc_auc_score(y_test, y_prob_rf[:, 1]))\r\nfpr, tpr, thresholds = metrics.roc_curve(y_test, y_prob_xgb[:, 1]) #XGboost\r\nplt.plot(fpr, tpr, 'g', label = 'XGB AUC = %0.2f' % metrics.roc_auc_score(y_test, y_prob_xgb[:, 1]))\r\nplt.plot([0, 1], [0, 1],'k--') #Baseline\r\nplt.ylabel('True Positive Rate')\r\nplt.xlabel('False Positive Rate')\r\nplt.legend(loc=0)\r\n\r\nplt.show()\r\n\r\n#######################\r\n## LIME - Validation ##\r\n#######################\r\n#-------------------#\r\n#-- Random Forest --#\r\n#-------------------#\r\nfrom lime.lime_text import LimeTextExplainer\r\npatient_no = 659 #Input patient case number ('No.') that you want to review\r\nindex_no = X_test[X_test['No.'] == patient_no].index.tolist()[0]\r\nclass_names = ['Non_ACR', 'ACR']\r\nexplainer = LimeTextExplainer(class_names=class_names)\r\nexp = explainer.explain_instance(X_test['Indication for MRI cleaned'][index_no], rf_pipe.predict_proba, num_features=6)\r\nexp.as_list()\r\nexp.as_pyplot_figure();\r\n\r\nprint('Patient ID: %d' % index_no)\r\nprint('Probability of following ACR:', rf_pipe.predict_proba([X_test['Indication for MRI cleaned'][index_no]])[0,1])\r\nprint('True class: %s' % class_names[y_test[index_no]])\r\nprint('Indication for MRI / History:', list(data_ACR[data_ACR['No.'] == X_test.loc[index_no,]['No.']]['Indication for MRI']))\r\n\r\n#-------------#\r\n#-- XGBoost --#\r\n#-------------#\r\nfrom lime.lime_text import LimeTextExplainer\r\npatient_no = 659 #Input patient case number ('No.') that you want to review\r\nindex_no = X_test[X_test['No.'] == patient_no].index.tolist()[0]\r\nclass_names = ['Non_ACR', 'ACR']\r\nexplainer = LimeTextExplainer(class_names=class_names)\r\nexp = explainer.explain_instance(X_test['Indication for MRI cleaned'][index_no], xgb_pipe.predict_proba, num_features=6)\r\nexp.as_list()\r\nexp.as_pyplot_figure();\r\n\r\nprint('Patient ID: %d' % index_no)\r\nprint('Probability of following ACR:', xgb_pipe.predict_proba([X_test['Indication for MRI cleaned'][index_no]])[0,1])\r\nprint('True class: %s' % class_names[y_test[index_no]])\r\nprint('Indication for MRI / History:', list(data_ACR[data_ACR['No.'] == X_test.loc[index_no,]['No.']]['Indication for MRI']))\r\n\r\n##########################\r\n## Predict New KKH data ##\r\n##########################\r\n#Load new KKH data (2014-2017)\r\nos.chdir('D:\\\\Users\\\\srrzyx\\\\Desktop\\\\MRI Brain Text Classification\\\\MRI_text_classification_v2\\\\NewData\\\\KKH_newdata(2014-2017)\\\\Original')\r\nnew_data = pd.read_excel('KKH_201401-201709 annonymised.xls')\r\nnew_data.columns.tolist()\r\n\r\n#Negated text removal\r\nNegWords = [' NO ']\r\nindication_cleaned = [] #To store cleaned data\r\nremoved_text = [] #To
store sentence containing text that were removed\r\n\r\ndef shorten(text, NegWord): #Formula for removing texts after NegWord\r\n i = text.index(NegWord)\r\n return text[:i]\r\n\r\nfor i in range(0,len(new_data)):\r\n SentenceList = re.split(r'[.?\\-\",;:]+',new_data['Indication/History'][i].upper()) #Split by punctuation\r\n NewSentenceList = re.split(r'[.?\\-\",;:]+',new_data['Indication/History'][i].upper())\r\n for j in NewSentenceList:\r\n #Removing negated terms\r\n for k in NegWords:\r\n try :\r\n shorten(j, k)\r\n NewSentenceList[NewSentenceList.index(j)] = shorten(j, k)\r\n except :\r\n pass\r\n removed_text.append({'removed_text' : ';'.join(list(set(SentenceList) - set(NewSentenceList)))})\r\n NewSentence = ''.join(NewSentenceList)\r\n indication_cleaned.append(NewSentence)\r\n\r\nnew_data['Indication_History_cleaned'] = pd.DataFrame(indication_cleaned)\r\nnew_data['removed_text'] = pd.DataFrame(removed_text)\r\n\r\n#Data Cleaning\r\nnew_data['Indication_History_cleaned'].replace('\\s+', ' ',regex=True,inplace=True) #Replace cells with multiple whitespaces to single whitespace\r\nnew_data.dropna(subset=['Indication_History_cleaned'], how='any', inplace=True) #Drop rows if text is null\r\nnew_data = new_data[(new_data['Indication_History_cleaned'] != ' ') & (new_data['Indication_History_cleaned'] != '. ')]\r\nnew_data.dropna(subset=['Follow_ACR'], how='any', inplace=True)\r\nnew_data = new_data.reset_index(drop=True)\r\nnew_data['Indication_History_cleaned'] = clean_text(new_data['Indication_History_cleaned'], new_data) #Clean data using created function 'clean_text'\r\n\r\n#Preparing data for prediction\r\nX_newtest = new_data[['Annonymised ID','Indication_History_cleaned']]\r\ny_newtest = new_data['Follow_ACR']\r\nX_newtest['y_newtest'] = y_newtest\r\ny_newtest.value_counts()[1]/(y_newtest.value_counts()[1]+y_newtest.value_counts()[0]) #Blind guess probability of following ACR (baseline accuracy) \r\n\r\n#-------------------------#\r\n#-- Logistic Regression --#\r\n#-------------------------#\r\nlog_vect_new = TfidfVectorizer(max_features=max_features) \r\nX_logreg_new = log_vect_new.fit_transform(X_newtest['Indication_History_cleaned']).toarray()\r\nfeatures_new = log_vect_new.get_feature_names()\r\nX_logreg_new = pd.DataFrame(X_logreg_new, columns = features)\r\n\r\n#Predicting the new data\r\ny_newprob_logreg = lresult.predict(X_logreg_new)\r\ny_newpred_logreg = np.where(y_newprob_logreg > 0.5, 1,0)\r\nX_newtest['y_newpred_logreg'] = y_newpred_logreg\r\n\r\n#Performance Metrics\r\nmetrics.accuracy_score(y_newtest, y_newpred_logreg) #Accuracy\r\nmetrics.roc_auc_score(y_newtest, y_newprob_logreg) #ROC-AUC score\r\ncm_newdata_logreg = metrics.confusion_matrix(y_newtest, y_newpred_logreg); cm_newdata_logreg #Confusion Matrix\r\ncm_newdata_logreg[0,0]/(cm_newdata_logreg[0,0]+cm_newdata_logreg[0,1]) #Specificity\r\ncm_newdata_logreg[1,1]/(cm_newdata_logreg[0,1]+cm_newdata_logreg[1,1]) #Precision\r\ncm_newdata_logreg[1,1]/(cm_newdata_logreg[1,0]+cm_newdata_logreg[1,1]) #Recall\r\nmetrics.f1_score(y_newtest, y_newpred_logreg, average='binary') #F1 Score\r\n\r\n#------------------------#\r\n#-- K-Nearest Neigbour --#\r\n#------------------------#\r\n#Predicting the new data\r\ny_newpred_knn = knn_pipe.predict(X_newtest['Indication_History_cleaned'])\r\ny_newprob_knn = knn_pipe.predict_proba(X_newtest['Indication_History_cleaned'])\r\nX_newtest['y_newpred_knn'] = y_newpred_knn\r\n\r\n#Performance Metrics\r\nmetrics.accuracy_score(y_newtest, y_newpred_knn) 
#Accuracy\r\nmetrics.roc_auc_score(y_newtest, y_newprob_knn[:, 1]) #ROC-AUC score\r\ncm_newdata_knn = metrics.confusion_matrix(y_newtest, y_newpred_knn); cm_newdata_knn #Confusion Matrix\r\ncm_newdata_knn[0,0]/(cm_newdata_knn[0,0]+cm_newdata_knn[0,1]) #Specificity\r\ncm_newdata_knn[1,1]/(cm_newdata_knn[0,1]+cm_newdata_knn[1,1]) #Precision\r\ncm_newdata_knn[1,1]/(cm_newdata_knn[1,0]+cm_newdata_knn[1,1]) #Recall\r\nmetrics.f1_score(y_newtest, y_newpred_knn, average='binary') #F1 Score\r\n\r\n#Identifying false negative cases\r\n#cm_newdata_knn[1,0] #Number of false negative cases\r\n#false_negative_new_list = X_newtest[(X_newtest['y_newtest'] == 1) & (X_newtest['y_newpred_knn'] == 0)]['Annonymised ID']\r\n#false_negative_new_cases = new_data[new_data['Annonymised ID'].isin(list(false_negative_new_list))]\r\n#os.chdir('D:\\\\Users\\\\srrzyx\\\\Desktop')\r\n#false_negative_new_cases.to_csv('false_negative_cases.csv',index=False)\r\n\r\n#-------------------#\r\n#-- Random Forest --#\r\n#-------------------#\r\n#Predicting the new data\r\ny_newpred_rf = rf_pipe.predict(X_newtest['Indication_History_cleaned'])\r\ny_newprob_rf = rf_pipe.predict_proba(X_newtest['Indication_History_cleaned'])\r\nX_newtest['y_newpred_rf'] = y_newpred_rf\r\n\r\n#Performance Metrics\r\nmetrics.accuracy_score(y_newtest, y_newpred_rf) #Accuracy\r\nmetrics.roc_auc_score(y_newtest, y_newprob_rf[:, 1]) #ROC-AUC score\r\ncm_newdata_rf = metrics.confusion_matrix(y_newtest, y_newpred_rf); cm_newdata_rf #Confusion Matrix\r\ncm_newdata_rf[0,0]/(cm_newdata_rf[0,0]+cm_newdata_rf[0,1]) #Specificity\r\ncm_newdata_rf[1,1]/(cm_newdata_rf[0,1]+cm_newdata_rf[1,1]) #Precision\r\ncm_newdata_rf[1,1]/(cm_newdata_rf[1,0]+cm_newdata_rf[1,1]) #Recall\r\nmetrics.f1_score(y_newtest, y_newpred_rf, average='binary') #F1 Score\r\n\r\n#Identifying false negative cases\r\n#cm_newdata[1,0] #Number of false negative cases\r\n#false_negative_new_list = X_newtest[(X_newtest['y_newtest'] == 1) & (X_newtest['y_newpred_rf'] == 0)]['Annonymised ID']\r\n#false_negative_new_cases = new_data[new_data['Annonymised ID'].isin(list(false_negative_new_list))]\r\n#os.chdir('D:\\\\Users\\\\srrzyx\\\\Desktop')\r\n#false_negative_new_cases.to_csv('false_negative_cases.csv',index=False)\r\n\r\n#-------------#\r\n#-- XGBoost --#\r\n#-------------#\r\n#Predicting the new data\r\ny_newpred_xgb = xgb_pipe.predict(X_newtest['Indication_History_cleaned'])\r\ny_newprob_xgb = xgb_pipe.predict_proba(X_newtest['Indication_History_cleaned'])\r\nX_newtest['y_newpred_xgb'] = y_newpred_xgb\r\n\r\n#Performance Metrics\r\nmetrics.accuracy_score(y_newtest, y_newpred_xgb) #Accuracy\r\nmetrics.roc_auc_score(y_newtest, y_newprob_xgb[:, 1]) #ROC-AUC score\r\ncm_newdata_xgb = metrics.confusion_matrix(y_newtest, y_newpred_xgb); cm_newdata_xgb #Confusion Matrix\r\ncm_newdata_xgb[0,0]/(cm_newdata_xgb[0,0]+cm_newdata_xgb[0,1]) #Specificity\r\ncm_newdata_xgb[1,1]/(cm_newdata_xgb[0,1]+cm_newdata_xgb[1,1]) #Precision\r\ncm_newdata_xgb[1,1]/(cm_newdata_xgb[1,0]+cm_newdata_xgb[1,1]) #Recall\r\nmetrics.f1_score(y_newtest, y_newpred_xgb, average='binary') #F1 Score\r\n\r\n#Identifying false negative cases\r\n#cm_newdata[1,0] #Number of false negative cases\r\n#false_negative_new_list = X_newtest[(X_newtest['y_newtest'] == 1) & (X_newtest['y_newpred_xgb'] == 0)]['Annonymised ID']\r\n#false_negative_new_cases = new_data[new_data['Annonymised 
ID'].isin(list(false_negative_new_list))]\r\n#os.chdir('D:\\\\Users\\\\srrzyx\\\\Desktop')\r\n#false_negative_new_cases.to_csv('false_negative_cases.csv',index=False)\r\n\r\n#---------------#\r\n#-- ROC Curve --#\r\n#---------------#\r\nplt.figure(0).clf()\r\n\r\nplt.title('ROC Curve')\r\nfpr, tpr, thresholds = metrics.roc_curve(y_newtest, y_newprob_logreg) #Logistic Regression\r\nplt.plot(fpr, tpr, 'r', label = 'LR AUC = %0.2f' % metrics.roc_auc_score(y_newtest, y_newprob_logreg))\r\nfpr, tpr, thresholds = metrics.roc_curve(y_newtest, y_newprob_knn[:, 1]) #k-Nearest Neighbor\r\nplt.plot(fpr, tpr, 'y', label = 'KNN AUC = %0.2f' % metrics.roc_auc_score(y_newtest, y_newprob_knn[:, 1]))\r\nfpr, tpr, thresholds = metrics.roc_curve(y_newtest, y_newprob_rf[:, 1]) #Random Forest\r\nplt.plot(fpr, tpr, 'b', label = 'RF AUC = %0.2f' % metrics.roc_auc_score(y_newtest, y_newprob_rf[:, 1]))\r\nfpr, tpr, thresholds = metrics.roc_curve(y_newtest, y_newprob_xgb[:, 1]) #XGboost\r\nplt.plot(fpr, tpr, 'g', label = 'XGB AUC = %0.2f' % metrics.roc_auc_score(y_newtest, y_newprob_xgb[:, 1]))\r\nplt.plot([0, 1], [0, 1],'k--') #Baseline\r\nplt.ylabel('True Positive Rate')\r\nplt.xlabel('False Positive Rate')\r\nplt.legend(loc=0)\r\n\r\nplt.show()\r\n\r\n#################\r\n## LIME - Test ##\r\n#################\r\n#-------------------#\r\n#-- Random Forest --#\r\n#-------------------#\r\n#Predicting the output using RF & explaining using LIME\r\nindex_no = 1473 #Input index number\r\nID_no = X_newtest['Annonymised ID'][index_no]\r\nclass_names = ['Non_ACR', 'ACR']\r\nexplainer = LimeTextExplainer(class_names=class_names)\r\nexp = explainer.explain_instance(X_newtest['Indication_History_cleaned'][index_no], rf_pipe.predict_proba, num_features=6)\r\nexp.as_list()\r\nexp.as_pyplot_figure();\r\n\r\nprint('Patient ID:', ID_no)\r\nprint('Probability of following ACR:', rf_pipe.predict_proba([X_newtest['Indication_History_cleaned'][index_no]])[0,1])\r\nprint('True class: %s' % class_names[int(y_newtest[index_no])])\r\nprint('Indication for MRI / History:', list(new_data[new_data['Annonymised ID'] == ID_no]['Indication/History']))\r\n\r\n#-------------#\r\n#-- XGBoost --#\r\n#-------------#\r\n#Predicting the output using XGBoost & explaining using LIME\r\nindex_no = 1473 #Input index number\r\nID_no = X_newtest['Annonymised ID'][index_no]\r\nclass_names = ['Non_ACR', 'ACR']\r\nexplainer = LimeTextExplainer(class_names=class_names)\r\nexp = explainer.explain_instance(X_newtest['Indication_History_cleaned'][index_no], xgb_pipe.predict_proba, num_features=6)\r\nexp.as_list()\r\nexp.as_pyplot_figure();\r\n\r\nprint('Patient ID:', ID_no)\r\nprint('Probability of following ACR:', xgb_pipe.predict_proba([X_newtest['Indication_History_cleaned'][index_no]])[0,1])\r\nprint('True class: %s' % class_names[int(y_newtest[index_no])])\r\nprint('Indication for MRI / History:', list(new_data[new_data['Annonymised ID'] == ID_no]['Indication/History']))\r\n","repo_name":"AlwinZYX/NLP-Radiology","sub_path":"KKH_MRI_text-class_ACR_v2.py","file_name":"KKH_MRI_text-class_ACR_v2.py","file_ext":"py","file_size_in_byte":28514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"17911987206","text":"#!/usr/bin/env python3\nimport pandas as pd\nimport numpy as np\nimport csv\n\nlager = pd.read_excel('GEWERBE - Rechnung - Lager.xlsx',\n                      header=2,\n                      dtype={\n                          \"Jahr\" : str,\n                      })\n\n# remove the stuff below in the Lager file\nlager = lager.iloc[0:lager['Jahr'].last_valid_index()+1]\nlager = 
lager.astype({\n    'verkauft' : np.float64,\n})\n\n# Load Lager and remove all rows where Artikelnr is not set.\n# Ignore rows that have an invalid value in the Artikelnr column\nlager_agg = lager[lager['eingestellt'].isnull()].groupby('Artikelnr', dropna=True).agg(\n    available=('Stk', 'sum'),\n    price=('verkauft', 'max'))\nlager[lager['Artikelnr'] == 'DEU2015002320']['verkauft'].max()\n\n# Load Coin Database\ncoin_db = pd.read_excel('Münzdatenbank.xlsx',\n                        dtype={\n                            'groupid': str,\n                            'Praegejahr': str\n                        })\n\n# Generate the Picpath\ncoin_db['picpath'] = coin_db['Artikelnr'] + '0.jpg'\n\n# Append the required 0 and 2 to the end of the Artikelnr\ncoin_db_2 = coin_db.copy()\ncoin_db_2['Artikelnr'] = coin_db['Artikelnr'] + '2'\ncoin_db_2['rate'] = \"20\"\n\ncoin_db['Artikelnr'] = coin_db['Artikelnr'] + '0'\ncoin_db['rate'] = \"0\"\n\n# join the dataframes to get descriptions from coin_db\ncoin_db = pd.concat([coin_db, coin_db_2])\ncoin_db = coin_db.set_index('Artikelnr')\ncoins_in_shop = lager_agg.join(coin_db, how=\"right\")\ncoins_in_shop.loc[coins_in_shop['available'].notna(), 'CMD'] = ''\ncoins_in_shop['available'] = coins_in_shop['available'].fillna(0)\n\n# get missing values on the right\ncoin_db_missing = lager_agg.join(coin_db, how=\"left\")\ncoin_db_missing = coin_db_missing.loc[coin_db_missing['name'].isna(), ['available', 'price']\n                                      ].join(lager.set_index('Artikelnr')['Münzbezeichnung'],\n                                             how='left').reset_index().groupby('Münzbezeichnung').first()\ncoin_db_missing.to_excel(\"fehlt.xlsx\")\n\nprint(\"Fehlende Münzen in der Münzdatenbank:\")\nprint(coin_db_missing)\n\ncoins_in_shop.to_csv('gesamt.csv',\n                     sep='\\t',\n                     encoding=\"cp1252\",\n                     line_terminator='\\r\\n',\n                     quoting=csv.QUOTE_MINIMAL,\n                     quotechar='\\'',\n                     index=True,\n                     index_label='realid'\n                     )\n","repo_name":"stfl/counting-coins","sub_path":"src/counting.py","file_name":"counting.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"40709422474","text":"#!/usr/bin/env python3\n# \nimport sys\nimport numpy as np\nfrom scipy.sparse import spdiags\nfrom scipy.sparse.linalg import spsolve\nimport matplotlib.pyplot as plt\n\nfrom fealpy.mesh import MeshFactory\nfrom fealpy.pde.timeharmonic_2d import CosSinData\nfrom fealpy.functionspace import FirstKindNedelecFiniteElementSpace2d \nfrom fealpy.functionspace.femdof import multi_index_matrix2d\n\n\n\nclass FirstKindNedelecFiniteElementSpace2dTest:\n    def __init__(self):\n        pass\n\n    def show_basis(self):\n        h = 0.5\n        box = [-h, 1+h, -h, np.sqrt(3)/2+h]\n        mesh = MeshFactory.one_triangle_mesh()\n        space = FirstKindNedelecFiniteElementSpace2d(mesh, p=0)\n        fig = plt.figure()\n        space.show_basis(fig, box=box)\n        plt.show()\n\n    def interpolation(self, n=4, p=0, plot=True):\n        \n        box = [-0.5, 1.5, -0.5, 1.5]\n\n        def u(p):\n            x = p[..., 0]\n            y = p[..., 1]\n            val = np.zeros_like(p)\n            pi = np.pi\n            val[..., 0] = np.sin(pi*x)*np.cos(pi*y)\n            val[..., 1] = np.sin(pi*x)*np.cos(pi*y)\n            return val\n\n        mesh = MeshFactory.boxmesh2d([0, 1, 0, 1], nx=n, ny=n, meshtype='tri')\n        space = FirstKindNedelecFiniteElementSpace2d(mesh, p=p)\n        uI = space.interpolation(u)\n        error = space.integralalg.L2_error(u, uI)\n        print(error)\n        if plot:\n            fig = plt.figure()\n            axes = fig.gca()\n            mesh.add_plot(axes, box=box)\n            plt.show()\n\n    def solve_time_harmonic_2d(self, n=3, p=0, plot=True):\n        pde = CosSinData()\n        mesh = pde.init_mesh(n=n, meshtype='tri')\n        space = FirstKindNedelecFiniteElementSpace2d(mesh, p=p)\n\n        gdof = 
space.number_of_global_dofs()\n        uh = space.function()\n\n        A = space.curl_matrix() - space.mass_matrix()\n        F = space.source_vector(pde.source)\n\n        isBdDof = space.boundary_dof()\n        bdIdx = np.zeros(gdof, dtype=int)\n        bdIdx[isBdDof] = 1\n        Tbd = spdiags(bdIdx, 0, gdof, gdof)\n        T = spdiags(1-bdIdx, 0, gdof, gdof)\n        A = T@A@T + Tbd\n        F[isBdDof] = 0 \n        uh[:] = spsolve(A, F)\n\n\n        error0 = space.integralalg.L2_error(pde.solution, uh)\n        error1 = space.integralalg.L2_error(pde.curl, uh.curl_value)\n        print(error0, error1)\n\n        if plot:\n            box = [-0.5, 1.5, -0.5, 1.5]\n            fig = plt.figure()\n            axes = fig.gca()\n            mesh.add_plot(axes, box=box)\n            #mesh.find_node(axes, showindex=True)\n            #mesh.find_edge(axes, showindex=True)\n            #mesh.find_cell(axes, showindex=True)\n            #node = ps.reshape(-1, 2)\n            #uv = phi.reshape(-1, 2)\n            #axes.quiver(node[:, 0], node[:, 1], uv[:, 0], uv[:, 1])\n            plt.show()\n\ntest = FirstKindNedelecFiniteElementSpace2dTest()\nif sys.argv[1] == \"show_basis\":\n    test.show_basis()\nelif sys.argv[1] == \"interpolation\":\n    n = int(sys.argv[2])\n    p = int(sys.argv[3])\n    test.interpolation(n=n, p=p)\nelif sys.argv[1] == \"solve\":\n    n = int(sys.argv[2])\n    p = int(sys.argv[3])\n    test.solve_time_harmonic_2d(n=n, p=p)\n","repo_name":"weihuayi/fealpy","sub_path":"example/oldexample/test/FirstKindNedelecFiniteElementSpace2dTest.py","file_name":"FirstKindNedelecFiniteElementSpace2dTest.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"37"}
+{"seq_id":"30629048353","text":"# here we need to load additional outputs of suite2p (this will be much easier when re-implemented in datajoint)\n\n# paths\nstat1_path = f'{data_root}/JM/{ds1}/{tseries1}/suite2p/plane0/stat.npy'\nstat2_path = f'{data_root}/JM/{ds2}/{tseries2}/suite2p/plane0/stat.npy'\n\nredcell1_path = f'{data_root}/JM/{ds1}/{tseries1}/suite2p/plane0/redcell.npy'\nredcell2_path = f'{data_root}/JM/{ds2}/{tseries2}/suite2p/plane0/redcell.npy'\n\n# loading\nstat1 = np.load(stat1_path, allow_pickle=True)\nstat2 = np.load(stat2_path, allow_pickle=True)\n\nredcell1 = np.load(redcell1_path, allow_pickle=True)[:,0]\nredcell2 = np.load(redcell2_path, allow_pickle=True)[:,0]\n\n# choose which stat (coordinates of which ROIs) to use\nstat1_redcell = stat1[redcell1 == True]\nstat2_redcell = stat2[redcell2 == True]\n\nimg2_r_reg = register_rigid_fromstat(img1_r, img2_r, stat1_redcell, stat2_redcell)\nshow_fov_rg(img2_r_reg, img1_r, vmax_fact_r=1.5)","repo_name":"juremaj/centuri-hackathon-2023_brainbow","sub_path":"long_deve_registration/code_dump/keypoints_stat.py","file_name":"keypoints_stat.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"17490090363","text":"class Solution:\n    \"\"\"\n    @param: A: a string\n    @param: B: a string\n    @return: a boolean\n    \"\"\"\n    def Permutation(self, A, B):\n        if A == '' and B == '':\n            return True\n        if not A or not B:\n            return False\n\n        cnts = {}\n        for char in A:\n            cnts[char] = cnts.get(char, 0) + 1\n        for char in B:\n            if char not in cnts or cnts[char] == 0:\n                return False\n            cnts[char] -= 1\n        for cnt in cnts.values():\n            if cnt != 0:\n                return False\n\n        return True\n","repo_name":"jaychsu/algorithm","sub_path":"lintcode/211_string_permutation.py","file_name":"211_string_permutation.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":130,"dataset":"github-code","pt":"37"}
+{"seq_id":"21930083320","text":"#nsolve_S1.py\n#first basic recursive number place solver\n#file in github\nimport sys\nfrom os.path import basename\n\nfrom time import perf_counter\nfrom nsolve_common import v\n\ndef solve(v,n,pr=False):#depth first recursive solver\n #---begin internal function\n def check(v, r, c, i):#check related cells( row, column, block)\n return row(v, r, i) and column(v, c, i) and block(v, r,c, i) \n def row(v, r, i):\n return i not in v[r] #True if not used in row\n def column(v, c, i):\n return all(True if i != v[r][c] else False for r in range(9))\n def block(v, r, c, i): #17Sec faster than above\n c0 ,r0= (c // 3) * 3 , (r // 3) * 3\n return all(True if i not in v[r][c0:c0+ 3]\n else False for r in range(r0, r0 + 3))\n #---end internal function\n #solve from here\n global rc,t\n rc+=1\n while n<81:\n r,c=n // 9,n % 9\n if v[r][c] == 0:break\n n+=1\n if n >=81: # finished?\n t1=perf_counter()-t\n if pr:\n for i in range(9):print(v[i]) #print solution\n print('found:','{:5.3f}'.format(t1)+'s','rc=','{:,d}'.format(rc),end='')\n return #finished\n for i in range(1,10): #try 1 - 9\n if check(v,r,c,i): #possible to place?\n v[r][c] = i #place it\n solve(v,n + 1,pr) #call myself to go next cell \n v[r][c] = 0 #nothing to place here,cancel current placement\n #return #removable. return to previous cell. (backtrack). \n\n\nif __name__=='__main__' :\n pth=sys.argv[0]\n pyname=\"Script=\"+pth\n fn=basename(pth)\n print(fn)\n\n vsave=[[v[i][j] for j in range(9)]for i in range(9)]\n rpt=7 #repeat counter for statistic of execution time\n \n rc=0 #recursion counter (global) \n t=perf_counter()\n solve(v,0,True)\n t=perf_counter()-t\n print(f' 1st solve end. {t:5.3f}s ncall= {rc:,d}\\n')\n tsum=0\n tav=0\n tmin=9999\n print('t=')\n for i in range(rpt):\n rc=0 \n v=[[vsave[i][j] for j in range(9)]for i in range(9)]\n t=perf_counter()\n solve(v,0)\n t=perf_counter()-t\n tsum+=t\n tav=tsum/(i+1)\n print(f' ret={t:5.3f}s')\n if t=k recessive) = 1 - P(<= k-1 recessive)\nanswer = round(1 - sum(gen_final[:k]),3)\n\nwith open('output/rosalind_wfmd_output.txt', 'w') as f: \n f.write(str(answer))\n","repo_name":"lingminhao/Project-Rosalind","sub_path":"Bioinformatics Stronghold/WFMD.py","file_name":"WFMD.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"18675968736","text":"class Solution:\n def findDiagonalOrder(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[int]\n \"\"\"\n temp = []\n leny = len(matrix)\n lenx = len(matrix[0])\n con = 1\n \n for ij in range(lenx+leny-1):\n for i in range(max(0,ij-leny+1), min(lenx,ij+1))[::con]:\n j = ij - i\n temp.append(matrix[j][i])\n con = -con\n return temp\n \n","repo_name":"chichuyun/LeetCode","sub_path":"0498/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"22023584972","text":"import cv2\r\nimport mediapipe as mp\r\nimport time\r\n\r\n\r\nclass handDetector:\r\n def __init__(\r\n self, mode=False, maxHands=2, modelComp=1, detectionCon=0.5, trackCon=0.5\r\n ):\r\n self.mode = mode\r\n self.maxHands = maxHands\r\n self.modelComp = modelComp\r\n self.detectionCon = detectionCon\r\n self.trackCon = trackCon\r\n self.mp_drawing = mp.solutions.drawing_utils\r\n self.mp_drawing_styles = mp.solutions.drawing_styles\r\n self.mp_hands = mp.solutions.hands\r\n self.hands = 
self.mp_hands.Hands(\r\n self.mode, self.maxHands, self.modelComp, self.detectionCon, self.trackCon\r\n )\r\n\r\n def findHands(self, image, draw=True):\r\n image.flags.writeable = False\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n self.results = self.hands.process(image)\r\n\r\n image.flags.writeable = True\r\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n if self.results.multi_hand_landmarks:\r\n for hand_landmarks in self.results.multi_hand_landmarks:\r\n # for id, lm in enumerate(hand_landmarks.landmark):\r\n # print(id, lm)\r\n if draw:\r\n self.mp_drawing.draw_landmarks(\r\n image,\r\n hand_landmarks,\r\n self.mp_hands.HAND_CONNECTIONS,\r\n self.mp_drawing_styles.DrawingSpec(color=(0, 0, 256)),\r\n self.mp_drawing_styles.DrawingSpec(color=(0, 256, 0)),\r\n )\r\n return image\r\n\r\n def finPos(self, image, handNo=0):\r\n lmList = []\r\n if self.results.multi_hand_landmarks:\r\n myHand = self.results.multi_hand_landmarks[handNo]\r\n\r\n for id, lm in enumerate(myHand.landmark):\r\n h, w, c = image.shape\r\n cx, cy = int(lm.x * w), int(lm.y * h)\r\n lmList.append([id, cx, cy])\r\n return lmList\r\n\r\n\r\ndef main():\r\n pTime = 0\r\n cTime = 0\r\n cap = cv2.VideoCapture(0)\r\n detector = handDetector()\r\n while True:\r\n success, image = cap.read()\r\n image = detector.findHands(image)\r\n cTime = time.time()\r\n fps = 1 / (cTime - pTime)\r\n pTime = cTime\r\n image = cv2.flip(image, 1)\r\n cv2.putText(\r\n image,\r\n str(int(fps)),\r\n (500, 400),\r\n cv2.FONT_HERSHEY_PLAIN,\r\n 3,\r\n (120, 52, 40),\r\n 3,\r\n )\r\n cv2.imshow(\"Camera Result\", image)\r\n landmarks = detector.finPos(image)\r\n if len(landmarks) != 0:\r\n print(landmarks[4])\r\n cv2.waitKey(1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Mrinank-Bhowmick/python-beginner-projects","sub_path":"projects/HandTrack/HTrackMod.py","file_name":"HTrackMod.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":616,"dataset":"github-code","pt":"38"} +{"seq_id":"43484932272","text":"from __future__ import print_function\n\nimport os\nimport sys\nimport subprocess\nimport re\nimport inspect\nimport uuid\n\n\nclass Term(object):\n\n def __init__(self, name=None, keepterm=False, xterm_args=None, xterm_prg_name='x-terminal-emulator'):\n self.name = name\n self.keepterm = keepterm\n self.xterm_args = xterm_args\n self.xterm_prg_name = xterm_prg_name\n\n def start(self):\n self.pipe_path = os.sep + os.path.join('tmp', 'fuddly_term_'+str(uuid.uuid4()))\n if not os.path.exists(self.pipe_path):\n os.mkfifo(self.pipe_path)\n self.cmd = [self.xterm_prg_name]\n if self.name is not None:\n self.cmd.extend(['-title',self.name])\n if self.xterm_args:\n self.cmd.extend(self.xterm_args)\n if self.keepterm:\n self.cmd.append('--hold')\n self.cmd.extend(['-e', 'tail -f {:s}'.format(self.pipe_path)])\n self._p = None\n\n def _launch_term(self):\n self._p = subprocess.Popen(self.cmd)\n\n def stop(self):\n if not self.keepterm and self._p is not None and self._p.poll() is None:\n self._p.kill()\n self._p = None\n try:\n os.remove(self.pipe_path)\n except FileNotFoundError:\n pass\n\n def print(self, s, newline=False):\n s += '\\n' if newline else ''\n if self._p is None or self._p.poll() is not None:\n self._launch_term()\n with open(self.pipe_path, \"w\") as input_desc:\n input_desc.write(s)\n\n def print_nl(self, s):\n self.print(s, newline=True)\n\n\ndef ensure_dir(f):\n d = os.path.dirname(f)\n if not os.path.exists(d):\n os.makedirs(d)\n\ndef 
ensure_file(f):\n if not os.path.isfile(f):\n open(f, 'a').close()\n\ndef chunk_lines(string, length):\n l = string.split(' ')\n chk_list = []\n full_line = ''\n for wd in l:\n full_line += wd + ' '\n if len(full_line) > (length - 1):\n chk_list.append(full_line)\n full_line = ''\n if full_line:\n chk_list.append(full_line)\n # remove last space char\n if chk_list:\n chk_list[-1] = (chk_list[-1])[:-1]\n return chk_list\n\ndef find_file(filename, root_path):\n for (dirpath, dirnames, filenames) in os.walk(root_path):\n if filename in filenames:\n return dirpath + os.sep + filename\n else:\n return None\n\ndef retrieve_app_handler(filename):\n mimetype = subprocess.check_output(['xdg-mime', 'query', 'filetype', filename])[:-1]\n desktop_file = subprocess.check_output(['xdg-mime', 'query', 'default', mimetype])[:-1]\n\n file_path = find_file(desktop_file.decode(), root_path='~/.local/share/applications/')\n if file_path is None:\n file_path = find_file(desktop_file.decode(), root_path='/usr/share/applications/')\n\n if file_path is None:\n return None\n\n with open(file_path, 'r') as f:\n buff = f.read()\n result = re.search(\"Exec=(.*)\", buff)\n app_name = result.group(1).split()[0]\n return app_name\n\n\nif sys.version_info[0] > 2:\n def get_caller_object(stack_frame=2):\n caller_frame_record = inspect.stack()[stack_frame]\n return caller_frame_record.frame.f_locals['self']\nelse:\n def get_caller_object(stack_frame=2):\n caller_frame_record = inspect.stack()[stack_frame]\n return caller_frame_record[0].f_locals['self']\n","repo_name":"kamaal44/fuddly","sub_path":"libs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"38"} +{"seq_id":"24761477791","text":"from django.shortcuts import render\r\nfrom calendar import Calendar\r\nimport json\r\nfrom optparse import Values\r\nfrom pdb import line_prefix\r\nfrom django.shortcuts import render\r\nfrom django.views import View\r\nfrom .models import usuario\r\nfrom django.http.response import JsonResponse\r\nfrom django.utils.decorators import method_decorator\r\nfrom django.views.decorators.csrf import csrf_exempt\r\n\r\nclass usuarioviews(View):\r\n @method_decorator(csrf_exempt)\r\n def dispatch(sel,request,*args,**kwargs) :\r\n return super().dispatch(request,*args,**kwargs) \r\n#consultar y agregar\r\n def get(self,request,doc=0):\r\n if doc>0:\r\n usu=list(usuario.objects.filter(id_usuario=doc).values())\r\n if len(usu)>0:\r\n usurespuesta=usu[0]\r\n datos={\"usuario\":usurespuesta}\r\n else:\r\n datos={\"respuesta\":\" datos no encontrados\"}\r\n else:\r\n usu=list(usuario.objects.values())\r\n datos={'listado_ususario':usu}\r\n return JsonResponse (datos)\r\n#consulta con un parametro\r\n\r\n def post(self,request):\r\n datos=json.loads(request.body)\r\n usuario.objects.create(documento=datos['documento'],nombre=datos['nombre'],apellido=datos['apellido'],correo=datos['correo'],celular=datos['celular'])\r\n return JsonResponse(datos)\r\n#insertar\r\n def put(self,request,doc):\r\n datos=json.loads(request.body)\r\n usu=list(usuario.objects.filter(id_usuario=doc).values()) \r\n if len(usu)>0:\r\n usuario=usuario.objects.get(id_usuario=doc)\r\n usuario.correo=datos['correo']\r\n usuario.imagen=datos['imagen']\r\n usuario.nombre_usuario=datos['nombre_usuraio']\r\n usuario.password=datos['password']\r\n usuario.rol=datos['rol'] \r\n usuario.fecha_creacion=['fecha_creacion']\r\n mensaje={\"respuesta\":\"datos almacenado 
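chunk_lines above hand-rolls word wrapping and keeps a trailing space on every chunk except the last. For comparison, the standard library's textwrap gives similar space-delimited wrapping without the bookkeeping (close but not identical semantics; chunk_lines can overshoot its length by one word):

```python
import textwrap

print(textwrap.wrap('the quick brown fox jumps over the lazy dog', width=15))
# ['the quick brown', 'fox jumps over', 'the lazy dog']
```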
correctamente\"}\r\n else:\r\n mensaje={\"respuesta\":\"dotos no encontrados\"}\r\n return JsonResponse(mensaje)\r\n# eliminar\r\n def delete(self,request,doc):\r\n usu=list(usuario.objects.filter(id_usuario=doc).values())\r\n if len(usu)>0:\r\n usuario.objects.filter(id_usuario=doc).delete()\r\n mensaje={\"mensaje\":\"el registro fue eliminado\"}\r\n else:\r\n mensaje={\"respuesta\":\"dotos no encontrados\"} \r\n return JsonResponse (mensaje) \r\n# Create your views here.\r\n","repo_name":"527carlos/prueva","sub_path":"appsprint1_reto3/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27259148266","text":"from django.core.validators import MinValueValidator, RegexValidator\nfrom django.db import models\nfrom users.models import User\n\n\nclass Tag(models.Model):\n \"\"\"Модель Tag.\"\"\"\n name = models.CharField(\n max_length=200,\n blank=False,\n unique=True,\n verbose_name='Название')\n color = models.CharField(\n max_length=7,\n blank=False,\n validators=[\n RegexValidator(\n regex='^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$',\n message='Цвет должен быть HEX-кодом!'\n )\n ],\n unique=True,\n verbose_name='Цвет')\n slug = models.SlugField(\n max_length=200,\n blank=False,\n unique=True,\n verbose_name='Слаг')\n\n class Meta:\n verbose_name = 'Тег'\n verbose_name_plural = 'Теги'\n\n def __str__(self):\n return self.name\n\n\nclass Ingredient(models.Model):\n \"\"\"Модель Ingredient.\"\"\"\n name = models.CharField(\n max_length=200,\n blank=False,\n verbose_name='Название')\n measurement_unit = models.CharField(\n max_length=200,\n blank=False,\n verbose_name='Единица измерения')\n\n class Meta:\n verbose_name = 'Ингредиент'\n verbose_name_plural = 'Ингредиенты'\n\n def __str__(self):\n return self.name\n\n\nclass Recipe(models.Model):\n \"\"\"Модель Recipe.\n Значения полей is_favorited и is_in_shopping_cart берется из\n соответствующих методов вьюсета для модели Recipe.\"\"\"\n tags = models.ManyToManyField(\n Tag,\n blank=False,\n related_name='tags',\n verbose_name='Тег'\n )\n author = models.ForeignKey(\n User,\n blank=True,\n null=True,\n on_delete=models.CASCADE,\n related_name='recipes',\n verbose_name='Автор'\n )\n ingredients = models.ManyToManyField(\n Ingredient,\n blank=False,\n through='IngredientAmount',\n related_name='ingredients',\n verbose_name='Ингредиент'\n )\n name = models.CharField(\n blank=False,\n max_length=200,\n db_index=True,\n verbose_name='Название'\n )\n image = models.ImageField(\n 'Картинка',\n upload_to='recipes/',\n blank=False\n )\n text = models.TextField(\n blank=False,\n verbose_name='Описание'\n )\n cooking_time = models.PositiveIntegerField(\n blank=False,\n validators=[\n MinValueValidator(1,\n message='Минимальное время 1 минута!')],\n verbose_name='Время приготовления (в минутах)'\n )\n pub_date = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = ('-pub_date',)\n verbose_name = 'Рецепт'\n verbose_name_plural = 'Рецепты'\n\n def __str__(self):\n return self.name\n\n\nclass IngredientAmount(models.Model):\n \"\"\"Промежуточная модель IngredientAmount.\n Нужна для создания поля amount.\"\"\"\n recipe = models.ForeignKey(\n Recipe,\n on_delete=models.CASCADE,\n verbose_name='Рецепт')\n ingredient = models.ForeignKey(\n Ingredient,\n blank=False,\n related_name='ingredient',\n on_delete=models.CASCADE,\n verbose_name='Ингредиент')\n amount = models.PositiveSmallIntegerField(blank=False)\n\n class 
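The usuarioviews.put handler above has several problems: rebinding the local name `usuario` shadows the model class, so the earlier `usuario.objects.filter(...)` raises UnboundLocalError; `datos['nombre_usuraio']` misspells the key; `usuario.fecha_creacion=['fecha_creacion']` stores a literal list instead of a payload lookup; and the modified row is never saved. A hedged sketch of a working version, assuming the same model and payload keys:

```python
def put(self, request, doc):
    datos = json.loads(request.body)
    fila = usuario.objects.filter(id_usuario=doc).first()
    if fila is None:
        return JsonResponse({'respuesta': 'datos no encontrados'})
    # copy over only the fields actually present in the payload
    for campo in ('correo', 'imagen', 'nombre_usuario', 'password',
                  'rol', 'fecha_creacion'):
        if campo in datos:
            setattr(fila, campo, datos[campo])
    fila.save()  # the original never persisted its changes
    return JsonResponse({'respuesta': 'datos almacenados correctamente'})
```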
Meta:\n verbose_name = 'Количество ингредиента'\n verbose_name_plural = 'Количество ингредиентов'\n constraints = [\n models.UniqueConstraint(\n fields=['recipe', 'ingredient'],\n name='unique_recipe_ingredient')]\n\n def __str__(self):\n return f'{self.recipe} {self.ingredient}'\n\n\nclass Favorite(models.Model):\n \"\"\"Модель Favorite.\"\"\"\n user = models.ForeignKey(\n User,\n related_name='favorites',\n on_delete=models.CASCADE,\n verbose_name='Пользователь'\n\n )\n recipe = models.ForeignKey(\n Recipe,\n related_name='favorite',\n on_delete=models.CASCADE,\n verbose_name='Рецепт'\n )\n\n class Meta:\n verbose_name = 'Избранное'\n verbose_name_plural = 'Избранное'\n constraints = [\n models.UniqueConstraint(\n fields=['user', 'recipe'],\n name='unique_user_recipe_favorite'\n )\n ]\n\n def __str__(self):\n return f'{self.recipe} {self.user}'\n\n\nclass Shopping(models.Model):\n \"\"\"Модель Shopping.\"\"\"\n user = models.ForeignKey(\n User,\n related_name='shoppings',\n on_delete=models.CASCADE,\n verbose_name='Пользователь'\n\n )\n recipe = models.ForeignKey(\n Recipe,\n on_delete=models.CASCADE,\n verbose_name='Рецепт'\n )\n\n class Meta:\n verbose_name = 'Список покупок'\n verbose_name_plural = 'Списки покупок'\n constraints = [\n models.UniqueConstraint(\n fields=['user', 'recipe'],\n name='unique_user_recipe_shopping'\n )\n ]\n\n def __str__(self):\n return f'{self.recipe} {self.user}'\n","repo_name":"esfiro4ka/foodgram-project-react","sub_path":"backend/foodgram/recipes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34355249746","text":"import matplotlib.pyplot as plt \nimport numpy as np \nx = np.arange(1,6)\ny = np.arange(2,11,2)\nfig = plt.figure()#bir figür oluşturduk\naxes = fig.add_axes([0.1,0.1,0.8,0.8])#figürümüze grafik ekliyoruz\n#veridğimiz ilk değer x ekseninin sayfa düzleminde başlangıç noktasını belirler\n#ikinci değer de y ekseninin uzaklığını belirler diğer değerler x ve y ekesninin kaçtan başlatmak istediğimizi belirtiyoruz\naxes2 = fig.add_axes([0.2,0.5,0.2,0.3])\naxes.plot(y,x)\naxes.set_xlabel(\"outer x\")\naxes.set_ylabel(\"outer y\")\naxes.set_title(\"outer graph\")\n\naxes2.plot(x,y)\naxes2.set_xlabel(\"ınner x\")\naxes2.set_ylabel(\"ınner y\")\naxes.set_title(\"ınner graph\")\n \nplt.show()","repo_name":"akbulut99/Data-analysis","sub_path":"matplotlib2.py","file_name":"matplotlib2.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"3509326405","text":"\"\"\"\nDesc: 规则引擎App表操作钩子函数\n\"\"\"\nfrom django.dispatch import receiver\nfrom django.db.models import signals\n\nfrom apps.lens import lens\nfrom apps.lens.utils import logger, decorator\nfrom apps import ruler\n\n\n@receiver(lens.post_before, sender=ruler.models.EmergencyOmnibusRule)\n@receiver(lens.post_before, sender=ruler.models.EmergencyOvertimeRule)\ndef post_before(sender, request, **kwargs):\n '''在新增一条数据之前\n '''\n rule = request.data.get('rule')\n logger.info('signals(post_before)接收到了一条(%s)数据' % sender,\n # type(rule),\n # rule,\n )\n\n # 校验rule合法性\n result_valid = ruler.ruler.verify_rule(rule)\n logger.info('signals(post_before) rule合法性结果(%s)' %\n # type(result_valid),\n result_valid,\n )\n if not result_valid:\n raise Exception('rule不合法')\n\n\n# @receiver(lens.valid_after, sender=ruler.models.EmergencyOmnibusRule)\n# def valid_after(sender, request, 
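Because IngredientAmount above adds a required `amount` column to the recipe-to-ingredient relation, rows must go through the intermediate model (or, on Django 2.2+, `through_defaults`). A hypothetical usage sketch; `user`, `dinner_tag`, and `carrot` are stand-in fixtures, not objects defined in this file:

```python
recipe = Recipe.objects.create(
    author=user, name='Soup', text='...',
    cooking_time=30, image='recipes/soup.png')
recipe.tags.set([dinner_tag])
IngredientAmount.objects.create(recipe=recipe, ingredient=carrot, amount=2)
# equivalent shortcut on Django 2.2+:
# recipe.ingredients.add(carrot, through_defaults={'amount': 2})
```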
instance, **kwargs):\n# '''PATCH方法下修改一条数据并且经modelform校验之后\n# '''\n# logger.info('阶段(%s valid_after)' % sender,\n# # sender, request, instance,\n# # kwargs\n# )\n\n\n# @receiver(lens.pre_save, sender=ruler.models.EmergencyOmnibusRule)\n# def pre_save(instance, **kwargs):\n# '''数据存库前\n# '''\n\n\n# @receiver(signals.post_save, sender=ruler.models.EmergencyOmnibusRule)\n# def post_save(sender, instance, **kwargs):\n# '''POST方法下数据存库后\n# '''\n# logger.info('阶段(%s post_save)' % sender)\n\n\n# @receiver(lens.patch_before, sender=ruler.models.EmergencyOmnibusRule)\n# def patch_before(sender, request, instance, **kwargs):\n# '''PATCH方法下数据存库后\n# '''\n# logger.info('阶段(%s patch_before)' % sender,)\n\n\n# @receiver(request_started)\n# def request_started(sender, **kwargs):\n# '''\n# '''\n\n\n# @receiver(request_finished)\n# def request_finished(sender, **kwargs):\n# '''\n# '''\n# logger.info('emergencyomnibus_request_finished', kwargs)\n","repo_name":"yourant/ecc_emergency","sub_path":"apps/ruler/signals/emergencyrule.py","file_name":"emergencyrule.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"19170829278","text":"'''Exercício Python 081: Crie um programa que vai ler vários números e colocar em uma lista.\r\nDepois disso, mostre:\r\nA) Quantos números foram digitados.\r\nB) A lista de valores, ordenada de forma decrescente.\r\nC) Se o valor 5 foi digitado e está ou não na lista.'''\r\n\r\nnumeros = []\r\nwhile True:\r\n numeros.append(int(input('Digite um valor: ')))\r\n resp = input('Deseja continuar? [S/N]: ').strip()\r\n if resp in 'Nn':\r\n break\r\nprint('-='*30)\r\nprint(f'Foram digitados {len(numeros)} números.')\r\nnumeros.sort(reverse = True)\r\nprint(f'Os números digitados, em ordem decrescente foram: {numeros}')\r\nif 5 in numeros:\r\n print('O valor 5 foi digitado.')\r\nelse:\r\n print('O valor 5 não foi digitado.')\r\n","repo_name":"mateusmarinho/python3-cursoemvideo","sub_path":"PythonExercicios/08-listas/ex081.py","file_name":"ex081.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"24143770401","text":"import json\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom tempfile import TemporaryFile\n\nimport h5py\nimport yaml\n\nfrom simianpy.misc import getLogger\n\n\nclass File:\n \"\"\"Base class for File IO\"\"\"\n\n description = \"\"\" \"\"\"\n extension = [\"\"]\n isdir = False\n needs_recipe = False\n default_mode = \"r\"\n modes = [\"r\"]\n supported_time_units = [\"dt\"]\n\n def __init__(self, filename, **params):\n self.filename = Path(filename)\n\n self.mode = params.get(\"mode\", self.default_mode)\n if self.mode not in self.modes:\n raise ValueError(\n f\"Provided mode '{self.mode}' is not supported. 
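The active receiver above vetoes object creation by raising from inside the lens.post_before dispatch; Signal.send propagates receiver exceptions back to the sender. A minimal standalone sketch of that veto-by-exception pattern with a hypothetical signal (only django itself is needed to run it):

```python
from django.dispatch import Signal, receiver

post_before = Signal()  # stand-in for lens.post_before

@receiver(post_before)
def verify(sender, request, **kwargs):
    if not request.get('rule'):
        raise Exception('invalid rule')  # same failure mode as above

post_before.send(sender='demo', request={'rule': {'op': 'all'}})  # passes
```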
Please provide one of: {self.modes}\"\n )\n\n if \"logger\" in params:\n self.logger = params[\"logger\"]\n else:\n logger_kwargs = params.get(\"logger_kwargs\", {})\n logger_defaults = {\n \"loggerName\": __name__,\n \"fileName\": self.filename.with_suffix(\".log\"),\n }\n for k, v in logger_defaults.items():\n if k not in logger_kwargs:\n logger_kwargs[k] = v\n\n self.logger = getLogger(**logger_kwargs)\n\n if self.needs_recipe:\n if \"recipe\" in params:\n self.recipe = params[\"recipe\"]\n elif \"recipe_path\" in params:\n self.recipe = self._read_recipe(params[\"recipe_path\"])\n else:\n raise ValueError(\n f\"If 'needs_recipe', must provide one of 'recipe' or 'recipe_path'. Provided params: {params}\"\n )\n else:\n self.recipe = None\n\n self.use_cache = params.get(\"use_cache\", False)\n self.cache_path = params.get(\"cache_path\", None)\n self.overwrite_cache = params.get(\"overwrite_cache\", False)\n if not (self.cache_path is None or self.use_cache):\n raise ValueError(f\"cannot provide cache_path if use_cache is not True\")\n \n self.time_units = params.get(\"time_units\", \"dt\")\n if self.time_units not in self.supported_time_units:\n raise ValueError(\n f\"Provided time_units '{self.time_units}' is not supported. Please provide one of: {self.supported_time_units}\"\n )\n\n def _get_data_cache(self):\n self._data = (\n h5py.File(self.cache_path or TemporaryFile())\n if self.use_cache\n else defaultdict(dict)\n )\n\n def _close_data_cache(self):\n if self.use_cache:\n if hasattr(self, \"_data\"):\n self._data.close()\n if hasattr(self, \"_data\"):\n del self._data\n\n def _read_recipe(self, recipe_path):\n recipe_path = Path(recipe_path)\n if not recipe_path.is_file():\n raise FileNotFoundError(f\"Cannot find file at path: {recipe_path}\")\n recipe_parsers = {\".json\": json.load, \".yaml\": yaml.safe_load}\n if recipe_path.suffix not in recipe_parsers.keys():\n raise ValueError(\n f\"Provided recipe file format '{recipe_path.suffix}' not supported. Please provide one of {recipe_parsers.keys()}\"\n )\n with open(recipe_path, \"r\") as f:\n recipe = recipe_parsers[recipe_path.suffix](f)\n return recipe\n\n def __enter__(self):\n self.open()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_type is not None:\n self.logger.error(\n f\"type={exc_type}\\nvalue={exc_value}\\ntraceback:\\n{traceback}\",\n exc_info=True,\n )\n self._close_data_cache()\n try:\n self.close()\n except NotImplementedError:\n self.logger.warning(\n f\"The 'close' method is not implemented for this class. 
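The suffix dispatch in _read_recipe above lifts out cleanly; a standalone version, assuming PyYAML is installed, that fails fast with the supported set:

```python
import json
from pathlib import Path

import yaml

PARSERS = {'.json': json.load, '.yaml': yaml.safe_load}

def load_recipe(path):
    path = Path(path)
    if not path.is_file():
        raise FileNotFoundError(f'Cannot find file at path: {path}')
    try:
        parser = PARSERS[path.suffix]
    except KeyError:
        raise ValueError(
            f'Recipe format {path.suffix!r} not supported; '
            f'use one of {sorted(PARSERS)}')
    with open(path, 'r') as f:
        return parser(f)
```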
File closing may not be handled properly\",\n exc_info=True,\n )\n\n def open(self):\n raise NotImplementedError\n\n def close(self):\n raise NotImplementedError\n","repo_name":"jselvan/simianpy","sub_path":"simianpy/io/File.py","file_name":"File.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"26407822289","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 19 14:36:18 2017\n\n@author: James\n\"\"\"\n\n#continuous values\n#area under curve give probability\n\nimport pandas as pd\nimport numpy as np\nfrom ecdf_func import ecdf\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set()\n\nmichelson_speed_of_light = pd.read_csv('michelson_speed_of_light.csv')['velocity of light in air (km/s)']\n\nmean = np.mean(michelson_speed_of_light)\n\nstd = np.std(michelson_speed_of_light)\n\nsamples = np.random.normal(mean, std, size=10000)\n\nx, y = ecdf(michelson_speed_of_light)\n\nx_theor, y_theor = ecdf(samples)\n\n_ = plt.plot(x_theor, y_theor)\n\n_ = plt.plot(x, y, marker='.', linestyle='none')\n\n_ = plt.xlabel('speed of light (km/s)')\n\n_ = plt.ylabel('CDF')\n\nplt.show()\n\nplt.close()\n\n#the normal pdf\n# Draw 100000 samples from Normal distribution with stds of interest: samples_std1, samples_std3, samples_std10\nsamples_std1 = np.random.normal(20, 1, size=100000)\nsamples_std3 = np.random.normal(20, 3, size=100000)\nsamples_std10 = np.random.normal(20, 10, size=100000)\n\n# Make histograms\n_ = plt.hist(samples_std1, bins=100, normed=True, histtype='step')\n_ = plt.hist(samples_std3, bins=100, normed=True, histtype='step')\n_ = plt.hist(samples_std10, bins=100, normed=True, histtype='step')\n\n# Make a legend, set limits and show plot\n_ = plt.legend(('std = 1', 'std = 3', 'std = 10'))\nplt.ylim(-0.01, 0.42)\nplt.show()\nplt.close()\n\n#the normal cdf\n# Generate CDFs\nx_std1, y_std1 = ecdf(samples_std1)\nx_std3, y_std3 = ecdf(samples_std3)\nx_std10, y_std10 = ecdf(samples_std10)\n\n# Plot CDFs\n_ = plt.plot(x_std1, y_std1, marker = '.', linestyle = 'none')\n_ = plt.plot(x_std3, y_std3, marker = '.', linestyle = 'none')\n_ = plt.plot(x_std10, y_std10, marker = '.', linestyle = 'none')\n\n# Make 2% margin\nplt.margins(0.02)\n\n# Make a legend and show the plot\n_ = plt.legend(('std = 1', 'std = 3', 'std = 10'), loc='lower right')\nplt.show()\nplt.close()\n\n#Are the belmont stakes results normally distributed?\nbelmont_raw_data = pd.read_csv('belmont.csv')['Time']\n#belmont_no_outliers = pd.to_datetime(belmont_no_outliers, format='%H:%M.%S', errors='ignore')\n\nbelmont_time = belmont_raw_data.str.extract('(\\d+):(\\d+).(\\d+)', expand=True).astype(float)\n\nbelmont_no_outliers = belmont_time[0].mul(60).add(belmont_time[1]).add(belmont_time[2].div(100))\n\n# Compute mean and standard deviation: mu, sigma\nmu = np.mean(belmont_no_outliers)\nsigma = np.std(belmont_no_outliers)\n\n\n# Sample out of a normal distribution with this mu and sigma: samples\nsamples = np.random.normal(mu, sigma, size = 10000)\n\n# Get the CDF of the samples and of the data\nx_theor, y_theor = ecdf(samples)\nx, y = ecdf(belmont_no_outliers)\n\n\n# Plot the CDFs and show the plot\n_ = plt.plot(x_theor, y_theor)\n_ = plt.plot(x, y, marker='.', linestyle='none')\nplt.margins(0.02)\n_ = plt.xlabel('Belmont winning time (sec.)')\n_ = plt.ylabel('CDF')\nplt.show()\nplt.close()\n\n#What are the chances of a hourse marching or beating Secretariat's record\n# Take a million samples out of the Normal 
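The analysis above imports ecdf from a local ecdf_func module that is not included here; the conventional implementation it presumably matches is:

```python
import numpy as np

def ecdf(data):
    """Empirical CDF: sorted values x, and y = fraction of points <= x."""
    x = np.sort(data)
    y = np.arange(1, len(x) + 1) / len(x)
    return x, y
```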
distribution: samples\nmu = 149.22101123595507\n\nsignma = 1.627816471774816\n\nsamples = np.random.normal(mu, sigma, size = 1000000)\n\n# Compute the fraction that are faster than 144 seconds: prob\nprob = np.sum(samples <= 144)/len(samples)\n\n# Print the result\nprint('Probability of besting Secretariat:', prob)","repo_name":"jameschenmech/Statistics","sub_path":"probability_density_function.py","file_name":"probability_density_function.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"19556989464","text":"import sys\nimport io\nimport logging\n\nimport debug\nimport log\n\n\nDICT_EOL = '\\r\\n'\n\nlogger = None\n\n\nclass NetworkError(IOError):\n pass\n\n\ndef init():\n global logger\n logger = logging.getLogger(__name__)\n\ndef net_exc(func):\n def wrap_net_exc(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except (IOError, EOFError, UnicodeDecodeError, BufferError) as ex:\n exc_info = sys.exc_info() if debug.enabled else None\n logger.error(ex, exc_info=exc_info)\n raise NetworkError(ex)\n return wrap_net_exc\n\nclass Connection:\n def __init__(self, sock):\n self._sio = sock.makefile(mode=\"rw\", encoding=\"utf-8\", newline='')\n\n @net_exc\n def read_line(self):\n \"\"\"reads a line of input\n \n The trailing EOL is stripped.\n \n throws socket.timeout, EOFError, BufferError\n \"\"\"\n\n buff = io.StringIO(newline='\\n')\n count = 0\n have_cr = False\n\n while count < 1024:\n ch = self._sio.read(1)\n if not ch:\n raise EOFError(\"connection closed by client\")\n buff.write(ch)\n count += 1\n\n if ch == '\\n' and have_cr:\n line = buff.getvalue()[:-2]\n log.trace_client(line)\n return line\n have_cr = ch == '\\r'\n else:\n raise BufferError(\"maximum command line length exceeded by client\")\n\n @staticmethod\n def _split_line(line):\n l = len(line)\n i = 0\n while i < l:\n n, pre = (1022, '') if line[i] != '.' else (1021, '.')\n chunk = ''.join((pre, line[i:i+n]))\n i += n\n yield chunk\n else:\n if l == 0:\n yield line\n\n @classmethod\n def _trunc_line(cls, line):\n return next(cls._split_line(line))\n\n def _write(self, line):\n data = ''.join((line, DICT_EOL))\n self._sio.write(data)\n self._sio.flush()\n log.trace_server(line)\n\n @net_exc\n def write_line(self, line, split=True):\n \"\"\"writes a line of output\n \n The line argument should not end with an EOL.\n The first leading '.' 
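Two notes on the Secretariat estimate above: `signma = 1.627...` is a typo, so `np.random.normal(mu, sigma, ...)` silently reuses the Belmont `sigma` computed earlier (presumably the same value the author meant to paste), and the Monte Carlo tail fraction can be cross-checked against the closed-form normal CDF:

```python
import numpy as np
from scipy import stats

mu, sigma = 149.22101123595507, 1.627816471774816
samples = np.random.normal(mu, sigma, size=1_000_000)
mc = np.mean(samples <= 144)
exact = stats.norm.cdf(144, loc=mu, scale=sigma)
print(mc, exact)  # both come out near 0.0007
```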
char is doubled.\n If split is True, lines with above-maximum length are split to multiple lines.\n If split is False, lines with above-maximum length are truncated.\n \"\"\"\n\n if split:\n for subline in self.__class__._split_line(line):\n self._write(subline)\n else:\n self._write(self.__class__._trunc_line(line))\n\n @net_exc\n def write_status(self, code, message):\n line = \"{:03d} {:s}\".format(code, message)\n self.write_line(line, split=False)\n\n @net_exc\n def write_text_end(self):\n self._write('.')\n\n @net_exc\n def write_text(self, lines):\n for line in lines:\n self.write_line(line)\n self.write_text_end()\n\n @net_exc\n def close(self):\n self._sio.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.close()\n","repo_name":"vsemionov/wordbase","sub_path":"src/wordbase/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29550420327","text":"get_ipython().magic('pylab inline')\n\nimport pandas as pd\n\ndef hot1_encoding(df, index):\n s= df[index]\n m_value_counts= s.value_counts()\n for k,v in m_value_counts.iteritems():\n uv_mask= (df[index] == k).astype(float)\n new_field= \"Field_sourceCol_{}_value_{}\".format(index, k)\n df[new_field]= uv_mask\n \n del df[index]\n\nfrom datetime import datetime\n\ndef extract_day_of_week(s):\n d= datetime.strptime(s, '%m-%d-%Y')\n return d.weekday()\n\ndef get_reconstruction_from_projection(eigenvectors, Mean, m_projection):\n Eig= np.matrix(eigenvectors)\n rec = np.array(m_projection * Eig.transpose() + Mean)\n #rec= Eig*m_projection.transpose()+Mean[:,np.newaxis]\n return np.ravel(rec)\n\nm_fwy_meta_df= pd.read_json('./data/regression/station_meta.json', typ='frame', orient='records')\nm_fwy_meta_df.rename(columns={'station': 'S_ID', 'district' : 'DISTRICT_ID', 'latitude' : 'LAT', 'longitude' : 'LON', 'zip' : 'ZIP'}, inplace=True)\nm_fwy_meta_df.drop(labels=['direction', 'freeway', 'name', 'urban'], axis=1, inplace=True)\nm_fwy_meta_df\n\n#\n# Memory Error occurs when attempting to run for all years and partitions, adjust list parameters as necessary\n#\n# p1_list= ['wkday', 'wkend']\n# p2_list= ['weekday', 'weekend']\np1_list= ['wkend']\np2_list= ['weekend']\npartitions= zip(p1_list, p2_list)\nyears= [2008, 2009, 2010, 2011, 2013, 2014, 2015]\n#years= [2015]\n#\nhot1_columns= ['NUM_LANES', 'FWY_NUM', 'FWY_DIR', 'DAY_OF_WEEK', 'DISTRICT_ID']\nfor pentry in partitions:\n for y in years:\n p1= pentry[0]\n p2= pentry[1]\n #\n print('Processing {}, {}, {}'.format(p1, p2, y))\n a_df= pd.read_csv('./data/regression/trim_{}_{}.csv'.format(y, p1), header=0)\n c_df= pd.merge(a_df, m_fwy_meta_df, on='S_ID')\n #\n base_mean_path= './data/{}/total_flow_{}_mean_vector.pivot_{}_grouping_pca_tmp.csv'\n base_eigs_path= './data/{}/total_flow_{}_eigenvectors.pivot_{}_grouping_pca_tmp.csv'\n mean= pd.read_csv(base_mean_path.format(p2, p2, y), header=None).values[0]\n eigs= pd.read_csv(base_eigs_path.format(p2, p2, y), header=None).values # eigenvectors per row matrix (5 X 288)\n\n rows= c_df[['Flow_Coef_1', 'Flow_Coef_2', 'Flow_Coef_3', 'Flow_Coef_4', 'Flow_Coef_5']].values\n\n new_columns= np.zeros(len(rows))\n for i, row in enumerate(rows):\n rec= get_reconstruction_from_projection(eigs, mean, row)\n new_columns[i]= np.mean(rec)\n\n c_df['AGG_TOTAL_FLOW']= new_columns\n c_df.drop([\n 'S_ID',\n 'Flow_Coef_1',\n 'Flow_Coef_2',\n 'Flow_Coef_3', \n 'Flow_Coef_4', \n 'Flow_Coef_5',\n 'CHP_DESC',\n 
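get_reconstruction_from_projection above is the standard PCA inverse map: projection times the eigenvector rows, plus the mean. A numeric check against sklearn's equivalent, under the assumption that `eigenvectors` stores components in rows as sklearn does:

```python
import numpy as np
from sklearn.decomposition import PCA

X = np.random.rand(100, 288)
pca = PCA(n_components=5).fit(X)
proj = pca.transform(X[:1])
rec = proj @ pca.components_ + pca.mean_  # same algebra as the helper above
assert np.allclose(rec, pca.inverse_transform(proj))
```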
'CHP_DURATION',\n 'CC_CODE',\n 'ZIP'\n ], axis=1, inplace=True)\n #\n c_df['CHP_INC']= c_df.CHP_INC.apply(lambda v: 1 if v == 'T' else 0)\n c_df['CHP_INC']= c_df.CHP_INC.astype(float)\n\n c_df['DATE']= c_df.DATE.apply(lambda s: extract_day_of_week(s))\n c_df.rename(columns={'DATE':'DAY_OF_WEEK'}, inplace=True)\n #\n for c in hot1_columns:\n hot1_encoding(c_df, c)\n c_df.to_csv('./data/regression/preprocessed_{}_{}.csv'.format(y, p1), index=False)\n #\n del c_df\n del a_df\n del rows\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/RegressionAnalysis_Preproccessing.py","file_name":"RegressionAnalysis_Preproccessing.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"18591892786","text":"import os\nimport json\nimport yaml\nimport datetime\nimport monitor\nimport models\nfrom flask import Response, request, redirect, url_for, render_template, send_from_directory\nfrom flask_gravatar import Gravatar\nfrom feedwerk.atom import AtomFeed\nfrom app import app\n\n\n# configuration\nroot_path = os.path.dirname(__file__)\n\ngravatar = Gravatar(app,\n size=180,\n rating='g',\n default='retro',\n force_default=False,\n force_lower=False)\n\n@app.before_request\ndef remove_trailing_slash():\n if request.path != '/' and request.path.endswith('/'):\n return redirect(request.path[:-1])\n\n@app.route('/robots.txt')\n@app.route('/sitemap.xml')\ndef static_from_root():\n return send_from_directory(app.static_folder, request.path[1:])\n\n@app.route('/recent.atom')\ndef recent_feed():\n feed = AtomFeed('Recent Articles',\n feed_url=request.url, url=request.url_root)\n with open(os.path.join(root_path, 'postlisting'), 'r') as i:\n articles = json.load(i)\n articles = sorted(articles.items(), reverse=True)\n articles = articles[:10]\n for article in articles:\n feed.add(article[1], unicode(models.render_markdown(root_path + 'blog/posts/{0}.{1}'.format(article[1], 'md'),\n header=True)),\n content_type='html',\n author='Matt Shirley',\n url=models.make_external('http://mattshirley.com/'+article[1]),\n updated=datetime.datetime.strptime(article[0].replace('-','') + '000000', \"%Y%m%d%H%M%S\"),\n published=datetime.datetime.strptime(article[0].replace('-','') + '000000', \"%Y%m%d%H%M%S\"))\n return feed.get_response()\n\n@app.route('/scical.ics')\ndef return_scical():\n cal = models.scrape_scical()\n return Response(cal, mimetype='text/calendar')\n\n@app.context_processor\ndef query_git_repos():\n return dict(get_git_repos=models.get_git_repos)\n\n@app.context_processor\ndef utility_processor():\n return(dict(root_path=root_path, render_markdown=models.render_markdown, recent_blurb=models.most_recent_blurb))\n\n@app.route('/')\ndef display_post(postname):\n content = models.render_markdown('{root}/posts/{postname}.md'.format(root=root_path, postname=postname))\n if content is not False:\n return render_template('markdown.html', **locals())\n elif content is False:\n return render_template('500.html'), 500\n\n@app.route('/update')\ndef update_entries():\n from subprocess import call\n retcode = call(['git', '-C', '/'.join([root_path, '..']), 'submodule', 'foreach', 'git', 'pull', 'origin', 'master'])\n if int(retcode) <= 0:\n return redirect(url_for('about'))\n else:\n return render_template('500.html'), 500\n\n@app.route('/')\ndef index():\n return redirect(url_for('about'))\n\n@app.route('/posts')\ndef posts():\n with open(os.path.join(root_path, 'posts/posts.yaml'), 'r') as listing:\n entries = 
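The posts() route reads posts.yaml with a bare yaml.load; on PyYAML 5.1+ that call warns without an explicit Loader, and on 6.x it raises TypeError. safe_load is the usual drop-in for plain data files:

```python
import yaml

with open('posts/posts.yaml', 'r') as listing:
    entries = yaml.safe_load(listing)
```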
yaml.load(listing)\n dates = sorted(entries.keys(), reverse=True)\n return render_template('show_entries.html', dates=dates, entries=entries)\n\n@app.route('/about', methods=['GET'])\ndef about():\n serif = True\n if request.method == 'GET':\n print_page = request.args.get('print', False)\n resume_template = request.args.get('resume', 'generic')\n content = models.render_markdown('{0}/static/md/{1}.md'.format(root_path, resume_template))\n if print_page:\n return render_template('print_markdown.html', **locals())\n else:\n content += models.render_markdown('{0}/static/md/{1}.md'.format(root_path, 'talks'))\n return render_template('markdown.html', **locals())\n\n@app.route('/about-me')\ndef aboutme_legacy():\n return redirect(url_for('about'))\n\n@app.route('/presentations')\ndef presentations():\n return redirect(url_for('about') + '#talks')\n\n@app.route('/talks')\ndef talks():\n return redirect(url_for('about') + '#talks')\n\n@app.route('/posters')\ndef posters():\n return redirect(url_for('about') + '#posters')\n\n@app.route('/uploads///')\ndef uploads(year, month, filename):\n dirpath = os.path.join(root_path, 'uploads', year, month)\n return send_from_directory(dirpath, filename)\n\n@app.route('/reload')\ndef reload():\n \"\"\" Monitor for changes to site code and restart wsgi process if necessary \"\"\"\n monitor.start(interval=1.0)\n return redirect(url_for('index'))\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n@app.errorhandler(500)\ndef page_not_found(e):\n return render_template('500.html'), 500\n","repo_name":"mdshw5/mattshirley.com","sub_path":"microblog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"26654210145","text":"#Uses python3\n\nimport sys\ndebug = False\n\n\ndef lcs3(a, b, c):\n #write your code here\n d = []\n for i in range(len(c)+1):\n d.append([])\n for j in range(len(b)+1):\n d[i].append([])\n for k in range(len(a)+1):\n #print(\"i: {0}; j: {1}; k: {2}\".format(i, j, k))\n #if (i == 0 and j == 0) or (i == 0 and k == 0) or (j == 0 and k == 0):\n if i == 0 or j == 0 or k == 0:\n d[i][j].append(0)\n else:\n d_mat = d[i-1][j-1][k-1] + (1 if c[i-1] == b[j-1] == a[k-1] else 0)\n d_ins1 = max(d[i-1][j][k], d[i][j-1][k], d[i][j][k-1])\n d_ins2 = max(d[i-1][j-1][k], d[i-1][j][k-1], d[i][j-1][k-1])\n d[i][j].append(max((d_mat, d_ins1, d_ins2)))\n #d[i][j].append(1)\n if debug:\n print(\"i: {0}; j: {1}; k: {2}\".format(i, j, k))\n for i_slice in d:\n print()\n for j_slice in i_slice:\n print(j_slice)\n '''\n elif j == 0:\n d[i].append(0)\n else:\n d_ins = d[i][j-1]\n d_del = d[i-1][j]\n d_mat = d[i-1][j-1] + (1 if a[j-1] == b[i-1] else 0)\n d[i].append(max(d_ins, d_del, d_mat))\n '''\n return d[-1][-1][-1]\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n an = data[0]\n data = data[1:]\n a = data[:an]\n data = data[an:]\n bn = data[0]\n data = data[1:]\n b = data[:bn]\n data = data[bn:]\n cn = data[0]\n data = data[1:]\n c = data[:cn]\n print(lcs3(a, b, c))\n","repo_name":"yffu/algo_sd","sub_path":"01_algorithmic_toolbox/week5_dynamic_programming1/5_longest_common_subsequence_of_three_sequences/lcs3.py","file_name":"lcs3.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"17026097748","text":"__version__ = \"0.0.3\"\n__hash__ = 
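The lcs3 dynamic program above also maximizes over double-index decrements, but those are dominated by the single-index ones because the table is monotone in each index. A trimmed, standalone restatement with a small check:

```python
from itertools import product

def lcs3(a, b, c):
    d = [[[0] * (len(a) + 1) for _ in range(len(b) + 1)]
         for _ in range(len(c) + 1)]
    for i, j, k in product(range(1, len(c) + 1),
                           range(1, len(b) + 1),
                           range(1, len(a) + 1)):
        if c[i - 1] == b[j - 1] == a[k - 1]:
            d[i][j][k] = d[i - 1][j - 1][k - 1] + 1
        else:
            d[i][j][k] = max(d[i - 1][j][k], d[i][j - 1][k], d[i][j][k - 1])
    return d[-1][-1][-1]

# [1, 3] is a longest subsequence common to all three
assert lcs3([1, 2, 3], [2, 1, 3], [1, 3, 5]) == 2
```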
\"\"\n\n\ndef configure(extensions=None, **arguments):\n from contextlib import ExitStack\n from functools import wraps\n from inspect import getfile\n from .base import ExtensionBase, ExtensionDevelopmentError # noqa: F401\n\n extensions = [ExtensionBase.get(e)(e) for e in (extensions or [])]\n\n def _fn(fn):\n @wraps(fn)\n def _fn1(**kwargs):\n with ExitStack() as stack:\n for e in extensions:\n kwargs = e.process(kwargs, arguments) or kwargs\n e.atexit(stack)\n return fn(**kwargs)\n\n for e in extensions:\n ret = e.setup(_fn1, arguments)\n if isinstance(ret, (list, tuple)):\n for w in ret:\n _fn1 = wraps(fn)(w(_fn1))\n elif ret is None:\n _fn1 = None\n else:\n _fn1 = wraps(fn)(ret)\n if not callable(_fn1):\n name = e.__class__.__name__\n raise ExtensionDevelopmentError(\n f\"{name}.setup returned non callable\", name, getfile(e.__class__)\n )\n return _fn1\n\n return _fn\n","repo_name":"cav71/click-plus","sub_path":"src/click_plus/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"73276782509","text":"def main():\n x,y=input(\"enterx\"),input(\"enter y\")\n # if x>y:\n # st=\"x is greater than y\"\n #else:\n # st=\"x is less than y\"\n # print(st)\n st= \"x is greater than y\" if (x>y) else \"y is greater than x\"\n print(st)\n\nif __name__==\"__main__\" :\n main()","repo_name":"Hazemmasry/hazemm","sub_path":"conditional.py","file_name":"conditional.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"31213787209","text":"import numpy as np\nfrom numpy import ma\nfrom numpy.polynomial import Polynomial\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pandas as pd\n\nimport re\nimport argparse\nimport scipy.sparse as sparse\nfrom scipy.sparse import csc_matrix\nfrom scipy.sparse import linalg\nfrom scipy.stats import pearsonr\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--file', type=str, default=\"metabolic.edgelist.txt\")\n parser.add_argument('--directed', type=int, default=1)\n args = parser.parse_args()\n return args\n\ndef get_degree(A):\n kin = np.asarray(A.sum(axis=0)).flatten()\n kout = np.asarray(A.sum(axis=1)).flatten()\n return(kin, kout)\n\n#directed and undirected\ndef degree_distribution(A, networkName, directed=True):\n binNum = 30\n\n if (directed):\n (kin, kout) = get_degree(A)\n bins = np.linspace(0, np.log10(np.max(kin)), num=binNum)\n digitized = np.digitize(np.log10(kin), bins)\n bin_counts = np.asarray([digitized.tolist().count(i) for i in range(0,len(bins))])\n bin_counts = ma.log10(bin_counts)\n #fit the line\n a,b = ma.polyfit(bins, bin_counts, 1, full=False)\n print('best fit in degree line:\\ny = {:.2f} + {:.2f}x'.format(b, a))\n yfit = [b + a * xi for xi in bins]\n fig, axs = plt.subplots(2, 1)\n axs[0].scatter(bins, bin_counts)\n axs[0].plot(bins, yfit, color=\"orange\")\n axs[0].set_title('in-degree distribution')\n axs[0].set_xlabel('Degree (d) log base 10', fontsize=\"small\")\n axs[0].set_ylabel('Frequency log base 10', fontsize=\"small\")\n axs[0].set_ylim(bottom=0)\n\n bins = np.linspace(0, np.log10(np.max(kout)), num=binNum)\n digitized = np.digitize(np.log10(kout), bins)\n bin_counts = np.asarray([digitized.tolist().count(i) for i in range(0,len(bins))])\n bin_counts = ma.log10(bin_counts)\n print('best fit out degree line:\\ny = {:.2f} + {:.2f}x'.format(b, 
a))\n yfit = [b + a * xi for xi in bins]\n axs[1].scatter(bins, bin_counts)\n axs[1].plot(bins, yfit, color=\"orange\")\n axs[1].set_title('out-degree distribution')\n axs[1].set_xlabel('Degree (d) log base 10', fontsize=\"small\")\n axs[1].set_ylabel('Frequency log base 10', fontsize=\"small\")\n plt.subplots_adjust(hspace=0.01)\n plt.tight_layout()\n plt.savefig(networkName + 'degree.pdf')\n plt.close()\n\n if (not directed):\n (kin,kout) = get_degree(A)\n print (kin.shape)\n #bin the statistics\n bins = np.linspace(0, np.log10(np.max(kin)), num=binNum)\n digitized = np.digitize(np.log10(kin), bins)\n bin_counts = np.asarray([digitized.tolist().count(i) for i in range(0,len(bins))])\n bin_counts = ma.log10(bin_counts)\n #fit the line\n a,b = ma.polyfit(bins, bin_counts, 1, full=False)\n print('best fit line:\\ny = {:.2f} + {:.2f}x'.format(b, a))\n yfit = [b + a * xi for xi in bins]\n plt.scatter(bins, bin_counts)\n plt.plot(bins, yfit, color=\"orange\")\n plt.title('degree distribution')\n plt.xlabel('Degree (d) log base 10', fontsize=\"small\")\n plt.ylabel('Frequency log base 10', fontsize=\"small\")\n plt.ylim(bottom=0)\n # plt.xscale('log')\n # plt.yscale('log')\n plt.tight_layout()\n plt.savefig(networkName + 'degree.pdf')\n plt.close()\n\n#only undirected\ndef clustering_coefficient(A, kin, kout, n, networkName, directed):\n binNum = 30\n A3 = A.dot(A).dot(A)\n\n if (not directed):\n cin = np.zeros(n, dtype=float)\n for i in range(0,n):\n if (kin[i] >= 2):\n cin[i] = A3[i,i] / (kin[i]*(kin[i] - 1))\n\n bins = np.linspace(0, np.max(cin), num=binNum)\n print (\"average clustering coefficient is \" + str(np.mean(cin)))\n # digitized = np.digitize(cin, bins)\n # bin_counts = np.asarray([digitized.tolist().count(i) for i in range(0,len(bins))])\n\n # plt.scatter(bins, bin_counts)\n plt.hist(cin, bins=bins)\n plt.title('clustering coefficient distribution')\n plt.xlabel('Local Clustering Coefficient', fontsize=\"small\")\n plt.ylabel('Frequency', fontsize=\"small\")\n plt.yscale('log')\n plt.ylim(bottom=0)\n plt.tight_layout()\n plt.savefig(networkName + 'cc.pdf')\n plt.close()\n\n return cin\n\n#directed and undirected\ndef shortest_path(A, networkName, directed=False):\n if (not directed):\n dist_mtx = sparse.csgraph.shortest_path(A, method='auto', directed=directed, unweighted=True)\n print (\"dist_mtx calculated\")\n #print (np.min(dist_mtx))\n numPaths = np.mean(dist_mtx, axis=0)\n #print (np.min(numPaths))\n avg = np.mean(numPaths)\n print (\"average length of shortest path is \" + str(avg))\n unique, counts = np.unique(numPaths, return_counts=True)\n plt.bar(unique, counts)\n plt.title('shortest path distribution')\n plt.xlabel('length of shortest path', fontsize=\"small\")\n plt.ylabel('number of nodes', fontsize=\"small\")\n plt.tight_layout()\n plt.savefig(networkName + 'sp.pdf')\n plt.close()\n\n if (directed):\n dist_mtx = sparse.csgraph.shortest_path(A, method='auto', directed=directed, unweighted=True)\n #in-degree\n #print (np.min(dist_mtx))\n Pin = np.mean(dist_mtx, axis=0)\n #print (np.min(Pin))\n avg = np.mean(Pin)\n print (\"average length of in degree shortest path is \" + str(avg))\n unique, counts = np.unique(Pin, return_counts=True)\n plt.bar(unique, counts)\n plt.title('shortest path distribution')\n plt.xlabel('length of shortest path', fontsize=\"small\")\n plt.ylabel('number of nodes', fontsize=\"small\")\n plt.tight_layout()\n plt.savefig(networkName + 'sp.pdf')\n plt.close()\n\n#only undirected\ndef connected_components(A, networkName, directed=False):\n 
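In degree_distribution's directed branch above, the out-degree pass recomputes bin_counts but prints and plots with a and b left over from the in-degree fit. A hedged refactor that refits per degree sequence (mirroring the original's log-binning, including its handling of zero degrees):

```python
import numpy as np
from numpy import ma

def log_binned_fit(k, bin_num=30):
    bins = np.linspace(0, np.log10(np.max(k)), num=bin_num)
    digitized = np.digitize(np.log10(k), bins)
    counts = ma.log10([digitized.tolist().count(i) for i in range(len(bins))])
    a, b = ma.polyfit(bins, counts, 1, full=False)  # slope, intercept
    return bins, counts, a, b
```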
n_components, labels = sparse.csgraph.connected_components(csgraph=A, directed=directed, return_labels=True)\n componentID, numNodes = np.unique(labels, return_counts=True)\n print (\"number of connected component is: \" + str(n_components))\n print (\"portion of nodes in GCC is: \" + str(np.max(numNodes) / A.shape[0]))\n\n\n#only undirected\ndef eigenvalue_distribution(A, networkName, directed=False):\n (kin, kout) = get_degree(A)\n n = kin.shape[0]\n binNum = 30\n if (not directed):\n D = csc_matrix(A.shape, dtype=np.int8)\n for i in range(0, A.shape[0]):\n D[i,i] = kin[i]\n L = D - A\n eigenvalues, vecs = linalg.eigs(L.asfptype(), k=(n-2))\n eigenvalues = eigenvalues.real\n #spectralGap = np.where(eigenvalues > 0, eigenvalues, np.inf).argmin()\n spectralGap = np.min(eigenvalues[eigenvalues > 0])\n print (\"spectral gap is \" + str(spectralGap))\n bins = np.linspace(0, np.max(eigenvalues), num=binNum)\n plt.hist(eigenvalues, bins=bins)\n plt.title('eigenvalue distribution')\n plt.xlabel('eigenvalue', fontsize=\"small\")\n plt.ylabel('frequency', fontsize=\"small\")\n plt.tight_layout()\n plt.savefig(networkName + 'ei.pdf')\n plt.ylim(bottom=0)\n plt.close()\n\n#only undirected\ndef degree_correlations(A, networkName, directed=False):\n (kin, kout) = get_degree(A)\n #assuming only dealing with undirected networks here\n if (not directed):\n #degree correlation matrix E\n E = np.zeros((np.max(kin)+1,np.max(kin)+1))\n for i in range(0, A.shape[0]):\n for j in range(0, A.shape[0]):\n if (A[i,j] == 1):\n k1=kin[i];\n k2=kin[j];\n E[k1,k2] = E[k1,k2] + 1;\n E = E / (np.sum(kin))\n\n Posk = kin[kin != 0]\n knn_ki = pd.DataFrame(Posk)\n knn_ki[1] = np.divide(A.dot(kin)[kin != 0], Posk)\n knn_k = knn_ki.groupby(0).mean()\n pearson_correlation = pearsonr(knn_k.index, knn_k[1])\n # knn = np.zeros(kin.shape[0])\n # for i in range(0,knn.shape[0]):\n # print (A[i].shape)\n # print (kin.shape)\n # knn[i] = float(np.sum(np.dot(A[i], kin)) / kin[i])\n # pearson_correlation = pearsonr(kin, knn)\n\n print(\"overall correlation is \" + str(pearson_correlation))\n plt.imshow(E, cmap='gray_r', origin='lower')\n plt.colorbar()\n plt.title('degree correlations')\n plt.xlabel('Degree d', fontsize=\"small\")\n plt.ylabel('Degree d', fontsize=\"small\")\n plt.savefig(networkName + 'dc.pdf')\n plt.close()\n\ndef degree_cc(A, networkName, cin, kin, directed=False):\n\n plt.scatter(kin, cin)\n plt.title('degree-clustering coefficient relation')\n plt.xlabel('Degree (d)', fontsize=\"small\")\n plt.ylabel('local clustering coefficient', fontsize=\"small\")\n plt.ylim(bottom=0)\n plt.xscale('log')\n plt.tight_layout()\n plt.savefig(networkName + 'dlcc.pdf')\n plt.close()\n\n\n#Actor undirected\n#Collaboration undirected\n#Internet undirected\n#Power Grid undirected\n#Protein undirected \n#Phone Calls undirected\n#Citation directed\n#Metabolic directed\n#Email directed\n#WWW directed\n\n\n#python Q1.py --file metabolic.edgelist.txt --directed 1\n\n\ndef execute(networkName, directed):\n plt.switch_backend('agg')\n d_dir = \"networks/\"\n \n mpl.rcParams['lines.markersize'] = 5\n\n edgelist = open(d_dir+networkName, \"r\")\n print (\"accessing \" + networkName)\n print (\"is directed : \" + str(directed))\n\n lines = list(edgelist.readlines())\n n=0\n for line in lines:\n \tvalues = re.findall(r\"[-+]?\\d*\\.\\d+|[-+]?\\d+\",line)\n \tif (int(values[0]) > n):\n \t\tn = int(values[0])\n \tif (int(values[1]) > n):\n \t\tn = int(values[1])\n n = n + 1\n print (\"number of nodes n is \" + str(n))\n \n #adjacency matrix A\n A 
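A self-contained check of the GCC bookkeeping in connected_components above, on a toy undirected graph with components {0, 1, 2} and {3, 4}:

```python
import numpy as np
from scipy import sparse

A = sparse.csr_matrix(np.array([
    [0, 1, 1, 0, 0],
    [1, 0, 0, 0, 0],
    [1, 0, 0, 0, 0],
    [0, 0, 0, 0, 1],
    [0, 0, 0, 1, 0],
]))
n_comp, labels = sparse.csgraph.connected_components(A, directed=False)
_, sizes = np.unique(labels, return_counts=True)
assert n_comp == 2 and sizes.max() / A.shape[0] == 0.6
```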
= csc_matrix((n,n), dtype=np.int8)\n for line in lines:\n values = re.findall(r\"[-+]?\\d*\\.\\d+|[-+]?\\d+\",line)\n i = int(values[0])\n j = int(values[1])\n if (i == j):\n continue\n A[i,j] = 1\n if (not directed):\n A[j,i] = 1\n\n print (\"there are \" + str(A.count_nonzero()) + \" edges in the adjacency matrix\")\n\n degree_distribution(A, networkName, directed=directed)\n shortest_path(A, networkName, directed=directed)\n \n #load A as undirected\n if (directed):\n A = csc_matrix((n,n), dtype=np.int8)\n for line in lines:\n values = re.findall(r\"[-+]?\\d*\\.\\d+|[-+]?\\d+\",line)\n i = int(values[0])\n j = int(values[1])\n if (i == j):\n continue\n A[i,j] = 1\n A[j,i] = 1\n directed = False\n (kin, kout) = get_degree(A)\n cin = clustering_coefficient(A, kin, kout, n, networkName, directed)\n connected_components(A, networkName, directed=directed)\n eigenvalue_distribution(A, networkName, directed=directed)\n degree_correlations(A, networkName, directed=directed)\n degree_cc(A, networkName, cin, kin, directed=directed)\n edgelist.close()\n\ndef main():\n args = get_args()\n networkName = args.file \n directed = False\n if (args.directed == 1):\n directed = True\n execute(networkName, directed)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n","repo_name":"shenyangHuang/NetworkScienceA1","sub_path":"Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":11316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"40012825626","text":"from rest_framework import serializers\nfrom manage_api.models import User, FileNode, FileType, FileVersion, FilePermission\nfrom manage_api.permissions import get_permission_filter\nfrom django.db.models import Max\nfrom django.core.exceptions import ValidationError\nfrom django.core.exceptions import PermissionDenied\n\nclass NodePreviewSerializer(serializers.ModelSerializer):\n version_details = serializers.SerializerMethodField()\n class Meta:\n model = FileNode\n fields = ['id', 'type', 'is_public', 'created_at', 'version_details']\n \n def get_version_details(self, obj):\n max_version = FileVersion.objects.filter(node_id=obj.id).order_by('-id').first()\n return {\n 'id': getattr(max_version, 'id', None),\n 'name': getattr(max_version, 'name', None),\n 'is_uploaded': getattr(max_version, 'is_uploaded', 0),\n }\n\nclass PermissionSerializer(serializers.ModelSerializer):\n user = serializers.SlugRelatedField(\n many=False,\n queryset=User.objects.all(),\n slug_field='username'\n )\n class Meta:\n model = FilePermission\n fields = ['id', 'user', 'can_write', 'created_at']\n read_only_fields = ['id', 'created_at', 'user']\n\nclass PermissionCreateSerializer(PermissionSerializer):\n class Meta(PermissionSerializer.Meta):\n fields = [*PermissionSerializer.Meta.fields, 'node']\n read_only_fields = ['id', 'created_at']\n\n def create(self, validated_data):\n node = validated_data['node']\n permission = FilePermission.objects.create(\n node=node,\n user=validated_data['user'],\n can_write=validated_data['can_write'],\n is_active=1,\n )\n return permission\n\nclass NodeSerializer(NodePreviewSerializer):\n name = serializers.CharField(max_length=256, write_only=True)\n owner = serializers.SlugRelatedField(\n many=False,\n read_only=True,\n slug_field='username'\n )\n childs = NodePreviewSerializer(many=True, read_only=True)\n permissions = PermissionSerializer(many=True, read_only=True)\n\n class Meta:\n model = FileNode\n fields = ['id', 'name', 'owner', 'type', 'is_public', 'created_at', 
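Item-assigning into a csc_matrix inside the parsing loop above works, but SciPy flags it with SparseEfficiencyWarning and it is slow at scale; building once from COO triplets is the usual alternative (the edge list here is a stand-in for the parsed file):

```python
import numpy as np
from scipy.sparse import coo_matrix

edges = [(0, 1), (1, 2), (2, 0)]
rows, cols = zip(*edges)
data = np.ones(len(edges), dtype=np.int8)
A = coo_matrix((data, (rows, cols)), shape=(3, 3)).tocsc()
assert A.count_nonzero() == 3
```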
'parent', 'version_details', 'childs', 'permissions']\n read_only_fields = ['type', 'created_at', 'version_details']\n\n def create(self, validated_data):\n parent = validated_data['parent']\n node = FileNode.objects.create(\n owner=self._user(),\n type=validated_data['type'],\n is_public=validated_data['is_public'],\n parent=parent,\n )\n node = self.update(node, validated_data)\n permission = FilePermission.objects.create(\n node=node,\n user=self._user(),\n can_write=1,\n is_active=1,\n )\n if parent is not None:\n parent_permission = parent.filepermission_set.filter(is_active=True)\n for p in parent_permission:\n if p.user != self._user():\n FilePermission.objects.create(\n node=node,\n user=p.user,\n can_write=p.can_write,\n is_active=1,\n )\n return node\n\n def update(self, instance, validated_data):\n should_upload = instance.type.should_upload\n version = FileVersion.objects.create(\n node=instance,\n name=validated_data['name'],\n is_uploaded=not should_upload,\n author=self._user(),\n )\n return instance\n\n def _user(self):\n request = self.context.get('request', None)\n if request:\n return request.user\n\n\nclass NodeCreateSerializer(NodeSerializer):\n class Meta(NodeSerializer.Meta):\n read_only_fields = ['created_at', 'version_details']\n\n\nclass VersionSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(\n many=False,\n read_only=True,\n slug_field='username'\n )\n class Meta:\n model = FileVersion\n fields = ['id', 'name', 'author', 'is_uploaded', 'created_at']\n\n","repo_name":"ncugit-sec/StorageManage","sub_path":"DocManageAPI/manage_api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37035361371","text":"# Simulates the flywheel's kalman filter\nimport control as cnt\nimport numpy as np\nimport scipy as sp\nimport csv\nimport sys\nimport matplotlib.pyplot as plt\n\ndef kalmd(A, C, Q, R):\n\t\"\"\"Solves for the steady state kalman gain and error covariance matrices.\n\tKeyword arguments:\n\tsys -- discrete state-space model\n\tQ -- process noise covariance matrix\n\tR -- measurement noise covariance matrix\n\tReturns:\n\tKalman gain, error covariance matrix.\n\t\"\"\"\n\tm = A.shape[0]\n\n\tobservability_rank = np.linalg.matrix_rank(cnt.obsv(A, C))\n\tif observability_rank != m:\n\t print(\n\t \"Warning: Observability of %d != %d, unobservable state\"\n\t % (observability_rank, m)\n\t )\n\n\t# Compute the steady state covariance matrix\n\t# P_prior = sp.linalg.solve_discrete_are(a=A.T, b=C.T, q=Q, r=R)\n\tP_prior = np.array([[R]])\n\tS = C * P_prior * C.T + R\n\tK = P_prior * C.T * np.linalg.inv(S)\n\tP = (np.eye(m) - K * C) * P_prior\n\tprint(str(P_prior[0, 0]) + \" \" + str(S[0, 0]) + \" \" + str(P[0, 0]) + \" \" + str(K[0, 0]))\n\treturn K, P\n\ndef updateStats(existingAggregate, newValue):\n (count, mean, M2) = existingAggregate\n count += 1\n delta = newValue - mean\n mean += delta / count\n delta2 = newValue - mean\n M2 += delta * delta2\n\n return (count, mean, M2)\n\ndef main(fname):\n\tgear_ratio = 5.0\n\tmoment = 0.005\n\tK_t = 2 * 0.198\n\tR = 3.84\n\tK_v = 6.68\n\tRPM_TO_RADS = 0.1047\n\tVAR_RADS = 2.75847841614875\n\tdt = 0.01\n\tA = np.zeros((1, 1))\n\t# A[0, 0] = -gear_ratio ** 2 * K_t / (K_v * R * moment)\n\t# A[0, 0] = 9.8\n\tA[0, 0] = -163 * dt # 163 is the number of steps for time step\n\tAd = A * dt\n\tAd = sp.linalg.expm(Ad)\n\t# 0.9838\n\t# B = np.array([[gear_ratio * K_t 
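updateStats above is Welford's online mean/variance update; after n samples, M2 / n is the population variance. A quick check against numpy, restating the same algebra:

```python
import numpy as np

def update_stats(agg, value):  # same algebra as updateStats above
    count, mean, m2 = agg
    count += 1
    delta = value - mean
    mean += delta / count
    m2 += delta * (value - mean)
    return count, mean, m2

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
agg = (0, 0.0, 0.0)
for v in data:
    agg = update_stats(agg, v)
assert np.isclose(agg[1], np.mean(data))
assert np.isclose(agg[2] / len(data), np.var(data))
```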
/ (R * moment)]])\n\t# Bd = np.linalg.inv(A) * (Ad - np.identity(1)) * B\n\tB = np.zeros((1, 1))\n\tBd = B\n\t# divide by -0.0165 to revert discretization\n\tBd[0, 0] = float(1) / 12\n\tC = np.array([[1]])\n\tD = np.array([[0]])\n\tQ = np.zeros((A.shape[0], A.shape[1]))\n\tQ[0, 0] = 1.0\n\tR = VAR_RADS\n\tRd = R / dt\n\tQd_tr = np.concatenate(((-A.T), Q), axis=1)\n\tQd_br = np.concatenate((np.zeros((A.shape[0], A.shape[1])), A), axis=0)\n\tQd_exp = np.concatenate(((Qd_tr, Qd_br.T)), axis=0)\n\tQd_exp = sp.linalg.expm(Qd_exp)\n\tQd = np.zeros((A.shape[0], A.shape[1]))\n\tQd[0, 0] = Qd_exp[0, 1] / np.linalg.inv(Ad)\n\t# ^ the array positions are temporary assuming the flywheel kalman shape\n\n\traw = []\n\tfiltered = []\n\n\tx_hat = np.zeros((A.shape[0], 1))\n\tU = np.zeros((1, 1))\n\tU[0, 0] = 12 # we're inputting 12V roughly\n\tY = np.zeros((C.shape[0], 1))\n\n\tkalman_gain = np.zeros((A.shape[0], C.shape[0]))\n\tkalman_gain, p0 = kalmd(Ad, C, Qd, Rd)\n\t# kalman_gain[0] = 0.2 # smaller kalman gain provides better damping\n\n\ti = 0\n\tx = []\n\tstats = (0, 0, VAR_RADS)\n\twith open(fname) as csv_file:\n\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\n\t\tfor row in csv_reader:\n\t\t\tif (i < 10):\n\t\t\t\ti = i + 1\n\t\t\t\tcontinue\n\t\t\tif (i is 10):\n\t\t\t\tx_hat[0, 0] = float(row[0])\n\t\t\tY[0] = float(row[0])\n\t\t\traw.append(float(row[0]))\n\n\t\t\tif (Y[0] > 58.0):\n\t\t\t\tstats = updateStats(stats, Y[0])\n\t\t\t\t(count, mean, M2) = stats\n\t\t\t\tRd = M2 / (count * dt)\n\t\t\t# Predict\n\t\t\tx_hat = Ad * x_hat + Bd * U\n\t\t\tp0 = Ad * p0 * Ad.T + Qd\n\t\t\tS = C * p0 * C.T + Rd\n\t\t\t# Update\n\t\t\tkalman_gain = p0 * C.T * np.linalg.inv(S)\n\t\t\tx_hat_new = kalman_gain * (Y - C * x_hat) + x_hat\n\t\t\tp0 = (np.identity(1) - kalman_gain * C) * p0\n\t\t\t# print(str(kalman_gain[0, 0]) + \" \" + str(p0[0, 0]) + \" \" + str(Qd[0, 0]))\n\n\t\t\tcur = float(x_hat_new.item((0, 0)))\n\t\t\tfiltered.append(cur)\n\n\t\t\t# if (x_hat_new.item(0, 0) > 0.63 * 62.8 and x_hat.item(0, 0) <= 0.63 * 62.8):\n\t\t\t\t# print(\">>>>>>>>>>>> \" + str(i))\n\t\t\tprint(str(Rd))\n\t\t\tx_hat = x_hat_new\n\n\t\t\tx.append(i)\n\n\t\t\tplt.cla()\n\t\t\tplt.plot(x, raw, \".r\", x, filtered, \".b\")\n\t\t\tplt.grid(True)\n\t\t\tplt.pause(0.001)\n\n\t\t\ti = i + 1\n\n\t\tplt.show()\n\t\tprint(str(kalman_gain[0, 0]))\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n","repo_name":"purduesigbots/forkner-public","sub_path":"kalmansim.py","file_name":"kalmansim.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"38"} +{"seq_id":"31280482977","text":"import math\nimport sys\n\ndef haversine(lat1, long1, lat2, long2, r):\n phi1 = lat1 * math.pi / 180\n phi2 = lat2 * math.pi /180\n lambda1 = long1 * math.pi /180\n lambda2 = long2 * math.pi /180\n \n relationship1 = (math.sin((phi2 - phi1)/2))**2\n relationship2 = (math.sin((lambda2 - lambda1)/2))**2\n relationship3 = relationship1 + math.cos(phi1) * math.cos(phi2) * relationship2\n\n d = 2*r * math.asin(math.sqrt(relationship3))\n return int(d)\n","repo_name":"jjvolesky/algorithms","sub_path":"python/haversine.py","file_name":"haversine.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22549854035","text":"#describe each location with companies in side\n\nimport sys\nimport os\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = 
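A restated haversine using math.radians in place of the manual pi/180 conversions, with a sanity check: pole to pole is half a great circle, i.e. pi times r (6371 km as mean Earth radius is an assumption here):

```python
import math

def haversine(lat1, long1, lat2, long2, r):
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = phi2 - phi1
    dlmb = math.radians(long2 - long1)
    a = (math.sin(dphi / 2) ** 2
         + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2)
    return 2 * r * math.asin(math.sqrt(a))

assert round(haversine(90, 0, -90, 0, 6371)) == round(math.pi * 6371)
```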
os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport argparse\nfrom math import *\nfrom sklearn.preprocessing import normalize\nfrom utils import *\n\npjoin = os.path.join\n\nfrom header import *\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n arg('--run_root', default='/Users/yefeichen/Database/location_recommender_system/')\n arg('--ls_card',default='location_scorecard_191113.csv')\n arg('--app_date',default='_191114')\n arg('--dbname',default='tmp_table')\n args = parser.parse_args()\n\n datapath = args.run_root\n datapath_mid = pjoin(datapath,args.dbname)\n cfile = ['dnb_pa.csv']\n app_date = args.app_date\n apps = app_date + '.csv'\n appsadd = app_date+'_add.csv'\n lfile = args.ls_card # It is fixed as input\n clfile = ['PA']\n clfile = [c + apps for c in clfile]\n\n print('Args:',datapath,apps,lfile,args.ratio)\n\n not_feat_col = feature_column['not_feat_col']\n cont_col_nameC = feature_column['cont_col_nameC']\n spec_col_nameC = feature_column['spec_col_nameC']\n cont_col_nameL = feature_column['cont_col_nameL']\n key_col_comp = feature_column['key_col_comp']\n key_col_loc = feature_column['key_col_loc']\n\n dummy_col_nameL = feature_column['dummy_col_nameC']\n dummy_col_nameC = feature_column['dummy_col_nameL']\n\n\n ##Multi training data generator(multi city)\n # 如果不合并所有数据在进行dummy 会出现一些category在某些城市不出现的情况,从而导致问题\n # 8-2分训练测试集\n\n train_test_val_pairs = []\n dat_comp_pds = []\n dat_loc_pds = []\n\n pdlls = [] # all location feat pd list\n pdccs = []\n for ind_city in range(len(cfile)):\n pdc = pd.read_csv(pjoin(datapath, cfile[ind_city]))\n pdl = pd.read_csv(pjoin(datapath, lfile))\n pdcl = pd.read_csv(pjoin(datapath_mid, clfile[ind_city]))\n\n # building features\n col_list = list(pdl.columns)\n pdll = pdl.merge(pdcl, how='inner', on=['atlas_location_uuid'], suffixes=['', '_right'])\n pdll = pdll.loc[pdll['duns_number'].notnull()]\n pdll = pdll.groupby(['atlas_location_uuid']).first().reset_index()\n pdll = pdll[col_list]\n pdlls.append(pdll)\n\n # company feature\n pdccs.append(pdc)\n\n # for loop end\n # building feature\n # company feature\n pdlls = pd.concat(pdlls, axis=0).reset_index(drop=True)\n pdccs = pd.concat(pdccs, axis=0).reset_index(drop=True)\n\n print('start processing company and location feature...')\n\n # one hot explanation\n comp_one_hot_col_name = dummy_col_nameC #['major_industry_category', 'location_type', 'primary_sic_2_digit']\n loc_one_hot_col_name = dummy_col_nameL #['building_class']\n\n print('one hot description loading...')\n comp_coldict = load_obj(pjoin(datapath_mid, 'comp_feat_dummy_param' + app_date))\n loc_coldict = load_obj(pjoin(datapath_mid, 'loc_feat_dummy_param' + app_date))\n\n print('dummy...')\n XD_comp = apply_dummy(coldict=comp_coldict, data=pdccs)\n XD_loc = apply_dummy(coldict=loc_coldict, data=pdlls)\n\n print('normalization descriptor loading...')\n comp_norm_param = load_obj(pjoin(datapath_mid, 'comp_feat_norm_param' + app_date))\n loc_norm_param = load_obj(pjoin(datapath_mid, 'loc_feat_norm_param' + app_date))\n\n print('normalization...')\n\n cont_comp = comp_dat_process(pdccs, one_hot_col_name=dummy_col_nameC,cont_col_name=cont_col_nameC,\\\n spec_col_name=spec_col_nameC, do_dummy=False)\n cont_loc = location_dat_process(pdlls, one_hot_col_name=dummy_col_nameL,cont_col_name=cont_col_nameL, \\\n do_dummy=False)\n\n XC_comp = apply_para_normalize_dat(cont_comp[cont_col_nameC], comp_norm_param['C_comp'],\n 
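The Chinese comment earlier in this script (roughly: without merging all cities before dummy-encoding, some categories go missing in some cities, which causes problems) is why saved dummy descriptors are reapplied here. A toy illustration of the misalignment and a reindex fix in plain pandas (the project's own apply_dummy helper is not shown):

```python
import pandas as pd

a = pd.get_dummies(pd.DataFrame({'cls': ['A', 'B']}))
b = pd.get_dummies(pd.DataFrame({'cls': ['B', 'C']}))
cols = sorted(set(a.columns) | set(b.columns))
a = a.reindex(columns=cols, fill_value=0)
b = b.reindex(columns=cols, fill_value=0)
assert list(a.columns) == list(b.columns) == ['cls_A', 'cls_B', 'cls_C']
```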
comp_norm_param['S_comp'])\n XC_loc = apply_para_normalize_dat(cont_loc['data'][cont_col_nameL], loc_norm_param['C_loc'],\n loc_norm_param['S_loc'])\n\n Y_loc = pdlls[key_col_loc].to_numpy()\n Y_comp = pdccs[key_col_comp].to_numpy()\n\n X_comp = np.concatenate([Y_comp, XC_comp, XD_comp], axis=1)\n X_loc = np.concatenate([Y_loc, XC_loc, XD_loc], axis=1)\n\n y_comp_name = key_col_comp\n y_loc_name = key_col_loc\n c_comp_name = cont_col_nameC\n d_comp_name = dummy_col_nameC\n c_loc_name = cont_col_nameL\n d_loc_name = dummy_col_nameL\n\n\n dat_comp_pd = pd.DataFrame(data=X_comp, columns=y_comp_name + c_comp_name + d_comp_name)\n dat_loc_pd = pd.DataFrame(data=X_loc, columns=y_loc_name + c_loc_name + d_loc_name)\n\n print(dat_comp_pd.to_numpy().mean())\n print(dat_loc_pd.to_numpy()[:, 1:].mean())\n print(dat_comp_pd.shape)\n\n print('Done')\n\n # print('Final merge...')\n dat_comp_pd.to_csv(pjoin(datapath_mid, 'company_feat' + appsadd))\n dat_loc_pd.to_csv(pjoin(datapath_mid, 'location_feat' + appsadd))\n print('All Done')\n\n print(dat_comp_pd.shape,dat_loc_pd.shape)","repo_name":"chaffeechenyefei/locationIntelligencePipeline","sub_path":"step003_get_csv_of_normalized_data_additionaly.py","file_name":"step003_get_csv_of_normalized_data_additionaly.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"22787346064","text":"\n\ndef get_mask_file_names(lst_image_filenames):\n\n # empty list to store file names\n lst_mask_filenames = []\n\n for img_filename in lst_image_filenames:\n\n # Get image\n class_name, rem_part = img_filename.split(\"_\")\n\n # separate the extension\n tmp_id, file_ext = rem_part.split(\".\")\n\n # segmentation file name\n seg_filename = class_name + \"_\" + tmp_id + \"_seg\" + \".\" + file_ext\n\n # Append it to list\n lst_mask_filenames.append(seg_filename)\n\n return lst_mask_filenames\n\n\ndef fancy_print(x, n=75):\n print(\"-\" * n)\n print(x)\n print(\"-\" * n)\n","repo_name":"centific-aicoe/AICity-Prod-Counting-2023","sub_path":"src/utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"32730215435","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport json\nimport logging\nimport threading\nimport time\nimport math\nfrom typing import Union\n\nimport paho.mqtt.client as mclient\nimport schedule\n\nimport Mods.Weatherflow.UDP as wudp\nimport Tools.Autodiscovery as autodisc\nimport Tools.PluginManager\nimport Tools.ResettableTimer as rTimer\nfrom Mods.Weatherflow.UpdateTypes import (DeviceStatus, HubStatus,\n LightningStrikeEvent, Obs_Sky,\n ObsAir, RainStart, RapidWind, Tools, ObsTempest,\n updateType)\nfrom Tools.Config import BasicConfig\n\nclass WeatherflowPlugin:\n\n @staticmethod\n def percentageMinMax(input, min, max):\n return ((input - min) * 100) / (max - min)\n\n @staticmethod\n def reset_daily_rain(self):\n self._logger.debug(\"Setze Täglichen Regenzähler & Temperatur Stats zurück...\")\n self._config[\"Weatherflow/yesterday_daily_rain\"] = self._config[\"Weatherflow/daily_rain\"]\n self._config[\"Weatherflow/daily_rain\"] = 0\n self._config[\"Weatherflow/temp_stats/lmin\"] = self._config.get(\"Weatherflow/temp_stats/min\", \"n/A\")\n self._config[\"Weatherflow/temp_stats/lmax\"] = self._config.get(\"Weatherflow/temp_stats/max\", \"n/A\")\n\n self._config[\"Weatherflow/temp_stats/min\"] = \"RESET\"\n 
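# (\"RESET\" is a sentinel value: process_obs_air() re-seeds min/max from the\n # first reading after a reset instead of comparing against the string.)\n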
self._config[\"Weatherflow/temp_stats/max\"] = \"RESET\"\n\n @staticmethod\n def get_device_online_topic(serial_number: str):\n return \"device_online/weatherflow/{}/online\".format(serial_number)\n\n @staticmethod\n def reset_hourly_rain(self):\n self._logger.debug(\"Setze Stündlichen Regenzähler zurück...\")\n self._config[\"Weatherflow/hourly_rain\"] = 0\n delta = datetime.timedelta(hours=1)\n if self._lightning_counter[\"lastTime\"] < datetime.datetime.now() - delta:\n self._logger.debug(\"Prüfe Blitzmelder\")\n if self._lightning_counter[\"serial\"] is None:\n self._lightning_counter[\"init\"] = 100\n self.count_lightnings_per_minute()\n\n def __init__(self, client: mclient.Client, opts: BasicConfig, logger: logging.Logger, device_id: str):\n self._client = client\n self._config = opts\n self._logger = logger.getChild(\"Weatherflow\")\n self._device_id = device_id\n self._udp = None\n self._timer = threading.Timer(2, self.check_online_status)\n self._lightning_counter = {\"count\": 0, \"timer\": None, \"serial\": None, \"init\": 0, \"lastTime\": datetime.datetime.now()}\n self._raining_info = {}\n self._wind_info = {}\n self._online_states = {}\n self._pluginManager = None\n self._deviceUpdates = {}\n self.wasWindy = 0\n\n self._wind_filter = {\n \"avg\": -1, \"max\": -1, \"min\": -1, \"temp\": -1\n }\n\n if self._config.get(\"Weatherflow/wind_diff\", None) is None:\n self._config[\"Weatherflow/wind_diff\"] = 0.2\n\n if self._config.get(\"Weatherflow/temp_diff\", None) is None:\n self._config[\"Weatherflow/temp_diff\"] = 0.2\n\n\n def set_pluginManager(self, pm):\n self._pluginManager = pm\n\n def sendStates(self):\n self._wind_filter = {\n \"avg\": -1, \"max\": -1, \"min\": -1, \"temp\": -1\n }\n\n def register(self, wasConnected=False):\n if self._config.get(\"Weatherflow/deregister\", False):\n self._config[\"Weatherflow/deregister\"] = False\n for sens in self._config.get(\"Weatherflow/reg_sensor\", []):\n self._client.publish(sens, \"\", retain=True)\n for ser in self._config.get(\"Weatherflow/serial_reg\", []):\n online_topic = WeatherflowPlugin.get_device_online_topic(ser)\n self._client.publish(online_topic, \"\", retain=True)\n self._config[\"Weatherflow/reg_sensor\"] = []\n self._config[\"Weatherflow/serial_reg\"] = []\n self._config[\"Weatherflow/seen_devices\"] = []\n self._config.save()\n\n if not wasConnected:\n self._logger.info(\"Starte UDP Server, um auf broadcasts von der Station lauschen zu können\")\n self._udp = wudp.UdpServer(self._config.get(\"Weatherflow/broadcast_addr\", \"255.255.255.255\"),\n self._config.get(\"Weatherflow/broadcast_port\", 50222), logger=self._logger.getChild(\"UDP\"))\n self._config.get(\"Weatherflow/events\", True)\n self._udp.on_message = self.process_update\n\n self._udp.start()\n self._timer.start()\n\n self._logger.debug(\"Regestriere Schedule Jobs für Tägliche und Stündliche Reset Aufgaben...\")\n schedule.every().day.at(\"00:00\").do(WeatherflowPlugin.reset_daily_rain, self)\n schedule.every().hours.do(WeatherflowPlugin.reset_hourly_rain, self)\n\n def register_new_serial(self, serial):\n online_topic = WeatherflowPlugin.get_device_online_topic(serial)\n self._client.publish(online_topic, \"online\", retain=True)\n if serial not in self._config.get(\"Weatherflow/serial_reg\", []):\n self._config[\"Weatherflow/serial_reg\"].append(serial)\n\n def register_new_air(self, serial_number, update: Union[ObsAir.ObsAir, ObsTempest.ObsTempest], tempest_device=None):\n deviceInfo = autodisc.DeviceInfo()\n deviceInfo.IDs = [serial_number]\n 
deviceInfo.mfr = \"Weatherflow\"\n deviceInfo.model = \"Air\"\n deviceInfo.name = \"Weatherflow AIR\"\n deviceInfo.sw_version = update.firmware_revision\n if tempest_device is not None:\n deviceInfo = tempest_device\n std_dev = autodisc.Topics.get_std_devInf()\n \n if len(std_dev.IDs) > 0:\n deviceInfo.via_device = std_dev.IDs[0]\n else:\n self._logger.info(\"Kein std Device gefunden. Kann kein via erstellen!\")\n\n\n self._logger.info(\"Regestriere neue Air mit der Seriellen Nummer: {}\".format(serial_number))\n self.register_new_serial(serial_number)\n self.register_new_sensor(serial_number, \"Luftdruck\", \"station_pressure\", \"mb\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n self.register_new_sensor(serial_number, \"Temperatur\", \"air_temperature\", \"°C\", autodisc.SensorDeviceClasses.TEMPERATURE, deviceInfo,\n value_template=\"{{ value_json.now }}\", json_attributes=True)\n self.register_new_sensor(serial_number, \"Relative Luftfeuchte\", \"relative_humidity\", \"%\", autodisc.SensorDeviceClasses.HUMIDITY, deviceInfo)\n self.register_new_sensor(serial_number, \"Blitze\", \"lightning_count\", \"Stk.\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n self.register_new_sensor(serial_number, \"Durchschnittliche Blitz entfernung\", \"lightning_dist\", \"km\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n self.register_new_sensor(serial_number, \"Batterie (AIR)\", \"battery\", \"%\", autodisc.SensorDeviceClasses.BATTERY, deviceInfo,\n value_template=\"{{ value_json.now }}\", json_attributes=True)\n\n if self._config[\"Weatherflow/events\"]:\n self.register_new_sensor(serial_number, \"Blitz Entfernung\", \"lightning_last_dist\", \"km\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n self.register_new_sensor(serial_number, \"Blitz Energie\", \"lightning_last_nrg\", \"\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n self.register_new_sensor(serial_number, \"Es Blitzt\", \"es_blitzt\", \"\", autodisc.BinarySensorDeviceClasses.POWER, deviceInfo)\n self.register_new_sensor(serial_number, \"Blitze in der Minute\", \"lightning_count_min\", \"Stk/Min\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n\n self.update_sensor(serial_number, \"lightning_last_dist\", \"0\", autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(serial_number, \"lightning_last_nrg\", \"0\", autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(serial_number, \"es_blitzt\", 0, autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(serial_number, \"lightning_count_min\", 0, autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n\n\n def register_new_sky(self, serial_number, upd: Union[Obs_Sky.ObsSky, ObsTempest.ObsTempest], tempest_device=None):\n deviceInfo = autodisc.DeviceInfo()\n deviceInfo.IDs = [serial_number]\n deviceInfo.mfr = \"Weatherflow\"\n deviceInfo.model = \"Sky\"\n deviceInfo.name = \"Weatherflow SKY\"\n deviceInfo.sw_version = upd.firmware_revision\n\n if tempest_device is not None:\n deviceInfo = tempest_device\n\n std_dev = autodisc.Topics.get_std_devInf()\n \n if len(std_dev.IDs) > 0:\n deviceInfo.via_device = std_dev.IDs[0]\n else:\n self._logger.info(\"Kein std Device gefunden. 
Kann kein via erstellen!\")\n\n self._logger.info(\"Regestriere neue Sky mit der Seriellen Nummer: {}\".format(serial_number))\n self.register_new_serial(serial_number)\n self.register_new_sensor(serial_number, \"Lux\", \"lux\", \"lux\", autodisc.SensorDeviceClasses.ILLUMINANCE, deviceInfo)\n self.register_new_sensor(serial_number, \"UV Index\", \"uv_index\", \"uv\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n self.register_new_sensor(serial_number, \"Regen\", \"accumulated_rain\", \"mm\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n self.register_new_sensor(serial_number, \"Wind Max\", \"wind_gust\", \"m/s\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo,\n value_template=\"{{ value_json.ms }}\", json_attributes=True)\n self.register_new_sensor(serial_number, \"Wind avg\", \"wind_average\", \"m/s\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo,\n value_template=\"{{ value_json.ms }}\", json_attributes=True)\n self.register_new_sensor(serial_number, \"Wind Min\", \"wind_lull\", \"m/s\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo,\n value_template=\"{{ value_json.ms }}\", json_attributes=True)\n self.register_new_sensor(serial_number, \"Wind Richtung\", \"wind_direction\", \"°\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n self.register_new_sensor(serial_number, \"Batterie (SKY)\", \"battery_sky\", \"%\", autodisc.SensorDeviceClasses.BATTERY, deviceInfo,\n value_template=\"{{ value_json.now }}\", json_attributes=True)\n self.register_new_sensor(serial_number, \"Sonnen einstrahlung\", \"solar_radiation\", \"w/m²\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n self.register_new_sensor(serial_number, \"Täglicher Regen\", \"local_day_rain_accumulation\", \"mm\", autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo,\n json_attributes=True, value_template=\"{{ value_json.today }}\")\n self.register_new_sensor(serial_number, \"Stündlicher Regen\", \"local_hour_rain_accumulation\", \"mm\",\n autodisc.SensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n self.update_sensor(serial_number, \"local_day_rain_accumulation\", self._config.get(\"Weatherflow/daily_rain\", 0), autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(serial_number, \"local_hour_rain_accumulation\", self._config.get(\"Weatherflow/hourly_rain\", 0),\n autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n\n if self._config[\"Weatherflow/events\"]:\n self.register_new_sensor(serial_number, \"Regen\", \"raining\", \"\", autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR, deviceInfo, \n value_template=\"{{ value_json.Regen }}\", json_attributes=True)\n self.register_new_sensor(serial_number, \"Windig\", \"windy\", \"\", autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR, deviceInfo)\n self.update_sensor(\n serial_number,\n \"raining\", \n json.dumps({\"Regen\": 0,\"Hagel\": \"Nein\" }),\n autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR\n )\n self.update_sensor(serial_number, \"windy\", 0, autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n\n def register_new_sensor(self, serial_number, visible_name, name, messurement_value, device_class: autodisc.DeviceClass, devInf: autodisc.DeviceInfo, value_template=None, json_attributes=None):\n topic = self._config.get_autodiscovery_topic(autodisc.Component.SENSOR, name, device_class, node_id=serial_number)\n online_topic = WeatherflowPlugin.get_device_online_topic(serial_number)\n\n uID = \"{}.wf-{}.{}\".format( \"binary_sensor\" if isinstance(device_class, 
autodisc.BinarySensorDeviceClasses) else \"sensor\", serial_number, name )\n\n payload = topic.get_config_payload(visible_name, messurement_value, online_topic, value_template=value_template, json_attributes=json_attributes, device=devInf, unique_id=uID)\n self._logger.info(\n \"Neuen Sensor ({}) regestriert. Folgendes ist die Config Payload: {}\".format(visible_name, payload))\n self._client.publish(topic.config, payload, retain=True)\n self._logger.info(\"Neuen Sensor ({}) regestriert. Folgendes ist die Config Payload: {}\".format(visible_name, payload))\n if topic.config not in self._config.get(\"Weatherflow/reg_sensor\", []):\n self._config[\"Weatherflow/reg_sensor\"].append(topic.config)\n\n def update_sensor(self, serial_number, name, value, device_class: autodisc.DeviceClass):\n topic = self._config.get_autodiscovery_topic(autodisc.Component.SENSOR, name, device_class, node_id=serial_number)\n if isinstance(value, dict):\n value = json.dumps(value)\n self._client.publish(topic.state, value)\n\n def update_is_raining(self, serial, is_raining=False, is_hail=False):\n rain_json = {\n \"Regen\": 1 if is_raining else 0,\n \"Hagel\": \"Ja\" if is_hail else \"Nein\" \n }\n if is_raining and self._config[\"Weatherflow/events\"]:\n self.update_sensor(serial, \"raining\", json.dumps(rain_json), autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n if self._raining_info.get(serial, None) is None:\n self._raining_info[serial] = rTimer.ResettableTimer(360, self.update_is_raining, serial)\n else:\n self._raining_info[serial].reset()\n else:\n self.update_sensor(serial, \"raining\", json.dumps(rain_json), autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n self._raining_info[serial] = None\n\n def update_is_windy(self, serial, is_windy=False, km=None, deg=None):\n if km == 0 and deg == 0:\n return\n if is_windy and self._config[\"Weatherflow/events\"]:\n self.wasWindy += 1\n if self.wasWindy == 1:\n self.update_sensor(serial, \"windy\", 1, autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n elif self.wasWindy > 120:\n self.wasWindy = 0\n\n if self._wind_info.get(serial, None) is None:\n self._wind_info[serial] = rTimer.ResettableTimer(60, self.update_is_windy, serial)\n else:\n self._wind_info[serial].reset()\n else:\n self.update_sensor(serial, \"windy\", 0, autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n self.wasWindy = 0\n self._wind_info[serial] = None\n\n def stop(self):\n if self._udp is not None:\n self._udp.shutdown()\n if self._timer is not None:\n self._timer.cancel()\n self._timer = None\n\n for k in self._raining_info.keys():\n if self._raining_info[k] is not None:\n self._raining_info[k].cancel()\n\n for k in self._wind_info.keys():\n if self._wind_info[k] is not None:\n self._wind_info[k].cancel()\n\n def process_update(self, update: dict):\n\n pupd = Tools.parse_json_to_update(update, self._logger)\n if pupd is None:\n self._logger.info(\"Nachricht {} von Station ist kein update.\".format(update))\n return\n self._logger.debug(\"Habe von Station {} update bekommen. 
Nachricht war: {}\".format(pupd.update_type.name, update))\n\n if pupd.update_type == updateType.UpdateType.DeviceStatus:\n self._deviceUpdates[pupd.serial_number] = pupd\n self._logger.debug(update)\n self.set_lastseen_device(pupd.serial_number, 1, True)\n elif pupd.update_type == updateType.UpdateType.ObsAir:\n self.process_obs_air(pupd)\n elif pupd.update_type == updateType.UpdateType.ObsSky:\n self.process_obs_sky(pupd)\n elif pupd.update_type == updateType.UpdateType.ObsTempest:\n if not self.set_lastseen_device(pupd.serial_number, pupd.report_interval_minutes):\n deviceInfo = autodisc.DeviceInfo()\n deviceInfo.IDs = [pupd.serial_number]\n deviceInfo.mfr = \"Weatherflow\"\n deviceInfo.model = \"Tempest\"\n deviceInfo.name = \"Weatherflow Tempest\"\n deviceInfo.sw_version = pupd.firmware_revision\n self.register_new_air(pupd.serial_number, pupd, deviceInfo)\n self.register_new_sky(pupd.serial_number, pupd, deviceInfo)\n self.process_obs_air(pupd)\n self.process_obs_sky(pupd)\n elif pupd.update_type == updateType.UpdateType.LightningStrikeEvent:\n self._logger.info(\"Air sagt es blitzt!\")\n if not self._config[\"Weatherflow/events\"]:\n self._logger.debug(\"Keine events erlaubt\")\n return\n\n self.update_sensor(pupd.serial_number, \"lightning_last_dist\", pupd.distance, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(pupd.serial_number, \"lightning_last_nrg\", pupd.energy, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(pupd.serial_number, \"es_blitzt\", 1, autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n self._lightning_counter[\"lastTime\"] = datetime.datetime.now()\n if self._lightning_counter[\"serial\"] is None:\n self._lightning_counter[\"serial\"] = pupd.serial_number\n self._logger.debug(\"Seriennummer des Blitzmeldenden Gerätes kopiert\")\n if self._lightning_counter[\"serial\"] == pupd.serial_number:\n self._lightning_counter[\"count\"] += 1\n if self._lightning_counter[\"timer\"] is None:\n self._lightning_counter[\"timer\"] = threading.Timer(60, self.count_lightnings_per_minute)\n self._lightning_counter[\"timer\"].start()\n self._logger.info(\"Blitz Timer gestartet\")\n else:\n self._logger.info(\"Kein Blitztimer gestartert\")\n else:\n self._logger.warn(\"Serienummern nicht gleich!\")\n\n elif pupd.update_type == updateType.UpdateType.RainStart:\n self._logger.info(\"Sky sagt es regnet!\")\n self.update_is_raining(pupd.serial_number, True)\n elif pupd.update_type == updateType.UpdateType.RapidWind:\n #self._logger.info(\"Sky sagt es geht der wind {}m/s Richtung {}°\".format(pupd.wind_speed, pupd.wind_direction))\n self.update_is_windy(pupd.serial_number, True, pupd.wind_speed, pupd.wind_direction)\n\n def set_lastseen_device(self, serial_number: str, interval_minutes: int, no_register=False) -> bool:\n interval = interval_minutes * 60\n if self._online_states.get(serial_number, None) is None and not no_register:\n self._logger.info(\"Muss für {} neuen Online Status erstellen...\".format(serial_number))\n self._online_states[serial_number] = {}\n self._online_states[serial_number][\"lastUpdate\"] = datetime.datetime.now()\n self._online_states[serial_number][\"intervall\"] = interval\n self._online_states[serial_number][\"error\"] = DeviceStatus.SensorStatus.OK\n self._online_states[serial_number][\"wasOnline\"] = False\n if serial_number not in self._config.get(\"Weatherflow/seen_devices\", []):\n self._logger.info(\"Ist unbekanntes Gerät.\")\n if not no_register:\n self._config[\"Weatherflow/seen_devices\"].append(serial_number)\n 
self._logger.info(\"Speichere als gesehenes Gerät.\")\n return False\n elif no_register:\n return False\n self._online_states[serial_number][\"lastUpdate\"] = datetime.datetime.now()\n self._online_states[serial_number][\"intervall\"] = interval\n return True\n\n def process_obs_air(self, update: Union[ObsAir.ObsAir, ObsTempest.ObsTempest]):\n max_delta = datetime.timedelta( minutes=(update.report_intervall_minutes * 2) )\n if (update.timestamp + max_delta) < datetime.datetime.now():\n self._logger.warning(\"Air Update wird abgewiesen. Zu alt! {}\".format(update.timestamp.isoformat()))\n return\n self._logger.debug(\"Air update\")\n if not self.set_lastseen_device(update.serial_number, update.report_intervall_minutes):\n self.register_new_air(update.serial_number, update)\n\n if self._config.get(\"Weatherflow/temp_stats/min\", \"RESET\") == \"RESET\":\n self._config[\"Weatherflow/temp_stats/min\"] = update.air_temperatur\n elif self._config[\"Weatherflow/temp_stats/min\"] > update.air_temperatur:\n self._config[\"Weatherflow/temp_stats/min\"] = update.air_temperatur\n\n if self._config.get(\"Weatherflow/temp_stats/max\", \"RESET\") == \"RESET\":\n self._config[\"Weatherflow/temp_stats/max\"] = update.air_temperatur\n elif self._config[\"Weatherflow/temp_stats/max\"] < update.air_temperatur:\n self._config[\"Weatherflow/temp_stats/max\"] = update.air_temperatur\n\n\n temperature_json = {\"Heute Min\": self._config[\"Weatherflow/temp_stats/min\"],\n \"Heute Max\": self._config[\"Weatherflow/temp_stats/max\"],\n \"now\": round(update.air_temperatur, 1),\n \"Gestern Min\": self._config.get(\"Weatherflow/temp_stats/lmin\", \"n/A\"),\n \"Gestern Max\": self._config.get(\"Weatherflow/temp_stats/lmax\", \"n/A\"),\n }\n\n self.update_sensor(update.serial_number, \"station_pressure\", update.station_pressure, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n \n if self._config.get(\"Weatherflow/temp_diff\", None) is not None:\n diff = self._config[\"Weatherflow/temp_diff\"]\n if update.air_temperatur > (self._wind_filter[\"temp\"] + diff) or update.air_temperatur < (self._wind_filter[\"temp\"] - diff):\n self._wind_filter[\"temp\"] = update.air_temperatur\n self.update_sensor(update.serial_number, \"air_temperature\", temperature_json, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n else:\n self.update_sensor(update.serial_number, \"air_temperature\", temperature_json, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(update.serial_number, \"relative_humidity\", update.relative_humidity, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(update.serial_number, \"lightning_count\", update.lightning_strike_count, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(update.serial_number, \"lightning_dist\", update.lightning_strike_avg_distance, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n\n if update.lightning_strike_count == 0 and not self._config[\"Weatherflow/events\"]:\n self.update_sensor(update.serial_number, \"lightning_last_dist\", \"0\", autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(update.serial_number, \"lightning_last_nrg\", \"0\", autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n\n if update.serial_number in self._deviceUpdates.keys():\n battery_str = math.floor(WeatherflowPlugin.percentageMinMax(update.battery, 1.6, 2.95))\n if isinstance(update, ObsTempest.ObsTempest):\n battery_str = 100\n sensor_ok = \"\"\n if self._deviceUpdates[update.serial_number]._sensor_status == DeviceStatus.SensorStatus.OK:\n sensor_ok = \"OK\"\n elif 
self._deviceUpdates[update.serial_number]._sensor_status & DeviceStatus.SensorStatus.AIR_LIGHTNING_DISTURBER:\n sensor_ok = \"OK_LD\"\n else:\n if self._deviceUpdates[update.serial_number]._sensor_status & DeviceStatus.SensorStatus.AIR_LIGHTNING_FAILED:\n sensor_ok = str(sensor_ok) + \"Blitzsensor ist ausgefallen. \"\n if self._deviceUpdates[update.serial_number]._sensor_status & DeviceStatus.SensorStatus.AIR_LIGHTNING_NOISE:\n sensor_ok = str(sensor_ok) + \"Zu viel Rauschen für Blitzsensor. \"\n if self._deviceUpdates[update.serial_number]._sensor_status & DeviceStatus.SensorStatus.AIR_PRESSURE_FAILED:\n sensor_ok = str(sensor_ok) + \"Luftdrucksensor ausgefallen. \"\n if self._deviceUpdates[update.serial_number]._sensor_status & DeviceStatus.SensorStatus.AIR_TEMPERATURE_FAILED:\n sensor_ok = str(sensor_ok) + \"Temperatursensor ausgefallen. \"\n if self._deviceUpdates[update.serial_number]._sensor_status & DeviceStatus.SensorStatus.AIR_RH_FAILED:\n sensor_ok = str(sensor_ok) + \"Luftfeuchtesensor ausgefallen. \"\n\n if self._config.get(\"Weatherflow/{0}/minBat\".format(update.serial_number), 10) > update.battery:\n self._config[\"Weatherflow/{0}/minBat\".format(update.serial_number)] = update.battery\n elif self._config.get(\"Weatherflow/{0}/maxBat\".format(update.serial_number), 0) < update.battery:\n self._config[\"Weatherflow/{0}/maxBat\".format(update.serial_number)] = update.battery\n\n rssi = -100\n try:\n rssi = self._deviceUpdates[update.serial_number]._rssi\n except:\n pass\n\n battery_json = {\n \"min\": self._config.get(\"Weatherflow/{0}/minBat\".format(update.serial_number), 0),\n \"max\": self._config.get(\"Weatherflow/{0}/maxBat\".format(update.serial_number), 0),\n \"now\": battery_str,\n \"volt\": update.battery,\n \"rssi\": rssi,\n \"sensors\": sensor_ok\n }\n self.update_sensor(update.serial_number, \"battery\", battery_json, autodisc.SensorDeviceClasses.BATTERY)\n\n def process_obs_sky(self, update: Union[Obs_Sky.ObsSky, ObsTempest.ObsTempest]):\n max_delta = datetime.timedelta(minutes=(update.report_intervall_minutes * 2) )\n if (update.timestamp + max_delta) < datetime.datetime.now():\n self._logger.warning(\"Sky Update wird abgewiesen. 
Zu alt!\")\n return\n self._logger.debug(\"Sky update\")\n if not self.set_lastseen_device(update.serial_number, update.report_interval_minutes):\n self.register_new_sky(update.serial_number, update)\n\n self._config[\"Weatherflow/daily_rain\"] += update.accumulated_rain\n self._config[\"Weatherflow/hourly_rain\"] += update.accumulated_rain\n\n self.update_sensor(update.serial_number, \"lux\", update.lux, autodisc.SensorDeviceClasses.ILLUMINANCE)\n self.update_sensor(update.serial_number, \"uv_index\", update.uv_index, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(update.serial_number, \"accumulated_rain\", update.accumulated_rain, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n try:\n if self._config.get(\"Weatherflow/wind_diff\", None) is not None:\n diff = self._config[\"Weatherflow/wind_diff\"]\n if update.wind_gust > (self._wind_filter[\"max\"] + diff) or update.wind_gust < (self._wind_filter[\"max\"] - diff):\n self.update_sensor(update.serial_number, \"wind_gust\",\n {\"ms\": update.wind_gust, \"km/h\": update.wind_gust * 3.6}\n , autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self._wind_filter[\"max\"] = update.wind_gust\n \n if update.wind_avg > (self._wind_filter[\"avg\"] + diff) or update.wind_avg < (self._wind_filter[\"avg\"] - diff):\n self.update_sensor(update.serial_number, \"wind_average\", \n {\"ms\": update.wind_avg, \"km/h\": update.wind_avg * 3.6}\n , autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self._wind_filter[\"avg\"] = update.wind_avg\n\n if update.wind_lull > (self._wind_filter[\"min\"] + diff) or update.wind_lull < (self._wind_filter[\"min\"] - diff):\n self.update_sensor(update.serial_number, \"wind_lull\", \n {\"ms\": update.wind_lull, \"km/h\": update.wind_lull * 3.6}\n , autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self._wind_filter[\"min\"] = update.wind_lull\n else:\n self.update_sensor(update.serial_number, \"wind_gust\",\n {\"ms\": update.wind_gust, \"km/h\": update.wind_gust * 3.6}\n , autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(update.serial_number, \"wind_average\", \n {\"ms\": update.wind_avg, \"km/h\": update.wind_avg * 3.6}\n , autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(update.serial_number, \"wind_lull\", \n {\"ms\": update.wind_lull, \"km/h\": update.wind_lull * 3.6}\n , autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n\n except TypeError:\n pass\n self.update_sensor(update.serial_number, \"wind_direction\", update.wind_direction, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(update.serial_number, \"solar_radiation\", update.solar_radiation, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_sensor(update.serial_number, \"local_hour_rain_accumulation\", self._config[\"Weatherflow/hourly_rain\"], autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n \n daily_rain_js = {\n \"today\": round(self._config[\"Weatherflow/daily_rain\"], 1),\n \"yesterday\": round(self._config.get(\"Weatherflow/yesterday_daily_rain\", 0), 1)\n }\n self.update_sensor(update.serial_number, \"local_day_rain_accumulation\", json.dumps(daily_rain_js), autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n self.update_is_windy(update.serial_number, True, update.wind_avg, update.wind_direction)\n\t\n charging_str = \"NULL\"\n battery_str = 0\n sensors = \"\"\n \n if isinstance(update, ObsTempest.ObsTempest):\n last_pct = self._config.get(\"Weatherflow/{0}/last\".format(update.serial_number), -1)\n if update.battery != last_pct:\n self._config[\"Weatherflow/{0}/last\".format(update.serial_number)] 
= update.battery\n if last_pct == -1:\n last_pct = update.battery\n self._config[\"Weatherflow/{0}/last\".format(update.serial_number)] = update.battery\n range_min = 1.8\n range_max = 2.85\n battery_str = math.floor(WeatherflowPlugin.percentageMinMax(update.battery, range_min, range_max))\n charging_str = \"Lädt\" if last_pct < update.battery else \"Entlädt\"\n if update.battery <= 2.355:\n charging_str = \"Sensoren auf 5 Minuten intervall gesetzt, Blitzerkennung, Regen deaktiviert| {} Ultra Energiesparmodus\".format(charging_str)\n elif update.battery <= 2.39:\n charging_str = \"Windsensor auf 1 Minuten intervall gesetzt. | {} Energiesparmodus\".format(charging_str)\n elif update.battery <= 2.415:\n charging_str = \"Windsensor auf 6 Sekunden intervall gesetzt. | {} Leichter Energiesparmodus\".format(charging_str)\n else:\n charging_str = \"OK | {} Kein Energiesparen\".format(charging_str)\n else:\n charging_str = \"on battery\"\n battery_str = math.floor(WeatherflowPlugin.percentageMinMax(update.battery, 1.6, 3.18))\n\n if update.battery > 3.32:\n self._config[\"Weatherflow/sky_solar_module\"] = True\n if self._config.get(\"Weatherflow/sky_solar_module\", False):\n charging_str = \"discharging\"\n battery_str = round(WeatherflowPlugin.percentageMinMax(update.battery, 2.5, 3.6), 1)\n if update.battery > 3.2:\n battery_str = 100\n charging_str = \"Komplett aufgeladen\"\n elif update.battery > 3.5:\n battery_str = 100\n charging_str = \"Aufladen\"\n elif update.battery < 3.0:\n charging_str = 'Unter \"Working voltage\"'\n elif update.battery < 1.8:\n charging_str = \"Austauschen\"\n \n if self._deviceUpdates[update.serial_number]._sensor_status == DeviceStatus.SensorStatus.OK:\n sensors = \"OK\"\n else:\n if self._deviceUpdates[update.serial_number]._sensor_status & DeviceStatus.SensorStatus.SKY_LIGHT_UV_FAILED:\n sensors = str(sensors) + \"UV Sensor ist ausgefallen. \"\n if self._deviceUpdates[update.serial_number]._sensor_status & DeviceStatus.SensorStatus.SKY_PRECIP_FAILED:\n sensors = str(sensors) + \"Regen Sensor ist ausgefallen. 
\"\n if self._deviceUpdates[update.serial_number]._sensor_status & DeviceStatus.SensorStatus.SKY_WIND_FAILED:\n sensors = str(sensors) + \"Wind Sensor ist ausgefallen.\"\n\n if update.rain_type != 0:\n self._logger.info(\"rain_type: {}\".format(update.rain_type))\n self.update_is_raining(update.serial_number, True, update.rain_type == 2)\n \n if self._config.get(\"Weatherflow/{0}/minBat\".format(update.serial_number), 10) > update.battery:\n self._config[\"Weatherflow/{0}/minBat\".format(update.serial_number)] = update.battery\n elif self._config.get(\"Weatherflow/{0}/maxBat\".format(update.serial_number), 0) < update.battery:\n self._config[\"Weatherflow/{0}/maxBat\".format(update.serial_number)] = update.battery\n \n rssi = -100\n try:\n rssi = self._deviceUpdates[update.serial_number]._rssi\n except:\n pass\n\n battery_json = {\n \"min\": self._config.get(\"Weatherflow/{0}/minBat\".format(update.serial_number), 0),\n \"max\": self._config.get(\"Weatherflow/{0}/maxBat\".format(update.serial_number), 0),\n \"now\": battery_str,\n \"volt\": update.battery,\n \"charging state\": charging_str,\n \"rssi\": rssi,\n \"sensors\": sensors\n }\n self.update_sensor(update.serial_number, \"battery_sky\", battery_json, autodisc.SensorDeviceClasses.BATTERY)\n\n\n def count_lightnings_per_minute(self):\n count = self._lightning_counter[\"count\"]\n self._lightning_counter[\"count\"] = 0\n if self._lightning_counter[\"init\"] == 0 and self._lightning_counter[\"serial\"] is not None:\n self._logger.info(\"Es Blitzt, Regestriere Blitze pro Minute zähler für {}\".format(self._lightning_counter[\"serial\"]))\n self._lightning_counter[\"init\"] = 1\n self._lightning_counter[\"timer\"] = threading.Timer(60, self.count_lightnings_per_minute)\n self._lightning_counter[\"timer\"].start()\n if 0 < self._lightning_counter[\"init\"] <= 10:\n if count == 0:\n self._lightning_counter[\"init\"] += 1\n else:\n self._lightning_counter[\"init\"] = 1\n self._lightning_counter[\"timer\"] = threading.Timer(60, self.count_lightnings_per_minute)\n self._lightning_counter[\"timer\"].start()\n self._logger.debug(\"Es blitzt immer nocht. Restarte timer...\")\n self.update_sensor(self._lightning_counter[\"serial\"], \"es_blitzt\", 1, autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n elif self._lightning_counter[\"init\"] > 5:\n self._lightning_counter[\"timer\"] = None\n self._logger.debug(\"Es blitzt nicht mehr. 
Timer wird nicht neu gestartet...\")\n self.update_sensor(self._lightning_counter[\"serial\"], \"es_blitzt\", 0,autodisc.BinarySensorDeviceClasses.GENERIC_SENSOR)\n self._lightning_counter = {\"count\": 0, \"timer\": None, \"serial\": None, \"init\": 0, \"lastTime\": datetime.datetime.now()}\n\n if self._lightning_counter[\"serial\"] is not None:\n self.update_sensor(self._lightning_counter[\"serial\"], \"lightning_count_min\", count, autodisc.SensorDeviceClasses.GENERIC_SENSOR)\n\n def check_online_status(self):\n if self._timer is not None:\n self._timer.cancel()\n\n for serial in self._online_states.keys():\n last_update = self._online_states[serial][\"lastUpdate\"]\n timespan = datetime.datetime.now() - last_update\n if timespan.seconds > (self._online_states[serial][\"intervall\"] * 4) and self._online_states[serial][\"wasOnline\"]:\n self._logger.info(\"Weatherflow Device {} ist jetzt offline\".format(serial))\n online_topic = WeatherflowPlugin.get_device_online_topic(serial)\n self._client.publish(online_topic, \"offline\", retain=True)\n self._online_states[serial][\"wasOnline\"] = False\n elif not self._online_states[serial][\"wasOnline\"]:\n self._logger.info(\"Weatherflow Device {} ist jetzt online\".format(serial))\n online_topic = WeatherflowPlugin.get_device_online_topic(serial)\n self._client.publish(online_topic, \"online\", retain=True)\n self._online_states[serial][\"wasOnline\"] = True\n\n if self._timer is not None:\n self._timer.cancel()\n self._timer = threading.Timer(30, self.check_online_status)\n self._timer.start()\n\n","repo_name":"mhetzi/mqttRaspberry","sub_path":"Mods/Weatherflow/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":38236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"38059065080","text":"#encoding=utf-8\nimport xlrd\n\nclass c_p2nic:\n def __init__(self,nic,nic_name,pc, pc_name, percent, nic_related_p, pc_related_p):\n self.nic = nic\n self.nic_name = nic_name\n self.pc = pc\n self.pc_name = pc_name\n self.percent = percent\n self.nic_related_p = nic_related_p\n self.pc_related_p = pc_related_p\n def get_key_value(self, key_name):\n key_value = {'nic':self.nic,\n 'nic_name':self.nic_name,\n 'pc':self.pc,\n 'pc_name':self.pc_name,\n 'percent':self.percent,\n 'nic_related_p':self.nic_related_p,\n 'pc_related_p':self.pc_related_p\n }\n return key_value[key_name]\n\ndef IPC2NIC_Read(file_name):\n xls_read = xlrd.open_workbook(file_name)\n xls_table = xls_read.sheets()[0]\n table_nrows = xls_table.nrows\n\n p2nic_list = []\n for i in range(table_nrows):\n # t_p2nic = {}\n # t_p2nic['nic']= xls_table.cell(i, 0).value.strip() # 第1列,NIC\n # t_p2nic['nic_name'] = xls_table.cell(i, 1).value.strip() # 第2列,NIC名称\n # t_p2nic['pc'] = xls_table.cell(i, 2).value.strip() # 第3列,分类号\n # t_p2nic['pc_name'] = xls_table.cell(i, 3).value.strip() # 第4列,分类号释义\n # t_p2nic['percent'] = xls_table.cell(i, 4).value # 第5列,NIC和PC相关比例\n # t_p2nic['nic_related_p'] = xls_table.cell(i, 5).value # 第6列,与NIC有关的P的数量\n # t_p2nic['pc_related_p'] = xls_table.cell(i, 6).value # 第7列,与PC有关的P的数量\n # p2nic_list.append(t_p2nic)\n t_p2nic = c_p2nic(\n xls_table.cell(i, 0).value.strip(), # 第1列,NIC\n xls_table.cell(i, 1).value.strip(), # 第2列,NIC名称\n xls_table.cell(i, 2).value.strip(), # 第3列,分类号\n xls_table.cell(i, 3).value.strip(), # 第4列,分类号释义\n xls_table.cell(i, 4).value, # 第5列,NIC和PC相关比例\n xls_table.cell(i, 5).value, # 第6列,与NIC有关的P的数量\n xls_table.cell(i, 6).value # 第7列,与PC有关的P的数量\n )\n 
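# one c_p2nic record per worksheet row; ClassList_Index() below can look up\n # any field by name for a given list of row indices\n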
p2nic_list.append(t_p2nic)\n return p2nic_list\n\ndef ClassList_Index(class_list, idx, key_name):\n value = []\n for i in range(len(idx)):\n t_idx = idx[i]\n value.append(class_list[t_idx].get_key_value(key_name))\n return value\n\n","repo_name":"yangdongbjcn/patent-container","sub_path":"ZLFX/version/zlfx2023.3/PATENT2ECONOMY_IPC2NIC_MAP.py","file_name":"PATENT2ECONOMY_IPC2NIC_MAP.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"38185207345","text":"from __future__ import division\nimport time\nimport os\nimport argparse\nimport sys\n\nimport torchvision.models as models\nimport torch\n\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-mode', type=str, help='rgb or flow (or joint for eval)')\nparser.add_argument('-train', type=str2bool, default='True', help='train or eval')\nparser.add_argument('-comp_info', type=str)\nparser.add_argument('-rgb_model_file', type=str)\nparser.add_argument('-flow_model_file', type=str)\nparser.add_argument('-gpu', type=str, default='4')\nparser.add_argument('-dataset', type=str, default='charades')\nparser.add_argument('-rgb_root', type=str, default='no_root')\nparser.add_argument('-flow_root', type=str, default='no_root')\nparser.add_argument('-type', type=str, default='original')\nparser.add_argument('-lr', type=str, default='0.1')\nparser.add_argument('-epoch', type=str, default='50')\nparser.add_argument('-model', type=str, default='')\nparser.add_argument('-APtype', type=str, default='wap')\nparser.add_argument('-randomseed', type=str, default='False')\nparser.add_argument('-load_model', type=str, default='False')\nparser.add_argument('-batch_size', type=str, default='False')\nparser.add_argument('-num_channel', type=str, default='False')\nparser.add_argument('-run_mode', type=str, default='False')\nparser.add_argument('-feat', type=str, default='False')\n\n\nargs = parser.parse_args()\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport random\n\n# set random seed\nif args.randomseed==\"False\":\n SEED = 0\nelif args.randomseed==\"True\":\n SEED = random.randint(1, 100000)\nelse:\n SEED = int(args.randomseed)\n\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\ntorch.manual_seed(SEED)\nnp.random.seed(SEED)\ntorch.cuda.manual_seed_all(SEED)\nrandom.seed(SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nprint('Random_SEED!!!:', SEED)\n\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\n\nimport json\n\nimport pickle\nimport math\n\nfrom apmeter import APMeter\n\n\nbatch_size = int(args.batch_size)\n\n\nif args.dataset == 'charades':\n from charades_i3d_per_video import MultiThumos as Dataset\n from charades_i3d_per_video import mt_collate_fn as collate_fn\n if args.run_mode == 'debug':\n print('debug!!!')\n train_split = './data/charades_test.json'\n test_split = './data/charades_test.json'\n else:\n train_split = './data/charades.json'\n test_split = './data/charades.json'\n # print('load feature from:', args.rgb_root)\n rgb_root = '/Path/to/charades_feat_rgb'\n skeleton_root = '/Path/to/charades_feat_pose'\n flow_root = '/Path/to/charades_feat_flow'\n 
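# NB: the '/Path/to/...' roots above are placeholders and should point at the\n # pre-extracted per-video feature directories (presumably I3D features, given\n # charades_i3d_per_video) before this script is run.\n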
rgb_of=[rgb_root,flow_root]\n classes = 157\n\n\ndef load_data(train_split, val_split, root):\n # Load Data\n print('load data', root)\n if len(train_split) > 0:\n if str(args.feat) == '2d':\n # '-pool_step' is never declared in the argparse setup above; assume 1 when absent.\n dataset = Dataset(train_split, 'training', root, batch_size, classes, int(getattr(args, 'pool_step', 1)))\n else:\n dataset = Dataset(train_split, 'training', root, batch_size, classes)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=8,\n pin_memory=True, collate_fn=collate_fn)\n dataloader.root = root\n else:\n\n dataset = None\n dataloader = None\n\n if str(args.feat) == '2d':\n val_dataset = Dataset(val_split, 'testing', root, batch_size, classes, int(getattr(args, 'pool_step', 1)))\n else:\n val_dataset = Dataset(val_split, 'testing', root, batch_size, classes)\n val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=True, num_workers=2,\n pin_memory=True, collate_fn=collate_fn)\n val_dataloader.root = root\n\n dataloaders = {'train': dataloader, 'val': val_dataloader}\n datasets = {'train': dataset, 'val': val_dataset}\n return dataloaders, datasets\n\n\n# train the model\ndef run(models, criterion, num_epochs=50):\n since = time.time()\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n probs = []\n for model, gpu, dataloader, optimizer, sched, model_file in models:\n train_map, train_loss = train_step(model, gpu, optimizer, dataloader['train'], epoch)\n prob_val, val_loss, val_map = val_step(model, gpu, dataloader['val'], epoch)\n probs.append(prob_val)\n sched.step(val_loss)\n\n\ndef eval_model(model, dataloader, baseline=False):\n results = {}\n for data in dataloader:\n other = data[3]\n outputs, loss, probs, _ = run_network(model, data, 0, baseline=baseline)\n fps = outputs.size()[1] / other[1][0]\n\n results[other[0][0]] = (outputs.data.cpu().numpy()[0], probs.data.cpu().numpy()[0], data[2].numpy()[0], fps)\n return results\n\n\ndef run_network(model, data, gpu, epoch=0, baseline=False):\n inputs, mask, labels, other = data\n # wrap them in Variable\n inputs = Variable(inputs.cuda(gpu))\n mask = Variable(mask.cuda(gpu))\n labels = Variable(labels.cuda(gpu))\n\n mask_list = torch.sum(mask, 1)\n mask_new = np.zeros((mask.size()[0], classes, mask.size()[1]))\n for i in range(mask.size()[0]):\n mask_new[i, :, :int(mask_list[i])] = np.ones((classes, int(mask_list[i])))\n mask_new = torch.from_numpy(mask_new).float()\n mask_new = Variable(mask_new.cuda(gpu))\n\n inputs = inputs.squeeze(3).squeeze(3)\n #print(\"inputs\",inputs.size())\n activation = model(inputs, mask_new)\n\n \n outputs_final = activation\n\n #print(\"outputs_final\",outputs_final.size())\n outputs_final = outputs_final[-1]\n #print(\"outputs_final\",outputs_final.size())\n outputs_final = outputs_final.permute(0, 2, 1) \n probs_f = F.sigmoid(outputs_final) * mask.unsqueeze(2)\n loss_f = F.binary_cross_entropy_with_logits(outputs_final, labels, size_average=False)\n loss_f = torch.sum(loss_f) / torch.sum(mask) \n\n loss = loss_f \n\n corr = torch.sum(mask)\n tot = torch.sum(mask)\n\n return outputs_final, loss, probs_f, corr / tot\n\n\ndef train_step(model, gpu, optimizer, dataloader, epoch):\n model.train(True)\n tot_loss = 0.0\n error = 0.0\n num_iter = 0.\n apm = APMeter()\n for data in dataloader:\n optimizer.zero_grad()\n num_iter += 1\n\n outputs, loss, probs, err = run_network(model, data, gpu, epoch)\n apm.add(probs.data.cpu().numpy()[0], data[2].numpy()[0])\n error += err.data\n tot_loss += loss.data\n\n loss.backward()\n
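# loss.backward() above accumulates gradients into each parameter's .grad\n # buffer; optimizer.step() then applies one Adam update from them. The\n # optimizer.zero_grad() at the top of the loop keeps gradients from\n # accumulating across batches.\n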
optimizer.step()\n if args.APtype == 'wap':\n train_map = 100 * apm.value()\n else:\n train_map = 100 * apm.value().mean()\n print('train-map:', train_map)\n apm.reset()\n\n epoch_loss = tot_loss / num_iter\n\n return train_map, epoch_loss\n\n\ndef val_step(model, gpu, dataloader, epoch):\n model.train(False)\n apm = APMeter()\n tot_loss = 0.0\n error = 0.0\n num_iter = 0.\n num_preds = 0\n\n full_probs = {}\n\n # Iterate over data.\n for data in dataloader:\n num_iter += 1\n other = data[3]\n\n outputs, loss, probs, err = run_network(model, data, gpu, epoch)\n\n apm.add(probs.data.cpu().numpy()[0], data[2].numpy()[0])\n\n error += err.data\n tot_loss += loss.data\n\n probs = probs.squeeze()\n\n full_probs[other[0][0]] = probs.data.cpu().numpy().T\n\n epoch_loss = tot_loss / num_iter\n\n\n val_map = torch.sum(100 * apm.value()) / torch.nonzero(100 * apm.value()).size()[0]\n print('val-map:', val_map)\n print(100 * apm.value())\n apm.reset()\n\n return full_probs, epoch_loss, val_map\n\n\nif __name__ == '__main__':\n print(str(args.model))\n print('batch_size:', batch_size)\n print('cuda_avail', torch.cuda.is_available())\n\n if args.mode == 'flow':\n print('flow mode', flow_root)\n dataloaders, datasets = load_data(train_split, test_split, flow_root)\n elif args.mode == 'skeleton':\n print('Pose mode', skeleton_root)\n dataloaders, datasets = load_data(train_split, test_split, skeleton_root)\n elif args.mode == 'rgb':\n print('RGB mode', rgb_root)\n dataloaders, datasets = load_data(train_split, test_split, rgb_root)\n\n if args.train:\n num_channel = args.num_channel\n if args.mode == 'skeleton':\n input_channnel = 256\n else:\n input_channnel = 1024\n\n num_classes = classes\n mid_channel=int(args.num_channel)\n\n if args.model==\"PDAN\":\n print(\"you are processing PDAN_original\")\n from PDAN import PDAN\n # rgb_model = Net(mid_channel, input_channnel, classes)\n stage=1\n block=5\n num_channel=512\n input_channnel=1024\n num_classes=classes\n rgb_model = PDAN(stage, block, num_channel, input_channnel, num_classes)\n pytorch_total_params = sum(p.numel() for p in rgb_model.parameters() if p.requires_grad)\n print('pytorch_total_params', pytorch_total_params)\n #exit()\n print ('stage:', stage, 'block:', block, 'num_channel:', num_channel, 'input_channnel:', input_channnel,\n 'num_classes:', num_classes)\n\n\n rgb_model=torch.nn.DataParallel(rgb_model)\n\n if args.load_model!= \"False\":\n rgb_model.load_state_dict(torch.load(str(args.load_model)))\n print(\"loaded\",args.load_model)\n\n pytorch_total_params = sum(p.numel() for p in rgb_model.parameters() if p.requires_grad)\n print('pytorch_total_params', pytorch_total_params)\n print('num_channel:', num_channel, 'input_channnel:', input_channnel,'num_classes:', num_classes)\n rgb_model.cuda()\n\n criterion = nn.NLLLoss(reduce=False)\n lr = float(args.lr)\n print(lr)\n optimizer = optim.Adam(rgb_model.parameters(), lr=lr)\n lr_sched = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=8, verbose=True)\n run([(rgb_model, 0, dataloaders, optimizer, lr_sched, args.comp_info)], criterion, num_epochs=int(args.epoch))\n\n\n","repo_name":"dairui01/PDAN","sub_path":"train_PDAN.py","file_name":"train_PDAN.py","file_ext":"py","file_size_in_byte":10446,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"38"} +{"seq_id":"11762811370","text":"from flask import Blueprint, request, jsonify\nfrom ..models import db, User, Bike\nfrom ..controllers import users_controllers\n\nbikes 
= Blueprint('bikes', __name__)\n\n@bikes.route('/all_bikes', methods=['GET'])\ndef get_all_bikes():\n try:\n bikes = Bike.query.all()\n return jsonify({\"message\": 'All bikes accessed', \"bikes\": [bike.serialize() for bike in bikes]}), 200\n except Exception as e:\n return jsonify({\"error\": f\"An error occurred: {str(e)}\"}), 500\n\n@bikes.route('/create_bike', methods=['POST'])\ndef create_bike():\n try:\n data = request.get_json()\n plate = data.get('plate')\n model = data.get('model')\n brand = data.get('brand')\n owner = data.get('owner')\n\n # Create a new bike instance\n new_bike = Bike(plate=plate, model=model, brand=brand, owner=owner)\n\n # Add the bike to the database\n db.session.add(new_bike)\n db.session.commit()\n\n return jsonify({\"message\": \"Bike created successfully\", \"bike\": new_bike.serialize()}), 201\n except Exception as e:\n return jsonify({\"error\": f\"An error occurred: {str(e)}\"}), 500\n \n \n ","repo_name":"Raauul1996/MotosGimenez_Web","sub_path":"Back/src/routes/bikes.py","file_name":"bikes.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"22653946187","text":"import tensorlayerx as tlx\nimport paddle\nimport paddle2tlx\nimport tensorlayerx\nimport tensorlayerx.nn as nn\nfrom .backbones.vgg import vgg16\nfrom .layers import Conv1x1\nfrom .layers import make_norm\nfrom .layers import ChannelAttention\nfrom .layers import SpatialAttention\nfrom paddle2tlx.pd2tlx.utils import restore_model_cdet\nDSIFN_URLS = (\n 'https://paddlers.bj.bcebos.com/pretrained/cd/levircd/weights/dsifn_levircd.pdparams'\n )\n\n\nclass DSIFN(nn.Module):\n \"\"\"\n The DSIFN implementation based on PaddlePaddle.\n\n The original article refers to\n C. Zhang, et al., \"A deeply supervised image fusion network for change \n detection in high resolution bi-temporal remote sensing images\"\n (https://www.sciencedirect.com/science/article/pii/S0924271620301532).\n\n Note that in this implementation, there is a flexible number of target classes.\n\n Args:\n num_classes (int): Number of target classes.\n use_dropout (bool, optional): A bool value that indicates whether to use \n dropout layers. When the model is trained on a relatively small dataset, \n the dropout layers help prevent overfitting. 
Default: False.\n \"\"\"\n\n    def __init__(self, num_classes, use_dropout=False):\n        super(DSIFN, self).__init__()\n        self.encoder1 = VGG16FeaturePicker()\n        self.encoder2 = VGG16FeaturePicker()\n        self.sa1 = SpatialAttention()\n        self.sa2 = SpatialAttention()\n        self.sa3 = SpatialAttention()\n        self.sa4 = SpatialAttention()\n        self.sa5 = SpatialAttention()\n        self.ca1 = ChannelAttention(in_ch=1024)\n        self.bn_ca1 = nn.BatchNorm2d(num_features=1024, data_format=\\\n            'channels_first')\n        self.o1_conv1 = conv2d_bn(1024, 512, use_dropout)\n        self.o1_conv2 = conv2d_bn(512, 512, use_dropout)\n        self.bn_sa1 = nn.BatchNorm2d(num_features=512, data_format=\\\n            'channels_first')\n        self.o1_conv3 = Conv1x1(512, num_classes)\n        self.trans_conv1 = paddle2tlx.pd2tlx.ops.tlxops.tlx_ConvTranspose2d(\n            in_channels=512, out_channels=512, kernel_size=2, stride=2,\n            data_format='channels_first', padding=0)\n        self.ca2 = ChannelAttention(in_ch=1536)\n        self.bn_ca2 = nn.BatchNorm2d(num_features=1536, data_format=\\\n            'channels_first')\n        self.o2_conv1 = conv2d_bn(1536, 512, use_dropout)\n        self.o2_conv2 = conv2d_bn(512, 256, use_dropout)\n        self.o2_conv3 = conv2d_bn(256, 256, use_dropout)\n        self.bn_sa2 = nn.BatchNorm2d(num_features=256, data_format=\\\n            'channels_first')\n        self.o2_conv4 = Conv1x1(256, num_classes)\n        self.trans_conv2 = paddle2tlx.pd2tlx.ops.tlxops.tlx_ConvTranspose2d(\n            in_channels=256, out_channels=256, kernel_size=2, stride=2,\n            data_format='channels_first', padding=0)\n        self.ca3 = ChannelAttention(in_ch=768)\n        self.o3_conv1 = conv2d_bn(768, 256, use_dropout)\n        self.o3_conv2 = conv2d_bn(256, 128, use_dropout)\n        self.o3_conv3 = conv2d_bn(128, 128, use_dropout)\n        self.bn_sa3 = nn.BatchNorm2d(num_features=128, data_format=\\\n            'channels_first')\n        self.o3_conv4 = Conv1x1(128, num_classes)\n        self.trans_conv3 = paddle2tlx.pd2tlx.ops.tlxops.tlx_ConvTranspose2d(\n            in_channels=128, out_channels=128, kernel_size=2, stride=2,\n            data_format='channels_first', padding=0)\n        self.ca4 = ChannelAttention(in_ch=384)\n        self.o4_conv1 = conv2d_bn(384, 128, use_dropout)\n        self.o4_conv2 = conv2d_bn(128, 64, use_dropout)\n        self.o4_conv3 = conv2d_bn(64, 64, use_dropout)\n        self.bn_sa4 = nn.BatchNorm2d(num_features=64, data_format=\\\n            'channels_first')\n        self.o4_conv4 = Conv1x1(64, num_classes)\n        self.trans_conv4 = paddle2tlx.pd2tlx.ops.tlxops.tlx_ConvTranspose2d(\n            in_channels=64, out_channels=64, kernel_size=2, stride=2,\n            data_format='channels_first', padding=0)\n        self.ca5 = ChannelAttention(in_ch=192)\n        self.o5_conv1 = conv2d_bn(192, 64, use_dropout)\n        self.o5_conv2 = conv2d_bn(64, 32, use_dropout)\n        self.o5_conv3 = conv2d_bn(32, 16, use_dropout)\n        self.bn_sa5 = nn.BatchNorm2d(num_features=16, data_format=\\\n            'channels_first')\n        self.o5_conv4 = Conv1x1(16, num_classes)\n        self.init_weight()\n\n    def forward(self, t1, t2):\n        self.encoder1.set_eval(), self.encoder2.set_eval()\n        t1_feats = self.encoder1(t1)\n        t2_feats = self.encoder2(t2)\n        t1_f_l3, t1_f_l8, t1_f_l15, t1_f_l22, t1_f_l29 = t1_feats\n        t2_f_l3, t2_f_l8, t2_f_l15, t2_f_l22, t2_f_l29 = t2_feats\n        aux_x = []\n        x = 
tensorlayerx.concat([x, t1_f_l15, t2_f_l15], axis=1)\n x = self.ca3(x) * x\n x = self.o3_conv1(x)\n x = self.o3_conv2(x)\n x = self.o3_conv3(x)\n x = self.sa3(x) * x\n x = self.bn_sa3(x)\n if self.training:\n aux_x.append(x)\n x = self.trans_conv3(x)\n x = tensorlayerx.concat([x, t1_f_l8, t2_f_l8], axis=1)\n x = self.ca4(x) * x\n x = self.o4_conv1(x)\n x = self.o4_conv2(x)\n x = self.o4_conv3(x)\n x = self.sa4(x) * x\n x = self.bn_sa4(x)\n if self.training:\n aux_x.append(x)\n x = self.trans_conv4(x)\n x = tensorlayerx.concat([x, t1_f_l3, t2_f_l3], axis=1)\n x = self.ca5(x) * x\n x = self.o5_conv1(x)\n x = self.o5_conv2(x)\n x = self.o5_conv3(x)\n x = self.sa5(x) * x\n x = self.bn_sa5(x)\n out5 = self.o5_conv4(x)\n if not self.training:\n return [out5]\n else:\n size = paddle2tlx.pd2tlx.ops.tlxops.tlx_get_tensor_shape(t1)[2:]\n out1 = paddle.nn.functional.interpolate(self.o1_conv3(aux_x[0]),\n size=size, mode='bilinear', align_corners=True)\n out2 = paddle.nn.functional.interpolate(self.o2_conv4(aux_x[1]),\n size=size, mode='bilinear', align_corners=True)\n out3 = paddle.nn.functional.interpolate(self.o3_conv4(aux_x[2]),\n size=size, mode='bilinear', align_corners=True)\n out4 = paddle.nn.functional.interpolate(self.o4_conv4(aux_x[3]),\n size=size, mode='bilinear', align_corners=True)\n return [out5, out4, out3, out2, out1]\n\n def init_weight(self):\n pass\n\n\nclass VGG16FeaturePicker(nn.Module):\n\n def __init__(self, indices=(3, 8, 15, 22, 29)):\n super(VGG16FeaturePicker, self).__init__()\n features = list(vgg16(pretrained=True).features)[:30]\n self.features = nn.ModuleList(features)\n self.features.eval()\n self.indices = set(indices)\n\n def forward(self, x):\n picked_feats = []\n for idx, model in enumerate(self.features):\n x = model(x)\n if idx in self.indices:\n picked_feats.append(x)\n return picked_feats\n\n\ndef conv2d_bn(in_ch, out_ch, with_dropout=True):\n lst = [nn.GroupConv2d(kernel_size=3, stride=1, padding=1, in_channels=\\\n in_ch, out_channels=out_ch, data_format='channels_first'), nn.PRelu\n (), nn.BatchNorm2d(num_features=out_ch, data_format='channels_first')]\n if with_dropout:\n lst.append(paddle2tlx.pd2tlx.ops.tlxops.tlx_Dropout(p=0.6))\n return nn.Sequential([*lst])\n\n\ndef _dsifn(pretrained=None, num_classes=2):\n model = DSIFN(num_classes=num_classes)\n if pretrained:\n model = restore_model_cdet(model, DSIFN_URLS, 'dsifn')\n return model\n","repo_name":"tensorlayer/Paddle2TLX","sub_path":"tlx_models/paddlerscd/models/dsifn.py","file_name":"dsifn.py","file_ext":"py","file_size_in_byte":8117,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"38"} +{"seq_id":"73572662831","text":"import glob\nimport itertools\nimport logging\nimport os\nimport re\nimport struct\nimport subprocess\nimport sys\nimport zipfile\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__),\n os.pardir, os.pardir, os.pardir, os.pardir,\n 'build', 'android'))\nfrom pylib import constants\nfrom pylib.symbols import elf_symbolizer\n\n\nCHROME_SRC = constants.DIR_SOURCE_ROOT\nANDROID_BUILD_TOP = CHROME_SRC\nSYMBOLS_DIR = CHROME_SRC\nCHROME_SYMBOLS_DIR = None\nARCH = \"arm\"\nTOOLCHAIN_INFO = None\nSECONDARY_ABI_OUTPUT_PATH = None\n\n# See:\n# http://bugs.python.org/issue14315\n# https://hg.python.org/cpython/rev/6dd5e9556a60#l2.8\ndef PatchZipFile():\n oldDecodeExtra = zipfile.ZipInfo._decodeExtra\n def decodeExtra(self):\n try:\n oldDecodeExtra(self)\n except struct.error:\n pass\n zipfile.ZipInfo._decodeExtra = 
decodeExtra\nPatchZipFile()\n\ndef Uname():\n \"\"\"'uname' for constructing prebuilt/<...> and out/host/<...> paths.\"\"\"\n uname = os.uname()[0]\n if uname == \"Darwin\":\n proc = os.uname()[-1]\n if proc == \"i386\" or proc == \"x86_64\":\n return \"darwin-x86\"\n return \"darwin-ppc\"\n if uname == \"Linux\":\n return \"linux-x86\"\n return uname\n\ndef ToolPath(tool, toolchain_info=None):\n \"\"\"Return a full qualified path to the specified tool\"\"\"\n # ToolPath looks for the tools in the completely incorrect directory.\n # This looks in the checked in android_tools.\n if ARCH == \"arm\":\n toolchain_source = \"arm-linux-androideabi-4.9\"\n toolchain_prefix = \"arm-linux-androideabi\"\n ndk = \"ndk\"\n elif ARCH == \"arm64\":\n toolchain_source = \"aarch64-linux-android-4.9\"\n toolchain_prefix = \"aarch64-linux-android\"\n ndk = \"ndk\"\n elif ARCH == \"x86\":\n toolchain_source = \"x86-4.9\"\n toolchain_prefix = \"i686-linux-android\"\n ndk = \"ndk\"\n elif ARCH == \"x86_64\" or ARCH == \"x64\":\n toolchain_source = \"x86_64-4.9\"\n toolchain_prefix = \"x86_64-linux-android\"\n ndk = \"ndk\"\n elif ARCH == \"mips\":\n toolchain_source = \"mipsel-linux-android-4.9\"\n toolchain_prefix = \"mipsel-linux-android\"\n ndk = \"ndk\"\n else:\n raise Exception(\"Could not find tool chain for \" + ARCH)\n\n toolchain_subdir = (\n \"third_party/android_tools/%s/toolchains/%s/prebuilt/linux-x86_64/bin\" %\n (ndk, toolchain_source))\n\n return os.path.join(CHROME_SRC,\n toolchain_subdir,\n toolchain_prefix + \"-\" + tool)\n\ndef GetAapt():\n \"\"\"Returns the path to aapt.\n\n Args:\n None\n\n Returns:\n the pathname of the 'aapt' executable.\n \"\"\"\n sdk_home = os.path.join('third_party', 'android_tools', 'sdk')\n sdk_home = os.environ.get('SDK_HOME', sdk_home)\n aapt_exe = glob.glob(os.path.join(sdk_home, 'build-tools', '*', 'aapt'))\n if not aapt_exe:\n return None\n return sorted(aapt_exe, key=os.path.getmtime, reverse=True)[0]\n\ndef ApkMatchPackageName(aapt, apk_path, package_name):\n \"\"\"Returns true the APK's package name matches package_name.\n\n Args:\n aapt: pathname for the 'aapt' executable.\n apk_path: pathname of the APK file.\n package_name: package name to match.\n\n Returns:\n True if the package name matches or aapt is None, False otherwise.\n \"\"\"\n if not aapt:\n # Allow false positives\n return True\n aapt_output = subprocess.check_output(\n [aapt, 'dump', 'badging', apk_path]).split('\\n')\n package_name_re = re.compile(r'package: .*name=\\'(\\S*)\\'')\n for line in aapt_output:\n match = package_name_re.match(line)\n if match:\n return package_name == match.group(1)\n return False\n\ndef PathListJoin(prefix_list, suffix_list):\n \"\"\"Returns each prefix in prefix_list joined with each suffix in suffix list.\n\n Args:\n prefix_list: list of path prefixes.\n suffix_list: list of path suffixes.\n\n Returns:\n List of paths each of which joins a prefix with a suffix.\n \"\"\"\n return [\n os.path.join(prefix, suffix)\n for suffix in suffix_list for prefix in prefix_list ]\n\n\ndef _GetChromeOutputDirCandidates():\n \"\"\"Returns a list of output directories to look in.\"\"\"\n if os.environ.get('CHROMIUM_OUTPUT_DIR') or os.environ.get('BUILDTYPE'):\n return [constants.GetOutDirectory()]\n return [constants.GetOutDirectory(build_type='Debug'),\n constants.GetOutDirectory(build_type='Release')]\n\n\ndef GetCandidates(dirs, filepart, candidate_fun):\n \"\"\"Returns a list of candidate filenames, sorted by modification time.\n\n Args:\n dirs: a list of the directory 
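`PathListJoin` in the symbol.py record above builds the cross product of prefixes and suffixes, grouped by suffix first. A quick standalone check of that ordering (paths are arbitrary sample values):

```python
import os

def path_list_join(prefix_list, suffix_list):
    # Same shape as PathListJoin above: every prefix joined with every
    # suffix; the suffix loop is outermost, so results group by suffix.
    return [os.path.join(prefix, suffix)
            for suffix in suffix_list for prefix in prefix_list]

print(path_list_join(['out/Debug', 'out/Release'], ['lib', 'apks']))
# ['out/Debug/lib', 'out/Release/lib', 'out/Debug/apks', 'out/Release/apks']
```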
part of the pathname.\n    filepart: the file part of the pathname.\n    candidate_fun: a function to apply to each candidate, returns a list.\n\n  Returns:\n    A list of candidate files ordered by modification time, newest first.\n  \"\"\"\n  candidates = PathListJoin(dirs, [filepart])\n  logging.debug('GetCandidates: prefiltered candidates = %s' % candidates)\n  candidates = list(\n      itertools.chain.from_iterable(map(candidate_fun, candidates)))\n  candidates.sort(key=os.path.getmtime, reverse=True)\n  return candidates\n\ndef GetCandidateApks():\n  \"\"\"Returns a list of APKs which could contain the library.\n\n  Args:\n    None\n\n  Returns:\n    list of APK filenames which could contain the library.\n  \"\"\"\n  dirs = PathListJoin(_GetChromeOutputDirCandidates(), ['apks'])\n  return GetCandidates(dirs, '*.apk', glob.glob)\n\ndef GetCrazyLib(apk_filename):\n  \"\"\"Returns the name of the first crazy library from this APK.\n\n  Args:\n    apk_filename: name of an APK file.\n\n  Returns:\n    Name of the first library which would be crazy loaded from this APK.\n  \"\"\"\n  zip_file = zipfile.ZipFile(apk_filename, 'r')\n  for filename in zip_file.namelist():\n    match = re.match('lib/[^/]*/crazy.(lib.*[.]so)', filename)\n    if match:\n      return match.group(1)\n\ndef GetApkFromLibrary(device_library_path):\n  match = re.match(r'.*/([^/]*)-[0-9]+(\\/[^/]*)?\\.apk$', device_library_path)\n  if not match:\n    return None\n  return match.group(1)\n\ndef GetMatchingApks(package_name):\n  \"\"\"Find any APKs which match the package indicated by package_name.\n\n  Args:\n    package_name: package name to match.\n\n  Returns:\n    A list of APK filenames which could contain the desired library.\n  \"\"\"\n  return filter(\n      lambda candidate_apk:\n          ApkMatchPackageName(GetAapt(), candidate_apk, package_name),\n      GetCandidateApks())\n\ndef MapDeviceApkToLibrary(device_apk_name):\n  \"\"\"Provide a library name which corresponds with device_apk_name.\n\n  Args:\n    device_apk_name: name of the APK on the device.\n\n  Returns:\n    Name of the library which corresponds to that APK.\n  \"\"\"\n  matching_apks = GetMatchingApks(device_apk_name)\n  logging.debug('MapDeviceApkToLibrary: matching_apks=%s' % matching_apks)\n  for matching_apk in matching_apks:\n    crazy_lib = GetCrazyLib(matching_apk)\n    if crazy_lib:\n      return crazy_lib\n\ndef GetLibrarySearchPaths():\n  if SECONDARY_ABI_OUTPUT_PATH:\n    return PathListJoin([SECONDARY_ABI_OUTPUT_PATH], ['lib.unstripped', 'lib', '.'])\n  if CHROME_SYMBOLS_DIR:\n    return [CHROME_SYMBOLS_DIR]\n  dirs = _GetChromeOutputDirCandidates()\n  # GYP places unstripped libraries under out/$BUILDTYPE/lib\n  # GN places them under out/$BUILDTYPE/lib.unstripped\n  return PathListJoin(dirs, ['lib.unstripped', 'lib', '.'])\n\ndef GetCandidateLibraries(library_name):\n  \"\"\"Returns a list of candidate library filenames.\n\n  Args:\n    library_name: basename of the library to match.\n\n  Returns:\n    A list of matching library filenames for library_name.\n  \"\"\"\n  def extant_library(filename):\n    if (os.path.exists(filename)\n        and elf_symbolizer.ContainsElfMagic(filename)):\n      return [filename]\n    return []\n\n  candidates = GetCandidates(\n      GetLibrarySearchPaths(), library_name,\n      extant_library)\n  # For GN, candidates include both stripped and unstripped libraries. Stripped\n  # libraries are always newer. 
Explicitly look for .unstripped and sort them\n  # ahead.\n  candidates.sort(key=lambda c: int('unstripped' not in c))\n  return candidates\n\n\ndef TranslateLibPath(lib):\n  # The filename in the stack trace may be an APK name rather than a library\n  # name. This happens when the library was loaded directly from inside the\n  # APK. If this is the case we try to figure out the library name by looking\n  # for a matching APK file and finding the name of the library it contains.\n  # The name of the APK file on the device is of the form\n  # <package_name>-<number>.apk. The APK file on the host may have any name\n  # so we look at the APK badging to see if the package name matches.\n  apk = GetApkFromLibrary(lib)\n  if apk is not None:\n    logging.debug('TranslateLibPath: apk=%s' % apk)\n    mapping = MapDeviceApkToLibrary(apk)\n    if mapping:\n      lib = mapping\n\n  # SymbolInformation(lib, addr) receives lib as the path from symbols\n  # root to the symbols file. This needs to be translated to point to the\n  # correct .so path. If the user doesn't explicitly specify which directory to\n  # use, then use the most recently updated one in one of the known directories.\n  # If the .so is not found somewhere in CHROME_SYMBOLS_DIR, leave it\n  # untranslated in case it is an Android symbol in SYMBOLS_DIR.\n  library_name = os.path.basename(lib)\n\n  logging.debug('TranslateLibPath: lib=%s library_name=%s' % (lib, library_name))\n\n  candidate_libraries = GetCandidateLibraries(library_name)\n  logging.debug('TranslateLibPath: candidate_libraries=%s' % candidate_libraries)\n  if not candidate_libraries:\n    return lib\n\n  library_path = os.path.relpath(candidate_libraries[0], SYMBOLS_DIR)\n  logging.debug('TranslateLibPath: library_path=%s' % library_path)\n  return library_path\n\ndef CallCppFilt(mangled_symbol):\n  cmd = [ToolPath(\"c++filt\")]\n  process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n  process.stdin.write(mangled_symbol)\n  process.stdin.write(\"\\n\")\n  process.stdin.close()\n  demangled_symbol = process.stdout.readline().strip()\n  process.stdout.close()\n  return demangled_symbol\n\ndef FormatSymbolWithOffset(symbol, offset):\n  if offset == 0:\n    return symbol\n  return \"%s+%d\" % (symbol, offset)\n\ndef SetSecondaryAbiOutputPath(path):\n  global SECONDARY_ABI_OUTPUT_PATH\n  if SECONDARY_ABI_OUTPUT_PATH and SECONDARY_ABI_OUTPUT_PATH != path:\n    raise Exception(\"Assign SECONDARY_ABI_OUTPUT_PATH to different value \" +\n                    \" origin: %s new: %s\" % (SECONDARY_ABI_OUTPUT_PATH, path))\n  else:\n    SECONDARY_ABI_OUTPUT_PATH = path\n","repo_name":"blusno1/chromium-1","sub_path":"third_party/android_platform/development/scripts/symbol.py","file_name":"symbol.py","file_ext":"py","file_size_in_byte":10344,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
{"seq_id":"20235859544","text":"from flask import Flask\nimport sys\nfrom flask import jsonify, render_template, make_response, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import text, ForeignKey\nfrom sqlalchemy.orm import relationship\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'\ndb = SQLAlchemy(app)\n\n\nclass Teams(db.Model):\n    __tablename__ = \"teams\"\n\n    id = db.Column(db.Integer, primary_key=True)\n    short = db.Column(db.String(3), unique=True, nullable=False)\n    name = db.Column(db.String(50), unique=True, nullable=False)\n\n\nclass Games(db.Model):\n    __tablename__ = \"games\"\n\n    id = db.Column(db.Integer, primary_key=True)\n    home_team_id = db.Column(db.Integer, 
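`GetCandidateLibraries` in the symbol.py record above keeps the mtime ordering from `GetCandidates` but bumps `.unstripped` paths ahead with a stable sort on a 0/1 key. A minimal standalone demonstration of that trick (paths are made-up samples):

```python
candidates = [
    'out/Release/lib/libchrome.so',              # stripped copy, newest mtime
    'out/Release/lib.unstripped/libchrome.so',   # unstripped copy
]
# Stable sort: paths containing 'unstripped' get key 0 and move to the
# front, while the original (mtime) order is preserved within each group.
candidates.sort(key=lambda c: int('unstripped' not in c))
print(candidates[0])  # out/Release/lib.unstripped/libchrome.so
```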
ForeignKey(\"teams.id\"))\n visiting_team_id = db.Column(db.Integer, ForeignKey(\"teams.id\"))\n home_team_score = db.Column(db.Integer)\n visiting_team_score = db.Column(db.Integer)\n\n # Relationships\n teams = relationship(\"Teams\")\n\n\nclass Quarters(db.Model):\n __tablename__ = \"quarters\"\n id = db.Column(db.Integer, primary_key=True)\n game_id = db.Column(db.Integer, ForeignKey(\"games.id\"))\n quarters = db.Column(db.String(50))\n\n # Relationships\n games = relationship(\"Games\")\n\n\ndef insert_update_query(query):\n db.session.execute(text(query))\n db.session.commit()\n\n\ndef select_query(query):\n return db.session.execute(text(query))\n\n\ndef get_wins(short_name):\n home_query = f\"SELECT Count(*) FROM games WHERE home_team_id = (SELECT id FROM teams WHERE short = '{short_name}') \" \\\n f\"AND home_team_score > visiting_team_score;\"\n\n visiting_query = f\"SELECT Count(*) FROM games WHERE visiting_team_id = (SELECT id FROM teams WHERE short = '{short_name}') \" \\\n f\"AND visiting_team_score > home_team_score;\"\n home_count_result = select_query(home_query).fetchone()[0]\n visiting_count_result = select_query(visiting_query).fetchone()[0]\n\n return home_count_result + visiting_count_result\n\n\ndef get_losses(short_name):\n home_query = f\"SELECT Count(*) FROM games WHERE home_team_id = (SELECT id FROM teams WHERE short = '{short_name}') \" \\\n f\"AND home_team_score < visiting_team_score;\"\n\n visiting_query = f\"SELECT Count(*) FROM games WHERE visiting_team_id = (SELECT id FROM teams WHERE short = '{short_name}') \" \\\n f\"AND visiting_team_score < home_team_score;\"\n home_count_result = select_query(home_query).fetchone()[0]\n visiting_count_result = select_query(visiting_query).fetchone()[0]\n\n return home_count_result + visiting_count_result\n\n\n# make your code here\n@app.route(\"/\", methods=['GET'])\ndef welcome():\n response = make_response(render_template(\"index.html\"), 200)\n return response\n\n\n@app.route(\"/api/v1/teams\", methods=['POST', 'GET'])\ndef team_request():\n if request.method == \"POST\":\n body_json = request.json\n if body_json['short'].isupper() and len(body_json['short']) == 3:\n query = f\"INSERT INTO teams (short, name) VALUES ('{body_json['short']}', '{body_json['name']}');\"\n insert_update_query(query)\n response = make_response(jsonify({\"success\": True, \"data\": \"Team has been added\"}), 201)\n return response\n\n else:\n response = make_response(jsonify({\"success\": False, \"data\": \"Wrong short format\"}), 400)\n return response\n\n elif request.method == \"GET\":\n result = select_query(\"SELECT * FROM teams;\")\n rows = result.fetchall()\n result_json = {\n \"success\": True,\n \"data\": {}\n }\n for row in rows:\n result_json[\"data\"][row[1]] = row[2]\n response = make_response(result_json, 200)\n return response\n\n\n@app.route(\"/api/v1/games\", methods=['POST', 'GET'])\ndef game_request():\n if request.method == \"POST\":\n body_json = request.json\n\n home_id_query = f\"SELECT id FROM teams WHERE short = '{body_json['home_team']}'\"\n visiting_id_query = f\"SELECT id FROM teams WHERE short = '{body_json['visiting_team']}'\"\n\n home_id_result = select_query(home_id_query).fetchone()\n visiting_id_result = select_query(visiting_id_query).fetchone()\n\n if home_id_result is None or visiting_id_result is None:\n response = make_response(jsonify({\"success\": False, \"data\": \"Wrong team short\"}), 400)\n return make_response(response)\n\n else:\n home_id_result = home_id_result[0]\n visiting_id_result = 
visiting_id_result[0]\n\n            query = f\"INSERT INTO games (home_team_id, visiting_team_id, home_team_score, visiting_team_score) \" \\\n                    f\"VALUES ('{home_id_result}', '{visiting_id_result}', '{body_json['home_team_score']}', '{body_json['visiting_team_score']}');\"\n            insert_update_query(query)\n            response = make_response(jsonify({\"success\": True, \"data\": \"Game has been added\"}), 201)\n            return response\n\n    elif request.method == \"GET\":\n        result = select_query(\"SELECT * FROM games;\")\n        rows = result.fetchall()\n        result_json = {\n            \"success\": True,\n            \"data\": {}\n        }\n        for row in rows:\n            home_team_name = select_query(f\"SELECT name FROM teams WHERE id = {row[1]};\").fetchone()[0]\n            visiting_team_name = select_query(f\"SELECT name FROM teams WHERE id = {row[2]};\").fetchone()[0]\n\n            result_json[\"data\"][row[0]] = f\"{home_team_name} {row[3]}:{row[4]} {visiting_team_name}\"\n        response = make_response(result_json, 200)\n        return response\n\n\n@app.route('/api/v1/team/<short>')\ndef get_stats(short):\n    name_check_query = select_query(f\"SELECT name FROM teams WHERE short='{short}'\").fetchone()\n\n    if name_check_query is None:\n        response = make_response(jsonify({\"success\": False, \"data\": f\"There is no team {short}\"}), 400)\n        return response\n\n    else:\n        wins = get_wins(short)\n        losses = get_losses(short)\n        response = make_response(jsonify({\"success\": True, \"data\": {\"name\": name_check_query[0],\n                                                                    \"short\": short,\n                                                                    \"win\": wins,\n                                                                    \"lost\": losses}}), 200)\n        return response\n\n\n@app.route('/api/v2/games', methods=['POST', 'GET'])\ndef game_v2_request():\n    if request.method == \"POST\":\n        body_json = request.json\n        home_team = body_json[\"home_team\"]\n        visiting_team = body_json[\"visiting_team\"]\n\n        home_id_query = f\"SELECT id FROM teams WHERE short = '{home_team}';\"\n        visiting_id_query = f\"SELECT id FROM teams WHERE short = '{visiting_team}';\"\n\n        home_id = select_query(home_id_query).fetchone()[0]\n        visiting_id = select_query(visiting_id_query).fetchone()[0]\n\n        insert_new_game = f\"INSERT INTO games (home_team_id, visiting_team_id, home_team_score, visiting_team_score)\" \\\n                          f\"VALUES ('{home_id}', '{visiting_id}', 0, 0);\"\n\n        insert_update_query(insert_new_game)\n\n        select_new_id_query = f\"SELECT id FROM games;\"\n        new_id = select_query(select_new_id_query).fetchall()[-1][0]\n\n        response = make_response(jsonify({\"success\": True, \"data\": new_id}), 201)\n\n        return response\n\n    elif request.method == \"GET\":\n        select_games_query = \"SELECT * FROM games;\"\n        games_result = select_query(select_games_query).fetchall()\n        games_dict = {}\n\n        for game in games_result:\n            game_id = game[0]\n            home_team_id = game[1]\n            visiting_team_id = game[2]\n            home_team_score = game[3]\n            visiting_team_score = game[4]\n\n            quarters_query = f\"SELECT quarters FROM quarters WHERE game_id = '{game_id}';\"\n            quarters_result = select_query(quarters_query).fetchall()\n\n            home_team_query = f\"SELECT name FROM teams WHERE id = '{home_team_id}';\"\n            visiting_team_query = f\"SELECT name FROM teams WHERE id = '{visiting_team_id}';\"\n\n            home_team_name = select_query(home_team_query).fetchone()[0]\n            visiting_team_name = select_query(visiting_team_query).fetchone()[0]\n\n            game_string = f\"{home_team_name} {home_team_score}:{visiting_team_score} {visiting_team_name}\"\n\n            if len(quarters_result) != 0:\n                quarters_string = \"(\" + \",\".join([result[0] for result in quarters_result]) + \")\"\n                game_string += f\" {quarters_string}\"\n\n            games_dict[f\"{game_id}\"] = game_string\n\n        response = 
make_response(jsonify({\"success\": True, \"data\": games_dict}), 200)\n\n        return response\n\n\n@app.route('/api/v2/games/<game_id>', methods=[\"POST\"])\ndef add_quarter(game_id):\n    check_query = f\"SELECT * FROM games WHERE id = '{game_id}'\"\n    check_result = select_query(check_query).fetchone()\n\n    if check_result is None:\n        response = make_response(jsonify({\"success\": False, \"data\": f\"There is no game with id {game_id}\"}), 400)\n        return response\n    else:\n        body_json = request.json\n        quarters = body_json[\"quarters\"]\n\n        add_quarters_query = f\"INSERT INTO quarters (game_id, quarters) VALUES ('{game_id}', '{quarters}');\"\n        insert_update_query(add_quarters_query)\n\n        update_query = f\"UPDATE games SET home_team_score = home_team_score + {quarters.split(':')[0]},\" \\\n                       f\"visiting_team_score = visiting_team_score + {quarters.split(':')[1]}\" \\\n                       f\" WHERE id = {game_id};\"\n\n        insert_update_query(update_query)\n        \n        response = make_response(jsonify({\"success\": True, \"data\": \"Score updated\"}), 201)\n        return response\n\n\n@app.route('/<path:undefined_route>')\ndef handle_undefined_route(undefined_route):\n    return jsonify({\"success\": False, \"data\": \"Wrong address\"}), 404\n\n\n# don't change the following way to run flask:\nif __name__ == '__main__':\n    with app.app_context():\n        db.drop_all()\n        db.create_all()\n\n    if len(sys.argv) > 1:\n        arg_host, arg_port = sys.argv[1].split(':')\n        app.run(host=arg_host, port=arg_port)\n    else:\n        app.run(debug=True)\n","repo_name":"cagrikurt8/NBA-Flask-API","sub_path":"basketball_API.py","file_name":"basketball_API.py","file_ext":"py","file_size_in_byte":10266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"70231953391","text":"from fastapi import Request, HTTPException, status\nfrom fastapi.security import HTTPBearer\nfrom fastapi.security.utils import get_authorization_scheme_param\n\nfrom source.api.auth.auth_handler import decode_jwt\n\n\nclass JWTBearer(HTTPBearer):\n    def __call__(self, request: Request) -> str | None:\n        authorization = request.headers.get('Authorization')\n        scheme, credentials = get_authorization_scheme_param(authorization)\n        if not (authorization and scheme and credentials):\n            if self.auto_error:\n                raise HTTPException(\n                    status_code=status.HTTP_403_FORBIDDEN, detail='Not authenticated',\n                )\n            return None\n        if scheme.lower() != 'bearer':\n            if self.auto_error:\n                raise HTTPException(\n                    status_code=status.HTTP_403_FORBIDDEN,\n                    detail='Invalid authentication credentials',\n                )\n            return None\n        if not decode_jwt(credentials):\n            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail='Invalid authorization code')\n        return credentials\n","repo_name":"F-Fadeev/School-API","sub_path":"source/api/auth/auth_bearer.py","file_name":"auth_bearer.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
{"seq_id":"44703722854","text":"import numpy as np\nfrom django.core.files import File\nfrom io import BytesIO\nimport face_recognition\nfrom PIL import Image, ImageDraw\n\n\n\"\"\"\n    The business logic of the project\n\"\"\"\n\n\nclass FaceCompare:\n    \"\"\"\n    The class responsible for the logic of searching for similar faces\n    \"\"\"\n    def __init__(self, img, data):\n        \"\"\"\n        :param img: Picture with faces\n        :param data: list of models to search for\n        \"\"\"\n        self.data = data\n        self.img = img\n\n        self.img_load = face_recognition.load_image_file(img)\n        # self.img_faces_encoding = 
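The JWTBearer record above overrides `HTTPBearer.__call__` so it can be plugged in as a FastAPI dependency. A hedged usage sketch (the route and the behavior of `decode_jwt` are assumptions for illustration):

```python
from fastapi import Depends, FastAPI

from source.api.auth.auth_bearer import JWTBearer  # class from the record above

app = FastAPI()

@app.get("/protected", dependencies=[Depends(JWTBearer())])
def protected_route():
    # This body only runs when JWTBearer returned a token string; on a
    # missing/invalid token it raised HTTP 403 inside the dependency.
    return {"ok": True}
```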
face_recognition.face_encodings(self.img_load)\n self.img_faces_locate = face_recognition.face_locations(self.img_load)\n\n # self.img_selection_faces = self._selection_faces()\n self.imgs_extracting_faces = self._extracting_faces()\n\n def _selection_faces(self):\n \"\"\"\n Highlighting faces in a photo\n \"\"\"\n pil_img = Image.fromarray(self.img_load)\n draw = ImageDraw.Draw(pil_img)\n\n for face_locations in self.img_faces_locate:\n top, right, botton, left = face_locations\n\n draw.rectangle(((left, top), (right, botton)), outline=(255, 255, 0), width=2)\n\n del draw\n\n img_io = BytesIO() # create a BytesIO object\n pil_img.save(img_io, 'PNG') # save image to BytesIO object\n img_selection_faces = File(img_io, f'selection_faces.png') # create a django friendly File object\n\n return img_selection_faces\n\n def _extracting_faces(self):\n \"\"\"\n Cuts faces with photos\n\n :return: coordinate dictionary: face_encoding\n \"\"\"\n faces = []\n locations = []\n\n for face_locations in self.img_faces_locate:\n info = []\n\n top, right, botton, left = face_locations\n\n face_img = self.img_load[top:botton, left:right]\n pil_img = Image.fromarray(face_img)\n\n img_io = BytesIO() # create a BytesIO object\n pil_img.save(img_io, 'PNG') # save image to BytesIO object\n img_selection_faces = File(img_io, f'{face_locations}.png') # create a django friendly File object\n\n info.append(img_selection_faces)\n info.append(face_recognition.face_encodings(face_recognition.load_image_file(img_selection_faces))[0])\n\n faces.append(info)\n locations.append(f'{left}:{right}, {top}:{botton}')\n\n return dict(zip(locations, faces))\n\n def compare(self):\n \"\"\"\n Compares clipped faces with faces from data list\n\n :return: list which consists of dictionaries - coordinates of faces: matches from data\n \"\"\"\n\n answer = []\n faces_encoding = []\n\n for data in self.data:\n face_encoding = np.array([np.float64(item) for item in data.face_encoding.split()])\n faces_encoding.append(face_encoding)\n\n for locate in self.imgs_extracting_faces:\n result_compare = face_recognition.compare_faces(faces_encoding, self.imgs_extracting_faces[locate][1], 0.45)\n\n result_face = []\n\n for i in range(len(result_compare)):\n if result_compare[i]:\n result_face.append(self.data[i])\n\n answer.append(\n {\n \"coordinates\": locate,\n \"coincidences\": result_face\n }\n )\n\n return answer\n\n @staticmethod\n def get_img_encoding(face):\n \"\"\"\n Calculation of encoding for all faces in the picture\n\n :param face: the path to the photo\n :return: encoding list for all faces in the picture\n \"\"\"\n img = face_recognition.load_image_file(face)\n img_encoding = face_recognition.face_encodings(img)\n\n return img_encoding\n\n @staticmethod\n def get_img_encoding_str(face):\n \"\"\"\n Calculation of encoding for one face in a photo (for example, if it is a photo from documents)\n\n :param face: the path to the photo\n :return: encoding string\n \"\"\"\n img = face_recognition.load_image_file(face)\n img_encoding = face_recognition.face_encodings(img)\n\n img_encoding_list = list(img_encoding[0])\n\n img_encoding_str = \"\"\n for item in img_encoding_list:\n img_encoding_str += str(item) + \" \"\n\n return img_encoding_str\n","repo_name":"saindi/face_search","sub_path":"services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"29579751930","text":"import argparse\nimport math\n\nclass 
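`FaceCompare.compare` in the record above calls `face_recognition.compare_faces(..., 0.45)`, which is a Euclidean-distance threshold on 128-dimensional face encodings. A minimal NumPy sketch of that check (the helper name and sample encodings are illustrative):

```python
import numpy as np

def compare_faces(known_encodings, candidate, tolerance=0.45):
    # face_recognition.compare_faces is a Euclidean distance test on
    # 128-dim encodings; True means the distance is within tolerance.
    known = np.asarray(known_encodings)
    distances = np.linalg.norm(known - candidate, axis=1)
    return list(distances <= tolerance)

known = [np.zeros(128), np.full(128, 0.1)]
print(compare_faces(known, np.zeros(128)))  # [True, False]
```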
DefaultArgs:\n    model = None\n    output_dir = './'\n    output_filename = 'out.txt'\n\n    simd_low = 2\n    simd_high = 16\n    simd_step = 1\n    prec_low = 8\n    prec_high = 8\n    prec_step = 1\n    bw_low = 64\n    bw_high = 256\n    bw_step = 1\n    pe_low = 128\n    pe_high = 300\n    pe_step = 1\n    buffer_low = 32\n    buffer_high = 256\n    buffer_step = 8\n    max_area = 4.3841e+09\n    max_power = 1.34877e+05\n    max_invalid = 2500\n\n    trials = 1\n    hw_trials = 100\n    sw_trials = 100\n\n    target = 'edp'\n    kernel = 'linear'\n    hw_point = None\n    sw_point = None\n\n    sw_batch_size = 1000\n    hw_batch_size = 1000\n    sw_batch_trials = 10\n    hw_batch_trials = 10\n\n    layers = None\n    exclude_feat = \"raw\"\n\ndef get_args():\n    parser = argparse.ArgumentParser(description='spotlight-micro21.')\n    parser.add_argument(\"--model\", help=\"model name\", type=str, default=DefaultArgs.model)\n    parser.add_argument(\"--output-dir\", help=\"output directory\", type=str, default=DefaultArgs.output_dir)\n    parser.add_argument(\"--output-filename\", help=\"output filename\", type=str, default=DefaultArgs.output_filename)\n    parser.add_argument(\"--output-to-file\", help=\"output to file\", default=False, action=\"store_true\")\n    parser.add_argument(\"--dump-all\", help=\"dump full cost dictionary\", default=False, action=\"store_true\")\n\n    parser.add_argument(\"--simd-low\", help=\"minimum number of SIMD lanes\", type=int, default=DefaultArgs.simd_low)\n    parser.add_argument(\"--simd-high\", help=\"maximum number of SIMD lanes\", type=int, default=DefaultArgs.simd_high)\n    parser.add_argument(\"--simd-step\", help=\"step size for SIMD lane values\", type=int, default=DefaultArgs.simd_step)\n\n    parser.add_argument(\"--prec-low\", help=\"minimum bit precision\", type=int, default=DefaultArgs.prec_low)\n    parser.add_argument(\"--prec-high\", help=\"maximum bit precision\", type=int, default=DefaultArgs.prec_high)\n    parser.add_argument(\"--prec-step\", help=\"step size for bit precision\", type=int, default=DefaultArgs.prec_step)\n\n    parser.add_argument(\"--bw-low\", help=\"minimum bandwidth\", type=int, default=DefaultArgs.bw_low)\n    parser.add_argument(\"--bw-high\", help=\"maximum bandwidth\", type=int, default=DefaultArgs.bw_high)\n    parser.add_argument(\"--bw-step\", help=\"step size for bandwidth\", type=int, default=DefaultArgs.bw_step)\n\n    parser.add_argument(\"--pe-low\", help=\"minimum number of PEs\", type=int, default=DefaultArgs.pe_low)\n    parser.add_argument(\"--pe-high\", help=\"maximum number of PEs\", type=int, default=DefaultArgs.pe_high)\n    parser.add_argument(\"--pe-step\", help=\"step size for PE values\", type=int, default=DefaultArgs.pe_step)\n\n    parser.add_argument(\"--levels-low\", help=\"minimum number of levels of mem hierarchy\", type=int, default=2)\n    parser.add_argument(\"--levels-high\", help=\"maximum number of levels of mem hierarchy\", type=int, default=2)\n    parser.add_argument(\"--levels-step\", help=\"step size for levels of mem hierarchy\", type=int, default=1)\n\n    for i in range(1, 4):\n        parser.add_argument(\"--l%d-low\" % i, help=\"minimum total L%d size\" % i, type=int, default=DefaultArgs.buffer_low)\n        parser.add_argument(\"--l%d-high\" % i, help=\"maximum total L%d size\" % i, type=int, default=DefaultArgs.buffer_high)\n        parser.add_argument(\"--l%d-step\" % i, help=\"step size of L%d size\" % i, type=int, default=DefaultArgs.buffer_step)\n\n    parser.add_argument(\"--space-template\", help=\"use template (edge, datacenter) to determine space\", type=str, default='')\n\n    parser.add_argument(\"--max-area\", help=\"maximum 
area\", type=float, default=DefaultArgs.max_area)\n parser.add_argument(\"--max-power\", help=\"maximum power\", type=float, default=DefaultArgs.max_power)\n\n parser.add_argument(\"--trials\", help=\"number of hardware trials\", type=int, default=DefaultArgs.trials)\n parser.add_argument(\"--hw-trials\", help=\"number of hardware trials\", type=int, default=DefaultArgs.hw_trials)\n parser.add_argument(\"--sw-trials\", help=\"number of software trials\", type=int, default=DefaultArgs.sw_trials)\n parser.add_argument(\"--scale-trials\", help=\"scale number of hardware and software trials\", default=False, action=\"store_true\")\n parser.add_argument(\"--max-invalid\", help=\"number of trials before giving up\", type=int, default=DefaultArgs.max_invalid)\n parser.add_argument(\"--sw-progress-bar\", help=\"whether to use progress bar for software samples\", default=False, action=\"store_true\")\n parser.add_argument(\"--hw-progress-bar\", help=\"whether to use progress bar for hardware samples\", default=False, action=\"store_true\")\n parser.add_argument('--print-sw-samples', help=\"whether to print results for SW sample\", default=False, action=\"store_true\")\n\n parser.add_argument(\"--target\", help=\"optimization target\", type=str, default=DefaultArgs.target)\n parser.add_argument(\"--kernel\", help=\"GP kernel\", type=str, default=DefaultArgs.kernel)\n parser.add_argument(\"--sw-point\", help=\"software config\", type=str, default=DefaultArgs.sw_point)\n parser.add_argument(\"--hw-point\", help=\"hardware config\", type=str, default=DefaultArgs.hw_point)\n\n parser.add_argument(\"--sw-batch-size\", help=\"number of random samples in software BO batch\", type=int, default=DefaultArgs.sw_batch_size)\n parser.add_argument(\"--hw-batch-size\", help=\"number of random samples in hardware BO batch\", type=int, default=DefaultArgs.hw_batch_size)\n parser.add_argument(\"--sw-batch-trials\", help=\"number of software samples in BO batch to evaluate\", type=int, default=DefaultArgs.sw_batch_trials)\n parser.add_argument(\"--hw-batch-trials\", help=\"number of hardware samples in BO batch to evaluate\", type=int, default=DefaultArgs.hw_batch_trials)\n\n parser.add_argument(\"--print-bo-analysis\", dest=\"print_bo_analysis\", help=\"whether to analyze BO features\", default=False, action=\"store_true\")\n\n parser.add_argument(\"--exhaustive-hw-start-idx\", help=\"point at which to start HW space in exhaustive search\", type=int, default=0)\n parser.add_argument(\"--exhaustive-hw-end-idx\", help=\"point at which to end HW space in exhaustive search\", type=int, default=0)\n parser.add_argument(\"--no-search-permutations\", dest=\"search_permutations\", help=\"enable MAESTRO to search permutations\", default=True, action=\"store_false\")\n parser.add_argument(\"--dataflow\", help=\"type of dataflow to use\", type=str, default=\"searched\")\n\n parser.add_argument(\"--layers\", help=\"comma separated list of layers\", type=str, default=DefaultArgs.layers)\n parser.add_argument(\"--remove-duplicate-layers\", dest=\"remove_duplicate_layers\", help=\"ignore duplicate layers\", default=False, action=\"store_true\")\n parser.add_argument(\"--ignore-stride\", dest=\"ignore_stride\", help=\"ignore stride in layer shapes\", default=False, action=\"store_true\")\n\n parser.add_argument(\"--exclude-feat\", help=\"comma separated list of features to ignore\", type=str, default=DefaultArgs.exclude_feat)\n\n args = parser.parse_args()\n\n assert(args.model)\n assert(args.layers)\n\n if args.scale_trials:\n 
trial_scale = 1\n bo_time_per_layer = 2.71\n if 'grid' in args.model:\n trial_scale = 0.522\n elif 'random' in args.model:\n trial_scale = 1.91\n elif 'ga' in args.model:\n trial_scale = 1.68\n elif 'bo' in args.model:\n trial_scale = bo_time_per_layer\n\n trial_scale = bo_time_per_layer / trial_scale\n\n if 'hw' in args.model:\n trial_scale = math.sqrt(trial_scale)\n args.hw_trials = int(args.hw_trials * trial_scale)\n args.sw_trials = int(args.sw_trials * trial_scale)\n print(f'running {args.hw_trials} hw and {args.sw_trials} sw samples')\n\n if args.space_template != \"\":\n if args.space_template == \"edge\":\n args.pe_low = DefaultArgs.pe_low\n args.pe_high = DefaultArgs.pe_high\n args.pe_step = DefaultArgs.pe_step\n\n for i in range(1, 4):\n args.__dict__['l%d_low' % i] = DefaultArgs.buffer_low\n args.__dict__['l%d_high' % i] = DefaultArgs.buffer_high\n args.__dict__['l%d_step' % i] = DefaultArgs.buffer_step\n\n args.max_area = DefaultArgs.max_area\n args.max_power = DefaultArgs.max_power\n args.max_invalid = DefaultArgs.max_invalid\n\n elif args.space_template == 'datacenter':\n args.pe_low = 2048\n args.pe_high = 16384\n args.pe_step = 256\n\n for i in range(1, 4):\n args.__dict__['l%d_low' % i] = 8192\n args.__dict__['l%d_high' % i] = 32768\n args.__dict__['l%d_step' % i] = 2048\n\n args.max_area = 1.0e15\n args.max_power = 1.0e15\n args.max_invalid = 1000\n\n return args","repo_name":"chiragsakhuja/spotlight","sub_path":"src/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":8823,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"38"} +{"seq_id":"13279691619","text":"import pytest\nfrom nose.plugins.skip import SkipTest\nimport logging\nfrom ansible.modules.cloud.oracle import oci_load_balancer_health_checker\nfrom ansible.module_utils.oracle import oci_utils, oci_lb_utils\n\ntry:\n import oci\n from oci.util import to_dict\n from oci.load_balancer.models import HealthChecker\n from oci.exceptions import ServiceError, ClientError\nexcept ImportError:\n raise SkipTest(\"test_oci_load_balancer_health_checker.py requires `oci` module\")\n\n\nclass FakeModule(object):\n def __init__(self, **kwargs):\n self.params = kwargs\n\n def fail_json(self, *args, **kwargs):\n self.exit_args = args\n self.exit_kwargs = kwargs\n raise Exception(kwargs[\"msg\"])\n\n def exit_json(self, *args, **kwargs):\n self.exit_args = args\n self.exit_kwargs = kwargs\n\n\n@pytest.fixture()\ndef lb_client(mocker):\n mock_lb_client = mocker.patch(\n \"oci.load_balancer.load_balancer_client.LoadBalancerClient\"\n )\n return mock_lb_client.return_value\n\n\n@pytest.fixture()\ndef get_existing_resource_patch(mocker):\n return mocker.patch.object(oci_utils, \"get_existing_resource\")\n\n\n@pytest.fixture()\ndef create_or_update_lb_resources_and_wait_patch(mocker):\n return mocker.patch.object(oci_lb_utils, \"create_or_update_lb_resources_and_wait\")\n\n\ndef setUpModule():\n logging.basicConfig(\n filename=\"/tmp/oci_ansible_module.log\", filemode=\"a\", level=logging.INFO\n )\n oci_load_balancer_health_checker.set_logger(logging)\n\n\ndef test_update_health_checker(\n lb_client, get_existing_resource_patch, create_or_update_lb_resources_and_wait_patch\n):\n module = get_module()\n health_checker = get_health_checker()\n get_existing_resource_patch.return_value = health_checker\n create_or_update_lb_resources_and_wait_patch.return_value = dict(\n health_checker=to_dict(health_checker), changed=True\n )\n result = 
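`get_args` in the options.py record above describes each hardware dimension of the search space as a (low, high, step) triple. A hedged sketch of how such triples expand into a grid (the helper name and sample values are illustrative, and treating `high` as inclusive is an assumption):

```python
import itertools

def expand(low, high, step):
    # Inclusive [low, high] sweep, mirroring the *-low/high/step options.
    return range(low, high + 1, step)

# e.g. SIMD lanes 2..16 crossed with PE counts 128..300 in steps of 64
space = list(itertools.product(expand(2, 16, 2), expand(128, 300, 64)))
print(len(space), space[0])  # size of the grid and its first point
```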
oci_load_balancer_health_checker.update_health_checker(lb_client, module)\n    assert result[\"changed\"] is True\n\n\ndef test_update_health_checker_no_change(lb_client, get_existing_resource_patch):\n    additional_properties = dict({\"port\": 82})\n    module = get_module(additional_properties)\n    health_checker = get_health_checker()\n    get_existing_resource_patch.return_value = health_checker\n    result = oci_load_balancer_health_checker.update_health_checker(lb_client, module)\n    assert result[\"changed\"] is False\n\n\ndef get_health_checker():\n    health_checker = HealthChecker()\n    health_checker.interval_in_millis = 30000\n    health_checker.port = 82\n    health_checker.protocol = \"HTTP\"\n    health_checker.response_body_regex = \"^(500|40[1348])$\"\n    health_checker.retries = 3\n    health_checker.return_code = 200\n    health_checker.timeout_in_millis = 6000\n    health_checker.url_path = \"/healthcheck\"\n    return health_checker\n\n\ndef get_response(status, header, data, request):\n    return oci.Response(status, header, data, request)\n\n\ndef get_module(additional_properties=None):\n    params = {\n        \"load_balancer_id\": \"ocid1.loadbalancer.oc1.iad.aaaa\",\n        \"backend_set_name\": \"test_backend\",\n        \"interval_in_millis\": 30000,\n        \"port\": 8080,\n        \"protocol\": \"HTTP\",\n        \"retries\": 3,\n        \"timeout_in_millis\": 6000,\n        \"return_code\": 200,\n        \"url_path\": \"/healthcheck\",\n    }\n    if additional_properties is not None:\n        params.update(additional_properties)\n    module = FakeModule(**params)\n    return module\n","repo_name":"oracle/oci-ansible-modules","sub_path":"test/units/test_oci_load_balancer_health_checker.py","file_name":"test_oci_load_balancer_health_checker.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"38"}
{"seq_id":"35284246626","text":"\n\"\"\" homework ->\nPlease enter your average daily step count; 5000 \nCalories burned \nDaily; ... \nWeekly; ... \nMonthly; ... 
\n\"\"\"\nort_adimsayisi=int(input(\"Lütfen Günlük Ortalama Adım Sayınızı Giriniz: \"))\nhk_gunluk=ort_adimsayisi/8\nhk_haftalik=(ort_adimsayisi*7)/8\nhk_aylik=(ort_adimsayisi*30)/8\nprint(f\"Harcanan Günlük Kalori:{hk_gunluk}\\nHarcanan Haftalık Kalori{hk_haftalik}\\nHarcanan Aylık Kalori{hk_aylik}\")\n","repo_name":"edanurkkoc/VSCode-Ecodation-Eylul","sub_path":"02_11_eylul/02_08_odev.py","file_name":"02_08_odev.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"34530347383","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\n\nclass AdalineGD(object):\n \n \"\"\" \n ADAptive LINear NEuron Classifier\n\n Parameters\n ----------\n eta: float\n n_ter: int\n\n Attributes\n ----------\n w_: 1D array\n errors_: list\n \n \"\"\"\n\n def __init__(self, eta=0.01, n_iter=50):\n self.eta = eta\n self.n_iter = n_iter\n\n def fit(self, X, y):\n \n \"\"\"\n \n Parameters\n ----------\n X : {array-like}, shape = [n_samples, n_features]\n Training vectors,\n\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n self : object\n \n \"\"\"\n\n self.w_ = np.zeros(1+X.shape[1])\n self.cost_ = []\n\n for i in range(self.n_iter):\n output = self.net_input(X)\n errors = y-output\n self.w_[1:] += self.eta * X.T.dot(errors)\n self.w_[0] += self.eta * errors.sum()\n cost = (errors**2).sum() / 2.0\n self.cost_.append(cost)\n return self\n\n def net_input(self, X):\n return np.dot(X, self.w_[1:]+self.w_[0])\n\n def activation(self, X):\n return self.net_input(X)\n\n\n def predict(self, X):\n return np.where(self.activation(X) >= 0.0, 1, -1)\n\n#####################################################\n### Run Perceptron on Iris Data ####################\n#####################################################\n\n\ndf = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',header=None)\n\ny = df.iloc[0:100,4].values\ny = np.where(y == 'Iris-setosa',-1, 1) # Boolean 1 - when true returns -1 and not 'Iris-setosa'\nX = df.iloc[0:100,[0, 2]].values\n\nX_std = np.copy(X)\nX_std[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()\nX_std[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()\n\nada = AdalineGD(n_iter=15, eta=0.01)\nada.fit(X_std, y)\n\n####################################################\n### Plot Decision Surface ##########################\n####################################################\n\ndef plot_decision_regions(X, y, classifier, resolution=0.02):\n\n # setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n \n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 \n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1 \n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n\n plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n # plot class samples\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],alpha=0.8, c=cmap(idx),marker=markers[idx], label=cl)\n\n\nplot_decision_regions(X_std, y, classifier=ada)\nplt.title('Adaline - Gradient Descent')\nplt.xlabel('sepal length [standardized]')\nplt.ylabel('petal 
length [standardized]')\nplt.legend(loc='upper left')\nplt.show()\nplt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')\nplt.xlabel('Epochs')\nplt.ylabel('Sum-squared-error')\nplt.show()\n","repo_name":"StephenElishaClarke/Code","sub_path":"MachineLearning/AdalineGD.py","file_name":"AdalineGD.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"71993367472","text":"from __future__ import print_function\nfrom copy import deepcopy\nfrom itertools import permutations, product\n\n\nclass Edge:\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def outputs(self):\n return self._outputs\n\n def __init__(self, outputs, inputs, label=None):\n self._label = label\n self._outputs = outputs\n self._inputs = []\n self._primary_inputs = []\n self._functions = []\n for i, elem in enumerate(inputs):\n if isinstance(elem, tuple):\n if elem[1] == 'p':\n self._primary_inputs.append(i)\n self._inputs.append(elem[0])\n else:\n self._inputs.append(elem)\n self._functions.append(\"--\")\n\n @property\n def label(self):\n return self._label\n\n @label.setter\n def label(self, value):\n self._label = value\n\n @property\n def terminal(self):\n return self._label is not None\n\n def set_primary(self, i):\n assert i < len(self.inputs)\n self._primary_inputs.append(i)\n return self\n\n def set_function(self, i, gr_function):\n assert i < len(self.inputs)\n self._functions[i] = gr_function\n return self\n\n def get_function(self, i):\n assert i < len(self.inputs)\n return self._functions[i]\n\n @property\n def primary_inputs(self):\n return self._primary_inputs\n\n def __str__(self):\n try:\n s = \"[\" + \", \".join(map(self.__function_strings, enumerate(self.inputs))) + \"] -\" \\\n + (str(self.label) if self.label is not None else \"\") \\\n + \"-> [\" + \", \".join(map(str, self.outputs)) + \"]\"\n except UnicodeEncodeError:\n # TODO: proper handling of encoding problems\n s = \"foo\"\n return s\n\n def __function_strings(self, pair):\n i, node = pair\n if self._functions[i] == '--':\n return str(node)\n else:\n return str(self._functions[i]) + ':' + str(node)\n\n def compare_labels(self, other):\n return all(map(lambda x, y: x == y, [self.label] + self._functions, [other.label] + other._functions))\n\n\nclass DirectedOrderedGraph:\n def __init__(self):\n self._nodes = []\n self._inputs = []\n self._outputs = []\n self._terminal_edges = []\n self._nonterminal_edges = []\n self._parents = {}\n self._incoming_edge = {}\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def outputs(self):\n return self._outputs\n\n @property\n def nodes(self):\n return self._nodes\n\n @property\n def parents(self):\n return self._parents\n\n def incoming_edge(self, node):\n return self._incoming_edge[node]\n\n def children(self, node):\n edge = self._incoming_edge[node]\n if edge is None:\n return []\n else:\n return edge.inputs\n\n def type(self):\n _type = []\n for edge in self._nonterminal_edges:\n _type.append((len(edge.outputs), len(edge.inputs)))\n _type.append((len(self._inputs), len(self._outputs)))\n return _type\n\n @property\n def terminal_edges(self):\n return self._terminal_edges\n\n def __str__(self):\n return \"G[inputs=[\" + \", \".join(map(str, self._inputs)) + \"], \" \\\n + \"outputs=[\" + \", \".join(map(str, self._outputs)) + \"], \" \\\n + \"nont_edges=[\" + \", \".join(map(str, self._nonterminal_edges)) + \"], \" \\\n + \"term_edges=[\" + \", \".join(map(str, 
self._terminal_edges)) + \"]] \"\n\n def node_closure(self, function, reflexive=False):\n closure = {}\n\n if reflexive:\n for node in self._nodes:\n closure[node] = [node]\n else:\n for node in self._nodes:\n closure[node] = deepcopy(function(node))\n\n changed = True\n while changed:\n changed = False\n for node in self._nodes:\n for _node in closure[node]:\n for __node in function(_node):\n if __node not in closure[node]:\n changed = True\n closure[node].append(__node)\n return closure\n\n def cyclic(self):\n downward_closure = self.node_closure(lambda x: self.children(x))\n\n for node in self._nodes:\n if node in downward_closure[node]:\n return True\n\n return False\n\n def output_connected(self):\n upward_closure = self.node_closure(lambda n: self._parents[n], reflexive=True)\n\n for node in self._nodes:\n if not any([True for x in upward_closure[node] if x in self._outputs]):\n return False\n\n return True\n\n def add_node(self, node):\n assert node not in self._nodes\n self._nodes.append(node)\n self._parents[node] = []\n self._incoming_edge[node] = None\n\n def add_edge(self, edge, enforce_outputs=True):\n assert not enforce_outputs or len(edge.outputs) > 0\n assert all([node in self._nodes for node in edge.inputs])\n assert all([node in self._nodes for node in edge.outputs])\n assert all([self._incoming_edge[output] is None for output in edge.outputs])\n for output in edge.outputs:\n self._incoming_edge[output] = edge\n for node in edge.inputs:\n if output not in self._parents[node]:\n self._parents[node].append(output)\n if edge.terminal:\n self._terminal_edges.append(edge)\n else:\n self._nonterminal_edges.append(edge)\n return edge\n\n def add_terminal_edge(self, inputs, label, output):\n return self.add_edge(Edge([output], deepcopy(inputs), label))\n\n def add_nonterminal_edge(self, inputs, outputs, enforce_outputs=True):\n return self.add_edge(Edge(deepcopy(outputs), deepcopy(inputs)), enforce_outputs)\n\n def add_to_inputs(self, node):\n assert self._incoming_edge[node] is None\n self._inputs.append(node)\n\n def add_to_outputs(self, node):\n self._outputs.append(node)\n\n @staticmethod\n def __replace_inplace(the_list, old, new):\n for i, elem in enumerate(the_list):\n if elem == old:\n the_list[i] = new\n\n @staticmethod\n def replace_inplace_many(the_list, translation):\n for i, elem in enumerate(the_list):\n new = translation.get(elem, elem)\n if elem != new:\n the_list[i] = new\n\n def rename_node(self, node, node_new, trace=None):\n if node == node_new:\n return\n assert node_new not in self._nodes\n\n if trace is not None:\n trace[node] = node_new\n\n self.__replace_inplace(self._nodes, node, node_new)\n self.__replace_inplace(self._inputs, node, node_new)\n self.__replace_inplace(self._outputs, node, node_new)\n\n self._incoming_edge[node_new] = self._incoming_edge[node]\n del self._incoming_edge[node]\n\n if node in self._parents:\n self._parents[node_new] = self._parents[node]\n del self._parents[node]\n for key in self._parents:\n self.__replace_inplace(self._parents[key], node, node_new)\n\n for edge in self._nonterminal_edges + self._terminal_edges:\n if edge is not None:\n self.__replace_inplace(edge.inputs, node, node_new)\n self.__replace_inplace(edge.outputs, node, node_new)\n\n def rename_nodes(self, translation):\n assert set(translation.keys()).isdisjoint(translation.values())\n self.replace_inplace_many(self._nodes, translation)\n self.replace_inplace_many(self._inputs, translation)\n self.replace_inplace_many(self._outputs, translation)\n\n 
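`node_closure` above iterates a successor function to a fixed point, i.e. a transitive closure. A compact standalone version of the same loop over a plain adjacency dict (the helper name is hypothetical):

```python
def transitive_closure(succ):
    # succ: node -> list of direct successors. Repeatedly add successors
    # of successors until nothing changes, like node_closure above.
    closure = {n: list(children) for n, children in succ.items()}
    changed = True
    while changed:
        changed = False
        for n in closure:
            for m in list(closure[n]):
                for k in succ.get(m, []):
                    if k not in closure[n]:
                        closure[n].append(k)
                        changed = True
    return closure

print(transitive_closure({1: [2], 2: [3], 3: []}))  # {1: [2, 3], 2: [3], 3: []}
```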
self._incoming_edge.update({translation[old]: self._incoming_edge[old] for old in translation})\n for old in translation:\n del self._incoming_edge[old]\n\n self._parents.update({translation[old]: self._parents[old] for old in translation})\n for old in translation:\n del self._parents[old]\n for node in self._parents:\n self.replace_inplace_many(self._parents[node], translation)\n\n for edge in self._nonterminal_edges + self._terminal_edges:\n if edge is not None:\n self.replace_inplace_many(edge.inputs, translation)\n self.replace_inplace_many(edge.outputs, translation)\n\n def replace_by(self, i, dog):\n \"\"\"\n :param i:\n :type i: int\n :param dog:\n :type dog: DirectedOrderedGraph\n :return:\n :rtype:\n Hyperedge replacement\n We assume that nodes are integers to make the renaming easier.\n The node that is inserted is assumed to have no nonterminal edges.\n \"\"\"\n assert 0 <= i < len(self._nonterminal_edges)\n nt_edge = self._nonterminal_edges[i]\n assert isinstance(nt_edge, Edge)\n assert len(dog._inputs) == len(nt_edge.inputs)\n assert len(dog._outputs) == len(nt_edge.outputs)\n # print(len(dog._nonterminal_edges), all([edge is None for edge in dog._nonterminal_edges]))\n assert (len(dog._nonterminal_edges) == 0) or all([edge is None for edge in dog._nonterminal_edges])\n\n if False:\n dog_node_renaming = {}\n # if not set(self._nodes).isdisjoint(dog._nodes):\n max_node = max(self._nodes + dog._nodes)\n for j, node in enumerate(dog._nodes):\n dog.rename_node(node, max_node + 1 + j, dog_node_renaming)\n\n dog_node_renaming2 = {}\n for host_node, replace_node in zip(nt_edge.inputs, dog._inputs):\n dog.rename_node(replace_node, host_node, dog_node_renaming2)\n for host_node, replace_node in zip(nt_edge.outputs, dog._outputs):\n dog.rename_node(replace_node, host_node, dog_node_renaming2)\n else:\n if not set(self._nodes).isdisjoint(dog._nodes):\n max_node = max(self._nodes + dog._nodes)\n dog_node_renaming = {node: max_node + 1 + j for j, node in enumerate(dog._nodes)}\n dog.rename_nodes(dog_node_renaming)\n else:\n dog_node_renaming = {}\n dog_node_renaming2 = {replace_node: host_node\n for host_node, replace_node\n in zip(nt_edge.inputs + nt_edge.outputs, dog._inputs + dog._outputs)}\n dog.rename_nodes(dog_node_renaming2)\n\n # clean up old parent/ child database entries\n for node in nt_edge.inputs:\n self._parents[node] = [parent for parent in self._parents[node] if parent not in nt_edge.outputs]\n for node in nt_edge.outputs:\n self._incoming_edge[node] = None\n\n # add all new nodes\n for node in dog._nodes:\n # if node > max_node:\n if node not in self._nodes:\n self.add_node(node)\n\n # add new edges\n for edge in dog._terminal_edges:\n self.add_edge(edge)\n\n # remove nonterminal_edge (replace by None!)\n self._nonterminal_edges[i] = None\n return self.compose_node_renaming(dog_node_renaming, dog_node_renaming2)\n\n def ordered_nodes(self):\n ordered = []\n for root_node in self._outputs:\n self.__ordered_nodes_rec(ordered, root_node)\n # just required if not output connected\n for node in self.nodes:\n if node not in ordered:\n self.__ordered_nodes_rec(ordered, node)\n return ordered\n\n def __ordered_nodes_rec(self, ordered, v):\n if v not in ordered:\n ordered.append(v)\n for v_2 in self.children(v):\n self.__ordered_nodes_rec(ordered, v_2)\n\n @staticmethod\n def compose_node_renaming(renaming1, renaming2):\n if len(renaming1) == 0:\n return renaming2\n renaming = {}\n for node in renaming1:\n if renaming1[node] in renaming2:\n renaming[node] = 
renaming2[renaming1[node]]\n else:\n renaming[node] = renaming1[node]\n for node2 in renaming2:\n if node2 not in renaming:\n renaming[node2] = renaming2[node2]\n return renaming\n\n def compress_node_names(self):\n if self.ordered_nodes() == [i for i in range(len(self._nodes))]:\n return {}\n max_node = max(self._nodes) + 1\n\n renaming1 = {}\n for i, node in enumerate(self._nodes):\n self.rename_node(node, max_node + i, renaming1)\n ordered_nodes = self.ordered_nodes()\n renaming2 = {}\n for i, node in enumerate(ordered_nodes):\n self.rename_node(node, i, renaming2)\n return self.compose_node_renaming(renaming1, renaming2)\n\n def top(self, nodes, ordered_nodes=None):\n if ordered_nodes is None:\n ordered_nodes = self.ordered_nodes()\n tops = [node for node in nodes\n if node in self._outputs\n or any([node2 not in nodes for node2 in self._parents[node]])]\n return [node for node in ordered_nodes if node in tops]\n\n def bottom(self, nodes, ordered_nodes=None):\n if ordered_nodes is None:\n ordered_nodes = self.ordered_nodes()\n bottoms = [node for node in self._nodes\n if node not in nodes\n and any([node2 in nodes for node2 in self._parents[node]])]\n return [node for node in ordered_nodes if node in bottoms]\n\n def missing_children(self, nodes, ordered_nodes=None):\n if ordered_nodes is None:\n ordered_nodes = self.ordered_nodes()\n relevant_parents = {node: [(parent, self.children(parent).index(node))\n for parent in self._parents[node] if parent in nodes]\n for node in self._nodes\n if node not in nodes\n and any([node2 in nodes for node2 in self._parents[node]])}\n return [relevant_parents[node] for node in ordered_nodes if node in relevant_parents]\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __eq__(self, other):\n if self.output_connected() != other.output_connected():\n return False\n if len(self.nodes) != len(other.nodes):\n return False\n if not self.output_connected():\n return self.__compare_general(other)\n morphism = {}\n inverse_morphism = {}\n if len(self._outputs) != len(other.outputs):\n return False\n for so, oo in zip(self._outputs, other.outputs):\n if not self.__compare_rec(other, so, oo, morphism, inverse_morphism):\n return False\n return True\n\n def __compare_rec(self, other, sn, on, morphism, inverse_morphism):\n if sn in morphism:\n return on in inverse_morphism and morphism[sn] == on and inverse_morphism[on] == sn\n if on in inverse_morphism:\n return False\n morphism[sn] = on\n inverse_morphism[on] = sn\n se = self._incoming_edge[sn]\n oe = other.incoming_edge(on)\n if se is None and oe is None:\n return True\n if se is None or oe is None:\n return False\n if not se.compare_labels(oe):\n return False\n if len(se.inputs) != len(oe.inputs) or len(se.outputs) != len(oe.outputs):\n return False\n for sn2, on2 in zip(se.inputs, oe.inputs):\n if not self.__compare_rec(other, sn2, on2, morphism, inverse_morphism):\n return False\n return True\n\n def __compare_general(self, other):\n if len(self._outputs) != len(other.outputs):\n return False\n\n # first handle nodes reachable from outputs,\n # here we can use the deterministic method used for output connected DOGs\n morphism = {}\n inverse_morphism = {}\n for so, oo in zip(self._outputs, other.outputs):\n if not self.__compare_rec(other, so, oo, morphism, inverse_morphism):\n return False\n\n # next, we handle nodes which are not output connected\n # this requires non-determinism, as it is not clear which nodes correspond\n self_nodes = {node for node in self.nodes if node not in 
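`compose_node_renaming` above chains two partial node maps: the first map's targets are pushed through the second, and any entry of the second whose key the result does not already use is kept as well. A simplified standalone mirror of those semantics (the function name is illustrative):

```python
def compose(r1, r2):
    # Apply r2 after r1, then keep r2 entries whose keys are still free,
    # matching compose_node_renaming above.
    out = {k: r2.get(v, v) for k, v in r1.items()}
    for k, v in r2.items():
        if k not in out:
            out[k] = v
    return out

print(compose({'a': 'x'}, {'x': 'y', 'b': 'z'}))
# {'a': 'y', 'x': 'y', 'b': 'z'}
```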
morphism}\n other_nodes = {node for node in other.nodes if node not in inverse_morphism}\n\n assert len(self_nodes) == len(other_nodes)\n assert len(self_nodes) != 0 # otherwise the graph would be output connected\n\n def candidates(sn):\n return [on for on in other_nodes if self.incoming_edge(sn).compare_labels(other.incoming_edge(on))]\n\n # Beware! This loop has worst-case time complexity exponential in the size of self_nodes.\n # The worst-case always occurs, if both graphs are not isomorphic.\n # for extension in [dict(zip(list(self_nodes_p), other_nodes)) for self_nodes_p in permutations(self_nodes)]:\n for ext_assignment in product(*map(candidates, self_nodes)):\n if len(set(ext_assignment)) < len(ext_assignment):\n continue\n extension = {sn: on for sn, on in zip(self_nodes, ext_assignment)}\n\n correct = True\n\n for sn in self_nodes:\n on = extension[sn]\n se = self._incoming_edge[sn]\n oe = other.incoming_edge(on)\n\n if not se.compare_labels(oe)\\\n or len(se.inputs) != len(oe.inputs) \\\n or len(se.outputs) != len(oe.outputs):\n correct = False\n break\n for sn2, on2 in zip(se.inputs, oe.inputs):\n if sn2 in morphism:\n if not morphism[sn2] == on2:\n correct = False\n break\n elif sn2 in extension:\n if not extension[sn2] == on2:\n correct = False\n break\n if not correct:\n break\n for sn2, on2 in zip(se.outputs, oe.outputs):\n if sn2 in morphism:\n if not morphism[sn2] == on2:\n correct = False\n break\n elif sn2 in extension:\n if not extension[sn2] == on2:\n correct = False\n break\n\n if correct:\n return True\n return False\n\n def compute_isomorphism(self, other):\n assert self.output_connected()\n assert other.output_connected()\n morphism = {}\n inverse_morphism = {}\n if len(self._outputs) != len(other.outputs):\n return None\n for so, oo in zip(self._outputs, other.outputs):\n if not self.__compare_rec(other, so, oo, morphism, inverse_morphism):\n return None\n return morphism, inverse_morphism\n\n def extract_dog(self, lhs, rhs, enforce_outputs=True, ordered_nodes=None):\n assert all([pairwise_disjoint_elem(list) for list in [lhs] + rhs])\n assert all([elem in lhs for list in rhs for elem in list])\n assert pairwise_disjoint(rhs)\n assert all([elem in self._nodes for elem in lhs])\n dog = DirectedOrderedGraph()\n\n if ordered_nodes is None:\n ordered_nodes = self.ordered_nodes()\n top_lhs = self.top(lhs, ordered_nodes)\n bot_lhs = self.bottom(lhs, ordered_nodes)\n\n bot_rhs = [self.bottom(rhs_i, ordered_nodes) for rhs_i in rhs]\n top_rhs = [self.top(rhs_i, ordered_nodes) for rhs_i in rhs]\n\n # lhs\n for node in top_lhs:\n if node not in dog._nodes:\n dog.add_node(node)\n dog.add_to_outputs(node)\n for node in bot_lhs:\n if node not in dog._nodes:\n dog.add_node(node)\n dog.add_to_inputs(node)\n\n # rhs\n for i in range(len(rhs)):\n for node in bot_rhs[i] + top_rhs[i]:\n if node not in dog._nodes:\n dog.add_node(node)\n dog.add_nonterminal_edge(bot_rhs[i], top_rhs[i], enforce_outputs)\n\n # fill recursively\n visited = []\n for node in top_lhs:\n self.__fill_rec(node, dog, visited, lhs, top_rhs, bot_rhs)\n\n # add non-output connected nodes\n if not enforce_outputs:\n # find natural roots\n for node in lhs:\n if all([node not in rhs_set for rhs_set in rhs]):\n if node not in dog.nodes:\n dog.add_node(node)\n self.__fill_rec(node, dog, visited, lhs, top_rhs, bot_rhs)\n\n return dog\n\n def __fill_rec(self, node, dog, visited, lhs, top_rhs, bot_rhs):\n if node not in lhs or node in visited:\n return\n visited.append(node)\n for i, tops in enumerate(top_rhs):\n if 
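`__compare_general` above enumerates candidate assignments of the remaining nodes with `itertools.product` and rejects any assignment that maps two nodes to the same target. The core pattern in isolation (node names and candidate sets are made-up samples):

```python
from itertools import product

self_nodes = ['u', 'v']
candidates = {'u': ['a', 'b'], 'v': ['a']}

# Try every choice of one candidate per node, skipping assignments that
# reuse a target node -- the same pruning used in __compare_general.
for assignment in product(*(candidates[n] for n in self_nodes)):
    if len(set(assignment)) < len(assignment):
        continue  # two self nodes mapped to the same other node
    print(dict(zip(self_nodes, assignment)))  # {'u': 'b', 'v': 'a'}
```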
node in tops:\n                for node2 in bot_rhs[i]:\n                    self.__fill_rec(node2, dog, visited, lhs, top_rhs, bot_rhs)\n                return\n        edge = self.incoming_edge(node)\n        assert edge is not None\n        for node in edge.inputs:\n            if node not in dog._nodes:\n                dog.add_node(node)\n        dog.add_edge(deepcopy(edge))\n        for node2 in edge.inputs:\n            self.__fill_rec(node2, dog, visited, lhs, top_rhs, bot_rhs)\n\n    def primary_is_tree(self, weak=False):\n        \"\"\"\n        :param weak: if weak == True, then an \"internal edge\" of the DOG is allowed to be a leaf of the tree\n               otherwise, each internal edge needs to have at least one primary child\n        :type weak: bool\n        \"\"\"\n        outgoing = {}\n        for edge in self._terminal_edges + self._nonterminal_edges:\n            if edge is None:\n                continue\n            if (not weak) and len(edge.inputs) > 0 and len(edge.primary_inputs) == 0:\n                return False\n            for i in edge.primary_inputs:\n                node = edge.inputs[i]\n                if node in outgoing:\n                    return False\n                outgoing[node] = (edge, i)\n\n        upward_closure = self.node_closure(lambda n: outgoing[n][0].outputs if n in outgoing else [], reflexive=True)\n\n        for node in self._nodes:\n            if not any([True for x in upward_closure[node] if x in self._outputs]):\n                return False\n\n        return True\n\n    def internal_edges_without_primary_input(self):\n        return [edge for edge in self._terminal_edges\n                if len(edge.inputs) > 0 and len(edge.primary_inputs) == 0]\n\n    def project_labels(self, proj):\n        for edge in self._terminal_edges:\n            edge.label = proj(edge.label)\n\n    def export_dot(self, title):\n\n        def node_line(node):\n            s = \"\\t\" + str(node) + \" [shape=plaintext\"\n            inputs = ['i' + str(i) for i, n in enumerate(self._inputs) if n == node]\n            outputs = ['o' + str(i) for i, n in enumerate(self._outputs) if n == node]\n            if len(inputs) + len(outputs) > 0:\n                label = str(node) + '[' + ','.join(inputs + outputs) + ']'\n                s += ' , label=\\\"' + label + '\\\"'\n            s += '];'\n            return s\n\n        def edge_line(edge, idx, label):\n            return \"\\t\" + idx + \"[ shape=box, label=\\\"\" + str(label) + \"\\\"];\"\n\n        def tentacles(edge, idx):\n            inputs = [\"\\t\" + str(inp) + \"->\" + idx + \"[label = \\\"\"\n                      + (str(edge.get_function(i)) + ':' if edge.get_function(i) != '--' else \"\")\n                      + str(i) + \"\\\"];\" for i, inp in enumerate(edge.inputs)]\n            outputs = [\"\\t\" + idx + \"->\" + str(out) for out in edge.outputs]\n            return inputs + outputs\n\n        node_lines = [node_line(node) for node in self._nodes]\n        term_edge_lines = [line for i, edge in enumerate(self._terminal_edges) for line in\n                           [edge_line(edge, 't' + str(i), edge.label)] + tentacles(edge, 't' + str(i))]\n        nont_edge_lines = [line for i, edge in enumerate(self._nonterminal_edges) if edge is not None for line in\n                           [edge_line(edge, 'n' + str(i), 'e' + str(i))] + tentacles(edge, 'n' + str(i))]\n        return 'digraph G {\\n\\trankdir=BT;\\n' \\\n               + '\\tlabelloc=top;\\n\\tlabel=\\\"' + title + '\\\";\\n' \\\n               + '\\n'.join(node_lines + term_edge_lines + nont_edge_lines) \\\n               + '\\n}'\n\n    def export_graph_json(self, terminal_encoding, tentacle_labels=True, terminal_labeling=str):\n        def label_edge(edge):\n            label = str(terminal_labeling(edge.label))\n            if tentacle_labels:\n                label += '_' + '_'.join([edge.get_function(i) for i in range(len(edge.inputs))])\n            return label\n\n        data = {\"type\": \"hypergraph\"}\n        data['nodes'] = [node for node in self._nodes]\n        data['edges'] = []\n        idx = 0\n        for edge in self._nonterminal_edges:\n            data['edges'].append({\n                'id': idx\n                , 'label': terminal_encoding.object_index(label_edge(edge))\n                , 'attachment': edge.inputs + edge.outputs\n                , 'terminal': False\n            })\n            idx += 
1\n for edge in self._terminal_edges:\n data['edges'].append({\n 'id': idx\n , 'label': terminal_encoding.object_index(label_edge(edge))\n , 'attachment': edge.inputs + edge.outputs\n , 'terminal': True\n })\n idx += 1\n data['ports'] = self._inputs + self._outputs\n return data\n\n def binarize(self, bin_modifier=lambda x: x + '-BAR', bin_func='--'):\n bin_dog = DirectedOrderedGraph()\n for node in self.nodes:\n bin_dog.add_node(node)\n for node in self._inputs:\n bin_dog.add_to_inputs(node)\n for node in self._outputs:\n bin_dog.add_to_outputs(node)\n for edge in self._nonterminal_edges:\n bin_dog.add_nonterminal_edge(edge.inputs, edge.outputs)\n next_node = max(self.nodes) + 1\n for edge in self._terminal_edges:\n if len(edge.inputs) <= 2:\n new_edge = bin_dog.add_terminal_edge(edge.inputs, edge.label, edge.outputs[0])\n for i, _ in enumerate(edge.inputs):\n new_edge.set_function(i, edge.get_function(i))\n if i in edge.primary_inputs:\n new_edge.set_primary(i)\n else:\n new_nodes = []\n for i in range(len(edge.inputs) - 2):\n new_nodes.append(next_node)\n bin_dog.add_node(next_node)\n next_node += 1\n new_nodes.append(edge.inputs[-1])\n right_functions = [bin_func] * (len(edge.inputs) - 2) + [edge.get_function(len(edge.inputs) - 1)]\n outputs = [edge.outputs[0]] + new_nodes\n for (i, left), right, right_function, output \\\n in zip(enumerate(edge.inputs), new_nodes, right_functions, outputs):\n label = edge.label if i == 0 else bin_modifier(edge.label)\n primary_l = 'p' if i in edge.primary_inputs else 's'\n primary_r = 'p' if i < len(edge.inputs) - 2 or (i == len(edge.inputs) - 2 and (i + 1) in edge.primary_inputs) else 's'\n bin_dog.add_terminal_edge([(left, primary_l), (right, primary_r)], label, output)\\\n .set_function(0, edge.get_function(i)).set_function(1, right_function)\n return bin_dog\n\n def debinarize(self, is_bin=lambda x: x.endswith(\"-BAR\")):\n dog = DirectedOrderedGraph()\n nodes = []\n bin_nodes = []\n for node in self.nodes:\n if node in self._incoming_edge:\n incoming_edge = self.incoming_edge(node)\n if is_bin(incoming_edge.label):\n bin_nodes.append(node)\n else:\n nodes.append(node)\n else:\n nodes.append(node)\n for node in nodes:\n dog.add_node(node)\n for node in self._inputs:\n assert node not in bin_nodes\n dog.add_to_inputs(node)\n for node in self._outputs:\n assert node not in bin_nodes\n dog.add_to_outputs(node)\n if any([edge is not None for edge in self._nonterminal_edges]):\n for edge in self._nonterminal_edges:\n assert edge is not None\n assert not any([node in bin_nodes for node in edge.inputs])\n assert not any([node in bin_nodes for node in edge.outputs])\n dog.add_nonterminal_edge(edge.inputs, edge.outputs)\n\n closest_non_bin_node = {node: None for node in bin_nodes}\n left_of = {node: [] for node in bin_nodes}\n changed = True\n while changed:\n changed = False\n for node in bin_nodes:\n assert len(self._parents[node]) == 1\n parent = self._parents[node][0]\n if parent in nodes and closest_non_bin_node[node] != parent:\n closest_non_bin_node[node] = parent\n changed = True\n elif parent in bin_nodes and closest_non_bin_node[parent] != closest_non_bin_node[node]:\n left_of[node] = left_of[parent] + [parent]\n closest_non_bin_node[node] = closest_non_bin_node[parent]\n changed = True\n conflation = {}\n for node in closest_non_bin_node:\n parent = closest_non_bin_node[node]\n assert parent is not None\n if parent in conflation:\n conflation[parent] += [node]\n else:\n conflation[parent] = [node]\n for parent in conflation:\n conflation[parent] 
= sorted(conflation[parent], key=lambda x: left_of[x])\n\n for edge in self.terminal_edges:\n if edge.outputs[0] not in bin_nodes:\n if not any([node in bin_nodes for node in edge.inputs]):\n new_edge = dog.add_terminal_edge(edge.inputs, edge.label, edge.outputs[0])\n for i, _ in enumerate(edge.inputs):\n new_edge.set_function(i, edge.get_function(i))\n if i in edge.primary_inputs:\n new_edge.set_primary(i)\n else:\n assert edge.inputs[0] not in bin_nodes\n inputs = [(edge.inputs[0], 'p' if 0 in edge.primary_inputs else 's')]\n functions = [edge.get_function(0)]\n\n for node in conflation[edge.outputs[0]][:-1]:\n bin_edge = self.incoming_edge(node)\n inputs += [(bin_edge.inputs[0], 'p' if 0 in bin_edge.primary_inputs else 's')]\n functions += [bin_edge.get_function(0)]\n\n bin_edge = self.incoming_edge(conflation[edge.outputs[0]][-1])\n inputs += [(node, 'p' if i in bin_edge.primary_inputs else 's')\n for i, node in enumerate(bin_edge.inputs)]\n functions += [bin_edge.get_function(0), bin_edge.get_function(1)]\n new_edge = dog.add_terminal_edge(inputs, edge.label, edge.outputs[0])\n for i, func in enumerate(functions):\n new_edge.set_function(i, func)\n\n return dog\n\n def topological_order(self):\n \"\"\"\n :return: if acyclic, list of nodes from inputs to outputs in a topological order, else empty list\n :rtype: list\n \"\"\"\n order = []\n added = set()\n changed = True\n while changed:\n changed = False\n for node in self.nodes:\n if node in added:\n continue\n if all([child in added for child in self.children(node)]):\n added.add(node)\n order.append(node)\n changed = True\n if len(order) == len(self.nodes):\n return order\n else:\n return []\n\n\nclass DeepSyntaxGraph:\n def __init__(self, sentence, dog, synchronization, label=None):\n self.__dog = dog\n self.__sentence = sentence\n self.__label = label\n self.__synchronization = synchronization\n\n def get_graph_position(self, sentence_position):\n return self.__synchronization[sentence_position]\n\n @property\n def label(self):\n return self.__label\n\n def set_label(self, label):\n self.__label = label\n\n @property\n def dog(self):\n return self.__dog\n\n @property\n def sentence(self):\n return self.__sentence\n\n @sentence.setter\n def sentence(self, value):\n self.__sentence = value\n\n @property\n def synchronization(self):\n return self.__synchronization\n\n def recursive_partitioning(self, subgrouping=False, weak=False):\n assert self.dog.primary_is_tree(weak=weak)\n assert len(self.dog.outputs) == 1\n return self.__extract_recursive_partitioning_rec(self.dog.outputs[0], subgrouping)\n\n def covered_sentence_positions(self, dog_positions):\n return [sent_pos for sent_pos in range(len(self.sentence))\n if any([dog_pos in self.get_graph_position(sent_pos)\n for dog_pos in dog_positions])]\n\n def __extract_recursive_partitioning_rec(self, node, subgrouping):\n covered = self.covered_sentence_positions([node])\n edge = self.dog.incoming_edge(node)\n if edge is None:\n return set(covered), []\n children = []\n for i in edge.primary_inputs:\n child_node = edge.inputs[i]\n child_rec_par = self.__extract_recursive_partitioning_rec(child_node, subgrouping)\n if len(child_rec_par[0]) > 0:\n children += [child_rec_par]\n for sent_pos in child_rec_par[0]:\n assert sent_pos not in covered\n covered.append(sent_pos)\n covered = set(covered)\n if len(children) == 1 and covered == children[0][0]:\n return children[0]\n elif subgrouping and len(children) > 2:\n children_new = {}\n for i, child_rec_par in enumerate(children):\n func = 
edge.get_function(i)\n if func in children_new:\n children_new[func] += [child_rec_par]\n else:\n children_new[func] = [child_rec_par]\n new_child_list = []\n for func in children_new:\n if len(children_new[func]) > 1:\n new_child_list.append((set([sent_pos for child in children_new[func] for sent_pos in child[0]])\n , children_new[func]))\n else:\n new_child_list += children_new[func]\n if len(new_child_list) == 1 and covered == new_child_list[0][0]:\n return new_child_list[0]\n else:\n return covered, sorted(new_child_list, key=lambda pair: min(pair[0]))\n else:\n return covered, sorted(children, key=lambda pair: min(pair[0]))\n\n def id_yield(self):\n return list(map(lambda x: self.get_graph_position(x), [i for i in range(len(self.sentence))]))\n\n def export_bihypergraph_json(self, terminal_encoding, tentacle_labels=True, terminal_labeling=str):\n data = {\"type\": \"bihypergraph\"}\n data[\"G2\"] = self.dog.export_graph_json(terminal_encoding, tentacle_labels, terminal_labeling=terminal_labeling)\n max_node = max(data[\"G2\"]['nodes'])\n max_edge = max(map(lambda x: x['id'], data[\"G2\"]['edges']))\n data[\"G1\"] = self.string_to_graph_json(self.sentence, terminal_encoding, terminal_labeling=terminal_labeling,\n start_node=max_node + 1, start_edge=max_edge + 1)\n max_edge = max(map(lambda x: x['id'], data[\"G1\"]['edges']))\n data[\"alignment\"] = [{'id': idx + max_edge + 1\n , 'label': terminal_encoding.object_index(None)\n , 'attachment': [max_node + 1 + idx] + self.__synchronization[idx]\n } for idx in range(len(self.__synchronization)) if self.__synchronization[idx] != []\n ]\n return data\n\n @staticmethod\n def string_to_graph_json(string, terminal_encoding, terminal_labeling=id, start_node=0, start_edge=0):\n data = {'type': 'hypergraph'\n , 'nodes': [i for i in range(start_node, start_node + len(string) + 1)]\n , 'edges': [{'id': idx + start_edge\n , 'label': terminal_encoding.object_index(terminal_labeling(symbol))\n , 'attachment': [start_node + idx, start_node + idx + 1]\n , 'terminal': True\n } for idx, symbol in enumerate(string)]\n , 'ports': [start_node, start_node + len(string)]\n }\n return data\n\n def labeled_frames(self, replace_nodes_by_string_positions=True, guard=lambda x: True):\n frames = set()\n\n descendants = self.dog.node_closure(self.dog.children)\n\n for node in self.dog.nodes:\n edge = self.dog.incoming_edge(node)\n\n if replace_nodes_by_string_positions:\n predicate = [i for i, sync in enumerate(self.synchronization) if node in sync]\n if predicate == []:\n predicate = edge.label\n else:\n predicate = tuple(sorted(predicate))\n else:\n predicate = edge.label\n\n arg_label_set = []\n for i, child in enumerate(edge.inputs):\n func = edge.get_function(i)\n\n if replace_nodes_by_string_positions:\n arg = [i for i, sync in enumerate(self.synchronization) if child in sync]\n if arg == []:\n arg = [i for i, sync in enumerate(self.synchronization)\n if any([desc in descendants[child] for desc in sync])]\n arg = tuple(sorted(arg))\n else:\n arg = self.dog.incoming_edge(child).label\n\n arg_label_set.append((arg, func))\n\n frame = predicate, frozenset(arg_label_set)\n if guard(frame):\n frames.add(frame)\n\n return frames\n\n def binarize(self, bin_modifier=lambda x: x + \"-BAR\", bin_func=\"--\"):\n bin_dog = self.dog.binarize(bin_modifier=bin_modifier, bin_func=bin_func)\n return DeepSyntaxGraph(self.sentence, bin_dog, self.synchronization, self.label)\n\n def debinarize(self, is_bin=lambda x: x.endswith(\"-BAR\")):\n dog = 
self.dog.debinarize(is_bin=is_bin)\n        assert all([all([node in dog.nodes for node in sync]) for sync in self.synchronization])\n        return DeepSyntaxGraph(self.sentence, dog, self.synchronization)\n\n\ndef pairwise_disjoint_elem(some_list):\n    for i, elem in enumerate(some_list):\n        if elem in some_list[i + 1:]:\n            return False\n    return True\n\n\ndef pairwise_disjoint(lists):\n    for i, l1 in enumerate(lists):\n        for l2 in lists[i + 1:]:\n            for elem in l1:\n                if elem in l2:\n                    return False\n    return True\n\n\n__all__ = [\"DeepSyntaxGraph\", \"Edge\", \"DirectedOrderedGraph\"]\n","repo_name":"kilian-gebhardt/panda-parser","sub_path":"graphs/dog.py","file_name":"dog.py","file_ext":"py","file_size_in_byte":39836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"74242557549","text":"\"\"\"\nGenerate all permutations recursively.\nFor the three characters a, b, c there are six permutations: abc, acb, bac, bca, cba, cab.\nWhen the pointer is at the first element a, it can stay as a (i.e. swap with itself) or swap with b or c, so there are 3 possibilities.\nOnce the first element is fixed, the pointer moves to the second position, which can swap with itself (b) or with the element after it (c), forming two more arrangements.\nWhen the pointer reaches the third element c, there is no element after it, so one permutation is determined and printed.\nAfter each output, the array must be restored to its original state.\n\"\"\"\n\n\ndef permutations(arr, position, end):\n    if position == end:\n        print(arr)\n    else:\n        for index in range(position, end):\n            arr[index], arr[position] = arr[position], arr[index]\n            permutations(arr, position + 1, end)\n            arr[index], arr[position] = arr[position], arr[index]\n\n\n# Non-recursive permutation generation using an array\n# a sequence of length n has n! permutations\ndef permutationN(arr):\n    sum = 1\n\n    # how many permutation combinations there are\n    for j in range(len(arr)):\n        sum *= (j + 1)\n\n    i = 0\n    for k in range(sum):\n        arr[i], arr[i+1] = arr[i + 1], arr[i]\n        print(arr)\n        i += 1\n        if i == (len(arr) - 1):\n            i = 0\n\n\nif __name__ == '__main__':\n    arr = [1, 2, 3]\n    # permutations(arr, 0, len(arr))\n    permutationN(arr)\n","repo_name":"Yang-Jianlin/python-learn","sub_path":"python数据结构与算法/sixth_chapter/C-6.20.py","file_name":"C-6.20.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"3704085166","text":"\nimport os\nimport shutil\n\ncaminho_antigo = '/home/jean/PycharmProjects/CursoOM/modulo_os/antiga_pasta'\ncaminho_novo = '/home/jean/PycharmProjects/CursoOM/modulo_os/nova_pasta'\n\ntry:\n    os.mkdir(caminho_novo)\n    print('Pasta criada com sucesso!')\nexcept FileExistsError as erro:\n    print(f'Esta pasta {caminho_novo} já existe!')\nexcept Exception as erro:\n    print(erro)\n\nfor root, dirs, files in os.walk(caminho_antigo):\n    for file in files:\n        print(f'\\n{file}')\n        old_file_path = os.path.join(root, file)\n        new_file_path = os.path.join(caminho_novo, file)\n\n        shutil.move(old_file_path, new_file_path)\n        print(f'Arquivo {file} foi movido com sucesso!')\n","repo_name":"jeancharlles/CursoOM","sub_path":"modulo_os/move_file_cria_pasta.py","file_name":"move_file_cria_pasta.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11794661068","text":"import fuzzy\n\nsoundex = fuzzy.Soundex(4)\n\nlist = ['def', 'is', 'variable', 'string', 'integer', 'float', 'list', 'Dictionary', 'define', 'function', 'parameters',\n        'end', 'of', 'parameters', 'next', 'if', 'condition', 'equal', 'less', 'greater', 'than', 'or ', 'return',\n        \"call\", 'operation', 'add', 'subtract', 'multiply', 'divide', 'by', 'to', 'from', 'remove', 'line']\n\n\ndef soundex_generator(list):\n    \"\"\"\n    This function gets a list of words, and returns a list of their soundex codes. 
A soundex code shows how a word is pronounced.\n    Args:\n        list: a list of strings, each of which contains just one word.\n    Returns:\n        a list of soundex codes.\n    \"\"\"\n    keywords_soundex = []\n    for i in list:\n        sound = soundex(i)\n        keywords_soundex.append((i, 0.5, sound))\n    return keywords_soundex\n\n\nprint(soundex_generator(list))\n","repo_name":"RahilEbrahimi98/Python-Voice-based-programming","sub_path":"VBP- python files/right_keyword_interpretation/soundex_generator.py","file_name":"soundex_generator.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"23072983556","text":"import boto3\nimport json\nimport pg8000\nimport os\n\nENDPOINT = os.environ[\"DB_ENDPOINT\"]\nDBNAME = \"postgres\"\nPORT = \"5432\"\nUSR = \"rundown_v1\"\n\n\ndef lambda_handler(event, context, *args, **kwargs):\n    client = boto3.client(\"lambda\")\n\n    input_text = event[\"input\"]\n    complexity = event[\"complexity\"]\n    condense = event[\"condense\"]\n    summary = event[\"summary\"]\n\n    # process the input text\n    processed_text = process_text(input_text, complexity, condense, summary)\n    conn = pg8000.connect(\n        host=ENDPOINT,\n        port=PORT,\n        database=DBNAME,\n        user=USR,\n        password=os.environ[\"DB_PASSWORD\"],\n    )\n    cur = conn.cursor()\n    insert = \"INSERT INTO analytics_input_text (processed_text) VALUES (%s)\"\n\n    cur.execute(insert, (processed_text,))\n\n    # Return processed text\n    return {\"statusCode\": 200, \"body\": json.dumps({\"processedText\": processed_text})}\n\n\ndef process_text(input_text, complexity, condense, summary, *args, **kwargs):\n\n    processed_text = input_text\n\n    if summary:\n        for i in processed_text:\n            if i == \"a\":\n                i = \"b\"\n    if condense:\n        processed_text = processed_text[5:]\n\n    return processed_text\n","repo_name":"willbryan13/rundown","sub_path":"backend/lambda_handler.py","file_name":"lambda_handler.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"23846918233","text":"# -*- acsection: general-init -*-\nimport math\nimport pygame as pg\nimport pygamebg\n\n(sirina, visina) = (300, 300) # open the window\nprozor = pygamebg.open_window(sirina, visina, \"Kuzni lukovi\")\n\n# -*- acsection: main -*-\n\nprozor.fill(pg.Color(\"black\"))\n\n(cx, cy) = (sirina // 2, visina // 2)\ndebljina = 5\nr = 80\npg.draw.arc(prozor, pg.Color(\"red\"), (cx-r, cy-r, 2*r, 2*r),\n            math.radians(0), math.radians(180), debljina)\nr = 70\npg.draw.arc(prozor, pg.Color(\"blue\"), (cx-r, cy-r, 2*r, 2*r),\n            math.radians(90), math.radians(270), debljina)\nr = 60\npg.draw.arc(prozor, pg.Color(\"green\"), (cx-r, cy-r, 2*r, 2*r),\n            math.radians(-180), math.radians(0), debljina)\n\n# -*- acsection: after-main -*-\n\n# show the window and wait for the user to close it\npygamebg.wait_loop()\n","repo_name":"Petlja/os7_inf_prog","sub_path":"_includes/lukovi-1.py","file_name":"lukovi-1.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"34878193822","text":"from pathlib import Path\n\nfrom ..StepGameDataset import (\n    TEXT_ANS_CHOICES,\n    PREFIX_TO_CHOICES,\n    SUFFIX_TO_CHOICES,\n)\n\nfrom ... 
import FewShotDemoGenerator\n\n\nclass StepGameDemoGenerator(FewShotDemoGenerator):\n def __init__(\n self,\n demo_file=\"stepgame-demo.json\",\n demo_path=None,\n prefix_to_choices=PREFIX_TO_CHOICES,\n suffix_to_choices=SUFFIX_TO_CHOICES,\n sent_join_char=\" \", # space or \\n\n incl_ans_choices=True,\n random_state=42,\n ):\n if demo_path is None:\n demo_path = Path(__file__).parent\n\n super().__init__(\n demo_file=demo_file,\n demo_path=demo_path,\n text_ans_choices=TEXT_ANS_CHOICES,\n prefix_to_choices=prefix_to_choices,\n suffix_to_choices=suffix_to_choices,\n sent_join_char=sent_join_char,\n incl_ans_choices=incl_ans_choices,\n random_state=random_state,\n )\n\n def _get_demo_id(self, data, fp):\n # e.g. fp = qa4_valid_reas_chain.json\n file_name_meta = fp.stem.split(\"_\")\n num_obj = int(file_name_meta[0][2:])\n split_type = file_name_meta[1]\n demo_id = f\"stepgame|{split_type}|{num_obj}\"\n return demo_id\n\n def _preprocess_demo(self, data, demo_id):\n demo = []\n for k, v in data.items():\n v[\"id\"] = f\"{demo_id}|{k}\"\n\n context = v.pop(\"story\")\n v[\"context\"] = self.sent_join_char.join(context)\n v[\"context_steps\"] = len(context)\n\n v[\"answer\"] = v.pop(\"label\")\n reasoning = v.pop(\"reasoning\")\n v[\"reasoning\"] = self.sent_join_char.join(reasoning)\n v[\"reasoning_steps\"] = len(reasoning)\n\n demo.append(v)\n\n return demo\n","repo_name":"imbesat-rizvi/spatial_bench","sub_path":"data/StepGame/reasoning_chain/StepGameDemoGenerator.py","file_name":"StepGameDemoGenerator.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"35920897000","text":"def check_and_do_properly(A):\n c = ord('A')\n for i in range(len(A)):\n if A[i] >= 10:\n A[i] = chr(A[i] - 10 + c)\n return A\n\ndef perevod(chislo, base):\n A = []\n while chislo > 0:\n A.append(chislo % base)\n chislo = chislo // base\n A = A[::-1]\n A = check_and_do_properly(A)\n s = ''\n for i in A:\n s += str(i)\n return s\n\ndef perevod_any(chislo, base1, base2):\n return perevod(int(chislo, base1), base2)\n\nA = input().split()\nif int(A[1]) > 36:\n print(\"Base > 36 !!!\")\n exit(-1)\nprint(perevod_any(A[0].lower(), int(A[1]), int(A[2])))\n","repo_name":"belsawan/mipt_control_1_sem","sub_path":"perevod.py","file_name":"perevod.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"9434352758","text":"from modules import openai\nfrom modules import weather\nfrom modules import firebase\nimport random\nfrom datetime import datetime\nimport json\nimport uuid\nfrom collections import defaultdict\n\n\ndef generate_text_content(CLIENT, BUCKET):\n #generate text parameters\n categories = [\"Diy\", \"Pflegetipps\", \"Inspiration\"]\n category = random.choice(categories)\n today = datetime.today().strftime('%d.%m.%Y')\n weather_now = weather.get_weather(\"Mosbach\")\n temperature = weather_now[\"temp\"]\n sky = weather_now[\"weather\"]\n humidity = weather_now[\"humidity\"]\n wind_speed = weather_now[\"wind_speed\"]\n\n txt_id = str(uuid.uuid4())\n \n #generate text\n txt_prompt = 'Erstelle einen interessanten, lustigen und langen Wissenstext für ein Gartenmagazin in der Kategorie ' + category + 'Der Text sollte zur Saison und zum Wetter passen beschrenke dich auch auf eine spezifische Aufgabe oder ein spezifisches Gartenprojekt. Heute ist der ' + str(today) + ' und es ist ' + sky + ' bei ' + str(temperature) + ' Grad. 
Die Luftfeuchtigkeit beträgt ' + str(humidity) + ' Prozent und der Wind weht mit ' + str(wind_speed) + ' km/h. Liefere das Ergebnis als Array in folgendem Format zurück: {\"text\": \"Hier ist der Text\", \"headline\": \"Hier eine passende Überschrift\"}'\n ai_response = openai.request_open_ai(txt_prompt)\n text = json.loads(ai_response.replace('\\n', ''))\n\n #generate img\n img_prompt = \"Erstelle ein natürliches Bild aus einem schönen Garten für den Text in einem Gartenmagazin. Der Text hat die Überschrift\" + text[\"headline\"]+ \".\"\n img_url, txt_id = openai.request_open_ai_image(plant=txt_id, prompt=img_prompt, img_size=\"512x512\")\n\n #build data for firestore\n text['category'] = category\n text['date'] = today\n text['id'] = txt_id\n \n #upload\n text[\"firebase_path\"], text[\"img\"] = firebase.upload_image(img_url, txt_id, \"knowledge/\",512, BUCKET)\n text[\"firebase_thumbnail\"], text[\"img\"] = firebase.upload_image(img_url, txt_id, \"knowledge_thumbnails/\",256, BUCKET)\n #push to firebase\n firebase.upload_plant(txt_id, text, \"knowledge\", CLIENT)\n\n return text\n\ndef get_all_articles(CLIENT, filter_count, filter_category):\n #search for Plant in Firestore\n db = CLIENT\n \n # perform a query for all articles matching the common names\n docs = db.collection('knowledge').get()\n\n #if plant is in the database\n if len(docs) > 0:\n if filter_count:\n articles_by_category = defaultdict(list)\n\n # Group articles by category\n for doc in docs:\n article = doc.to_dict()\n category = article['category']\n articles_by_category[category].append(article)\n\n # Sort articles in each category by date (newest first)\n for category, articles in articles_by_category.items():\n articles.sort(key=lambda x: x['date'], reverse=True)\n\n # Retrieve the five newest articles for each category\n top_articles = []\n for articles in articles_by_category.values():\n top_articles.extend(articles[:int(filter_count)])\n\n return top_articles\n elif filter_category:\n articles_by_category = {}\n\n # Group articles by category\n for doc in docs:\n article = doc.to_dict()\n article_category = article['category']\n if article_category not in articles_by_category:\n articles_by_category[article_category] = []\n articles_by_category[article_category].append(article)\n\n # If category is specified, return all articles for that category\n if filter_category in articles_by_category:\n return articles_by_category[filter_category]\n # Otherwise, return all articles for all categories\n else:\n return \"The given Category does not exist\"\n else:\n articles = [doc.to_dict() for doc in docs]\n return articles\n\n #if plant is nowhere in the database\n else:\n return \"articles not found\", 400\n \ndef get_single_article(id: str, CLIENT):\n #search for article in Firestore\n db = CLIENT\n article_collection = db.collection('knowledge')\n docs = article_collection.get()\n\n #if plant is in the database\n if len(docs) > 0:\n article_doc = article_collection.where('id', '==', id).get()[0]\n article = article_doc.to_dict()\n return article","repo_name":"buehlermoriz/information-service","sub_path":"modules/knowledge.py","file_name":"knowledge.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72956875951","text":"import os\nimport time\nimport calendar\nimport json\nimport requests\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nfrom datetime import datetime, timedelta\nfrom sqlalchemy import 
create_engine, inspect, text\n\n\nstart_year = 2019\nend_year = 2023\n\n\ndef drop_table(engine):\n inspector = inspect(engine)\n table_names = inspector.get_table_names(schema=\"stg\")\n if \"decision\" in table_names:\n print(\"DECISION TABLE FOUND. DROPPING\")\n with engine.connect() as connection:\n connection.execute(text(\"DROP TABLE stg.decision\"))\n connection.commit()\n\n\ndef is_hashable(x):\n try:\n hash(x)\n return True\n except TypeError:\n return False\n\n\ndef unfold_cols(df):\n def get_first_value(lst):\n if len(lst) > 0:\n return lst[0]\n else:\n return None\n\n def process_list(obj):\n if isinstance(obj, list) and len(obj) > 0:\n try:\n kae = obj[0][\"kae\"]\n return kae\n except:\n return None\n elif isinstance(obj, float):\n return None\n else:\n return None\n\n if df.shape[0] != 0:\n df = df.copy()\n\n if \"extraFieldValues.amountWithVAT.amount\" in df.columns:\n df[\"extraFieldValues.amountWithVAT.amount\"] = df[\n \"extraFieldValues.amountWithVAT.amount\"\n ].apply(\n lambda x: x[0][\"amountWithVAT\"]\n if isinstance(x, list)\n and len(x) > 0\n and isinstance(x[0], dict)\n and \"amountWithVAT\" in x[0]\n else x\n )\n\n if \"extraFieldValues.amountWithKae\" in df.columns:\n df[\"kae\"] = df[\"extraFieldValues.amountWithKae\"].apply(\n lambda x: process_list(x)\n )\n df[\"thematicCategoryIds\"] = df[\"thematicCategoryIds\"].apply(get_first_value)\n df[\"signerIds\"] = df[\"signerIds\"].apply(get_first_value)\n return df\n\n\ndef select_cols(df):\n columns_map = {\n \"ada\": \"ada\",\n \"subject\": \"subject\",\n \"protocolNumber\": \"protocol_number\",\n \"publishTimestamp\": \"publish_date\",\n \"organizationId\": \"organization\",\n \"decisionTypeId\": \"decision_type\",\n \"extraFieldValues.amountWithVAT.amount\": \"amount\",\n \"thematicCategoryIds\": \"thematic_category\",\n \"kae\": \"kae\",\n \"documentUrl\": \"pdf_url\",\n \"signerIds\": \"signer\",\n \"status\": \"status\",\n }\n for col in columns_map.keys():\n if col not in df.columns:\n df[col] = np.nan\n\n df = df[list(columns_map.keys())].rename(columns=columns_map)\n\n return df\n\n\ndef download_data(reduce=False, write_to_db=False):\n orgs_url = \"https://diavgeia.gov.gr/opendata/organizations?category=MUNICIPALITY\"\n details_url = \"https://diavgeia.gov.gr/opendata/search/advanced\"\n engine = create_engine(\"postgresql://postgres:password@localhost:5432/postgres\")\n response = requests.get(orgs_url, timeout=100)\n orgs = response.json()[\"organizations\"]\n df_master = pd.DataFrame()\n drop_table(engine)\n for org in tqdm(orgs):\n # df = pd.DataFrame()\n uid = org[\"uid\"]\n current_date = datetime(start_year, 1, 1)\n end_day = calendar.monthrange(end_year, 12)[1]\n end_date = datetime(end_year, 12, end_day)\n\n while current_date <= end_date:\n df = pd.DataFrame()\n next_date = current_date + timedelta(days=180)\n page_num = 0\n\n while True:\n params = {\n \"q\": f'organizationUid:\"{uid}\" AND decisionTypeUid:[\"Γ.3.4\",\"Δ.2.2\",\"Β.1.3\"] AND issueDate:[DT({current_date.strftime(\"%Y-%m-%dT00:00:00\")}) TO DT({next_date.strftime(\"%Y-%m-%dT23:59:59\")})]',\n \"page\": page_num,\n \"size\": 500,\n }\n\n try:\n details_response = requests.get(\n details_url, params=params, timeout=200\n )\n except requests.exceptions.ReadTimeout:\n print(f\"TIMEOUT FOR ORG: {org}. 
SKIPPING\")\n break\n\n details = details_response.json()\n decisions_df = pd.DataFrame(details[\"decisions\"])\n decisions_df[\"municipality_name\"] = org[\"label\"]\n flatten_df = pd.json_normalize(decisions_df.to_dict(\"records\"))\n df = pd.concat([df, flatten_df], axis=0, join=\"outer\")\n\n if details[\"info\"][\"actualSize\"] < details[\"info\"][\"size\"]:\n break\n\n page_num += 1\n time.sleep(1)\n\n # df_master = pd.concat([df_master, df], ignore_index=True)\n\n if write_to_db:\n df = unfold_cols(df)\n df = select_cols(df)\n # df = filter_df(df)\n df.to_sql(\n \"decision\",\n engine,\n if_exists=\"append\",\n index=False,\n schema=\"stg\",\n )\n\n if reduce:\n break\n\n current_date = next_date\n\n return df_master\n\n\nif __name__ == \"__main__\":\n download_data(write_to_db=True)\n","repo_name":"itsimplis/Greek-Gov-Metrics","sub_path":"server/data_prep/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"8307051760","text":"'''tests for saml_configuration API endpoints'''\nimport responses\n\nRE_BASE = 'https://pytenable.tenable.ad/api'\n\n\n@responses.activate\ndef test_saml_configuration_singleton(api):\n '''tests the saml_configuration_singleton API response with\n actual saml_configuration_singleton response'''\n responses.add(responses.GET,\n f'{RE_BASE}/saml-configuration',\n json={\n 'activateCreatedUsers': True,\n 'allowedGroups': [{\n 'defaultProfileId': 1,\n 'defaultRoleIds': [1, 2],\n 'name': 'test'\n }],\n 'assertEndpoint':\n 'assert_endpoint_url',\n 'enabled': True,\n 'encryptionCertificate': 'certificate',\n 'providerLoginUrl': 'url',\n 'serviceProviderUrl': 'https://pytenable.tenable.ad',\n 'signatureCertificate': 'certificate'\n }\n )\n resp = api.saml_configuration.details()\n assert isinstance(resp, dict)\n assert resp['allowed_groups'][0]['name'] == 'test'\n\n\n@responses.activate\ndef test_saml_configuration_update(api):\n '''tests the update API response with actual update response'''\n responses.add(responses.PATCH,\n f'{RE_BASE}/saml-configuration',\n json={\n 'activateCreatedUsers': True,\n 'allowedGroups': [{\n 'defaultProfileId': 1,\n 'defaultRoleIds': [1, 2],\n 'name': 'updated_name'\n }],\n 'assertEndpoint': 'assert_endpoint_url',\n 'enabled': True,\n 'encryptionCertificate': 'certificate',\n 'providerLoginUrl': 'url',\n 'serviceProviderUrl': 'https://pytenable.tenable.ad',\n 'signatureCertificate': 'certificate'\n }\n )\n resp = api.saml_configuration.update(allowed_groups=[{\n 'name': 'updated_name',\n 'default_profile_id': 1,\n 'default_role_ids': [1, 2]\n }]\n )\n\n assert isinstance(resp, dict)\n assert resp['allowed_groups'][0]['name'] == 'updated_name'\n\n\n@responses.activate\ndef test_saml_configuration_generate_saml_certificate(api):\n '''tests the generate saml certificate API response with actual\n saml certificate response'''\n responses.add(responses.GET,\n f'{RE_BASE}/saml-configuration/generate-certificate',\n json={\n 'encryptionCertificate': 'generated_certificate',\n }\n )\n resp = api.saml_configuration.generate_saml_certificate()\n\n assert isinstance(resp, dict)\n assert resp['encryption_certificate'] == 
'generated_certificate'\n","repo_name":"tenable/pyTenable","sub_path":"tests/ad/saml-configuration/test_saml_configuration_api.py","file_name":"test_saml_configuration_api.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":304,"dataset":"github-code","pt":"38"} +{"seq_id":"9749534672","text":"import json\nimport requests\n\nGEOCODING_API_URL = \"https://geocoding-api.open-meteo.com/v1/search?name={name}\"\nFORECAST_API_URL = \"https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lon}\"\n\n\ndef location_to_coordinates(location):\n    target_api_call = GEOCODING_API_URL.format(name=location)\n    response = requests.get(target_api_call)\n    response.raise_for_status()\n    json_response = json.loads(response.text)\n    return (\n        json_response[\"results\"][0][\"latitude\"],\n        json_response[\"results\"][0][\"longitude\"],\n    )\n\ndef forecast(location, **kwargs):\n    additional_suffix = ''\n    if len(location) != 2:\n        lat, lon = location_to_coordinates(location)\n    else:\n        lat, lon = location\n    if 'hourly' in kwargs:\n        additional_suffix += f'&hourly={kwargs[\"hourly\"]}'\n    if 'daily' in kwargs:\n        additional_suffix += f'&daily={kwargs[\"daily\"]}'\n    if 'current_weather' in kwargs:\n        additional_suffix += f'&current_weather={kwargs[\"current_weather\"]}'\n    target_api_call = FORECAST_API_URL.format(lat=lat, lon=lon)\n    target_api_call += additional_suffix\n    response = requests.get(target_api_call)\n    response.raise_for_status()\n    json_response = json.loads(response.text)\n    return json_response","repo_name":"srakrn/project-mekha","sub_path":"src/mekha/open_meteo.py","file_name":"open_meteo.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"11062243851","text":"from typing import Callable, Optional, Tuple\n\nfrom shared.insn_yaml import Insn, InsnsFile\n\nfrom .program import Program\nfrom .model import Model\nfrom .snippet import Snippet\n\n# The return type of a single generator. This is a tuple (snippet, model).\n# snippet is a generated snippet. If the program is done (i.e. every execution\n# ends with ecall) then model is None. Otherwise it is a Model object\n# representing the state of the processor after executing the code in the\n# snippet(s).\nGenRet = Tuple[Snippet, Optional[Model]]\n\n# The return type of repeated generator calls. If the snippet is None, no\n# generators managed to generate anything.\nGensRet = Tuple[Optional[Snippet], Model]\n\n# A continuation type that allows a generator to recursively generate some more\n# stuff.\nGenCont = Callable[[Model, Program], GensRet]\n\n\nclass SnippetGen:\n    '''A parameterised sequence of instructions\n\n    These can be added to the instructions generated so far for a given random\n    binary.\n\n    '''\n    def __init__(self) -> None:\n        self.disabled = False\n\n    def gen(self,\n            cont: GenCont,\n            model: Model,\n            program: Program) -> Optional[GenRet]:\n        '''Try to generate instructions for this type of snippet.\n\n        On success, inserts the instructions into program, updates the model,\n        and returns a GenRet tuple. See comment above the type definition for\n        more information.\n\n        On failure, leaves program and model unchanged and returns None.\n        Failure is interpreted as \"this snippet won't work with the current\n        program state\", but the generator may be retried later.\n\n        The cont argument is a continuation, used to call out to more\n        generators in order to do recursive generation. 
It takes a (mutable)\n model and program and picks a sequence of instructions. The paths\n through the generated code don't terminate with an ECALL but instead\n end up at the resulting model.pc.\n\n This will only be called when model.fuel > 0 and\n program.get_insn_space_at(model.pc) > 0.\n\n '''\n raise NotImplementedError('gen not implemented by subclass')\n\n def pick_weight(self,\n model: Model,\n program: Program) -> float:\n '''Pick a weight by which to multiply this generator's default weight\n\n This is called for each generator before we start trying to generate a\n snippet for a given program and model state. This can be used to\n disable a generator when we know it won't work (if model.fuel is too\n small, for example).\n\n It can also be used to alter weights depending on where we are in the\n program. For example, a generator that generates ECALL to end the\n program could decrease its weight when size is large, to avoid\n generating tiny programs by accident.\n\n This will only be called when model.fuel > 0 and\n program.get_insn_space_at(model.pc) > 0.\n\n The default implementation always returns 1.0.\n\n '''\n return 1.0\n\n def _get_named_insn(self, insns_file: InsnsFile, mnemonic: str) -> Insn:\n '''Get an instruction from insns_file by mnemonic\n\n This is used for specialized snippets that need to generate a specific\n instruction and wraps the error handling for when someone has removed\n the instruction from the file.\n\n '''\n insn = insns_file.mnemonic_to_insn.get(mnemonic.lower())\n if insn is None:\n raise RuntimeError('No {} instruction in instructions file.'\n .format(mnemonic.upper()))\n return insn\n","repo_name":"sigasi/sigasi_demos","sub_path":"OpenTitan/hw/ip/otbn/dv/rig/rig/snippet_gen.py","file_name":"snippet_gen.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"38"} +{"seq_id":"73688599789","text":"import vtk\nfrom vtk.util import numpy_support\nimport os\nimport numpy\n\n\nimport chart_studio.plotly as py\nfrom plotly.graph_objs import *\n# plotly.plotly.sign_in(\"somada141\", \"1t2qb5b9y1\")\n\n\ndef vtkImageToNumPy(image, pixelDims):\n pointData = image.GetPointData()\n arrayData = pointData.GetArray(0)\n ArrayDicom = numpy_support.vtk_to_numpy(arrayData)\n ArrayDicom = ArrayDicom.reshape(pixelDims, order='F')\n \n return ArrayDicom\n\n\ndef plotHeatmap(array, name=\"plot\"):\n data = Data([\n Heatmap(\n z=array,\n colorscale='Greys'\n )\n ])\n layout = Layout(\n autosize=False,\n title=name\n )\n fig = Figure(data=data, layout=layout)\n\n return py.iplot(fig, filename=name)\n\n\nimport vtk\nfrom IPython.display import Image\ndef vtk_show(renderer, width=400, height=300):\n \"\"\"\n Takes vtkRenderer instance and returns an IPython Image with the rendering.\n \"\"\"\n renderWindow = vtk.vtkRenderWindow()\n renderWindow.SetOffScreenRendering(1)\n renderWindow.AddRenderer(renderer)\n renderWindow.SetSize(width, height)\n renderWindow.Render()\n \n windowToImageFilter = vtk.vtkWindowToImageFilter()\n windowToImageFilter.SetInput(renderWindow)\n windowToImageFilter.Update()\n \n writer = vtk.vtkPNGWriter()\n writer.SetWriteToMemory(1)\n writer.SetInputConnection(windowToImageFilter.GetOutputPort())\n writer.Write()\n data = writer.GetResult()\n \n return Image(data)\n\n\n\nfrom glob import glob\nPathDicom = \"Skull with hole/\"\n# PathDicom = glob(data_path + '/*.dcm')\nreader = vtk.vtkDICOMImageReader()\nreader.SetDirectoryName(PathDicom)\nreader.Update()\n\n\n# 
Load dimensions using `GetDataExtent`\n_extent = reader.GetDataExtent()\nConstPixelDims = [_extent[1]-_extent[0]+1, _extent[3]-_extent[2]+1, _extent[5]-_extent[4]+1]\n\n# Load spacing values\nConstPixelSpacing = reader.GetPixelSpacing()\n\n\n#shiftScale = vtk.vtkImageShiftScale()\n#shiftScale.SetScale(reader.GetRescaleSlope())\n#shiftScale.SetShift(reader.GetRescaleOffset())\n#shiftScale.SetInputConnection(reader.GetOutputPort())\n#shiftScale.Update()\n\n# In the next cell you would simply get the output with 'GetOutput' from 'shiftScale' instead of 'reader'\n\nArrayDicom = vtkImageToNumPy(reader.GetOutput(), ConstPixelDims)\n# plotHeatmap(numpy.rot90(ArrayDicom[:, 256, :]), name=\"CT_Original\")\n\n\nthreshold = vtk.vtkImageThreshold ()\nthreshold.SetInputConnection(reader.GetOutputPort())\nthreshold.ThresholdByLower(400) # remove all soft tissue\nthreshold.ReplaceInOn()\nthreshold.SetInValue(0) # set all values below 400 to 0\nthreshold.ReplaceOutOn()\nthreshold.SetOutValue(1) # set all values above 400 to 1\nthreshold.Update()\n\n\nArrayDicom = vtkImageToNumPy(threshold.GetOutput(), ConstPixelDims)\n# plotHeatmap(numpy.rot90(ArrayDicom[:, 256, :]), name=\"CT_Thresholded\")\n\n\n\ndmc = vtk.vtkDiscreteMarchingCubes()\ndmc.SetInputConnection(threshold.GetOutputPort())\ndmc.GenerateValues(1, 1, 1)\ndmc.Update()\n\n\nmapper = vtk.vtkPolyDataMapper()\nmapper.SetInputConnection(dmc.GetOutputPort())\n\nactor = vtk.vtkActor()\nactor.SetMapper(mapper)\n\nrenderer = vtk.vtkRenderer()\nrenderer.AddActor(actor)\nrenderer.SetBackground(1.0, 1.0, 1.0)\n\ncamera = renderer.MakeCamera()\ncamera.SetPosition(-500.0, 245.5, 122.0)\ncamera.SetFocalPoint(301.0, 245.5, 122.0)\ncamera.SetViewAngle(30.0)\ncamera.SetRoll(-90.0)\nrenderer.SetActiveCamera(camera)\nvtk_show(renderer, 600, 600)\n\n\ncamera = renderer.GetActiveCamera()\ncamera.SetPosition(301.0, 1045.0, 122.0)\ncamera.SetFocalPoint(301.0, 245.5, 122.0)\ncamera.SetViewAngle(30.0)\ncamera.SetRoll(0.0)\nrenderer.SetActiveCamera(camera)\nvtk_show(renderer, 600, 600)\n\n\n\nwriter = vtk.vtkSTLWriter()\nwriter.SetInputConnection(dmc.GetOutputPort())\nwriter.SetFileTypeToBinary()\nwriter.SetFileName(\"Skull with hole.stl\")\nwriter.Write()\n\n","repo_name":"HaneenIbrahim2/Orange-Ai-Diploma","sub_path":"Sessions topics/Material/19- Audio Analysis/Handling Medical Data/dicomtoSTL.py","file_name":"dicomtoSTL.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"38"} +{"seq_id":"19081653083","text":"import itertools\n\n\n\ndef is_possible(n, s1, s2):\n d1 = n//10\n d2 = n % 10\n if(6 in s1 or 9 in s1):\n s1 = s1.union({6,9})\n if(6 in s2 or 9 in s2):\n s2 = s2.union({6,9})\n \n if((d1 in s2 and d2 in s1) or (d1 in s1 and d2 in s2)):\n return(True)\n return(False)\n\ncount = 0\nfor s1,s2 in itertools.product(itertools.combinations(range(10),6),repeat=2):\n if(all([is_possible(i**2,set(s1),set(s2)) for i in range(1,10)])):\n count += 1\nprint(count/2)","repo_name":"Abe27342/project-euler","sub_path":"src/90.py","file_name":"90.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"1691862875","text":"\"\"\"Defines the logic of built-in VBA operators\"\"\"\n\n# To implement support for a new operator, you must:\n# - ensure the types it works with are implemented, see type.py\n# - add a new entry to OPERATORS_MAP\n# - add it to the list of operator tokens\n\n# TODO:\n# - For 
deobfuscation: add support for Operator.COMMUTATIVE/ASSOCIATIVE ...\n# with <<\n# - Take Let-coercion into account, improve conversion process\n# - Extends supported types\n# - Implement Like and Is\n\nfrom abc import ABC, abstractmethod\nfrom dataclasses import InitVar, dataclass\nfrom inspect import signature\nfrom typing import (\n Any,\n Callable,\n Dict,\n Generic,\n List,\n Optional,\n Tuple,\n TypeVar,\n Union,\n)\nfrom typing import Type as pType\n\nfrom pyparsing import Or, opAssoc\n\nfrom .error import OperatorError\nfrom .type import Type\nfrom .value import ConversionError, Value\n\n\nclass Operation(ABC):\n pass\n\n\n@dataclass\nclass UnaryOperation(Operation):\n \"\"\"Represent the underlying function of a unary operator\"\"\"\n\n function: InitVar[Callable[[Any], Any]]\n arg_type: Type\n return_type: Type\n\n # Workaround for mypy not supporting class-level hints for callbacks\n def __post_init__(self, function: Callable[[Any], Any]) -> None:\n self.function = function\n\n\n@dataclass\nclass BinaryOperation(Operation):\n \"\"\"Represent the underlying function of a binary operator\"\"\"\n\n function: InitVar[Callable[[Any, Any], Any]]\n left_type: Type\n right_type: Type\n return_type: Type\n\n # Workaround for mypy not supporting class-level hints for callbacks\n def __post_init__(self, function: Callable[[Any, Any], Any]) -> None:\n self.function = function\n\n\nT = TypeVar(\"T\", UnaryOperation, BinaryOperation)\n\n\nclass Operator(Generic[T], ABC):\n \"\"\"\n Abstract operator, be it unary or binary.\n \"\"\"\n\n symbol: str\n operations: List[T]\n\n @abstractmethod\n def __init__(self, symbol: str, operations: List[T]) -> None:\n self.symbol = symbol\n self.operations = operations\n\n @staticmethod\n def from_symbol(symbol: str) -> \"Operator\":\n \"\"\"Build an operator from its symbol. It uses OPERATOR_MAP.\"\"\"\n try:\n return OPERATORS_MAP[symbol]\n except KeyError:\n msg = f\"Operator {symbol} is not supported yet\"\n raise NotImplementedError(msg)\n\n def __str__(self) -> str:\n return self.symbol\n\n @staticmethod\n def from_operation(symbol: str, operation: Operation) -> \"Operator\":\n \"\"\"Build an operator from a symbol and a single.\"\"\"\n operator_type: Union[pType[BinaryOperator], pType[UnaryOperator]]\n if isinstance(operation, UnaryOperation):\n return UnaryOperator(symbol, [operation])\n elif isinstance(operation, BinaryOperation):\n return BinaryOperator(symbol, [operation])\n\n msg = f\"Can't build Operator from type {type(operation)}\"\n raise RuntimeError(msg)\n\n\nclass UnaryOperator(Operator[UnaryOperation]):\n \"\"\"\n Class used to evaluate unary operations with operators that may accept\n different types.\n \"\"\"\n\n def __init__(self, symbol: str, operations: List[UnaryOperation]) -> None:\n super().__init__(symbol, operations)\n\n def operate(self, value: Value) -> Value:\n \"\"\"\n Find the first UnaryOperation in self.operations that can be called\n with the given value, and return its result. 
Raise a ConversionError if\n there is no compatible operator.\n \"\"\"\n for operation in self.operations:\n try:\n converted_arg = value.convert_to(operation.arg_type)\n value = operation.function(converted_arg)\n value_type = operation.return_type\n return Value.from_value(value, value_type)\n except ConversionError:\n pass\n\n msg = (\n f\"Type {value.base_type} does not match with operator \"\n f\"{self.symbol}\"\n )\n raise OperatorError(msg)\n\n\nclass BinaryOperator(Operator[BinaryOperation]):\n \"\"\"\n Class used to evaluate binary operations with operators that may accept\n different types.\n \"\"\"\n\n def __init__(self, symbol: str, operations: List[BinaryOperation]) -> None:\n super().__init__(symbol, operations)\n\n def operate(self, left_value: Value, right_value: Value) -> Value:\n \"\"\"\n Find the first BinaryOperation in self.operations that can be called\n with the two given values, and return its result. Raise a\n ConversionError if there is no compatible operator.\n \"\"\"\n for operation in self.operations:\n try:\n converted_left = left_value.convert_to(operation.left_type)\n converted_right = right_value.convert_to(operation.right_type)\n value = operation.function(converted_left, converted_right)\n value_type = operation.return_type\n return Value.from_value(value, value_type)\n except ConversionError:\n pass\n\n msg = (\n f\"Types {left_value.base_type}, {right_value.base_type} do \"\n f\"not match with operator {self.symbol}\"\n )\n raise OperatorError(msg)\n\n\nclass OperatorsMap:\n \"\"\"\n Main class used to add operators to the parser. A single global instance is\n used, OPERATORS_MAP.\n\n To implement support for a new operator, use add_operation or the <<\n operator. Precedence is managed by start_precedence_group and\n end_precedence_group. 
Finally, you can use get_precedence_list with\n pyparsing.infixNotation.\n \"\"\"\n\n __operators: Dict[str, Operator]\n __ordered_operators: List[List[str]]\n __growing_precedence: bool\n\n def __init__(self) -> None:\n self.__operators = dict()\n self.__ordered_operators = []\n self.__growing_precedence = True\n\n def start_precedence_group(self) -> None:\n \"\"\"\n Start a precedence group: all the following added operations will have\n the same precedence.\n \"\"\"\n self.__ordered_operators.append([])\n self.__growing_precedence = False\n\n def end_precedence_group(self) -> None:\n \"\"\"\n Stop the current precedence group: all the added operations will have\n increasing precedence.\n \"\"\"\n self.__growing_precedence = True\n\n def get_precedence_list(self, parse_unary, parse_binary):\n \"\"\"Build a precedence list to be used with pyparsing.infixNotation.\"\"\"\n precedence_list = []\n for symbol_list in self.__ordered_operators:\n expression = Or(symbol_list)\n if type(self.__operators[symbol_list[0]]) is UnaryOperator:\n arity = 1\n parsing_function = parse_unary\n associativity = opAssoc.RIGHT\n else:\n arity = 2\n parsing_function = parse_binary\n associativity = opAssoc.LEFT\n\n entry = (expression, arity, associativity, parsing_function)\n precedence_list.append(entry)\n\n return precedence_list\n\n def __insert_operation(self, symbol: str, operation: Operation) -> None:\n \"\"\"\n Insert an operation associated with a given symbol, updating\n __operators and __ordered_operators\n \"\"\"\n if symbol in self.__operators:\n assert isinstance(\n operation, type(self.__operators[symbol].operations[-1])\n )\n self.__operators[symbol].operations.append(operation)\n else:\n if self.__growing_precedence:\n self.__ordered_operators.append([symbol])\n else:\n self.__ordered_operators[-1].append(symbol)\n\n operator = Operator.from_operation(symbol, operation)\n self.__operators[symbol] = operator\n\n def add_unary_operation(\n self,\n symbol: str,\n function: Callable[[Any], Any],\n arg_type: Type,\n rtype: Optional[Type] = None,\n ) -> None:\n \"\"\"\n Add a unary operation to the map. Be carefull of the call order of this\n method: operators are added with decreasing precedence.\n \"\"\"\n assert len(signature(function).parameters) == 1\n\n if rtype is None:\n rtype = arg_type\n\n operation = UnaryOperation(function, arg_type, rtype)\n self.__insert_operation(symbol, operation)\n\n def add_binary_operation(\n self,\n symbol: str,\n function: Callable[[Any, Any], Any],\n ltype: Type,\n rtype: Optional[Type] = None,\n return_type: Optional[Type] = None,\n ) -> None:\n \"\"\"\n Add a binary operation to the map. Be carefull of the call order of\n this method: operators are added with decreasing precedence.\n \"\"\"\n assert len(signature(function).parameters) == 2\n\n if return_type is None:\n return_type = ltype\n\n if rtype is None:\n rtype = ltype\n\n operation = BinaryOperation(function, ltype, rtype, return_type)\n self.__insert_operation(symbol, operation)\n\n def __getitem__(self, symbol: str) -> Operator:\n \"\"\"Return the BinaryOperator corresponding to the given symbol.\"\"\"\n return self.__operators[symbol]\n\n def __lshift__(self, operation_tuple: Tuple[Any, ...]) -> None:\n \"\"\"\n Helper function to call add_operation. Pass it a tuple with the same\n arguments you would pass to add_operation, in the same order. 
There\n is one exception : you can specify only the return type and left\n argument type of a binary operator, with (symbol, function, left,\n return).\n \"\"\"\n assert len(operation_tuple) >= 3\n symbol = operation_tuple[0]\n function = operation_tuple[1]\n ltype = operation_tuple[2]\n\n arity = len(signature(function).parameters)\n assert arity in (1, 2)\n\n if arity == 1:\n assert len(operation_tuple) in (3, 4)\n self.add_unary_operation(*operation_tuple)\n elif arity == 2:\n assert len(operation_tuple) in (3, 4, 5)\n\n if len(operation_tuple) == 4:\n return_type = operation_tuple[3]\n self.add_binary_operation(\n symbol, function, ltype, return_type=return_type\n )\n else:\n self.add_binary_operation(*operation_tuple)\n\n\nOPERATORS_MAP = OperatorsMap()\n\n# Arithmetic operators\nOPERATORS_MAP << (\"^\", lambda l, r: l.value ** r.value, Type.Integer)\n\nOPERATORS_MAP.start_precedence_group()\nOPERATORS_MAP << (\"*\", lambda l, r: l.value * r.value, Type.Integer)\nOPERATORS_MAP << (\"/\", lambda l, r: l.value / r.value, Type.Integer)\nOPERATORS_MAP.end_precedence_group()\n\nOPERATORS_MAP << (\"\\\\\", lambda l, r: l.value // r.value, Type.Integer)\nOPERATORS_MAP << (\"Mod\", lambda l, r: l.value % r.value, Type.Integer)\n\nOPERATORS_MAP.start_precedence_group()\nOPERATORS_MAP << (\"+\", lambda l, r: l.value + r.value, Type.Integer)\nOPERATORS_MAP << (\"+\", lambda l, r: l.value + r.value, Type.String)\nOPERATORS_MAP << (\"-\", lambda l, r: l.value - r.value, Type.Integer)\nOPERATORS_MAP.end_precedence_group()\n\n# Concatenation operator\nOPERATORS_MAP << (\"&\", lambda l, r: l.value + r.value, Type.String)\n\n# Relational operators\nOPERATORS_MAP.start_precedence_group()\nOPERATORS_MAP << (\n \"=\",\n lambda l, r: l.value == r.value,\n Type.Integer,\n Type.Boolean,\n)\nOPERATORS_MAP << (\n \"=\",\n lambda l, r: l.value == r.value,\n Type.String,\n Type.Boolean,\n)\nOPERATORS_MAP << (\n \"<>\",\n lambda l, r: l.value != r.value,\n Type.Integer,\n Type.Boolean,\n)\nOPERATORS_MAP << (\n \"<>\",\n lambda l, r: l.value != r.value,\n Type.String,\n Type.Boolean,\n)\nOPERATORS_MAP << (\n \"><\",\n lambda l, r: l.value != r.value,\n Type.Integer,\n Type.Boolean,\n)\nOPERATORS_MAP << (\n \"><\",\n lambda l, r: l.value != r.value,\n Type.String,\n Type.Boolean,\n)\nOPERATORS_MAP << (\n \"<\",\n lambda l, r: l.value < r.value,\n Type.Integer,\n Type.Boolean,\n)\nOPERATORS_MAP << (\n \">\",\n lambda l, r: l.value > r.value,\n Type.Integer,\n Type.Boolean,\n)\nOPERATORS_MAP << (\n \"<=\",\n lambda l, r: l.value <= r.value,\n Type.Integer,\n Type.Boolean,\n)\nOPERATORS_MAP << (\n \">=\",\n lambda l, r: l.value >= r.value,\n Type.Integer,\n Type.Boolean,\n)\nOPERATORS_MAP.end_precedence_group()\n\n\n# Logical and bitwise operators\nOPERATORS_MAP << (\"Not\", lambda a: not a.value, Type.Boolean)\nOPERATORS_MAP << (\"And\", lambda l, r: l.value and r.value, Type.Boolean)\nOPERATORS_MAP << (\"Or\", lambda l, r: l.value or r.value, Type.Boolean)\nOPERATORS_MAP << (\"Xor\", lambda l, r: l.value != r.value, Type.Boolean)\nOPERATORS_MAP << (\"Eqv\", lambda l, r: l.value == r.value, Type.Boolean)\nOPERATORS_MAP << (\"Imp\", lambda l, r: (not l.value) or r.value, Type.Boolean)\n","repo_name":"ldbo/SpuriousEmu","sub_path":"emu/operator.py","file_name":"operator.py","file_ext":"py","file_size_in_byte":12896,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"38"} +{"seq_id":"32662440821","text":"import signal\nfrom state_name import StateName\nfrom state import 
State\nfrom time import sleep\n\nclass GracefulKiller:\n    kill_now = False\n\n    def __init__(self, state):\n        signal.signal(signal.SIGINT, self.exit_handler(state))\n        signal.signal(signal.SIGTERM, self.exit_handler(state))\n\n    def exit_handler(self, state):\n        def exit(signum, frame):\n            state.set_state(StateName.TERMINATE)\n            sleep(0.5)\n            self.kill_now = True\n\n        return exit\n","repo_name":"Pirolf/princessfoi_raspberry","sub_path":"graceful_killer.py","file_name":"graceful_killer.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"39583369728","text":"from sqlalchemy import Column, Integer, String , ForeignKey , Float , Boolean\nfrom sqlalchemy.orm import relationship, backref\nfrom Model.proyecto_model import Proyecto\nfrom webargs.core import T\nfrom database import Base\nfrom Utils.utils import ToDict\n\n\nclass Historia(Base,ToDict):\n    __tablename__ = 'HISTORIA_USUARIO'\n    id = Column(Integer, primary_key=True)\n    titulo = Column(String(100))\n    criterio_aceptacion = Column(String(1000))\n    como = Column(String(1000))\n    quiero = Column(String(1000))\n    para = Column(String(1000))\n    id_proyecto = Column(Integer,ForeignKey('PROYECTO.id')) \n    estado = Column(Boolean(True))\n    proyecto = relationship(\n        Proyecto,\n        backref=backref('historiales', uselist=True)\n    )\n\n    # id_escenario = Column(Integer,ForeignKey('ESCENARIO_HISTORIA.id'))\n\n    def __init__(self, titulo, criterio_aceptacion):\n        self.titulo = titulo\n        self.criterio_aceptacion = criterio_aceptacion","repo_name":"emanuelrgomez19/gestor","sub_path":"Model/historiaUsuario_model.py","file_name":"historiaUsuario_model.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"29225121223","text":"from PyQt.import_module import *\nfrom PyQt import sample_widget_template, color_variable, styleSheet\nfrom collections import OrderedDict\nfrom nodeEditor.node_sertializable import Serializable\nclass QDMNodeContentWidget(QWidget, Serializable):\n    def __init__(self, node, parent=None):\n        super().__init__(parent)\n\n        self.sample_widget_template = sample_widget_template.SAMPLE_WIDGET_TEMPLATE()\n        self.color_variable = color_variable.COLOR_VARIABLE()\n        self.styleSheet = styleSheet.STYLESHEET()\n        self.node = node\n\n        self.initUI()\n\n    def initUI(self):\n        '''\n\n        :return:\n        '''\n        self.layout = self.sample_widget_template.vertical_layout(parent_self=self)\n        self.setLayout(self.layout)\n\n\n        label = self.sample_widget_template.label(set_text='Test')\n        self.layout.addWidget(label)\n\n        self.layout.addWidget(QDMTextEdit('Sample'))\n\n    def setEditingFlag(self, val):\n        self.node.scene.grScene.views()[0].editingFlag = val\n\n\n    def serialize(self):\n\n        data = OrderedDict()\n        data['id'] = self.node.id\n        data['content'] = 'content'\n\n        return data\n\n\n    def deserialize(self, data, hashmap={}):\n        print(\"deserializing node content for %s\" % self.node.id)\n\n        return True\n\n\n\n\nclass QDMTextEdit(QTextEdit):\n    def focusInEvent(self, event):\n        self.parentWidget().setEditingFlag(True)\n        super().focusInEvent(event)\n\n    def focusOutEvent(self, event):\n        self.parentWidget().setEditingFlag(False)\n        super().focusOutEvent(event)\n","repo_name":"nikPipe/nodeEditor","sub_path":"node_content_widget.py","file_name":"node_content_widget.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"19519571728","text":"import keras\nfrom keras.datasets import cifar10\n\n\ndef load_data(num_classes):\n    (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n    x_train = x_train.astype('float32')\n    x_test = x_test.astype('float32')\n    x_train /= 255\n    x_test /= 255\n    y_train = keras.utils.to_categorical(y_train, num_classes)\n    y_test = keras.utils.to_categorical(y_test, num_classes)\n\n    return (x_train, y_train), (x_test, y_test)","repo_name":"HuanjunWang/gobang","sub_path":"nn/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"22713212355","text":"import random\n\n\"\"\"\nimplement an algorithm to find the kth to last element of a singly linked list\n\"\"\"\n\nclass Node:\n    def __init__(self, dataval=None):\n        self.dataval = dataval\n        self.nextval = None\n    def appendToTaik(self, dataVal = None):\n        end = Node(dataVal)\n        n = self\n        while n.nextval != None:\n            n = n.nextval\n        n.nextval = end\n\nclass SLinkedList:\n    def __init__(self):\n        self.headval = None\n\ndef findkth(linkedList:SLinkedList, k:int):\n    dups = {}\n    n = linkedList.headval\n    while n.nextval != None:\n        print(n.dataval)\n        n = n.nextval\n    n = linkedList.headval\n    future = linkedList.headval\n    for i in range(k):\n        if future.nextval != None:\n            future = future.nextval\n        else:\n            print(\"Not possible. Not enough elements\")\n            break\n    while future.nextval != None:\n        n = n.nextval\n        future = future.nextval\n    print(n.dataval)\n\nif __name__ == \"__main__\":\n    linkedList = SLinkedList()\n    linkedList.headval = Node(10)\n    for _ in range(200):\n        linkedList.headval.appendToTaik(random.randint(1,100))\n    findkth(linkedList, 5)\n# Complexity: o(n) time, o(1) space","repo_name":"AntonioDehesa/InterPrep","sub_path":"CTCI/ReturnkthToLast/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"38024732551","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\n\n# User Fields\n# username\n# password\n# email\n# first_name\n# last_name\n\n\nclass TokenConvite(models.Model):\n    PROFESSOR = 3\n    MONITOR = 2\n    ALUNO = 1\n    TIPOS = [\n        (PROFESSOR, \"Professor\"),\n        (MONITOR, \"Monitor\"),\n        (ALUNO, \"Aluno\")\n    ]\n    tipo = models.IntegerField(\n        choices=TIPOS,\n        default=ALUNO,\n        blank=False\n    )\n    token = models.CharField(max_length=20, primary_key=True)\n    usado = models.BooleanField(default=False)\n\n    def __unicode__(self):\n        return \"TokenConvite - %s (%s)\" % (self.token, self.get_tipo_display())\n\n\nclass Usuario(models.Model):\n    # um usuario padrão do Django para cada Usuario da nossa aplicação\n    user = models.OneToOneField(User)\n    token = models.ForeignKey(TokenConvite)\n    pagina = models.ForeignKey(\"pages.Pagina\")\n\n    class Meta:\n        abstract = True\n\n    def __unicode__(self):\n        return \"%s - %s\" % (self.user.get_username(), self.user.get_full_name())\n\n\nclass Professor(Usuario):\n\n    class Meta:\n        verbose_name = 'Professor'\n        verbose_name_plural = 'Professores'\n\n\nclass Monitor(Usuario):\n\n    class Meta:\n        verbose_name = 'Monitor'\n        verbose_name_plural = 'Monitores'\n\n\nclass Aluno(Usuario):\n\n    class Meta:\n        verbose_name = 'Aluno'\n        verbose_name_plural = 'Alunos'\n","repo_name":"rodrigolc/PAP","sub_path":"usuarios/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"72206989231","text":"\n# coding: utf-8\n\nimport requests\nimport json\n\n# \\が入っているので、raw文字列として保存\n#searchJSON = r'C:\\Users\\tkuji\\Desktop\\search.json'\nresult = r'C:\\Users\\tkuji\\Desktop\\result.txt'\n\n# とりあえず見たやつ\n#with open(searchJSON, 'r', encoding='utf-8') as f:\n#    data = json.load(f)\n\n#print(json.dumps(data, ensure_ascii=False, indent=4))\n\n# パースしたやつをファイルに書いたやつ\n#data = open(searchJSON, 'r', encoding='utf-8')\n#outfile = open(result, 'w')\n\n#json.dump(json.load(data),outfile,ensure_ascii=False,indent=4)\n\n#outfile.close()\n\nurlBase = 'https://itp.ne.jp/search?size=20&sortby=01&media=pc&kw=病院'\narea = '青森県三戸郡階上町'\ngetPoint = 0\n\n# 最初のリスト取得\nurl = urlBase + '&from=' + str(getPoint) + '&area=' + area\nprint('fetching ' + url)\njsonDict = json.loads( requests.get(url).content )\n\nhitsTotal = jsonDict['hits']['total']\nprint('hit total = ' + str(hitsTotal))\nbufferList = []\n\nwhile True:\n    for i in jsonDict['hits']['hits']:\n        dataStr = i['_source']['ki']['name'] + ','\n        dataStr += i['_source']['ki']['jusyo'] + i['_source']['ki']['jyusyo_banti']\n\n        for j in i['_source']['ki']['sginfo']:\n            dataStr += ',' + j\n\n        bufferList.append(dataStr)\n\n    getPoint += 20\n    \n    if getPoint > hitsTotal:\n        break\n\n    url = urlBase + '&from=' + str(getPoint) + '&area=' + area\n    print('fetching ' + url)\n    jsonDict = json.loads( requests.get(url).content )\n\nprint('fetch done. 
bufferWriting...')\nbuffer = \"\\n\".join(bufferList)\n\nwith open(result, 'w', encoding='utf-8') as outf:\n outf.writelines(buffer)\n\nprint('ALL DONE!')\n\n","repo_name":"laygan/iTownpage-scraper","sub_path":"iTownpage scraper/iTownpage_scraper.py","file_name":"iTownpage_scraper.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"10745649683","text":"def sq(a,b):\n if a%1 == 0 or b%1 == 0:\n return(1)\n else:\n return(0)\nfrom math import *\nfor i in range(int(input())):\n n=int(input())\n r1 = sqrt(5*n**2+4)\n r2 = sqrt(5*n**2-4)\n if sq(r1,r2):\n print (\"IsFibo\")\n else:\n print (\"IsNotFibo\")\n","repo_name":"AhmedHelalAhmed/My_Hackerrank_Solutions","sub_path":"Practice/Algorithm/Warmup/Is Fibo.py","file_name":"Is Fibo.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"8822388512","text":"\"\"\"Schema for validating API group resources.\"\"\"\n\nfrom h.i18n import TranslationString as _\nfrom h.models.group import (\n GROUP_DESCRIPTION_MAX_LENGTH,\n GROUP_NAME_MAX_LENGTH,\n GROUP_NAME_MIN_LENGTH,\n)\nfrom h.schemas.base import JSONSchema, ValidationError\nfrom h.util.group import GROUPID_PATTERN, split_groupid\n\nGROUP_SCHEMA_PROPERTIES = {\n \"name\": {\n \"type\": \"string\",\n \"minLength\": GROUP_NAME_MIN_LENGTH,\n \"maxLength\": GROUP_NAME_MAX_LENGTH,\n },\n \"description\": {\"type\": \"string\", \"maxLength\": GROUP_DESCRIPTION_MAX_LENGTH},\n \"groupid\": {\"type\": \"string\", \"pattern\": GROUPID_PATTERN},\n}\n\n\nclass GroupAPISchema(JSONSchema):\n \"\"\"Base class for validating group resource API data.\"\"\"\n\n schema = {\"type\": \"object\", \"properties\": GROUP_SCHEMA_PROPERTIES}\n\n def __init__(self, group_authority=None, default_authority=None):\n \"\"\"\n Initialize a new group schema instance.\n\n The ``group_authority`` and ``default_authority`` args are used for\n validating any ``groupid`` present in the data being validated.\n\n :arg group_authority: The authority associated with the group resource.\n (default None)\n :arg default_authority: The service's default authority (default None)\n\n \"\"\"\n super().__init__()\n self.group_authority = group_authority\n self.default_authority = default_authority\n\n def validate(self, data):\n \"\"\"\n Validate against the JSON schema and also valid any ``groupid`` present.\n\n :raise h.schemas.ValidationError: if any part of validation fails\n :return: The validated data\n :rtype: dict\n\n \"\"\"\n appstruct = super().validate(data)\n appstruct = self._whitelisted_fields_only(appstruct)\n self._validate_groupid(appstruct)\n\n return appstruct\n\n def _validate_groupid(self, appstruct):\n \"\"\"\n Validate the ``groupid`` to make sure it adheres to authority restrictions.\n\n ``groupid`` is only allowed if the authority of the group associated\n with it is not the default authority—i.e. 
this is a third-party group.\n\n :arg appstruct: Data, which may or may not contain a ``groupid`` entry\n :type appstruct: dict\n :raise h.schemas.ValidationError:\n\n \"\"\"\n groupid = appstruct.get(\"groupid\", None)\n if groupid is None: # Nothing to validate\n return\n\n if (self.group_authority is None) or (\n self.group_authority == self.default_authority\n ):\n # This is a first-party group\n raise ValidationError(\n # pylint:disable=consider-using-f-string\n \"{err_msg} '{authority}'\".format(\n err_msg=_(\n \"groupid may only be set on groups oustide of the default authority\"\n ),\n authority=self.default_authority,\n )\n )\n\n groupid_parts = split_groupid(groupid)\n\n if groupid_parts[\"authority\"] != self.group_authority:\n # The authority part of the ``groupid`` doesn't match the\n # group's authority\n raise ValidationError(\n # pylint:disable=consider-using-f-string\n \"{err_msg} '{groupid}'\".format(\n err_msg=_(\"Invalid authority specified in groupid\"), groupid=groupid\n )\n )\n\n @staticmethod\n def _whitelisted_fields_only(appstruct):\n \"\"\"Return a new appstruct containing only schema-defined fields.\"\"\"\n\n new_appstruct = {}\n\n for allowed_field in GROUP_SCHEMA_PROPERTIES:\n if allowed_field in appstruct:\n new_appstruct[allowed_field] = appstruct[allowed_field]\n\n return new_appstruct\n\n\nclass CreateGroupAPISchema(GroupAPISchema):\n \"\"\"Schema for validating create-group API data.\"\"\"\n\n schema = {\n \"type\": \"object\",\n \"properties\": GROUP_SCHEMA_PROPERTIES,\n \"required\": [\"name\"], # ``name`` is a required field when creating\n }\n\n\nclass UpdateGroupAPISchema(GroupAPISchema):\n \"\"\"\n Class for validating update-group API data.\n\n Currently identical to base schema\n \"\"\"\n","repo_name":"hypothesis/h","sub_path":"h/schemas/api/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","stars":2810,"dataset":"github-code","pt":"38"} +{"seq_id":"37966445782","text":"#%%\nimport torch\nimport torchvision.models as models\nfrom torch import torch, nn\nimport numpy as np\n\n\n#%%\nclass SubBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride=1, downsample=None):\n super(SubBlock, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.stride = stride\n self.downsample = downsample\n self.batch_norm1 = nn.BatchNorm2d(out_channels)\n self.batch_norm2 = nn.BatchNorm2d(out_channels)\n\n self.conv1 = nn.Conv2d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=(3,3),\n stride=self.stride,\n padding=1,\n bias=False)\n\n self.conv2 = nn.Conv2d(in_channels=out_channels,\n out_channels=out_channels,\n kernel_size=(3,3),\n stride=1,\n padding=1,\n bias=False)\n\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n _id = x\n output = self.conv1(x) #here\n output = self.batch_norm1(output)\n output = self.relu(output)\n\n output = self.conv2(output)\n output = self.batch_norm2(output)\n\n if self.downsample is not None:\n # print('downsample:', self.downsample)\n _id = self.downsample(x)\n\n #THE RESENT PART\n output += _id\n output = self.relu(output)\n\n return output\n\n\n#%%\nclass Cancer_model(nn.Module):\n def __init__(self, block, path, layer_sizes=[2,3,4,5], residual = True):\n super(Cancer_model, self).__init__()\n self.k_size_1 = 50\n self.k_size_2 = 30\n self.residual = residual\n self.layer_sizes = layer_sizes\n self.block = block\n self.in_planes = 64\n\n self.path = path\n\n\n\n #we 
want to get the output to have three \n \n \n self.conv1 = nn.Conv2d(in_channels=3, \n out_channels=64,\n kernel_size=(7,7),\n stride=2,\n padding=3)\n\n\n self.max_pool1 = nn.MaxPool2d(kernel_size=(3,3),\n stride=2,\n padding=1)\n\n self.max_pool2 = nn.MaxPool2d(kernel_size=(2,2),\n stride=2,\n padding=1)\n\n # numfeatures is C from a tensot of size (N, C, H, W) - basically the number of channels\n # default eps = 1e-5\n # default momentum = 0.1 \n num_feat = 64\n epsilon = 1e-5\n mom = 0.1 \n self.batch_norm = nn.BatchNorm2d(num_features=num_feat,\n eps=epsilon, \n momentum=mom )\n\n # self.mod_list_1 = nn.ModuleList([ nn.Conv2d(in_channels=,\n # out_channels=,\n # kernel_size=,\n # stride=) for i in range(3)]) \n self.relu = nn.ReLU(inplace=True)\n #try using leaky RELU instead of real RELU\n self.leaky_relu = nn.LeakyReLU(inplace=True)\n\n # self.mod_list = nn.ModuleList([self.conv1, self.conv2, self.max_pool1])\n # self.nn_seq = nn.Sequential(*[self.conv1, self.conv2, self.max_pool1])\n\n self.subBlock1 = self.make_subBlock(num_ch=64,\n num_blocks=layer_sizes[0],\n stride=1)\n self.subBlock2 = self.make_subBlock(num_ch=128,\n num_blocks=layer_sizes[1],\n stride=2)\n self.subBlock3 = self.make_subBlock(num_ch=256,\n num_blocks=layer_sizes[2],\n stride=2)\n self.subBlock4 = self.make_subBlock(num_ch=512,\n num_blocks=layer_sizes[3],\n stride=2)\n\n self.avg_pool = nn.AdaptiveAvgPool2d((1,1))\n self.mid_val = 250\n self.fc1 = nn.Linear(in_features=512,\n out_features=self.mid_val)\n\n self.fc2 = nn.Linear(in_features=self.mid_val,\n out_features=2)\n\n\n def make_subBlock(self, num_ch, num_blocks, stride):\n #take care of adding stuff and batch norm\n if (stride != 1) or (self.in_planes != num_ch):\n downsample = nn.Sequential(\n nn.Conv2d(in_channels=self.in_planes,\n out_channels=num_ch,\n kernel_size=(1,1),\n stride=stride),\n nn.BatchNorm2d(num_ch)\n )\n else:\n downsample = None\n # print('setting num inplanes', self.in_planes)\n block_list = []\n block_list.append(self.block(in_channels=self.in_planes, \n out_channels=num_ch, \n stride=stride, \n downsample=downsample))\n \n self.in_planes = num_ch\n for _ in range(1, num_blocks):\n block_list.append(self.block(in_channels=self.in_planes, \n out_channels=num_ch))\n \n return nn.Sequential(*block_list)\n\n def forward(self, x):\n #input size is 500, 1000\n\n #base model off of resnet\n #use stride > 1 to downsample\n\n #1 input channel\n # print('shape of x:', x.size())\n\n # print(x)\n x = self.conv1(x)\n # print('size after the first conv:', x.size())\n x = self.batch_norm(x)\n x = self.relu(x)\n x = self.max_pool1(x)\n \n x = self.subBlock1(x)\n # print('shape of x after the first block:', x.size())\n x = self.subBlock2(x)\n x = self.subBlock3(x)\n x = self.subBlock4(x)\n\n # print('shape of x ', x.size())\n x = self.avg_pool(x)\n \n x = x.view(x.size(0), -1)\n # print('shape of x after:', x.size())\n \n x = self.fc1(x)\n\n x = self.fc2(x)\n\n return x\n\n#%%\n#initialize the cancer model\ndef get_model(device, path = None):\n model = Cancer_model(block=SubBlock, \n layer_sizes=[2,3,4,5],\n residual=True,\n path=path).double().to(device)\n\n return model\n\n# def get_empty_model(device):\n# model = Cancer_model(*args, **kwards).double().to(device)\n# return model\n","repo_name":"adithyaBellary/cancer-detection","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} 
+{"seq_id":"24687986932","text":"import numpy as np\nimport json\nfrom config import *\nimport sqlite3\nfrom statistics import median\nfrom build_school_dict import build_school_class\n\nclass data_processing(build_school_class):\n    def __init__(self, db_fname, pub_fname):\n        self.conn = sqlite3.connect(db_fname)\n        self.cur = self.conn.cursor()\n        self.load_author()\n        self.load_publication(pub_fname)\n        self.conn.close()\n\n    def load_publication(self, pub_fname):\n        \"\"\"read publication information\"\"\"\n        with open(pub_fname, 'r') as jf:\n            bdata_ = json.load(jf)\n        self.bdata = {}\n        for k in bdata_.keys():\n            self.bdata[k] = bdata_[k]\n            bdata_[k] = None\n        bdata_ = None\n\n\n    def save_data(self, sn_list, data, fname):\n        with open(\"result/\" + fname + \".txt\",\"a+\",encoding = 'utf-8-sig') as output_file:\n            output_file.write('{:<80} {:<5} '.format(\"School Name\", fname) + \"\\n\")\n            for i in range(len(sn_list)):\n                output_file.write('{:<80} {:<5} '.format(sn_list[i], data[i]) + \"\\n\")\n\n    def cal_faculty_number(self, save = False):\n        '''Calculate number of faculty in each department. \n        If \"save\" is True, save to Faculty_number.txt file'''\n        fnum = []\n        sn_list = []\n        for sn in self.author_dict.keys():\n            sn_list.append(sn)\n            fnum.append(len(self.author_dict[sn].keys()))\n        if save:\n            self.save_data(sn_list, fnum, \"Faculty_number\")\n        \n\n    def calc_medi_hindex(self, save = False):\n        '''Calculate median h-index of each department. \n        If \"save\" is True, save to Median_hindex.txt file'''\n        medi_hindex = []\n        sn_list = []\n        for sn in self.author_dict.keys():\n            sn_list.append(sn)\n            medi_hindex.append(median([self.author_dict[sn][x][\"hindex\"] for x in self.author_dict[sn].keys()]))\n        if save:\n            self.save_data(sn_list, medi_hindex, \"Median_hindex\")\n        \n    def calc_medi_citation(self, save = False):\n        '''Calculate median citation of each department. If \"save\" is True, save to Median_citation.txt file'''\n        medi_cit = []\n        sn_list = []\n        for sn in self.author_dict.keys():\n            sn_list.append(sn)\n            medi_cit.append(median([self.author_dict[sn][x][\"ncitation\"] for x in self.author_dict[sn].keys()]))\n        if save:\n            self.save_data(sn_list, medi_cit, \"Median_citation\")\n\n    def calc_medi_publication_all(self, save = False):\n        '''Calculate the median value of publication number of each \n        faculty in his/her whole career. If \"save\" is True, data is saved to Median_publication_all.txt file'''\n        medi_pub_all = []\n        sn_list = []\n        for sn in self.author_dict.keys():\n            sn_list.append(sn)\n            medi_pub_all.append(median([self.author_dict[sn][x][\"npapers\"] for x in self.author_dict[sn].keys()]))\n        if save:\n            self.save_data(sn_list, medi_pub_all, \"Median_publication_all\")\n\n    def calc_medi_publication_part(self, start = 1900, end = 2019, save = False):\n        '''Calculate the median value of publication number of each \n        faculty within certain time interval. If 'save' is True, data is saved to Median_publication_from_\"start\"_to_\"end\".txt file\n        default time range is from 1900 to 2019'''\n        medi_pub_part = []\n        sn_list = []\n        for i in self.bdata.keys():\n            sn_list.append(self.bdata[i][0][0][\"affil\"])\n            pub_list = []\n            for author in self.bdata[i]:\n                num_of_pub = 0\n                for publication in author:\n                    if publication[\"year\"]:\n                        if start <= int(publication[\"year\"]) <= end:\n                            num_of_pub += 1\n                pub_list.append(num_of_pub)\n            medi_pub_part.append(median(pub_list))\n        if save:\n            self.save_data(sn_list, medi_pub_part, \"Median_publication_from_\" + str(start) + \"_to_\" + str(end))\n\n    def calc_medi_high_impact_journals(self, start = 1900, end = 2019, save = False):\n        '''Calculate the number of papers published on high impact journals (Nature and Nature X, Science, Cell) from each \n        faculty within certain time interval in each department. If 'save' is True, data is saved to Median_high_impact_journals_from_\"start\"_to_\"end\".txt file\n        default time range is from 1900 to 2019'''\n        medi_num = []\n        sn_list = []\n        for i in self.bdata.keys():\n            sn_list.append(self.bdata[i][0][0][\"affil\"])\n            num_list = []\n            for author in self.bdata[i]:\n                num_of_pub = 0\n                for publication in author:\n                    if publication[\"year\"]:\n                        if start <= int(publication[\"year\"]) <= end and publication[\"journalName\"]:\n                            jname = publication[\"journalName\"].lower()\n                            if (\"nature\" in jname and len(jname) < 45) or jname == \"science\" or jname == \"cell\":\n                                num_of_pub += 1\n                                print(\"Journal name is \", publication[\"journalName\"])\n                num_list.append(num_of_pub)\n            medi_num.append(median(num_list))\n        if save:\n            self.save_data(sn_list, medi_num, \"Median_high_impact_journals_from_\" + str(start) + \"_to_\" + str(end))\n\n\n\n\nif __name__ == \"__main__\":\n    dp = data_processing(db_fname, pub_fname)\n    dp.cal_faculty_number(save = True)","repo_name":"shubinzhang/ChemRank-Scopus","sub_path":"data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"14320100161","text":"### BOJ 3182\n\nN = int(input())\n\n# 선배들 번호가 1부터 시작하니까 앞에 하나 추가해 주기\nsunbae = [0]\nresult = [0]\n\n# 선배 대답 입력\nfor _ in range(N):\n    sunbae.append(int(input()))\n\ndef dfs(start):\n    visited[start] = True # 방문\n    if not visited[sunbae[start]]: # 해당 선배 방문하지 않았으면\n        dfs(sunbae[start]) # 재귀\n\nfor i in range(1, N+1):\n    visited = [False] * (N+1)\n    dfs(i) # 1부터 (첫 번째 선배부터)\n    result.append(visited.count(True)) # 몇 명에게 방문했는지 result 배열에 추가\n\nprint(result.index(max(result))) # 가장 큰 방문수를 갖고 있는 인덱스 번호 출력하기 (인덱스 번호 = 선배 번호)\n","repo_name":"nakyung128/Python_Algorithm","sub_path":"230605/한동이는 공부가 하기 싫어!.py","file_name":"한동이는 공부가 하기 싫어!.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"7354132809","text":"import re\n\nfrom django.db.models import Q\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView\nfrom courses.models import Course\n\ndef normalize_query(query_string, findterms=re.compile(r'\"([^\"]+)\"|(\\S+)').findall, normspace=re.compile('\\s{2,}').sub):\n    return [normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)]\n\ndef get_query(query_string, search_fields):\n    query = None\n    terms = normalize_query(query_string)\n    for term in terms:\n        or_query = None\n        for field_name in search_fields:\n            q = Q(**{\"%s__icontains\" % 
field_name: term})\n if or_query is None:\n or_query = q\n else:\n or_query = or_query | q\n if query is None:\n query = or_query\n else:\n query = query & or_query\n return query\n\n\nclass SignupView(TemplateView):\n template_name = 'registration/signup.html'\n\n\ndef index(request):\n # if request.user.is_authenticated:\n # if request.user.is_teacher:\n # return redirect('teacher_quiz_change_list')\n # else:\n # return redirect('student_quiz_list')\n\n query_string = ''\n found_results = None\n\n if ('q' in request.GET) and request.GET['q'].strip():\n query_string = request.GET['q']\n result_query = get_query(query_string, ['title', 'overview', ])\n found_results = Course.objects.filter(result_query).order_by('-created')\n\n return render(request, 'students/index.html', {\n 'query_string': query_string,\n 'found_results': found_results\n })\n","repo_name":"preetisaroj/E-Learning-portal---Django","sub_path":"students/views/classroom.py","file_name":"classroom.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"19456596138","text":"import sqlite3\nfrom sqlite3 import Error\n\n\ndef create_connection():\n \"\"\" create a database connection to a database that resides\n in the memory\n \"\"\"\n conn = None;\n \n try:\n conn = sqlite3.connect(':memory:')\n conn.executescript(\"\"\"DROP TABLE IF EXISTS Roster;\\\n CREATE TABLE Roster(Name TEXT, Species TEXT, IQ INT);\\\n INSERT INTO Roster VALUES('Jean-baptise Zorg', 'Human', 122);\\\n INSERT INTO Roster VALUES('Korben Dallas', 'Meat Popsicle', 100);\\\n INSERT INTO Roster VALUES('Ak not Mangalore', 'Human', 122);\"\"\")\n print(sqlite3.version)\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()\n\n\nif __name__ == '__main__':\n create_connection()\n\n\n\n \n\n\n \n","repo_name":"Topaz557/Python-Projects","sub_path":"database-py-challenge.py","file_name":"database-py-challenge.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"70748178990","text":"\"\"\"Add users table\n\nRevision ID: 1595f4d4b09a\nRevises: \nCreate Date: 2020-10-07 20:44:04.930922\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '1595f4d4b09a'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('users_user',\n sa.Column('id', postgresql.UUID(), nullable=False),\n sa.Column('email', sa.String(length=255), nullable=False),\n sa.Column('is_active', sa.Boolean(), nullable=False),\n sa.Column('username', sa.String(length=64), nullable=False),\n sa.Column('password', sa.String(length=248), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_users_user_email'), 'users_user', ['email'], unique=False)\n op.create_index(op.f('ix_users_user_username'), 'users_user', ['username'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_users_user_username'), table_name='users_user')\n op.drop_index(op.f('ix_users_user_email'), table_name='users_user')\n op.drop_table('users_user')\n # ### end Alembic commands ###\n","repo_name":"guibeira/fast-api-template","sub_path":"{{cookiecutter.project_name}}/migrations/versions/1595f4d4b09a_add_users_table.py","file_name":"1595f4d4b09a_add_users_table.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"38"} +{"seq_id":"28955849533","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom app_api.controller import bmiController, temperatureController, exchangeRateController\n\n@csrf_exempt\ndef bmi(request, type):\n if request.method == 'POST':\n if type == 'calculate':\n return bmiController.calculate(request)\n \n@csrf_exempt\ndef temperature(request, type):\n if request.method == 'POST':\n if type == 'calculate':\n return temperatureController.calculate(request)\n@csrf_exempt\ndef exchangeRate(request, type):\n if request.method == 'POST':\n if type == 'calculate':\n return exchangeRateController.calculate(request)\n","repo_name":"edwinyoo44/Django_some_tool_website","sub_path":"website_bmi_v1/app_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"34880620200","text":"import csv\r\nimport codecs\r\nimport os\r\n\r\ndef remove_invalid_characters(text, encoding):\r\n cleaned_text = ''\r\n for char in text:\r\n try:\r\n char.encode(encoding)\r\n cleaned_text += char\r\n except UnicodeEncodeError:\r\n pass\r\n return cleaned_text\r\n\r\ndef convert_csv_encoding(input_file, output_directory, from_encoding, to_encoding):\r\n try:\r\n if not os.path.exists(output_directory):\r\n os.makedirs(output_directory)\r\n\r\n with codecs.open(input_file, 'r', encoding=from_encoding) as file:\r\n reader = csv.reader(file)\r\n data = [row for row in reader]\r\n \r\n # Loại bỏ ký tự không hợp lệ trước khi chuyển đổi\r\n cleaned_data = []\r\n for row in data:\r\n cleaned_row = [remove_invalid_characters(cell, to_encoding) for cell in row]\r\n cleaned_data.append(cleaned_row)\r\n \r\n output_file = os.path.join(output_directory, os.path.basename(input_file))\r\n with codecs.open(output_file, 'w', encoding=to_encoding) as file:\r\n writer = csv.writer(file)\r\n writer.writerows(cleaned_data)\r\n \r\n print(f\"Successfully converted and saved to {output_file}\")\r\n except Exception as e:\r\n print(f\"An error occurred: {str(e)}\")\r\n\r\n# Config tham số\r\ninput_file = 'staffs-utf8.csv'\r\noutput_directory = 'output'\r\nfrom_encoding = 'utf-8' # Encoding\r\nto_encoding = 'shift_jis' # Encoding\r\n\r\n# Chuyển đổi từ UTF-8 sang Shift JIS\r\nconvert_csv_encoding(input_file, output_directory, from_encoding, to_encoding)\r\n\r\n# Chuyển đổi từ Shift JIS sang UTF-8\r\n# convert_csv_encoding(input_file, output_directory, to_encoding, from_encoding)\r\n","repo_name":"builyviettrinh/python_test","sub_path":"convert/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"2410434663","text":"from django.http import HttpResponse\n\n\ndef make_shopping_list(ingredients):\n shopping_cart = '\\n'.join([\n f'{ingredient[\"ingredient__name\"]}: 
{ingredient[\"number\"]}'\n        f'{ingredient[\"ingredient__measurement_unit\"]}'\n        for ingredient in ingredients\n    ])\n    response = HttpResponse(shopping_cart, content_type='text/plain')\n    response['Content-Disposition'] = (\n        'attachment;filename=shopping_cart.txt')\n    return response\n","repo_name":"AntonSpark/foodgram-project-react","sub_path":"backend/recipes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"20624602115","text":"'''Exercício Python 076: Crie um programa que tenha uma tupla única com nomes de produtos e seus\nrespectivos preços, na sequência. \nNo final, mostre uma listagem de preços, organizando os dados em forma tabular.'''\nprint(f'{\"=\"*12}Lista de produtos{\"=\"*12}')\nprodutos = ('Lapis',0.50,\n            'Caneta',1.25,\n            'Caderno',9.99,\n            'Apontador', 2.30,\n            'Mesa de escritório', 249.99)\n\nfor posicao in range(0, len(produtos)):\n    if posicao % 2 == 0:\n        print(f'{produtos[posicao]:.<30}',end = '')\n    else:\n        print(f'R${produtos[posicao]:>7.2f}')\nprint(f'{\"=\"*12}{\".\"*15}{\"=\"*12}')","repo_name":"eric-ruan/Python","sub_path":"Mundo 3/Exercicios tuplas/lista_de_precos.py","file_name":"lista_de_precos.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"23095168149","text":"import gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nclass GoogleSheetMGR(object):\n    def __init__(self, key, sheet=None, json_file=None):\n        self.key = key\n        self.sheet = sheet\n        if json_file:\n            self.json_file = json_file\n        else:\n            self.json_file = \"/root/gspread2.json\"\n\n        self._authorize()\n\n        self.keys = self.get_keys()\n\n    def _authorize(self):\n        \"\"\"\n        See: http://gspread.readthedocs.io/en/latest/oauth2.html\n        \"\"\"\n        scope = ['https://spreadsheets.google.com/feeds']\n        credentials = ServiceAccountCredentials.from_json_keyfile_name(self.json_file, scope)\n        gc = gspread.authorize(credentials)\n        self.Worksheet = gc.open_by_key(self.key)\n\n        if self.sheet:\n            self.ws_obj = self.Worksheet.get_worksheet(self.sheet)\n        else:\n            self.ws_obj = self.Worksheet.sheet1\n\n    def get_all_values(self):\n        return self._get_all_values()\n\n    def _worksheet_action(self, action, *args):\n        \"\"\"\n        Wrap gspread worksheet actions to make sure no authorization issue\n        \"\"\"\n        retry = 0\n        max_retry = 5\n        while True:\n            try:\n                func = getattr(self.ws_obj, action)\n                return func(*args)\n            except Exception as details:\n                self._authorize()\n                retry += 1\n                if retry > max_retry:\n                    raise details\n\n    def _get_all_values(self):\n        \"\"\"\n        Get all values from worksheet\n        \"\"\"\n        return self._worksheet_action('get_all_values')\n\n    def _update_cell(self, *args):\n        \"\"\"\n        Update a cell in spreadsheet\n        \"\"\"\n        return self._worksheet_action('update_cell', *args)\n\n    def get_keys(self):\n        \"\"\"\n        1st row is the key\n        \"\"\"\n        return self._get_all_values()[0]\n\n    def rework_sheet(self, data):\n        old_data = self.get_all_values()\n\n        for i_row, row_data in enumerate(old_data):\n            for i_cell, cell_data in enumerate(row_data):\n                new_val = data[i_row][i_cell]\n                if new_val != cell_data:\n                    self._update_cell(i_row + 1, i_cell + 1, new_val)\n\n    def add_new_row(self, row_data, row=None):\n        if row:\n            new_row = row\n        else:\n            new_row = len(self._get_all_values()) + 1\n\n        for index, cell_data in enumerate(row_data):\n            self._update_cell(new_row, index + 1, cell_data)\n\n    def add_new_row_by_dict(self, 
data_dict, row=None):\n if row:\n new_row = row\n else:\n new_row = len(self._get_all_values()) + 1\n\n for index, key in enumerate(self.keys):\n if key in data_dict.keys():\n self._update_cell(new_row, index + 1, data_dict[key])\n\n def search_update_by_dict(self, search_dict, data_dict):\n table = self._get_all_values()\n index_dict = {}\n index_dict2 = {}\n for index, key in enumerate(self.keys):\n if key in search_dict.keys():\n index_dict[index] = search_dict[key]\n if key in data_dict.keys():\n index_dict2[index] = data_dict[key]\n\n for row, i in enumerate(table):\n for index, val in index_dict.items():\n if i[index] != str(val):\n found = False\n break\n else:\n found = True\n\n if not found:\n continue\n\n for key, val in index_dict2.items():\n self._update_cell(row + 1, key + 1, val)\n\n def search_info_by_dict(self, search_dict, table=None):\n # TODO: merge code with search_update_by_dict\n if not table:\n table = self._get_all_values()\n\n index_dict = {}\n for index, key in enumerate(self.keys):\n if key in search_dict.keys():\n index_dict[index] = search_dict[key]\n\n ret = []\n for row, i in enumerate(table):\n for index, val in index_dict.items():\n if i[index] != str(val):\n found = False\n break\n else:\n found = True\n\n if not found:\n continue\n\n tmp = {}\n for index, key in enumerate(self.keys):\n tmp[key] = i[index]\n ret.append(tmp)\n\n return ret\n","repo_name":"LuyaoHuang/CoveragePool","sub_path":"coveragepool/upload/google_api.py","file_name":"google_api.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"42086904393","text":"def insertNodeAtPosition(head, data, position):\n count = 0\n newNode = SinglyLinkedListNode(data)\n if head is None:\n return newNode\n currentNode = head\n while count < position - 1 and currentNode:\n currentNode = currentNode.next\n count = count + 1\n \n\n nextNode = currentNode.next\n currentNode.next = newNode\n newNode.next = nextNode\n \n return head\n","repo_name":"RobertShaw/HackerRankPractice","sub_path":"Practice/Interview Preparation Kit/Linked Lists/Insert a node a specific position.py","file_name":"Insert a node a specific position.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"376816090","text":"import numba\nimport numpy as np\nfrom math import gcd\n\n\n@numba.njit\ndef ks_2samp(data1, data2):\n \"\"\"\n Compute the Kolmogorov-Smirnov statistic on 2 samples.\n This is a two-sided test for the null hypothesis that 2 independent samples\n are drawn from the same continuous distribution. The alternative hypothesis\n is 'two-sided'\n Parameters\n ----------\n data1, data2 : array_like, 1-Dimensional\n Two arrays of sample observations assumed to be drawn from a continuous\n distribution, sample sizes can be different.\n Returns\n -------\n statistic : float\n KS statistic.\n pvalue : float\n Two-tailed p-value.\n See Also\n --------\n kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp\n Notes\n -----\n This tests whether 2 samples are drawn from the same distribution. 
Note\n that, like in the case of the one-sample KS test, the distribution is\n assumed to be continuous.\n In the one-sided test, the alternative is that the empirical\n cumulative distribution function F(x) of the data1 variable is \"less\"\n or \"greater\" than the empirical cumulative distribution function G(x)\n of the data2 variable, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``.\n If the KS statistic is small or the p-value is high, then we cannot\n reject the hypothesis that the distributions of the two samples\n are the same.\n If the mode is 'auto', the computation is exact if the sample sizes are\n less than 10000. For larger sizes, the computation uses the\n Kolmogorov-Smirnov distributions to compute an approximate value.\n The 'two-sided' 'exact' computation computes the complementary probability\n and then subtracts from 1. As such, the minimum probability it can return\n is about 1e-16. While the algorithm itself is exact, numerical\n errors may accumulate for large sample sizes. It is most suited to\n situations in which one of the sample sizes is only a few thousand.\n We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.\n References\n ----------\n .. [1] Hodges, J.L. Jr., \"The Significance Probability of the Smirnov\n Two-Sample Test,\" Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.\n Examples\n --------\n >>> from scipy import stats\n >>> np.random.seed(12345678) #fix random seed to get the same result\n >>> n1 = 200 # size of first sample\n >>> n2 = 300 # size of second sample\n\n For a different distribution, we can reject the null hypothesis since the\n pvalue is below 1%:\n\n >>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)\n >>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)\n >>> stats.ks_2samp(rvs1, rvs2)\n KstestResult(statistic=0.2083333333..., pvalue=5.12927959...)\n\n For a slightly different distribution, we cannot reject the null hypothesis\n at a 10% or lower alpha since the p-value at 0.144 is higher than 10%\n >>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)\n >>> stats.ks_2samp(rvs1, rvs3)\n KstestResult(statistic=0.10333333..., pvalue=0.1469143786...)\n\n For an identical distribution, we cannot reject the null hypothesis since\n the p-value is high, 41%:\n >>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)\n >>> stats.ks_2samp(rvs1, rvs4)\n KstestResult(statistic=0.08, pvalue=0.411543202...)\n \"\"\"\n data1 = np.sort(data1)\n data2 = np.sort(data2)\n n1 = data1.shape[0]\n n2 = data2.shape[0]\n if min(n1, n2) == 0:\n raise ValueError(\"Data passed to ks_2samp must not be empty\")\n\n data_all = np.concatenate((data1, data2))\n # using searchsorted solves equal data problem\n cdf1 = np.searchsorted(data1, data_all, side=\"right\") / n1\n cdf2 = np.searchsorted(data2, data_all, side=\"right\") / n2\n cddiffs = cdf1 - cdf2\n # minS = np.clip(-np.min(cddiffs), 0, 1) # Ensure sign of minS is not negative.\n # np.clip not yet implemented in numba 0.53, next version\n minS = -np.min(cddiffs)\n if minS < 0:\n minS = 0\n elif minS > 1:\n minS = 1\n maxS = np.max(cddiffs)\n d = max(minS, maxS)\n g = gcd(n1, n2)\n prob = -np.inf\n\n # n1g = n1 // g\n # n2g = n2 // g\n # # If lcm(n1, n2) is too big, switch from exact to asymp\n # if n1g >= np.iinfo(np.int_).max / n2g:\n # mode = \"asymp\"\n # raise ValueError(\n # f\"Exact ks_2samp calculation not possible with samples sizes \"\n # f\"{n1} and {n2}.\"\n # )\n\n d, prob = _attempt_exact_2kssamp(n1, n2, g, d)\n\n # prob = np.clip(prob, 0, 1)\n if prob > 1:\n prob = 1\n elif prob < 0:\n 
prob = 0\n # return KstestResult(d, prob)\n return (d, prob)\n\n\n@numba.njit\ndef _compute_prob_outside_square(n, h):\n \"\"\"\n Compute the proportion of paths that pass outside the two diagonal lines.\n Parameters\n ----------\n n : integer\n n > 0\n h : integer\n 0 <= h <= n\n Returns\n -------\n p : float\n The proportion of paths that pass outside the lines x-y = +/-h.\n \"\"\"\n # Compute Pr(D_{n,n} >= h/n)\n # Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2a) + binom(2n, n-3a) - ... ) / binom(2n, n)\n # This formulation exhibits subtractive cancellation.\n # Instead divide each term by binom(2n, n), then factor common terms\n # and use a Horner-like algorithm\n # P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))\n\n P = 0.0\n k = int(np.floor(n / h))\n while k >= 0:\n p1 = 1.0\n # Each of the Ai terms has numerator and denominator with h simple terms.\n for j in range(h):\n p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)\n P = p1 * (1.0 - P)\n k -= 1\n return 2 * P\n\n\n@numba.njit\ndef _attempt_exact_2kssamp(n1, n2, g, d):\n \"\"\"Attempts to compute the exact 2sample probability.\n n1, n2 are the sample sizes\n g is the gcd(n1, n2)\n d is the computed max difference in ECDFs\n Returns (success, d, probability)\n \"\"\"\n lcm = (n1 // g) * n2\n h = int(np.round(d * lcm))\n d = h * 1.0 / lcm\n if h == 0:\n return d, 1.0\n prob = _compute_prob_outside_square(n1, h)\n return d, prob\n","repo_name":"scottstanie/apertools","sub_path":"apertools/ks.py","file_name":"ks.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"38"} +{"seq_id":"8379232473","text":"#Musa Odabaşı, 180401039, https://github.com/Musa3719/Programlama_Lab/blob/master/180401039_Sympy_hw_3.py\r\n#Exponential Distribution delta = 1 / Ortalama olay süresi\r\n#Şirketin bir ürünü satması ortalama 4 dakika sürüyor.\r\n#Şirketin 12 dakika boyunca ürün satmama ihtimali\r\n#Delta = 1/4 =0.25\r\nfrom sympy import Symbol,pprint,exp\r\nfrom sympy.plotting import plot\r\nimport matplotlib.pyplot as plt\r\nx = Symbol('x')\r\ndelta = Symbol('delta')\r\nED = exp((-delta)*x)#1-(1-exp)=exp\r\nplot(ED.subs({delta: 0.25}),(x,0,12),title='12 dakika ürün satılmama olasılığı')\r\nx_values = []\r\ny_values = []\r\nfor value in range(0, 13):\r\n y = ED.subs({delta: 0.25, x: value}).evalf()\r\n y_values.append(y)\r\n x_values.append(value)\r\nplt.plot(x_values,y_values)\r\nplt.show()\r\n","repo_name":"Musa3719/Programming_Lab","sub_path":"180401039_Sympy_hw_3.py","file_name":"180401039_Sympy_hw_3.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"20665188924","text":"from django.urls import path\nfrom .views import (\n ListCreateCertificateAPIView,\n RetrieveUpdateDestroyCertificateAPIView\n)\n\n\nurlpatterns = [\n path('', ListCreateCertificateAPIView.as_view(), name='list-create-certificate'),\n path('/', RetrieveUpdateDestroyCertificateAPIView.as_view(), name='retrieve-update-destroy-certificate'),\n]\n","repo_name":"linhfishCR7/hrm-api","sub_path":"certificate/hrm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39428780735","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pubs', 
'0005_auto_20141229_0011'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='publication',\n name='indexed',\n ),\n migrations.AddField(\n model_name='line',\n name='line',\n field=models.IntegerField(default=1),\n preserve_default=False,\n ),\n ]\n","repo_name":"drtjmb/kingsf","sub_path":"pubs/migrations/0006_auto_20141229_2147.py","file_name":"0006_auto_20141229_2147.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"29563765417","text":"get_ipython().run_line_magic('matplotlib', 'inline')\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation, rc\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom IPython.display import HTML\nimport xarray as xr\n\n# Define the default parameters values\nsigma = 10\nrho = 28\nbeta = 8/3\n\nclass L63():\n def __init__(self, sigma, rho, beta, init, dt):\n self.sigma, self.rho, self.beta = sigma, rho, beta \n self.x, self.y, self.z = init\n self.dt = dt\n self.hist = [init]\n \n def step(self):\n self.x += (self.sigma * (self.y - self.x)) * self.dt\n self.y += (self.x * (self.rho - self.z)) * self.dt\n self.z += (self.x * self.y - self.beta * self.z) * self.dt\n self.hist.append([self.x, self.y, self.z])\n \n def integrate(self, n_steps):\n for n in range(n_steps): self.step()\n\nl1 = L63(sigma, rho, beta, init=[1, 10, 20], dt=1e-2)\nl1.integrate(3000)\n\nl2 = L63(sigma, rho, beta, init=[1.1, 10, 20], dt=1e-2)\nl2.integrate(3000)\n\n# The hist attribute contains the history for the three variables x, y, and z\nnp.array(l1.hist).shape\n\n# Grab every tenth time step to speed up plotting\ny1 = np.array(l1.hist)[::10, 0]\ny2 = np.array(l2.hist)[::10, 0]\n\n# We want to animate something like this\nplt.plot(y1);\n\nfig, ax = plt.subplots(figsize=(10, 5));\nax.set_ylim(np.min(y1), np.max(y1))\nax.set_xlim(0, len(y1))\nax.set_title('Time step = 0');\n\nline1, = ax.plot([], [], lw=2)\nline2, = ax.plot([], [], lw=2)\n\ndef animate(i):\n line1.set_data(range(i), y1[:i])\n line2.set_data(range(i), y2[:i])\n ax.set_title(f'Time step = {i}')\n return (line1, line2,) # Note that the comma is important!\n\nanim = animation.FuncAnimation(fig, animate, frames=300, interval=100, blit=True)\n\nHTML(anim.to_html5_video())\n\nanim.save('tmp.mp4')\n\ndef plot_attractor(hists):\n if np.array(hists).ndim == 2: hists = [hists]\n hists = [np.array(h) for h in hists]\n fig = plt.figure(figsize=(10, 10)); ax = fig.gca(projection='3d')\n [ax.plot(h[:,0], h[:,1], h[:,2]) for h in hists]\n\nplot_attractor([l1.hist, l2.hist])\n\nds = xr.open_dataset('../week4/CAM02.nc', decode_times=False)\n\nds\n\nds.time.size\n\ndef animate_2D():\n fig, ax = plt.subplots(figsize=(10, 7))\n I = ds['SOLIN'].isel(time=0).plot(ax=ax)\n def animate(i):\n I = ds['SOLIN'].isel(time=i).plot(ax=ax, add_colorbar=False)\n return (I,)\n plt.close() # Not necessary but will avoid a stray figure popping up\n return animation.FuncAnimation(fig, animate, frames=ds.time.size, interval=150, blit=True)\n\nanim = animate_2D()\n\nHTML(anim.to_html5_video())\n\nHTML(anim.to_jshtml())\n\n\n\n","repo_name":"mixmikmic/GH_code_analysis","sub_path":"python/mpl-animations.py","file_name":"mpl-animations.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"42983873979","text":"'''\n연속된 세 개의 정수를 더해 12가 되는 경우는 3, 4, 5입니다. 두 정수 num과 total이 주어집니다. 
연속된 수 num개를 더한 값이 total이 될 때, 정수 배열을 오름차순으로 담아 return하도록 solution함수를 완성해보세요.\n1 ≤ num ≤ 100\n0 ≤ total ≤ 1000\nnum개의 연속된 수를 더하여 total이 될 수 없는 테스트 케이스는 없습니다.\n'''\ndef solution(num, total):\n for tmp in range(total*(-1)-num,total+1):\n tmp_list = list(range(tmp,tmp+num))\n if sum(tmp_list) == total:\n break\n return tmp_list\n# 내 풀이가 깔끔하지 않은것 같아서 적어둔다.","repo_name":"noir1458/coding_test","sub_path":"programmers/Python_test/연속된 수의 합.py","file_name":"연속된 수의 합.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"15743607967","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.contrib.gis.db.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('agriApp', '0016_rainfalldata'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='soilsAndRainfall',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('soilID', models.CharField(default=None, max_length=150, null=True)),\n ('soilCode', models.CharField(max_length=25, null=True)),\n ('soilName', models.CharField(max_length=150, null=True)),\n ('soilLabel', models.CharField(max_length=150, null=True)),\n ('mapunit', models.CharField(max_length=25, null=True)),\n ('hectares', models.FloatField()),\n ('acres', models.FloatField()),\n ('secerosion', models.IntegerField()),\n ('terslope', models.IntegerField()),\n ('secslope', models.IntegerField()),\n ('secsoil', models.CharField(default=None, max_length=30)),\n ('capability', models.IntegerField()),\n ('water', models.CharField(default=None, max_length=25)),\n ('erosion', models.IntegerField()),\n ('domslope', models.IntegerField()),\n ('domsoil', models.IntegerField()),\n ('soil2001_i', models.IntegerField()),\n ('soil2001', models.IntegerField()),\n ('gavprimary', models.IntegerField()),\n ('gridcode', models.IntegerField()),\n ('geomData', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"uwidcit/AgriNeTT_AgriMaps","sub_path":"geoProj/agriApp/migrations/0017_soilsandrainfall.py","file_name":"0017_soilsandrainfall.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"17140255966","text":"\"\"\"\n\" views.py\n\" --------\n\" To use Braintree it is mandatory to configure the API credentials, that could\n\" be gotten from Braintree account. In this example, we'll use Sandbox API\n\" credentials from the Commerce Factory Braintree account. More information at:\n\" * github.com/commercefactory\n\" * commercefactory.org\n\"\"\"\n\nimport braintree\nimport json\n\nfrom django.http import HttpResponse\nfrom django.core import serializers\n\n\"\"\"\n\" Space reserved for configuration.\n\" 1. API credentials to use Braintree\n\" 2. 
API credentials to use Spark core\n\"\"\"\n\nbraintree.Configuration.configure(\n braintree.Environment.Sandbox,\n merchant_id=\"ffdqc9fyffn7yn2j\",\n public_key=\"qj65nndbnn6qyjkp\",\n private_key=\"a3de3bb7dddf68ed3c33f4eb6d9579ca\"\n )\n\n\n\"\"\"\n\" Getting the client_token, which contains\n\" all authorization and configuration information your client needs to\n\" initialize a Braintree Client SDK to communicate with Braintree.\n\"\"\"\n\ndef get_client_token(request):\n\n clientToken=braintree.ClientToken.generate({})\n\n responseData = {}\n responseData['clientToken'] = clientToken\n return HttpResponse(json.dumps(responseData), content_type=\"application/json\")\n\ndef get_bill_plans(request):\n billPlans=braintree.Plan.all()\n\n planIDs = []\n for simplePlan in billPlans:\n planIDs.append(simplePlan.id)\n\n responseData = {}\n responseData['planIDs'] = planIDs\n return HttpResponse(json.dumps(responseData), content_type=\"application/json\")\n\ndef create_subscription(request):\n\n \"\"\"\n \" Creating the costumer and adding payment_method_nonce\n \"\"\"\n result = braintree.Customer.create({\n \"first_name\": \" \",\n \"last_name\": \" \",\n })\n\n if result.is_success:\n customerId = result.customer.id\n\n \"\"\"\n \" Creating the subscription\n \"\"\"\n resultPaymentMethod = braintree.PaymentMethod.create({\n \"customer_id\": customerId,\n \"payment_method_nonce\": request.POST.get(\"payment_method_nonce\"),\n })\n\n paymentMethodToken = resultPaymentMethod.payment_method.token\n\n resultSubscription = braintree.Subscription.create({\n \"payment_method_token\": paymentMethodToken,\n \"plan_id\": request.POST.get(\"plan_id\"),\n })\n\n if resultSubscription.is_success:\n responseData = {}\n responseData['subscription_id'] = resultSubscription.subscription.id\n return HttpResponse(json.dumps(responseData), content_type=\"application/json\")\n else:\n return \"Error: {0}\".format(result.message)\n else:\n return \"Error: {0}\".format(result.message)\n","repo_name":"braintreedev/sample-subscription-ios-objc-django","sub_path":"server/serverSubs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"16376043012","text":"\"Find unique paragraphs in wikipedia data set and save each to file\"\n\n\nimport ast\nimport os\nimport sys\nfrom uuid import uuid4\n\nimport pandas as pd\n\n\ndef load_text_data(file_path):\n df = pd.read_csv(file_path)\n return df[\"text\"]\n\n\ndef find_unique_paragraphs(text_col):\n all_paragraphs = set()\n for edit in text_col:\n all_paragraphs.update(ast.literal_eval(edit))\n return all_paragraphs\n\n\ndef main(input_dir, output_dir):\n all_files = os.listdir(input_dir)\n for file in all_files:\n texts = load_text_data(os.path.join(input_dir, file))\n all_paragraphs = find_unique_paragraphs(texts)\n for para in all_paragraphs:\n clean = para.strip()\n if clean:\n with open(\n os.path.join(output_dir, f\"{uuid4()}\"), \"w\", encoding=\"utf-8\"\n ) as ptr:\n ptr.write(clean)\n\n\nif __name__ == \"__main__\":\n input_dir, output_dir = sys.argv[1:]\n main(input_dir, output_dir)\n","repo_name":"knit-bee/bsc-ndd-paraeval","sub_path":"data-generation/preprocessing/wikipedia/split_wiki_articles.py","file_name":"split_wiki_articles.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"17441105409","text":"from tdidf import TfIdf\nimport 
operator\nclass Keywords(object):\n def __init__(self, number_of_keywords=1):\n self.number_of_keywords = number_of_keywords\n\n\n def top_keywords_in_document(self, doc, corpus):\n \"\"\"\n Top n keywords for a document compared with a corpus\n :param doc: The document\n :param corpus: The corpus of documents\n :return: The top n keywords for the document\n \"\"\"\n word_dictionary = {}\n for word in set(doc):\n word_dictionary[word] = TfIdf(word,doc,corpus)\n sorted_d = sorted(word_dictionary.iteritems(), key=operator.itemgetter(1))\n sorted_d.reverse()\n return [w[0] for w in sorted_d[:self.number_of_keywords]]\n\n def top_keywords_in_corpus(self, corpus):\n \"\"\"\n Top keywords in a corpus\n :param corpus: The corpus\n :return:Top keywords in a corpus\n \"\"\"\n keyword_list = set()\n\n [[keyword_list.add(x) for x in self.top_keywords_in_document(doc,corpus)] for doc in corpus]\n return keyword_list\n","repo_name":"domenicosolazzo/Providentia","sub_path":"providentia/core/ml/nlp/keywords.py","file_name":"keywords.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"30259501636","text":"from __future__ import unicode_literals\nimport pygame\nimport sys\nfrom googlesearch import search\nimport threading\nimport myclass\nimport myclasspg as pg\n\n\ndef proba_to_graph(proba):\n if 25 > proba >= 0:\n return \"IMG/graph/0.png\"\n if 50 > proba >= 25:\n return \"IMG/graph/1.png\"\n if 75 > proba >= 50:\n return \"IMG/graph/2.png\"\n if 100 > proba >= 75:\n return \"IMG/graph/3.png\"\n else:\n pass\n \n \n#####################################\n## Fenetre du texte ##\n#####################################\n\ndownload = False # on fait des sauvegardes pour le download\nfinished = False # on fait des sauvegardes pour le finished\ndef texte(nom_musique,lien_dl,parole,text,X,Y):\n global titre_chanson , download , finished\n \n titre_chanson = nom_musique\n \n d = {}\n \n pygame.init()\n \n icon = pygame.image.load(\"IMG/icon.png\")\n \n pygame.display.set_icon(icon)\n\n pygame.display.set_caption(\"Scrapping\")\n \n \n screen = pygame.display.set_mode([900, 500])# it will display on screen\n if download:\n img_dl = pg.img(\"IMG/chargement.png\",840,47.5,60,60)\n elif finished:\n img_dl = pg.img(\"IMG/ok.png\",840,47.5,60,60)\n else:\n img_dl = pg.bouton(\"IMG/dl.png\",840,47.5,60,60)\n \n img_stat = pg.bouton(\"IMG/stat.png\",840,132.5,60,60)\n \n img_cherch = pg.bouton(\"IMG/quit.png\",47.5,47.5,60,60)\n \n while True:\n if download:\n if lien_dl.return_satus() == \"finished\": # si le téléchargement est fini\n img_dl = pg.img(\"IMG/ok.png\",840,47.5,60,60) # on met un image comme quoi c'est fini\n download = False # on télécharge plus\n finished = True\n \n screen.fill((223, 242, 255))\n \n text.blit(screen)\n \n if download:\n img_dl.rotate_iblit(screen,1) # si on télécharge on met un image qui tourne sur elle meme\n else : \n img_dl.iblit(screen)\n \n img_stat.iblit(screen)\n \n img_cherch.iblit(screen)\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n \n if event.type == pygame.KEYDOWN: # si une touche a été tapée, le marque dans le dictionnaire pressed la touche enfoncer\n d[event.key] = True\n if event.type == pygame.KEYUP:\n d[event.key] = False\n if not download: # si on télécharge pas\n if not finished: # si on a pas fini de dl\n if img_dl.click(pygame.mouse.get_pos(),event): # on peut cliquer sur le logo téléchargement\n 
threading.Thread(target=lien_dl.start).start() # start a parallel thread that launches the download\n                        img_dl = pg.img(\"IMG/chargement.png\",840,47.5,60,60) # show the loading image\n                        download = True # mark that we are downloading\n                \n                if img_stat.click(pygame.mouse.get_pos(),event): # if the graph button is clicked\n                    graphique(myclass.champ_lexical(parole).compter()) # call the graph function\n                \n                if img_cherch.click(pygame.mouse.get_pos(),event):\n                    debut()\n            \n            \n        if d.get(pygame.K_UP): # if scrolling up\n            if Y < 10: # clamp so we cannot scroll too high \n                Y += 4\n                X = X\n            text.update((X,Y))\n        \n        if d.get(pygame.K_DOWN): # if scrolling down\n            if Y > -1000: # clamp so we cannot scroll too low\n                Y -= 4\n                X = X\n            text.update((X,Y))\n        \n        pygame.display.update()\n\n\n#####################################\n##           Graph window          ##\n#####################################\n \n \ndef graphique(d):\n    global titre_chanson\n    pygame.init()\n    \n    icon = pygame.image.load(\"IMG/icon.png\")\n    pygame.display.set_icon(icon)\n    \n    clock = pygame.time.Clock()\n    \n    screen = pygame.display.set_mode([900, 500])# it will display on screen\n    \n    p_amour = round(d[\"conteur_amour\"]*100,1) # compute the percentage\n    p_rap = round(d[\"conteur_rap\"]*100,1) # round to limit decimals\n    p_joie = round(d[\"conteur_joie\"]*100,1)\n\n    path_img1 = proba_to_graph(p_amour)\n    path_img2 = proba_to_graph(p_rap)\n    path_img3 = proba_to_graph(p_joie)\n\n    img_quit = pg.bouton(\"IMG/quit.png\",840, 60,50,50)\n\n    while True:\n        \n        screen.fill((223, 242, 255))\n        pg.img(path_img1,110,110,130,130).iblit(screen)\n        pg.img(path_img2,110,250,130,130).iblit(screen)\n        pg.img(path_img3,110,390,130,130).iblit(screen)\n        img_quit.iblit(screen)\n        \n\n        pg.text(str(p_amour)+\" % love\",110+130+10,110-65,\"autre\",color = (0,0,0), size = 70).iblit(screen) # display the texts\n        pg.text(str(p_rap)+\" % dark\",110+130+10,250-65,\"autre\",color = (0,0,0), size = 70).iblit(screen) # display the texts\n        pg.text(str(p_joie)+\" % joy\",110+130+10,390-65,\"autre\",color = (0,0,0), size = 70).iblit(screen) # display the texts\n        \n        \n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n            if img_quit.click(pygame.mouse.get_pos(),event):\n                nom_musique = str(titre_chanson)\n                threading.Thread(target=charger, args=(nom_musique,)).start()\n                chargement()\n        \n        pygame.display.update()\n        clock.tick(60)\n\n\n#####################################\n##           Start window          ##\n#####################################\n\n\ndef debut():\n    pygame.init()\n    \n    icon = pygame.image.load(\"IMG/icon.png\")\n    pygame.display.set_icon(icon)\n    \n    pygame.display.set_caption(\"Scrapping\")\n    \n    clock = pygame.time.Clock()\n    \n    screen = pygame.display.set_mode([900, 500])\n    \n    rect = pg.zone_ecriture(200,200, 500, 32,30, \"lightskyblue3\",color_t = \"white\") # create our text-input box\n    \n    nom_musique = \"\"\n    \n    while True:\n\n        screen.fill((223, 242, 255))\n        \n        rect.iblit(screen)\n        \n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                sys.exit()\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_BACKSPACE:\n                    rect.supr() # delete a character from the text box\n                \n                elif event.key == pygame.K_RETURN:\n                    threading.Thread(target=charger, args=(rect.nom,)).start() # start the function that loads the heavy stuff\n                    chargement() # start the loading screen\n                else:\n                    rect.add(event) # append the typed text to the text box\n                \n\n        \n        pygame.display.flip()\n        clock.tick(60)\n\n\n#####################################\n##             loading             ##\n#####################################\n\n\ndef charger(nom_musique):\n    \"\"\"[function called by a thread to load all the slightly slow stuff]\n\n    Args:\n        nom_musique (str): [ the name of the music being processed ]\n    \"\"\"\n    global cond_fin_chargement,lien_dl,parole,text,X,Y,titre\n    \n    titre = nom_musique\n    X , Y = 450 , 10\n    lien_yt = myclass.chercher(nom_musique,10,\"youtube.com\").lien() # search for the youtube link\n    lien_dl = myclass.dl_musique(lien_yt,\"mp3\") # prepare the download\n    parole = myclass.chercher(nom_musique,10,\"paroles.net\").une_balise(\"div\",\"song-text\") # fetch the song lyrics\n    font = pygame.font.Font('freesansbold.ttf', 14) \n    text = pg.textealign(parole,font,(X,Y),(0,0,0),\"center\", 1) # build the pygame text object \n    cond_fin_chargement = True # loading is done\n    \n    \ndef chargement():\n    global cond_fin_chargement\n    cond_fin_chargement = False\n    \n    pygame.init()\n\n    pygame.display.set_caption(\"Scrapping\")\n\n    clock = pygame.time.Clock()\n\n    screen = pygame.display.set_mode([900, 500])# it will display on screen\n\n    img = pg.img(\"IMG/chargement.png\",530,205,50,50)\n\n    t_chargement = pg.text(\"Loading\",410,200,\"center\",size = 32)\n\n    while True:\n        if cond_fin_chargement: # once loading finishes, launch texte\n            texte(titre,lien_dl,parole,text,X , Y) \n        screen.fill((223, 242, 255))\n        \n        img.rotate_iblit(screen,1)\n        \n        t_chargement.iblit(screen)\n        \n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                sys.exit()\n\n        pygame.display.flip()\n        clock.tick(60)\n\n\n#####################################\n##              main               ##\n#####################################\n\n\nif __name__ == \"__main__\":\n    debut()\n\n\n\n    \n","repo_name":"MMMatth/Scrapping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"12686159192","text":"\"\"\"Terminal output manipulation capabilities\"\"\"\nfrom __future__ import annotations\n\nimport dataclasses\nimport re\nfrom collections import deque\nfrom dataclasses import dataclass\nfrom typing import Callable, Deque, List, Match, Optional, Tuple, TypeVar\n\nfrom typing_extensions import Protocol\n\n\n@dataclass\nclass State:\n    \"\"\"Store stateful information about the term\"\"\"\n\n    cursor_stack: list[Tuple[int, int]] = dataclasses.field(default_factory=list)\n    cursor_loc: int = 0\n\n\nSequenceProcessorSignature = Callable[[Match[bytes], List[str], State], None]\nsequence_processors: dict[re.Pattern[bytes], SequenceProcessorSignature] = {}\n\"\"\"Stores all the escape sequence processor in the form of pattern -> function\"\"\"\n\nF = TypeVar(\"F\", bound=SequenceProcessorSignature)\n\n\ndef accept(*pattern: bytes) -> Callable[[F], F]:\n    \"\"\"Register a function as an escape sequence processor\"\"\"\n\n    def inner(func: F) -> F:\n        for pat in pattern:\n            sequence_processors[re.compile(pat)] = func\n        return func\n\n    return inner\n\n\n@dataclass\nclass Sequence:\n    \"\"\"Escape sequences used by xterm.\n\n    Regex capture group names and mnemonics are extracted from the document\n    below.\n\n    See:\n    - https://www.x.org/docs/xterm/ctlseqs.pdf\n    \"\"\"\n\n    ESC = b\"\\x1B\"\n    CSI = b\"\\x1B\\\\[\"\n\n    Ps = b\"(?P<Ps>[0-9]*)\" # As defined by xterm ctrseqs.pdf\n    Pm = b\"([0-9]*)(;[0-9]*)*\"\n\n    CR = b\"\\r\"\n    ICH = CSI + Ps + b\"@\"\n    CUU = CSI + Ps + b\"A\"\n    CUD = CSI + Ps + b\"B\"\n    CUF = CSI 
+ Ps + b\"C\"\n CUB = CSI + Ps + b\"D\"\n \"CUrsor Back -- move cursor back *Ps* times.\"\n CUP = CSI + Pm + b\"H\"\n \"CUrsor Position -- position the cursor in the given coord.\"\n EL = CSI + Ps + b\"K\"\n \"Erase in Line -- Erase from left, right or whole line\"\n DECSC = ESC + b\"7\"\n \"Save Cursor\"\n DECRC = ESC + b\"8\"\n \"Restore Cursor\"\n\n\n@accept(Sequence.CUB)\ndef cub_repl(ctrlseq: Match[bytes], lines: list[str], state: State) -> None:\n \"\"\"Replace CUB sequence by creating a new line with the same content, and put\n the cursor in the value set by CUB\n \"\"\"\n n = int(str(ctrlseq[\"Ps\"], \"utf8\") or \"1\")\n lines.append(lines[-1])\n state.cursor_loc -= n\n\n\n@accept(Sequence.DECSC, Sequence.DECRC)\ndef dec(ctrlseq: Match[bytes], lines: list[str], state: State):\n \"\"\"Transform save and restore cursor\"\"\"\n\n x: None | int\n y: None | int\n\n if ctrlseq.re.pattern == Sequence.DECSC:\n try:\n x = state.cursor_loc\n y = len(lines) - 1\n state.cursor_stack.append((x, y))\n except IndexError: # In case there are not lines\n pass\n return\n\n if ctrlseq.re.pattern == Sequence.DECRC:\n x, y = state.cursor_stack.pop()\n lines.extend(lines[y:])\n state.cursor_loc = x\n return\n raise ValueError(\"Cannot process sequence\")\n\n\n@accept(Sequence.EL)\ndef el(ctrlseq: Match[bytes], lines: list[str], state: State):\n \"\"\"\n *Erase in line* removes characters from the current working line; it\n is typically used for animations, progress bars, etc. Consdiering that,\n the best approach for loggifing the sequence is duplicate the current line,\n and remove characters.\n \"\"\"\n n = int(str(ctrlseq[\"Ps\"], \"utf8\") or \"0\")\n\n if n == 0:\n lines[-1] = lines[-1][: state.cursor_loc]\n elif n == 1:\n lines[-1] = \" \" * state.cursor_loc + lines[-1][state.cursor_loc :]\n elif n == 2:\n lines[-1] = \" \" * len(lines[-1])\n else:\n raise ValueError(\"Ps can only be 0, 1, 2\")\n\n\n@accept(Sequence.CUF, Sequence.ICH)\ndef move_cursor(ctrlseq: Match[bytes], lines: list[str], state: State) -> None:\n \"\"\"Move the cursor forward N places\"\"\"\n n = int(str(ctrlseq[\"Ps\"], \"utf8\") or \"1\")\n state.cursor_loc += n\n if len(lines[-1]) < state.cursor_loc:\n # If the cursor is beyond the end, extend with spaces\n extra = \" \" * (state.cursor_loc - len(lines[-1]))\n lines[-1] += extra\n\n\nclass StreamProcessor(Protocol):\n def __init__(self, loggifier: Loggifier) -> None:\n ...\n\n @classmethod\n def accepts(cls, data: Deque[int]) -> bool:\n \"\"\"Returns ``True`` if the class can process the given bytes\"\"\"\n ...\n\n def process(self, data: Deque[int]) -> bool:\n \"\"\"Process all the bytes in *data*.\n\n Returns:\n ``True`` if the processor can keep processing data; ``False``\n if the processor has completed their corresponding sequence.\n \"\"\"\n ...\n\n\ndef make_stream_processor(\n loggifier: Loggifier, data: Optional[Deque[int]] = None\n) -> StreamProcessor:\n return make_stream_processor_impl(loggifier, data)\n\n\nclass Loggifier:\n def __init__(self) -> None:\n self.lines: list[str] = [\"\"]\n self.state: State = State()\n self.processor: StreamProcessor = make_stream_processor(self)\n self.raw: list[int] = []\n\n def add(self, data: bytes) -> list[str]:\n \"\"\"Add new bytes sent by the guest. 
Return the new lines.\n\n Args:\n data: The new bytes sent by the guest machine.\n\n Returns:\n The new lines found in the bytes sent by the guest.\n \"\"\"\n\n before = len(self.lines) - 1\n self.raw.extend(data)\n dequed_data = deque(data)\n while len(dequed_data) != 0:\n keep_going = self.processor.process(dequed_data)\n if not keep_going:\n if len(dequed_data) == 0:\n self.processor = make_stream_processor(self)\n else:\n self.processor = make_stream_processor(self, dequed_data)\n # Here -1 means do not print the current end line\n return self.lines[before:-1]\n\n\nclass StandardProcessor(StreamProcessor):\n \"\"\"Process standard *Unicode* characters (i.e. no escape sequences)\"\"\"\n\n LINETERMINATION = {b\"\\n\"[0], b\"\\r\"[0]}\n REMOVE_CHARS = set(range(31)) - LINETERMINATION - {Sequence.ESC[0], b\"\\t\"[0]}\n\n def __init__(self, loggifier: Loggifier) -> None:\n self.log = loggifier\n self.unicode_buf: list[int] = []\n self.prev_was_cr = False\n \"Previous character was CR. Prevents double newline with CRLF\"\n\n @classmethod\n def accepts(cls, data: Deque[int]) -> bool:\n return data[0] != Sequence.ESC[0]\n\n def process(self, data: Deque[int]) -> bool:\n while len(data) > 0:\n curr_byte = data[0]\n\n if curr_byte in self.LINETERMINATION:\n data.popleft()\n if not (curr_byte == ord(\"\\n\") and self.prev_was_cr):\n self.log.lines.append(\"\")\n self.log.state.cursor_loc = 0\n self.prev_was_cr = curr_byte == ord(\"\\r\")\n continue\n\n if curr_byte in self.REMOVE_CHARS:\n data.popleft()\n continue\n\n if curr_byte == Sequence.ESC[0]:\n return False\n\n char: None | bytes = None\n if curr_byte & 0b1000_0000 != 0 or len(self.unicode_buf) != 0:\n # Multi-byte unicode character, ignore the ugliness\n # of the ifs\n self.unicode_buf.append(data.popleft())\n\n byte1 = self.unicode_buf[0]\n\n if byte1 & 0b1110_0000 == 0b1100_0000:\n if len(self.unicode_buf) == 2:\n char = bytes(self.unicode_buf)\n self.unicode_buf.clear()\n\n elif byte1 & 0b1111_0000 == 0b1110_0000:\n if len(self.unicode_buf) == 3:\n char = bytes(self.unicode_buf)\n self.unicode_buf.clear()\n elif byte1 & 0b1111_1000 == 0b1111_0000:\n if len(self.unicode_buf) == 4:\n char = bytes(self.unicode_buf)\n self.unicode_buf.clear()\n else:\n char = bytes([data.popleft()])\n\n if char is not None:\n cursor = self.log.state.cursor_loc\n self.log.lines[-1] = (\n self.log.lines[-1][:cursor]\n + str(char, encoding=\"utf8\") # HACK: Get encoding\n + self.log.lines[-1][cursor + 1 :]\n )\n self.log.state.cursor_loc += 1\n\n return True\n\n\nclass CtrlSeqProc(StreamProcessor):\n \"\"\"Control sequence processor\"\"\"\n\n def __init__(self, loggifier: Loggifier) -> None:\n self.log = loggifier\n self.seq_buffer: list[int] = []\n\n @classmethod\n def accepts(cls, data: Deque[int]) -> bool:\n return data[0] == Sequence.ESC[0]\n\n def is_complete(self) -> bool:\n \"\"\"Evaluate a byte sequence\n\n Returns:\n True if the sequence is complete, False if needs more bytes.\n \"\"\"\n seq = self.seq_buffer\n if len(seq) <= 1:\n return False\n\n if len(seq) == 2 and seq[0] == 0x1B:\n if 0x30 <= seq[1] <= 0x3F:\n # Private sequence\n return True\n\n return False\n\n if len(seq) > 2 and seq[0:2] == list(bytes(\"\\x1B[\", encoding=\"utf8\")):\n return 0x40 <= seq[-1] <= 0x7E\n return False\n\n def process(self, data: Deque[int]) -> bool:\n while len(data) > 0:\n byte = data.popleft()\n\n self.seq_buffer.append(byte)\n\n if self.is_complete():\n sequence = bytes(self.seq_buffer)\n for pattern, action in sequence_processors.items():\n res = 
pattern.match(sequence)\n\n                    if res is not None:\n                        action(res, self.log.lines, self.log.state)\n                        return False\n                # Fallback: do nothing\n                return False\n        return True\n\n\ndef make_stream_processor_impl(\n    loggifier: Loggifier, data: Optional[Deque[int]] = None\n) -> StreamProcessor:\n    \"\"\"Return a stream processor for the given byte sequence\"\"\"\n    if data is None:\n        return StandardProcessor(loggifier)\n    order = [StandardProcessor, CtrlSeqProc]\n    for proc in order:\n        if proc.accepts(data):\n            return proc(loggifier)\n    raise ValueError(\"No stream processor for the given data\")\n","repo_name":"martinparadiso/spin","sub_path":"spin/machine/term.py","file_name":"term.py","file_ext":"py","file_size_in_byte":10191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"12570602232","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@Author: yangwenhao\n@Contact: 874681044@qq.com\n@Software: PyCharm\n@File: 3.4_LossFunctionInLinearRegression.py\n@Time: 2018/12/13 8:49 PM\n@Overview:We use the same iris dataset as in the prior recipe, but we will change our loss functions and learning rates to see how convergence changes.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom tensorflow.python.framework import ops\nimport tensorflow as tf\n\nops.reset_default_graph()\nsess = tf.Session()\n\n#Import Iris dataset\niris = datasets.load_iris()\nx_vals = np.array([x[3] for x in iris.data])\ny_vals = np.array([y[0] for y in iris.data])\n\n#Declare the learning rate, batch size, placeholders, and variables\nlearning_rate = 0.1\nbatch_size = 25\niterations = 50\nx_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)\ny_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)\nA = tf.Variable(tf.random_normal(shape=[1, 1]))\nb = tf.Variable(tf.random_normal(shape=[1, 1]))\n\n#Write the formula for the linear model, y=Ax+b\nmodel_output = tf.add(tf.matmul(x_data, A), b)\n\n#The loss function will change to the L1 loss.\nloss_l1 = tf.reduce_mean(tf.abs(y_target - model_output))\nloss_l2 = tf.reduce_mean(tf.square(y_target - model_output))\n\n#Initialize the variables, declare the optimizer, and loop through training part\ninit = tf.global_variables_initializer()\nsess.run(init)\nmy_opt_l1 = tf.train.GradientDescentOptimizer(learning_rate)\nmy_opt_l2 = tf.train.GradientDescentOptimizer(learning_rate)\ntrain_step_l1 = my_opt_l1.minimize(loss_l1)\ntrain_step_l2 = my_opt_l2.minimize(loss_l2)\n\nprint('Train with Loss L1')\nloss_vec_l1 = []\nfor i in range(iterations):\n    rand_index = np.random.choice(len(x_vals), size=batch_size)\n    rand_x = np.transpose([x_vals[rand_index]])\n    rand_y = np.transpose([y_vals[rand_index]])\n    sess.run(train_step_l1, feed_dict={x_data: rand_x, y_target: rand_y})\n    temp_loss = sess.run(loss_l1, feed_dict={x_data: rand_x, y_target: rand_y})\n    loss_vec_l1.append(temp_loss)\n\n    if (i+1)%25==0:\n        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))\n        print('Loss = ' + str(temp_loss))\n\nprint('Train with loss L2')\nloss_vec_l2 = []\nfor i in range(iterations):\n    rand_index = np.random.choice(len(x_vals), size=batch_size)\n    rand_x = np.transpose([x_vals[rand_index]])\n    rand_y = np.transpose([y_vals[rand_index]])\n    sess.run(train_step_l2, feed_dict={x_data: rand_x, y_target: rand_y})\n    temp_loss = sess.run(loss_l2, feed_dict={x_data: rand_x, y_target: rand_y})\n    loss_vec_l2.append(temp_loss)\n\n    if (i+1)%25==0:\n        print('Step #' + str(i+1) + ' A = ' + 
str(sess.run(A)) + ' b = ' + str(sess.run(b)))\n print('Loss = ' + str(temp_loss))\n\nplt.plot(loss_vec_l1, 'k-', label='L1 Loss')\nplt.plot(loss_vec_l2, 'r--', label='L2 Loss')\nplt.title('L1 and L2 Loss per Generation')\nplt.xlabel('Generation')\nplt.ylabel('Loss Value')\nplt.legend(loc='upper right')\nplt.show()","repo_name":"Wenhao-Yang/PythonLearning","sub_path":"Chapter 3 Linear Regression/3.4_LossFunctionInLinearRegression.py","file_name":"3.4_LossFunctionInLinearRegression.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"} +{"seq_id":"42876653269","text":"import csv\n\nfrom django.core.management import BaseCommand\nfrom recipes.models import Ingredients\n\n\nclass Command(BaseCommand):\n help = 'Load Ingredients from csv'\n\n def add_arguments(self, parser):\n parser.add_argument('--path', type=str)\n\n def handle(self, *args, **options):\n path = options['path']\n with open(path, 'r', encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=',')\n for row in reader:\n Ingredients.objects.create(\n name=row[0],\n measurement_unit=row[1])\n","repo_name":"IlyaBurkhanov/foodgram-project-react","sub_path":"backend/recipes/management/commands/load_ingredients.py","file_name":"load_ingredients.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"41164769708","text":"import ast\nimport collections\nimport copy\nfrom typing import Any, Dict, Text\nimport tensorflow as tf\nimport yaml\n\n\ndef eval_str_fn(val):\n if '|' in val:\n return [eval_str_fn(v) for v in val.split('|')]\n if val in {'true', 'false'}:\n return val == 'true'\n try:\n return ast.literal_eval(val)\n except (ValueError, SyntaxError):\n return val\n\n\n# pylint: disable=protected-access\nclass Config(dict):\n \"\"\"A config utility class.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n input_config_dict = dict(*args, **kwargs)\n self.update(input_config_dict)\n\n def __len__(self):\n return len(self.__dict__)\n\n def __setattr__(self, k, v):\n if isinstance(v, dict) and not isinstance(v, Config):\n self.__dict__[k] = Config(v)\n else:\n self.__dict__[k] = copy.deepcopy(v)\n\n def __getattr__(self, k):\n return self.__dict__[k]\n\n def __setitem__(self, k, v):\n self.__setattr__(k, v)\n\n def __getitem__(self, k):\n return self.__dict__[k]\n\n def __iter__(self):\n for key in self.__dict__:\n yield key\n\n def items(self):\n for key, value in self.__dict__.items():\n yield key, value\n\n def __repr__(self):\n return repr(self.as_dict())\n\n def __getstate__(self):\n return self.__dict__\n\n def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result\n\n def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n for k, v in self.__dict__.items():\n result[k] = v\n return result\n\n def __str__(self):\n try:\n return yaml.dump(self.as_dict(), indent=4)\n except TypeError:\n return str(self.as_dict())\n\n def _update(self, config_dict, allow_new_keys=True):\n \"\"\"Recursively update internal members.\"\"\"\n if not config_dict:\n return\n\n for k, v in config_dict.items():\n if k not in self.__dict__:\n if allow_new_keys:\n self.__setattr__(k, v)\n else:\n raise KeyError('Key `{}` does not exist for overriding. 
'.format(k))\n      else:\n        if isinstance(self.__dict__[k], Config) and isinstance(v, dict):\n          self.__dict__[k]._update(v, allow_new_keys)\n        elif isinstance(self.__dict__[k], Config) and isinstance(v, Config):\n          self.__dict__[k]._update(v.as_dict(), allow_new_keys)\n        else:\n          self.__setattr__(k, v)\n\n  def get(self, k, default_value=None):\n    return self.__dict__.get(k, default_value)\n\n  def update(self, config_dict):\n    \"\"\"Update members while allowing new keys.\"\"\"\n    self._update(config_dict, allow_new_keys=True)\n\n  def keys(self):\n    return self.__dict__.keys()\n\n  def override(self, config_dict_or_str, allow_new_keys=False):\n    \"\"\"Update members while disallowing new keys.\"\"\"\n    if not config_dict_or_str:\n      return\n    if isinstance(config_dict_or_str, str):\n      if '=' in config_dict_or_str:\n        config_dict = self.parse_from_str(config_dict_or_str)\n      elif config_dict_or_str.endswith('.yaml'):\n        config_dict = self.parse_from_yaml(config_dict_or_str)\n      else:\n        raise ValueError(\n            'Invalid string {}, must end with .yaml or contain \"=\".'.format(\n                config_dict_or_str))\n    elif isinstance(config_dict_or_str, dict):\n      config_dict = config_dict_or_str\n    else:\n      raise ValueError('Unknown value type: {}'.format(config_dict_or_str))\n\n    self._update(config_dict, allow_new_keys)\n\n  def parse_from_yaml(self, yaml_file_path: Text) -> Dict[Any, Any]:\n    \"\"\"Parses a yaml file and returns a dictionary.\"\"\"\n    with tf.io.gfile.GFile(yaml_file_path, 'r') as f:\n      config_dict = yaml.load(f, Loader=yaml.FullLoader)\n    return config_dict\n\n  def save_to_yaml(self, yaml_file_path):\n    \"\"\"Write a dictionary into a yaml file.\"\"\"\n    with tf.io.gfile.GFile(yaml_file_path, 'w') as f:\n      yaml.dump(self.as_dict(), f, default_flow_style=False)\n\n  def parse_from_str(self, config_str: Text) -> Dict[Any, Any]:\n    \"\"\"Parse a string like 'x.y=1,x.z=2' to nested dict {x: {y: 1, z: 2}}.\"\"\"\n    if not config_str:\n      return {}\n    config_dict = {}\n    try:\n      for kv_pair in config_str.split(','):\n        if not kv_pair: # skip empty string\n          continue\n        key_str, value_str = kv_pair.split('=')\n        key_str = key_str.strip()\n\n        def add_kv_recursive(k, v):\n          \"\"\"Recursively parse x.y.z=tt to {x: {y: {z: tt}}}.\"\"\"\n          if '.' 
not in k:\n return {k: eval_str_fn(v)}\n pos = k.index('.')\n return {k[:pos]: add_kv_recursive(k[pos + 1:], v)}\n\n def merge_dict_recursive(target, src):\n \"\"\"Recursively merge two nested dictionary.\"\"\"\n for k in src.keys():\n if ((k in target and isinstance(target[k], dict) and\n isinstance(src[k], collections.abc.Mapping))):\n merge_dict_recursive(target[k], src[k])\n else:\n target[k] = src[k]\n\n merge_dict_recursive(config_dict, add_kv_recursive(key_str, value_str))\n return config_dict\n except ValueError:\n raise ValueError('Invalid config_str: {}'.format(config_str))\n\n def as_dict(self):\n \"\"\"Returns a dict representation.\"\"\"\n config_dict = {}\n for k, v in self.__dict__.items():\n if isinstance(v, Config):\n config_dict[k] = v.as_dict()\n elif isinstance(v, (list, tuple)):\n config_dict[k] = [\n i.as_dict() if isinstance(i, Config) else copy.deepcopy(i)\n for i in v\n ]\n else:\n config_dict[k] = copy.deepcopy(v)\n return config_dict\n # pylint: enable=protected-access\n\n\nregistry_map = {}\n\n\ndef register(cls, prefix='effnet:'):\n \"\"\"Register a function, mainly for config here.\"\"\"\n registry_map[prefix + cls.__name__.lower()] = cls\n return cls\n\n\ndef lookup(name, prefix='effnet:') -> Any:\n name = prefix + name.lower()\n if name not in registry_map:\n raise ValueError(f'{name} not registered: {registry_map.keys()}')\n return registry_map[name]\n\n\n# needed?\n# --params_override \n# --arch or model_name\n\n\nbase_config = Config(\n # model related params.\n model=Config(), # must be provided in full via model cfg files\n \n # train related params.\n train=Config(\n \n img_size=224,\n max_epochs=300, \n steps_per_epoch=None, \n batch_size=32, # renamed from train_batch_size\n use_dali=0, \n \n # optimizer\n optimizer='rmsprop', \n momentum=0.9, # rmsprop, momentum opt\n beta_1=0.0, # for adam.adamw\n beta_2=0.0, # for adam,adamw\n nesterov=0, # for sgd, momentum opt\n epsilon=.001, # for adamw, adam, rmsprop\n decay=0.9, # for rmsprop\n # While the original implementation used a weight decay of 1e-5,\n # tf.nn.l2_loss divides it by 2, so we halve this to compensate in Keras\n weight_decay=5e-6, # for adamw or can be used in learnable layers as L2 reg.\n label_smoothing=0.1, \n # The optimizer iteratively updates two sets of weights: the search directions for weights\n # are chosen by the inner optimizer, while the \"slow weights\" are updated each k steps \n # based on the directions of the \"fast weights\" and the two sets of weights are \n # synchronized. This method improves the learning stability and lowers the variance of\n # its inner optimizer.\n lookahead=0, # binary\n # Empirically it has been found that using the moving average of the trained parameters\n # of a deep network is better than using its trained parameters directly. This optimizer\n # allows you to compute this moving average and swap the variables at save time so that\n # any code outside of the training loop will use by default the average values instead\n # of the original ones.\n moving_average_decay=0.0,\n # model evaluation during training can be done using the original weights\n # or using EMA weights. 
The latter takes place if moving_average_decay > 0 and intratrain_eval_using_ema is True)\n        intratrain_eval_using_ema=True,\n        # to simulate a large batch size\n        grad_accum_steps=1,\n        # grad clipping is used in the custom train_step, which is called when grad_accum_steps > 1\n        grad_clip_norm=0,\n        # to optimize grad reducing across all workers\n        hvd_fp16_compression = True,\n        create_SavedModel=False,\n\n        #lr schedule\n        lr_decay='exponential',\n        lr_init=0.008, \n        lr_decay_epochs=2.4, \n        lr_decay_rate=0.97, \n        lr_warmup_epochs=5, \n        \n        # metrics\n        metrics = ['accuracy', 'top_5'], # used in tr and eval\n        \n        # load and save ckpt\n        resume_checkpoint=1, # binary\n        save_checkpoint_freq=5, \n        \n        # progressive training (active when n_stages>1)\n        n_stages=1, # progressive tr\n        base_img_size=128, \n        base_mixup=0,\n        base_cutmix=0,\n        base_randaug_mag=5,\n        \n        #callbacks\n        enable_checkpoint_and_export=1, # binary\n        enable_tensorboard=0, # binary\n        tb_write_model_weights=0, # tb: tensorboard, binary\n    ),\n    eval=Config(\n        skip_eval=0, # binary\n        num_epochs_between_eval=1, \n        use_dali=0, # binary, renamed from use_dali_eval\n        batch_size=100, # for accurate eval, it should divide the number of validation samples \n        img_size=224,\n        export=0\n    ),\n    predict=Config(\n        ckpt=None, # renamed from inference_checkpoint\n        img_dir='/infer_data/', # renamed from to_predict\n        batch_size=32, # renamed from predict_batch_size\n        img_size=224,\n        benchmark=0, \n    ),\n    # data related params.\n    data=Config(\n        dataset='ImageNet', \n        augmenter_name='autoaugment', \n        \n        #Rand-augment params\n        raug_num_layers=None, \n        raug_magnitude=None, \n        cutout_const=None, \n        mixup_alpha=0., \n        cutmix_alpha=0., \n        defer_img_mixing=True,\n        translate_const=None, \n        \n        #Auto-augment params\n        autoaugmentation_name=None, \n        \n        # used in dali\n        index_file='', \n        \n        #dataset and split\n        data_dir='/data/', \n        num_classes=1000, # must match the one in model config\n        train_num_examples=1281167, \n        eval_num_examples=50000, \n        \n        # image normalization\n        mean_subtract_in_dpipe=False, \n        standardize_in_dpipe=False,\n        \n        # Set to False for 1-GPU training\n        map_parallelization=True\n    ),\n    runtime=Config(\n\n        use_amp=1, # binary\n        log_steps=100, \n        mode='tran_and_eval', #OK\n        time_history=1, # binary\n        use_xla=1, # binary\n        intraop_threads='', \n        interop_threads='', \n        model_dir='/results/', # ckpts\n        log_filename='log.json',\n        display_every=10, \n        seed=None,\n        data_format='channels_first',\n        run_eagerly=0, # binary\n        memory_limit=None, ##set max memory that can be allocated by TF to avoid hanging\n    ))","repo_name":"NVIDIA/DeepLearningExamples","sub_path":"TensorFlow2/Classification/ConvNets/config/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":11087,"program_lang":"python","lang":"en","doc_type":"code","stars":11741,"dataset":"github-code","pt":"38"} +{"seq_id":"31965176835","text":"import uuid\n\nfrom cloudinary.models import CloudinaryField\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\n\nMB = 1\nMAX_SIZE = MB * 1024 * 1024\n\n\ndef validate_file_size(file):\n    if file.size > MAX_SIZE:\n        raise ValidationError(\"File exceeds maximum size 1MB\")\n\n\nclass Image(models.Model):\n    id = models.UUIDField(default=uuid.uuid4, primary_key=True, editable=False)\n    photo = CloudinaryField(validators=[validate_file_size])\n\n    user = models.ForeignKey(\n        \"users.User\",\n        on_delete=models.CASCADE,\n        related_name=\"images\",\n        default=\"\",\n        null=True,\n    )\n    event = models.ForeignKey(\n        \"events.Event\",\n        
on_delete=models.CASCADE,\n        related_name=\"images\",\n        default=\"\",\n        null=True,\n    )\n\n    def photo_link(self):\n        return f\"https://res.cloudinary.com/anchorteam/image/upload/{self.photo}.jpg\"\n","repo_name":"matheusprado1/anchor-tcm-m5","sub_path":"images/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"27374568094","text":"import csv\nimport flask\nimport io\nimport itertools\n\nfrom albumlist import constants\nfrom albumlist.delayed import queued\nfrom albumlist.models import DatabaseError\nfrom albumlist.models import albums as albums_model, list as list_model\nfrom albumlist.scrapers import bandcamp, links\n\n\napi_blueprint = flask.Blueprint(name='api',\n                                import_name=__name__,\n                                url_prefix='/api')\n\n\n@api_blueprint.after_request\ndef after_request(response):\n    if hasattr(response, 'headers'):\n        response.headers['Access-Control-Allow-Origin'] = '*'\n    return response\n\n\n@api_blueprint.route('/list', methods=['GET'])\ndef api_list_albums():\n    try:\n        return flask.jsonify(list_model.get_list()), 200\n    except DatabaseError as e:\n        print('[db]: failed to get list')\n        print(f'[db]: {e}')\n        return flask.jsonify({'text': 'failed'}), 500\n\n\n@api_blueprint.route('/list/count', methods=['GET'])\ndef api_id_count():\n    try:\n        return flask.jsonify({'count': list_model.get_list_count()}), 200\n    except DatabaseError as e:\n        print('[db]: failed to get list count')\n        print(f'[db]: {e}')\n        return flask.jsonify({'text': 'failed'}), 500\n\n\n@api_blueprint.route('/albums', methods=['GET'])\ndef api_list_album_details():\n    channel = flask.request.args.get('channel')\n    if channel:\n        albums = albums_model.get_albums_by_channel_with_tags(channel)\n        key = f'api-albums-{channel}'\n    else:\n        albums = albums_model.get_albums_with_tags()\n        key = 'api-albums'\n    try:\n        details = flask.current_app.cache.get(key)\n        if not details:\n            details = albums_model.Album.details_map_from_albums(albums)\n            details = [{key: d} for key, d in details.items()]\n            flask.current_app.cache.set(key, details, 60 * 5)\n        return flask.jsonify(details), 200\n    except DatabaseError as e:\n        print('[db]: failed to get albums')\n        print(f'[db]: {e}')\n        return flask.jsonify({'text': 'failed'}), 500\n\n\n@api_blueprint.route('/albums/count', methods=['GET'])\ndef api_count_albums():\n    try:\n        return flask.jsonify({'count': albums_model.get_albums_count()}), 200\n    except DatabaseError as e:\n        print('[db]: failed to get albums count')\n        print(f'[db]: {e}')\n        return flask.jsonify({'text': 'failed'}), 500\n\n\n@api_blueprint.route('/albums/dump', methods=['GET'])\ndef api_dump_album_details():\n    # need StringIO for csv.writer\n    proxy = io.StringIO()\n    albums = albums_model.get_albums_with_users()\n    first_album = next(albums)\n    csv_writer = csv.DictWriter(proxy, fieldnames=first_album.fieldnames)\n    csv_writer.writeheader()\n    for album in itertools.chain([first_album], albums):\n        csv_writer.writerow(album.to_dict())\n    # and BytesIO for flask.send_file\n    mem = io.BytesIO()\n    mem.write(proxy.getvalue().encode('utf-8'))\n    mem.seek(0)\n    proxy.close()\n    # see: https://stackoverflow.com/a/45111660\n    return flask.send_file(mem,\n                           as_attachment=True,\n                           attachment_filename=\"albums.csv\",\n                           mimetype='text/csv',\n                           cache_timeout=0 if flask.request.args.get('fresh') else None)\n\n\n@api_blueprint.route('/album/<album_id>', methods=['GET'])\ndef api_album(album_id):\n    try:\n        if flask.request.args.get('reviews'):\n            album = 
albums_model.get_album_details_with_reviews(album_id)\n        else:\n            album = flask.current_app.get_cached_album_details(album_id)\n        if album is None:\n            return flask.jsonify({'text': 'not found'}), 404\n        response = {\n            'text': 'success',\n            'album': album.to_dict(),\n        }\n        return flask.jsonify(response), 200\n    except DatabaseError as e:\n        print(f'[db]: failed to get album: {album_id}')\n        print(f'[db]: {e}')\n        return flask.jsonify({'text': 'failed'}), 500\n\n\n@api_blueprint.route('/album/<album_id>/reviews', methods=['GET'])\ndef api_album_reviews(album_id):\n    try:\n        album = albums_model.get_album_details_with_reviews(album_id)\n        if album is None:\n            return flask.jsonify({'text': 'not found'}), 404\n        response = {\n            'text': 'success',\n            'reviews': album.reviews,\n        }\n        return flask.jsonify(response), 200\n    except DatabaseError as e:\n        print(f'[db]: failed to get album: {album_id}')\n        print(f'[db]: {e}')\n        return flask.jsonify({'text': 'failed'}), 500\n\n\n@api_blueprint.route('/tags/<tag>', methods=['GET'])\ndef api_album_by_tag(tag):\n    key = f'api-tags-{tag}'\n    try:\n        details = flask.current_app.cache.get(key)\n        if not details:\n            albums = albums_model.get_albums_by_tag(tag)\n            details = albums_model.Album.details_map_from_albums(albums)\n            details = [{key: d} for key, d in details.items()]\n            flask.current_app.cache.set(key, details, 60 * 30)\n        return flask.jsonify(details), 200\n    except DatabaseError as e:\n        print(f'[db]: failed to get tag: {tag}')\n        print(f'[db]: {e}')\n        return flask.jsonify({'text': 'failed'}), 500\n\n\n@api_blueprint.route('/bc/<album_id>', methods=['GET'])\ndef api_bc(album_id):\n    return flask.redirect(constants.BANDCAMP_URL_TEMPLATE.format(album_id=album_id), code=302)\n\n\n@api_blueprint.route('/albums/random', methods=['GET'])\ndef api_random():\n    try:\n        album = albums_model.get_random_album()\n        if album is None:\n            return flask.jsonify({'text': 'not found'}), 404\n        response = {\n            'text': 'success',\n            'album': album.to_dict(),\n        }\n        return flask.jsonify(response), 200\n    except DatabaseError as e:\n        print(f'[db]: failed to get random album')\n        print(f'[db]: {e}')\n        return flask.jsonify({'text': 'failed'}), 500\n\n\n@api_blueprint.route('/albums/available/urls', methods=['GET'])\ndef available_urls():\n    try:\n        key = 'api-albums-available-urls'\n        urls = flask.current_app.cache.get(key)\n        if not urls:\n            urls = [album.album_url for album in albums_model.get_albums_available()]\n            flask.current_app.cache.set(key, urls, 60 * 30)\n        return flask.jsonify(urls), 200\n    except DatabaseError as e:\n        print('[db]: failed to get album urls')\n        print(f'[db]: {e}')\n        return flask.jsonify({'text': 'failed'}), 500\n\n\n@api_blueprint.route('/albums/unavailable/count', methods=['GET'])\ndef unavailable_count():\n    try:\n        return flask.jsonify({'count': albums_model.get_albums_unavailable_count()}), 200\n    except DatabaseError as e:\n        print('[db]: failed to get unavailable albums count')\n        print(f'[db]: {e}')\n        return flask.jsonify({'text': 'failed'}), 500\n\n\n@api_blueprint.route('/albums/scrape', methods=['POST'])\ndef scrape_album():\n    form_data = flask.request.form\n    for url in links.scrape_links_from_text(form_data.get('url', '')):\n        flask.current_app.logger.info(f'[api]: scraping {url}...')\n        queued.deferred_consume.delay(\n            url,\n            bandcamp.scrape_bandcamp_album_ids_from_url_forced,\n            list_model.add_to_list,\n        )\n    return 'OK', 200\n\n\n@api_blueprint.route('', methods=['GET'])\ndef all_endpoints():\n    rules = [ \n        (list(rule.methods), rule.rule) \n        for rule in flask.current_app.url_map.iter_rules() \n        if 
rule.endpoint.startswith('api')\n    ]\n    return flask.jsonify({'api': rules}), 200\n","repo_name":"Ogreman/albumlist","sub_path":"albumlist/views/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7562,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"73132166831","text":"from tkinter import *\n\ndef move_by_keys(event):\n    if event.keysym == \"Up\":\n        canvas.move(oval, 0, -20)\n    elif event.keysym == \"Down\":\n        canvas.move(oval,0 , 20)\n    elif event.keysym == \"Left\":\n        canvas.move(oval,-20,0)\n    elif event.keysym == \"Right\":\n        canvas.move(oval,20,0)\nwin = Tk()\nlabel = Label(win,text=\"IT-Park\")\nlabel.pack()\ncanvas = Canvas(win,bg=\"#fff\",width=700,height=700)\noval=canvas.create_oval((300,300),(400,400),fill=\"yellow\")\ncanvas.pack()\nwin.bind(\"<Key>\",move_by_keys)\nwin.mainloop()","repo_name":"iliyasupermonstr/IT-Park-programming","sub_path":"Lesson9/Canvas_Move.py","file_name":"Canvas_Move.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"71629775790","text":"import decimal\n\nfrom django.http import JsonResponse, HttpResponseRedirect\nfrom django.apps import apps\nfrom django.urls import reverse\nfrom django.utils.safestring import mark_safe\n\nfrom altcomp_app.notifications.models import Notification\n\n\ndef get_notifications(request):\n    if Notification.objects.filter(viewed=False):\n        notifications = ''\n        for notification in Notification.objects.filter(viewed=False):\n            msg = ''\n            for key, value in notification.changes.items():\n                if key == 'price':\n                    msg += f'{key.capitalize()} increase {str(value)}' if decimal.Decimal(value) > 0 else f'{key} drop {str(value)}'\n                else:\n                    msg += f'{key.capitalize()}: not available' if value is False else f'{key}: available'\n\n            notifications += f'{msg}'\n    else:\n        notifications = 'No new notifications.'\n    return JsonResponse({'html': mark_safe(notifications), 'count': Notification.objects.filter(viewed=False).count()})\n\n\ndef notifications_seen(request):\n    model = request.POST.get('obj_model')\n    app_label = request.POST.get('obj_app')\n    obj_id = request.POST.get('obj')\n    model = apps.get_model(app_label, model)\n    obj = model.objects.get(pk=obj_id)\n    obj.notification.all().update(viewed=True)\n    return HttpResponseRedirect(reverse(f'admin:{obj._meta.app_label}_{obj._meta.model_name}_change', args=[obj.id]))\n","repo_name":"Koomin/altcomp_app","sub_path":"app/altcomp_app/notifications/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"32155808976","text":"\"\"\"\r\nPython script to open the keyboard trace files\r\n\r\n\"\"\"\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport time\r\n\r\ndef read_int(f):\r\n    ba = bytearray(4)\r\n    f.readinto(ba)\r\n    prm = np.frombuffer(ba, dtype=np.int32)\r\n    return prm[0]\r\n    \r\ndef read_double(f):\r\n    ba = bytearray(8)\r\n    f.readinto(ba)\r\n    prm = np.frombuffer(ba, dtype=np.double)\r\n    return prm[0]\r\n\r\ndef read_double_tab(f, n):\r\n    ba = bytearray(8*n)\r\n    nr = f.readinto(ba)\r\n    if nr != len(ba):\r\n        return []\r\n    else:\r\n        prm = np.frombuffer(ba, dtype=np.double)\r\n        return prm\r\n    \r\ndef get_pics_from_file(filename):\r\n    # Read the info file + detected peaks (post-processing KeyFinder)\r\n    print(\"Opening peaks file \"+filename)\r\n    f_pic = open(filename, \"rb\")\r\n    info = dict()\r\n    info[\"nb_pics\"] = read_int(f_pic)\r\n    print(\"Peaks per frame: \" + str(info[\"nb_pics\"]))\r\n    info[\"freq_sampling_khz\"] = read_double(f_pic)\r\n    print(\"Sampling frequency: \" + str(info[\"freq_sampling_khz\"]) + \" kHz\")\r\n    info[\"freq_trame_hz\"] = read_double(f_pic)\r\n    print(\"Frame frequency: \" + str(info[\"freq_trame_hz\"]) + \" Hz\")\r\n    info[\"freq_pic_khz\"] = read_double(f_pic)\r\n    print(\"Peak frequency: \" + str(info[\"freq_pic_khz\"]) + \" kHz\")\r\n    info[\"norm_fact\"] = read_double(f_pic)\r\n    print(\"Normalization factor: \" + str(info[\"norm_fact\"]))\r\n    tab_pics = []\r\n    pics = read_double_tab(f_pic, info[\"nb_pics\"])\r\n    nb_trames = 1\r\n    while len(pics) > 0:\r\n        nb_trames = nb_trames+1\r\n        tab_pics.append(pics)\r\n        pics = read_double_tab(f_pic, info[\"nb_pics\"])\r\n    print(\"Number of frames: \" + str(nb_trames))\r\n    f_pic.close()\r\n    return tab_pics, info\r\n\r\nif __name__ == \"__main__\":\r\n    tripletmoy = []\r\n    tripletmed = []\r\n    alphabet = \"abcdefghijklmnopqrstuvwxyz0123456789\"\r\n    pics_ref, info = get_pics_from_file(\"../data/pics_LOGINMDP.bin\")\r\n    for j in range(0, len(alphabet)):\r\n        pics_nokey, info = get_pics_from_file(\"../data/pics_\" + alphabet[j] + \".bin\")\r\n\r\n        moyenne = np.mean(pics_nokey, axis=0)\r\n        medianne = np.median(pics_nokey, axis=0)\r\n        listemoy = []\r\n        listemed = []\r\n        for i in range(0, len(pics_ref)):\r\n            distmoy = np.linalg.norm(pics_ref[i] - moyenne)\r\n            distmed = np.linalg.norm(pics_ref[i] - medianne)\r\n            listemoy.append(distmoy)\r\n            listemed.append(distmed)\r\n        \r\n        for i in range(0, len(listemoy)):\r\n            if listemoy[i] < 0.5:\r\n                trimoy = (alphabet[j], i, listemoy[i])\r\n                tripletmoy.append(trimoy)\r\n            if listemed[i] < 0.5:\r\n                trimed = (alphabet[j], i, listemed[i])\r\n                tripletmed.append(trimed)\r\n\r\n    touchesspe = [\"CTRL\", \"ENTER\", \"NOKEY\", \"SHIFT\", \"SPACE\", \"SUPPR\"]\r\n    for j in range(0, len(touchesspe)):\r\n        pics_nokey, info = get_pics_from_file(\"../data/pics_\" + touchesspe[j] + \".bin\")\r\n\r\n        moyenne = np.mean(pics_nokey, axis=0)\r\n        medianne = np.median(pics_nokey, axis=0)\r\n        listemoy = []\r\n        listemed = []\r\n        for i in range(0, len(pics_ref)):\r\n            distmoy = np.linalg.norm(pics_ref[i] - moyenne)\r\n            distmed = np.linalg.norm(pics_ref[i] - medianne)\r\n            listemoy.append(distmoy)\r\n            listemed.append(distmed)\r\n        \r\n        for i in range(0, len(listemoy)):\r\n            if listemoy[i] < 0.5:\r\n                trimoy = (touchesspe[j], i, listemoy[i])\r\n                tripletmoy.append(trimoy)\r\n            if listemed[i] < 0.5:\r\n                trimed = (touchesspe[j], i, listemed[i])\r\n                tripletmed.append(trimed)\r\n\r\n\r\n\r\nwith open('moys.csv','w', newline='') as out:\r\n    csv_out=csv.writer(out)\r\n    csv_out.writerow(['name','num', 'coef'])\r\n    for row in tripletmoy:\r\n        csv_out.writerow(row)\r\n\r\nwith open('meds.csv','w', newline='') as out:\r\n    csv_out=csv.writer(out)\r\n    csv_out.writerow(['name','num', 'coef'])\r\n    for row in tripletmed:\r\n        csv_out.writerow(row)\r\n    exit()\r\n","repo_name":"TheNizzo/hackathon-epita","sub_path":"Approche Statistique/frequential_analysis.py","file_name":"frequential_analysis.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"23847076943","text":"# -*- acsection: general-init -*-\nimport pygame as pg\nimport pygamebg\n\n(sirina, visina) = (180, 300) # open the window\nprozor = pygamebg.open_window(sirina, visina, \"Sedmosegmentni displej\")\n\n# -*- acsection: main -*-\n\nbroj = 0 # the digit\n\ndef crtaj():\n    prozor.fill(pg.Color(\"white\"))\n    signali = [\"1110111\", \"0010010\", \"1011101\", \"1011011\", \"0111010\",\n               \"1101011\", \"1101111\", \"1010010\", \"1111111\", \"1111011\"]\n    signal = signali[broj]\n\n    margina = 20\n    debljina = 20\n    visina = 100\n    sirina = 100\n\n    x1 = margina + debljina / 2\n    x2 = x1 + sirina + debljina\n    y1 = margina + debljina / 2\n    y2 = y1 + visina + debljina\n    y3 = y2 + visina + debljina\n\n    if signal[0] == '1':\n        pg.draw.line(prozor, pg.Color(\"black\"), (x1 + debljina / 2, y1), (x1 + debljina / 2 + sirina, y1), debljina)\n    if signal[1] == '1':\n        pg.draw.line(prozor, pg.Color(\"black\"), (x1, y1 + debljina / 2), (x1, y1 + debljina / 2 + visina), debljina)\n    if signal[2] == '1':\n        pg.draw.line(prozor, pg.Color(\"black\"), (x2, y1 + debljina / 2), (x2, y1 + debljina / 2 + visina), debljina)\n    if signal[3] == '1':\n        pg.draw.line(prozor, pg.Color(\"black\"), (x1 + debljina / 2, y2), (x1 + debljina / 2 + sirina, y2), debljina)\n    if signal[4] == '1':\n        pg.draw.line(prozor, pg.Color(\"black\"), (x1, y2 + debljina / 2), (x1, y2 + debljina / 2 + visina), debljina)\n    if signal[5] == '1':\n        pg.draw.line(prozor, pg.Color(\"black\"), (x2, y2 + debljina / 2), (x2, y2 + debljina / 2 + visina), debljina)\n    if signal[6] == '1':\n        pg.draw.line(prozor, pg.Color(\"black\"), (x1 + debljina / 2, y3), (x1 + debljina / 2 + sirina, y3), debljina)\n\ndef novi_frejm():\n    global broj\n    broj = (broj + 1) % 10 # count from 0 to 9 in a cycle\n    crtaj()\n\n# -*- acsection: after-main -*-\n\n# the function novi_frejm is called twice per second\npygamebg.frame_loop(2, novi_frejm)\n","repo_name":"Petlja/os7_inf_prog","sub_path":"_includes/sedmosegmentni_displej.py","file_name":"sedmosegmentni_displej.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"} +{"seq_id":"23128908728","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.list import ListView\nfrom .models import Category, Product, Article, Feedback\nfrom .forms import FeedbackForm\nfrom cart.forms import CartAddProductForm\n\n\nclass IndexView(TemplateView):\n    template_name = 'home.html'\n\n    def get_context_data(self, *args, **kwargs):\n        
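# --- Illustrative aside (not from the original file): the class-based-view
# pattern used here — call the parent's get_context_data, extend the dict,
# return it. The view name, template, and context key below are hypothetical.
from django.views.generic.base import TemplateView

class AboutView(TemplateView):
    template_name = 'about.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['team_size'] = 4  # any extra variable the template needs
        return context
# --- end aside ---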
context = super().get_context_data(*args, **kwargs)\n context['articles'] = Article.objects.filter(is_active=True)\n\n return context\n\n\ndef product_detail(request, pk):\n context = {}\n template_name = 'store/detail.html'\n product = get_object_or_404(Product, id=pk, available=True)\n context['product'] = product\n context['feedbacks'] = Feedback.objects.filter(product=product)\n cart_product_form = CartAddProductForm()\n context['cart_product_form'] = cart_product_form\n\n if request.method == 'POST':\n f = FeedbackForm(request.POST)\n new_feedback = f.save(commit=False)\n new_feedback.product = Product.objects.get(pk=pk)\n new_feedback.save()\n\n request.session[\"reviewed_products\"] += [pk]\n context['is_review_exist'] = True\n\n return render(request, template_name, context)\n\n if not request.session.get('reviewed_products', False):\n request.session[\"reviewed_products\"] = []\n context['is_review_exist'] = False\n context['form'] = FeedbackForm()\n else:\n if pk not in request.session[\"reviewed_products\"]:\n context['is_review_exist'] = False\n context['form'] = FeedbackForm()\n else:\n context['is_review_exist'] = True\n\n return render(request, template_name, context)\n\n\nclass CategoryView(ListView):\n template_name = 'store/category.html'\n context_object_name = 'products'\n paginate_by = 3\n\n def get_queryset(self):\n return Product.objects.filter(category=self.kwargs['pk'])\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n cart_product_form = CartAddProductForm()\n context['cart_product_form'] = cart_product_form\n context['category'] = Category.objects.get(pk=self.kwargs['pk'])\n\n return context\n","repo_name":"alekseykonotop/online_store_django","sub_path":"project/store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72359289391","text":"from flask import Blueprint, request, redirect, url_for, g\nfrom lms.model.auth import Auth\nfrom lms.model.materials import Materials\nimport logging\n\nfrom . 
import Session\n\nmaterial_api = Blueprint('material_api', __name__)\n\nlogging.basicConfig(\n    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n    level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n@material_api.before_request\ndef before_request():\n    g.token = request.cookies.get('token')\n    email = Auth.verify_auth_token(str(g.token))\n    session = Session()\n    g.user = session.query(Auth).filter_by(email=email).first()\n    session.close()\n    if g.user is None:\n        return redirect('/')\n\n# add material\n@material_api.route('/material', methods = ['POST'])\ndef post_material():\n    logger.log(logging.INFO, request.form)\n    session = Session()\n    session.add(Materials(\n        **request.form))\n    session.commit()\n    session.close()\n    return 'OK', 200\n\n# modify material\n@material_api.route('/material/<material_id>', methods = ['PUT', 'DELETE'])\ndef modify_material(material_id):\n    logger.log(logging.INFO, request.form)\n    session = Session()\n    if request.method == 'PUT':\n        session.query(Materials) \\\n            .filter(Materials.material_id == material_id) \\\n            .update({**request.form})\n        session.commit()\n    elif request.method == 'DELETE':\n        session.query(Materials).filter(Materials.material_id == material_id).delete()\n        session.commit()\n    session.close()\n    return 'OK', 200\n","repo_name":"care1e55/LMS","sub_path":"app/lms/controllers/materials.py","file_name":"materials.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"13576261363","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport pandas as pd\n\npath_data = '/home/sgmap/data'\n\n## Embassies\n# data downloaded from data.gouv.fr\n\npath_ambassades_csv = os.path.join(path_data, 'MAEDI', 'representations_francaises.csv')\namb = pd.read_csv(path_ambassades_csv, encoding='cp1252', sep=';')\n# => 101 embassies\n\n\n# Directory\n# csv loaded from the finished work reposted on data.gouv.fr\npath_dila = os.path.join(path_data, 'annuaire', 'annuaire_20160712.csv')\ndila = pd.read_csv(path_dila)\n# look for the embassies\ncontains_amb = dila['http://www.w3.org/2000/01/rdf-schema#label'].str.contains('stitut fr')\n# better:\ncontains_amb = dila['parent'] == 'an/171940'\n\n# dila['enfant'] == dila['index']\ncolumns_to_remove = ['enfant', 'df/formulaireContactHerite',\n                     'df/coordonneeComplementaireHeritee', ]\ndel dila['enfant']\nfor col in dila.columns:\n    if dila.loc[contains_amb, col].nunique() > 1 :\n        print(dila.loc[contains_amb, col].value_counts())\n\nlabel = dila.loc[contains_amb, 'http://www.w3.org/2000/01/rdf-schema#label']\nassert all(label.str.startswith('Ambassade de France '))\nlabel = label.str[20:]\nassert all(label.str.split(' - ').str.len() == 2)\npays = label.str.split(' - ').str[0].str.upper()\npays = pays.str.replace('É', 'E')\nville = label.str.split(' - ').str[1]\nville = ville.str.replace('-', ' ')\n\n#### Start of comparison\n# look at one country\namb['Pays']\nPays_amb = amb['Pays']\n# No accents !!!\namb['Ville'] # contains dates, first letter capitalized\nville.isin(amb['Ville'])\n\n\nlen(pays[~pays.isin(amb['Pays'])])\n\nlen(ville[~ville.isin(amb['Ville'])])\n# 40 cities","repo_name":"AlexisEidelman/Hackathons","sub_path":"MAEDI/annuaire_ambassades.py","file_name":"annuaire_ambassades.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"4049688294","text":"\"\"\"\nmake the maximum sliding window for the array \n\"\"\"\n\nlst1 = [4,5,4,4,6,1,7,8,0]\nlst2 = [8,6,4,7,0,1,4,3,6,4,3,2,1,0]\n\n\n\nclass Solution:\n    def monotonic_queue(self, lst, k=3):\n        \"\"\"\n        find the max value in each sliding window of size k\n        \"\"\"\n        queue = []  # indices of lst, values kept in decreasing order\n        res = []\n        for r in range(len(lst)):\n            while queue and lst[queue[-1]] <= lst[r]:\n                queue.pop()\n            queue.append(r)\n            l = r - k + 1\n            if queue[0] < l:\n                queue.pop(0)\n            if l >= 0:\n                res.append(lst[queue[0]])\n        return res\n","repo_name":"abhishekprakash256/Data-Structures-Algorithms-in-Python","sub_path":"Sliding_window/maximum_sliding_3.py","file_name":"maximum_sliding_3.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"24376062602","text":"import PodSixNet.Channel\nimport PodSixNet.Server\nimport os\nos.environ['PYGAME_FREETYPE'] = '1'\nimport pygame\nimport random\nimport math\nfrom pygame.locals import *\nfrom time import sleep\nimport eztext\nimport sys\n\n#player object\nfrom Player import Player\n#various bullet objects\nfrom Bullet import Bullet\nfrom Bullet import PlayerBullet1\nfrom Bullet import PlayerBullet2\nfrom Bullet import PlayerBullet3\nfrom Bullet import Enemy1BulletMid\nfrom Bullet import Enemy1BulletLeft\nfrom Bullet import Enemy1BulletRight\nfrom Bullet import Enemy2Bullet\nfrom Bullet import Enemy3Bullet\nfrom Bullet import BossBullet1\nfrom Bullet import BossBullet2\nfrom Bullet import BossBullet3\nfrom Bullet import BossBullet4\n#enemy objects\nfrom Enemy import Enemy\nfrom Enemy import Enemy1\nfrom Enemy import Enemy2\nfrom Enemy import Enemy3\nfrom Enemy import Boss\n#powerup objects\nfrom Powerup import Bombup\nfrom Powerup import Lifeup\nfrom Powerup import Weaponup\n#explosion object\nfrom Explosion import Explosion\n#background cloud objects\nfrom Cloud import Cloud1\nfrom Cloud import Cloud2\nfrom Cloud import Cloud3\nfrom Cloud import Cloud4\n\n#Adapted from https://github.com/JRock007/boxxy/tree/master\ndef textInput(screen, maxLength, prompt):\n    # defining some colors\n    blue = (0, 0, 255)\n    green = (0, 255, 0)\n    red = (255, 0, 0)\n    white = (255, 255, 255)\n    black = (0, 0, 0)\n    # fill the screen w/ black\n    screen.fill(black)\n    xpos = 250\n    ypos = 25\n    deltay = 25\n    txtbx = []\n    # For getting the return values\n    a = ['']\n    # here is the magic: making the text input\n    # create an input with a max length of 45,\n    # and a red color and a prompt saying 'type here $i: '\n    txtbx.append(eztext.Input(maxlength=maxLength,\n                              color=blue, x=xpos, y=ypos,\n                              prompt=prompt))\n    ypos += deltay\n\n    # create the pygame clock\n    clock = pygame.time.Clock()\n    # main loop!\n\n    while True:\n        # make sure the program is running at 30 fps\n        clock.tick(30)\n\n        # events for txtbx\n        events = pygame.event.get()\n        # process other events\n        for event in events:\n            # close it if x button is pressed\n            if event.type == QUIT:\n                return \"None\"\n\n        # clear the screen\n        screen.fill(white) # I like black better :)\n        # update txtbx and get return val\n        a[0] = txtbx[0].update(events)\n        txtbx[0].focus = True\n        txtbx[0].color = black\n\n        # blit txtbx[i] on the screen\n        txtbx[0].draw(screen)\n\n        # Changing the focus to the next element\n        # every time enter is pressed\n        if a[0] != None:\n            return a[0]\n\n        # refresh the display\n        pygame.display.flip()\n\nclass ClientChannel(PodSixNet.Channel.Channel):\n    def Network(self, data):\n        pass\n        # print(data)\n\n    def Network_shoot(self, data):\n        # player number (1 or 0)\n        num = data[\"num\"]\n\n        # id of game given by server at start of game\n        self.gameid = data[\"gameid\"]\n\n        # tells server to shoot\n        self._server.playerShoot(data, num, self.gameid)\n\n    def Network_move(self, data):\n        num = data[\"num\"]\n        
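# --- Illustrative aside (not part of the original file): the dispatch idea the
# ClientChannel methods above rely on — a message dict carries an "action" key
# that PodSixNet routes to the matching Network_<action> method. A framework-
# free sketch of that routing (the Dispatcher/Echo names are hypothetical):
class Dispatcher:
    def handle(self, data):
        handler = getattr(self, 'Network_' + data['action'], None)
        if handler is None:
            raise ValueError('no handler for ' + data['action'])
        handler(data)

class Echo(Dispatcher):
    def Network_shoot(self, data):
        print('player', data['num'], 'shoots')

Echo().handle({'action': 'shoot', 'num': 0})  # prints: player 0 shoots
# --- end aside ---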
self.gameid = data[\"gameid\"]\n #tells server to move the player\n self._server.playerMove(data, num, self.gameid)\n\n def Network_keypressed(self, data):\n num = data[\"num\"]\n self.gameid = data[\"gameid\"]\n #tells server to run keypressed\n self._server.keypressed(data, num, self.gameid)\n\n def Network_keyreleased(self, data):\n num = data[\"num\"]\n self.gameid = data[\"gameid\"]\n #tells server to run keyreleased\n self._server.keyreleased(data, num, self.gameid)\n\n def Network_usebomb(self, data):\n num = data[\"num\"]\n self.gameid = data[\"gameid\"]\n #tells server that player uses a bomb\n self._server.usebomb(data, num, self.gameid)\n\n def Close(self):\n self._server.close(self.gameid)\n\n'''Adapted from https://github.com/JRock007/boxxy/tree/master\nchannelClass, init, Connected, close are from the github. \nEvery other function was made by me'''\nclass GameServer(PodSixNet.Server.Server):\n\n channelClass = ClientChannel\n\n def __init__(self, *args, **kwargs):\n PodSixNet.Server.Server.__init__(self, *args, **kwargs)\n self.games = []\n self.queue = None\n self.currentIndex = 0\n print(\"Waiting for clients...\")\n\n def Connected(self, channel, addr):\n print(\"new connection:\", channel)\n if self.queue is None:\n print(\"Player 1 joined the game !\")\n self.currentIndex += 1\n channel.gameid = self.currentIndex\n # starts a game when the first player connects to the channel\n self.queue = Game(channel, self.currentIndex)\n self.queue.init()\n else:\n print(\"Player 2 joined the game !\")\n channel.gameid = self.currentIndex\n self.queue.player2 = channel\n # when both players join, the game starts\n self.queue.player1.Send({\"action\": \"startgame\", \"player\": 0, \\\n \"gameid\": self.queue.gameid})\n self.queue.player2.Send({\"action\": \"startgame\", \"player\": 1, \\\n \"gameid\": self.queue.gameid})\n self.games.append(self.queue)\n self.queue = None\n\n def playerShoot(self, data, num, gameid):\n game = [a for a in self.games if a.gameid == gameid]\n if len(game) == 1:\n #tells game to shoot\n game[0].multiPlayerShoot(data, num)\n\n def playerMove(self, data, num, gameid):\n game = [a for a in self.games if a.gameid == gameid]\n if len(game) == 1:\n #tells game to move player\n game[0].multiPlayerMove(data, num)\n\n def keypressed(self, data, num, gameid):\n game = [a for a in self.games if a.gameid == gameid]\n if len(game) == 1:\n #tells game to check for key pressed\n game[0].multiKeyPressed(data, num)\n\n def keyreleased(self, data, num, gameid):\n game = [a for a in self.games if a.gameid == gameid]\n if len(game) == 1:\n #tells game to check for key released\n game[0].multiKeyReleased(data, num)\n\n def usebomb(self, data, num, gameid):\n game = [a for a in self.games if a.gameid == gameid]\n if len(game) == 1:\n #tells game to use a bomb\n game[0].multiUseBomb(data, num)\n\n def close(self, gameid):\n try:\n game = [a for a in self.games if a.gameid == gameid][0]\n game.player1.Send({\"action\": \"close\"})\n game.player2.Send({\"action\": \"close\"})\n except:\n pass\n\n def tick(self):\n #runs the game on the server and calls timer fired\n clock = pygame.time.Clock()\n for game in self.games:\n time = clock.tick(7)\n game.menuMode = False\n game.multiplayerMode = True\n game.gameStarted = True\n game.timerFired(time)\n self.Pump()\n\nclass Game:\n\n def init(self):\n pygame.init()\n #initializes player sprite\n player = Player(self.width / 2, self.height / 2, 0)\n self.playerGroup = pygame.sprite.Group(player)\n #checks to see if player is firing\n 
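# --- Illustrative aside (not part of the original file): the sprite-group
# idiom this init leans on — a Group fans update() out to every member and
# feeds pygame's collision helpers. The Dot sprite below is hypothetical.
import pygame

class Dot(pygame.sprite.Sprite):
    def __init__(self, x, y):
        super().__init__()
        self.image = pygame.Surface((4, 4))
        self.rect = self.image.get_rect(center=(x, y))

    def update(self):
        self.rect.move_ip(0, 1)  # fall one pixel per call

dots = pygame.sprite.Group(Dot(10, 10), Dot(20, 20))
dots.update()  # calls Dot.update on both sprites
# --- end aside ---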
self.firing = False\n #checks to see if player is using ability to slow time\n self.abilityUsed = False\n #enemy groups and time intervals for appearance\n self.enemy1Group = pygame.sprite.Group()\n self.enemy1Timer = 1\n self.enemy2Group = pygame.sprite.Group()\n self.enemy2Timer = 300\n self.enemy3Group = pygame.sprite.Group()\n self.enemy3Timer = 500\n self.bossGroup = pygame.sprite.Group()\n self.bossTimer = 1000\n self.totalScore = 0\n #details for the player\n self.playerLives = 20\n self.playerBombs = 20\n self.bombUsed = False\n #player's power depends on enemies killed\n self.enemyKillCount = 0\n self.playerPower = 1 + self.enemyKillCount//10\n #weapon level defines columns of bullets\n self.playerWeaponLevel = 1\n self.bombAnimationTimer = -1\n #how much energy is available to use the slow time ability\n self.abilityGauge = 100\n self.abilityTimer = 0\n #powerup groups\n self.bombupGroup = pygame.sprite.Group()\n self.lifeupGroup = pygame.sprite.Group()\n self.weaponupGroup = pygame.sprite.Group()\n #is the game over?\n self.isGameOver = False\n #did you win?\n self.gameWon = False\n #Main Menu Mode\n self.menuMode = True\n #Instructions Page\n self.instructionsMode = False\n #High Scores Page\n self.highScoresMode = False\n #Single Player Game Mode\n self.gameMode1 = False\n #Two Player Game Mode\n self.gameMode2 = False\n #pause function\n self.isPaused = False\n #text box for player to input name\n self.textbox = eztext.Input(maxlength=45, color=(255,255,255), \\\n prompt='Enter your name here: ')\n #Audiowide font was created by Astigmatic One Eye Typographic Institute\n textboxFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n self.textbox.set_font(textboxFont)\n #checks to see if player is finished entering their name\n self.nameEnterDone = False\n #two player details\n self.firing1 = False\n self.firing2 = False\n self.player1Lives = 3\n self.player2Lives = 3\n self.player1Bombs = 3\n self.player2Bombs = 3\n self.player1EnemyKillCount = 0\n self.player2EnemyKillCount = 0\n self.player1Power = 1 + self.player1EnemyKillCount//10\n self.player2Power = 1 + self.player2EnemyKillCount//10\n self.player1WeaponLevel = 1\n self.player2WeaponLevel = 1\n #Server-Based Multiplayer\n self.multiplayerMode = False\n self.running = False\n self.gameStarted = False\n player1 = Player(self.width / 4, self.height / 2, 0)\n player2 = Player(3*self.width / 4, self.height / 2, 1)\n self.player1Group = pygame.sprite.Group(player1)\n self.player2Group = pygame.sprite.Group(player2)\n #explosions\n self.explosionGroup = pygame.sprite.Group()\n #background clouds\n self.makeClouds()\n #initializes sounds\n ''' Sounds are from timgormly's 8-bit sound package on Freesound\n https://freesound.org/people/timgormly/packs/10094/?page=1#sound'''\n pygame.mixer.init()\n self.hitSound = pygame.mixer.Sound('sounds/hit.ogg')\n self.fireSound = pygame.mixer.Sound('sounds/laser.ogg')\n self.bombupSound = pygame.mixer.Sound('sounds/bombup.ogg')\n self.lifeupSound = pygame.mixer.Sound('sounds/lifeup.ogg')\n self.weaponupSound = pygame.mixer.Sound('sounds/weaponup.ogg')\n self.playerHitSound = pygame.mixer.Sound('sounds/playerHit.ogg')\n ''' Explosion sound from\\\n https://freesound.org/people/dkmedic/sounds/104439/'''\n self.explodeSound = pygame.mixer.Sound('sounds/explosion.wav')\n ''' Sound for using a bomb is from\\\n https://freesound.org/people/CGEffex/sounds/93846/'''\n self.bombUseSound = pygame.mixer.Sound('sounds/bombUse.ogg')\n #fixes volume for sounds\n self.fireSound.set_volume(0.05)\n 
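# pygame.mixer volumes are floats from 0.0 (muted) to 1.0 (full)\n        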
self.hitSound.set_volume(0.3)\n        self.explodeSound.set_volume(1.0)\n        #Game Music is Instrumental Core - Battlefield Main Theme\n        pygame.mixer.music.load('sounds/gameMusic.ogg')\n        #changes mouse for game purposes\n        self.mouseImages = []\n        self.mouseImagesAppend()\n        self.mouseImageIndex = 0\n        self.mouseImage = pygame.transform.scale(self.mouseImages\\\n            [self.mouseImageIndex].convert_alpha(),(24,24))\n\n    def makeClouds(self):\n        #creates all of the background clouds in the game\n        cloud1 = Cloud1(100, 500)\n        cloud2 = Cloud3(150, 30)\n        cloud3 = Cloud2(300, 280)\n        cloud4 = Cloud4(20, 370)\n        cloud5 = Cloud4(420, 60)\n        cloud6 = Cloud3(220, 170)\n        cloud7 = Cloud2(390, 430)\n        cloud8 = Cloud1(280, 620)\n        self.cloudGroup = pygame.sprite.Group()\n        self.cloudGroup.add(cloud1)\n        self.cloudGroup.add(cloud2)\n        self.cloudGroup.add(cloud3)\n        self.cloudGroup.add(cloud4)\n        self.cloudGroup.add(cloud5)\n        self.cloudGroup.add(cloud6)\n        self.cloudGroup.add(cloud7)\n        self.cloudGroup.add(cloud8)\n\n    def mouseImagesAppend(self):\n        #images for animating the mouse cursor\n        '''Images from http://www.rw-designer.com/cursor-view/81839.gif'''\n        self.mouseImages.append(pygame.image.load(\\\n            'images/mouse_gif_files/1.gif'))\n        self.mouseImages.append(pygame.image.load(\\\n            'images/mouse_gif_files/2.gif'))\n        self.mouseImages.append(pygame.image.load(\\\n            'images/mouse_gif_files/3.gif'))\n        self.mouseImages.append(pygame.image.load(\\\n            'images/mouse_gif_files/4.gif'))\n        self.mouseImages.append(pygame.image.load(\\\n            'images/mouse_gif_files/5.gif'))\n        self.mouseImages.append(pygame.image.load(\\\n            'images/mouse_gif_files/6.gif'))\n        self.mouseImages.append(pygame.image.load(\\\n            'images/mouse_gif_files/7.gif'))\n        self.mouseImages.append(pygame.image.load(\\\n            'images/mouse_gif_files/8.gif'))\n\n    def mousePressedLeft(self, x, y):\n        if self.gameMode1 == True:\n            #fires when mouse is pressed\n            self.firing = True\n        elif self.gameMode2 == True:\n            #player 1 is controlled with the mouse\n            self.firing1 = True\n        elif self.menuMode == True:\n            (x, y) = pygame.mouse.get_pos()\n            mouseBounds = (x,y,x,y)\n            singlePlayerBounds = (140,200,340,260)\n            twoPlayerBounds = (140,280,340,340)\n            multiPlayerBounds = (140,360,340,420)\n            instructionsBounds = (140,440,340,500)\n            highScoresBounds = (140,520,340,580)\n            #checks to see if you click on a menu button\n            if self.boundsIntersect(mouseBounds, singlePlayerBounds):\n                self.gameMode1 = True\n                #single player mode starts\n                player = Player(self.width / 2, self.height / 2, 0)\n                self.playerGroup = pygame.sprite.Group(player)\n                self.menuMode = False\n                pygame.mixer.music.play(5,7)\n            elif self.boundsIntersect(mouseBounds, twoPlayerBounds):\n                self.gameMode2 = True\n                #local two player mode starts\n                player1 = Player(self.width / 4, self.height / 2, 0)\n                player2 = Player(3*self.width / 4, self.height / 2, 1)\n                self.player1Group = pygame.sprite.Group(player1)\n                self.player2Group = pygame.sprite.Group(player2)\n                self.menuMode = False\n                pygame.mixer.music.play(5,7)\n            elif self.boundsIntersect(mouseBounds, multiPlayerBounds):\n                self.multiplayerMode = True\n                #starts a server for server based multiplayer\n                self.serverMode()\n                self.gameStarted = True\n                player1 = Player(self.width / 4, self.height / 2, 0)\n                player2 = Player(3*self.width / 4, self.height / 2, 1)\n                self.player1Group = pygame.sprite.Group(player1)\n                self.player2Group = pygame.sprite.Group(player2)\n                self.menuMode = False\n                pygame.mixer.music.play(5,7)\n            elif self.boundsIntersect(mouseBounds, instructionsBounds):\n                self.instructionsMode = True\n                #opens instructions 
page\n                self.menuMode = False\n            elif self.boundsIntersect(mouseBounds, highScoresBounds):\n                self.highScoresMode = True\n                #opens highscores page\n                self.menuMode = False\n\n    def serverMode(self):\n        print(\"STARTING SERVER ON LOCALHOST\")\n\n        width, height = 500, 50\n        screen = pygame.display.set_mode((width, height))\n        pygame.display.set_caption(\"Game Server\")\n\n        address = textInput(screen, 200, \"Host:Port (localhost:8000): \")\n        if address == \"None\":\n            sys.exit()\n\n        if not address:\n            host, port = \"localhost\", 8000\n        else:\n            host, port = address.split(\":\")\n        gameServe = GameServer(localaddr=(host, int(port)))\n        while True:\n            events = pygame.event.get()\n            # process other events\n            for event in events:\n                # close if the x button is pressed\n                if event.type == QUIT:\n                    sys.exit()\n\n            gameServe.tick()\n            sleep(0.01)\n\n    def mousePressedRight(self, x, y):\n        if self.isPaused == False:\n            if self.gameMode1 == True:\n                #uses bomb with right mouse click\n                if self.playerBombs > 0:\n                    self.bombUsed = True\n                    self.playerBombs -= 1\n            elif self.gameMode2 == True:\n                #player 1 uses bomb with right mouse click\n                if self.player1Bombs > 0:\n                    self.bombUsed = True\n                    self.player1Bombs -= 1\n\n    def mouseReleased(self, x, y):\n        if self.gameMode1 == True:\n            self.firing = False\n        elif self.gameMode2 == True:\n            self.firing1 = False\n\n    def mouseMotion(self, x, y):\n        if self.isPaused == False:\n            if self.gameMode1 == True:\n                #moves player\n                player = self.playerGroup.sprites()[0]\n                (player.x,player.y) = pygame.mouse.get_pos()\n            elif self.gameMode2 == True:\n                #player 1 is moved by mouse\n                if self.player1Lives > 0:\n                    player1 = self.player1Group.sprites()[0]\n                    (player1.x, player1.y) = pygame.mouse.get_pos()\n\n    def mouseDrag(self, x, y):\n        if self.isPaused == False:\n            if self.gameMode1 == True:\n                #can move player while firing\n                player = self.playerGroup.sprites()[0]\n                (player.x,player.y) = pygame.mouse.get_pos()\n            elif self.gameMode2 == True:\n                if self.player1Lives > 0:\n                    player1 = self.player1Group.sprites()[0]\n                    (player1.x, player1.y) = pygame.mouse.get_pos()\n\n    def movePlayer(self, player):\n        if self.isKeyPressed(pygame.K_LEFT):\n            if player.x - player.width/2 > 0:\n                player.x -= 10\n\n        if self.isKeyPressed(pygame.K_RIGHT):\n            if player.x + player.width/2 < self.width:\n                player.x += 10\n\n        if self.isKeyPressed(pygame.K_UP):\n            if player.y - player.height/2 > 0:\n                player.y -= 10\n\n        if self.isKeyPressed(pygame.K_DOWN):\n            if player.y + player.height/2 < self.height:\n                player.y += 10\n\n    def keyPressed(self, keyCode, modifier):\n        #pauses the game\n        if keyCode == pygame.K_p:\n            if self.isPaused == False:\n                self.isPaused = True\n            else:\n                self.isPaused = False\n        if self.isPaused == False:\n            if self.gameMode1 == True:\n                player = self.playerGroup.sprites()[0]\n                #space slows down time\n                if keyCode == pygame.K_SPACE:\n                    if self.abilityGauge > 0:\n                        self.abilityUsed = True\n                #so does the shift key\n                elif keyCode == pygame.K_LSHIFT:\n                    if self.abilityGauge > 0:\n                        self.abilityUsed = True\n                #alternative way of firing with z key\n                elif keyCode == pygame.K_z:\n                    self.firing = True\n                #alternative way of using a bomb\n                elif keyCode == pygame.K_x:\n                    if self.playerBombs > 0:\n                        self.bombUsed = True\n                        self.playerBombs -= 1\n                #skip to boss\n                elif keyCode == pygame.K_b:\n                    self.bossTimer = 1\n                elif keyCode == pygame.K_ESCAPE:\n                    self.init()\n                if self.isGameOver == True or self.gameWon == True:\n                    if keyCode == pygame.K_RETURN:\n                        self.name = self.textbox.value\n                        if len(self.name) > 0:\n                            information = str(self.totalScore) + \"/\"+self.name\n                            
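# score and name are stored as \"score/name\"; drawHighScores splits on \"/\"\n                            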
#appends name and score to a file\n                            with open(\"file.txt\", \"a\") as highscores_file:\n                                highscores_file.write(information + \"\\n\")\n                            self.init()\n            elif self.gameMode2 == True:\n                #player 2 fires with z key\n                if keyCode == pygame.K_z:\n                    self.firing2 = True\n                #player 2 uses bombs with x\n                elif keyCode == pygame.K_x:\n                    if self.player2Bombs > 0:\n                        self.bombUsed = True\n                        self.player2Bombs -= 1\n                #skip to boss\n                elif keyCode == pygame.K_b:\n                    self.bossTimer = 1\n                #pause (K_p) is already toggled at the top of keyPressed; a\n                #second toggle here would undo it and make unpausing impossible\n                elif keyCode == pygame.K_ESCAPE:\n                    self.init()\n                #no high scores for two player mode\n                if self.isGameOver or self.gameWon == True:\n                    if keyCode == pygame.K_RETURN:\n                        self.init()\n        elif self.instructionsMode == True or self.highScoresMode == True:\n            if keyCode == pygame.K_BACKSPACE:\n                self.init()\n\n    def keyReleased(self, keyCode, modifier):\n        if self.gameMode1 == True:\n            if keyCode == pygame.K_SPACE:\n                self.abilityUsed = False\n            elif keyCode == pygame.K_LSHIFT:\n                self.abilityUsed = False\n            elif keyCode == pygame.K_z:\n                self.firing = False\n        elif self.gameMode2 == True:\n            if keyCode == pygame.K_z:\n                self.firing2 = False\n\n    #taken from side scroller\n    def boundsIntersect(self, boundsA, boundsB):\n        # return l2<=r1 and t2<=b1 and l1<=r2 and t1<=b2\n        if boundsA != None and boundsB != None:\n            (ax0, ay0, ax1, ay1) = boundsA\n            (bx0, by0, bx1, by1) = boundsB\n            return ((ax1 >= bx0) and (bx1 >= ax0) and\n                    (ay1 >= by0) and (by1 >= ay0))\n\n    def definePlayer(self, index):\n        #defines a player based on index so functions can be repeatedly used\n        player = None\n        if self.gameMode1 == True:\n            player = self.playerGroup.sprites()[index]\n        elif self.gameMode2 == True or self.multiplayerMode == True:\n            if index == 0:\n                if self.player1Lives > 0:\n                    player = self.player1Group.sprites()[0]\n            else:\n                if self.player2Lives > 0:\n                    player = self.player2Group.sprites()[0]\n        return player\n\n    def hitEnemy1(self,bullet,index=0):\n        player = self.definePlayer(index)\n        bulletBounds = bullet.getBulletBounds()\n        #checks to see if player bullets hit enemy1\n        for enemy1 in self.enemy1Group:\n            enemyBounds = enemy1.getEnemyBounds()\n            if self.boundsIntersect(bulletBounds,enemyBounds):\n                self.hitSound.play()\n                if self.gameMode1 == True:\n                    #enemy loses health\n                    enemy1.health -= self.playerPower\n                elif self.gameMode2 == True or self.multiplayerMode == True:\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playHitSound\"})\n                        self.player2.Send({\"action\": \"playHitSound\"})\n                    if index == 0:\n                        enemy1.health -= self.player1Power\n                    else:\n                        enemy1.health -= self.player2Power\n                #enemy dies if its health reaches 0\n                if enemy1.health <= 0:\n                    #chance for powerups to appear when enemy dies\n                    self.powerupAppear(enemy1)\n                    #makes an explosion when enemy dies\n                    self.explosionGroup.add(Explosion(enemy1.x,\\\n                        enemy1.y, enemy1.width, enemy1.height))\n                    enemy1.kill()\n                    #plays an explosion sound\n                    self.explodeSound.play()\n                    #sends sound data to clients\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playExplodeSound\"})\n                        self.player2.Send({\"action\": \"playExplodeSound\"})\n                    self.enemyKillCount += 1\n                    #adds the score of the hit enemy\n                    self.totalScore += enemy1.score\n\n                player.bullets.remove(bullet)\n\n    def hitEnemy2(self,bullet,index=0):\n        player = self.definePlayer(index)\n        bulletBounds = bullet.getBulletBounds()\n        #checks to see if player bullets hit enemy2\n        for enemy2 in self.enemy2Group:\n            enemyBounds = 
enemy2.getEnemyBounds()\n            if self.boundsIntersect(bulletBounds,enemyBounds):\n                self.hitSound.play()\n                if self.gameMode1 == True:\n                    #enemy loses health\n                    enemy2.health -= self.playerPower\n                elif self.gameMode2 == True or self.multiplayerMode == True:\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playHitSound\"})\n                        self.player2.Send({\"action\": \"playHitSound\"})\n                    if index == 0:\n                        enemy2.health -= self.player1Power\n                    else:\n                        enemy2.health -= self.player2Power\n                #enemy dies if its health reaches 0\n                if enemy2.health <= 0:\n                    #chance for powerups to appear when enemy dies\n                    self.powerupAppear(enemy2)\n                    self.explosionGroup.add(Explosion(enemy2.x,\\\n                        enemy2.y, enemy2.width, enemy2.height))\n                    enemy2.kill()\n                    self.explodeSound.play()\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playExplodeSound\"})\n                        self.player2.Send({\"action\": \"playExplodeSound\"})\n                    self.enemyKillCount += 1\n                    #adds the score of the hit enemy\n                    self.totalScore += enemy2.score\n\n                player.bullets.remove(bullet)\n\n    def hitEnemy3(self,bullet,index=0):\n        player = self.definePlayer(index)\n        bulletBounds = bullet.getBulletBounds()\n        #checks to see if player bullets hit enemy3\n        for enemy3 in self.enemy3Group:\n            enemyBounds = enemy3.getEnemyBounds()\n            if self.boundsIntersect(bulletBounds,enemyBounds):\n                self.hitSound.play()\n                if self.gameMode1 == True:\n                    #enemy loses health\n                    enemy3.health -= self.playerPower\n                elif self.gameMode2 == True or self.multiplayerMode == True:\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playHitSound\"})\n                        self.player2.Send({\"action\": \"playHitSound\"})\n                    if index == 0:\n                        enemy3.health -= self.player1Power\n                    else:\n                        enemy3.health -= self.player2Power\n                #enemy dies if its health reaches 0\n                if enemy3.health <= 0:\n                    #chance for powerups to appear when enemy dies\n                    self.powerupAppear(enemy3)\n                    #centers the explosion on the enemy's body\n                    if enemy3.movingRight == True:\n                        self.explosionGroup.add(Explosion(enemy3.x+40,\\\n                            enemy3.y, 48, enemy3.height))\n                    else:\n                        self.explosionGroup.add(Explosion(enemy3.x-40,\\\n                            enemy3.y, 48, enemy3.height))\n                    enemy3.kill()\n                    self.explodeSound.play()\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playExplodeSound\"})\n                        self.player2.Send({\"action\": \"playExplodeSound\"})\n                    self.enemyKillCount += 1\n                    #adds the score of the hit enemy\n                    self.totalScore += enemy3.score\n\n                player.bullets.remove(bullet)\n\n    def hitBoss(self,bullet,index=0):\n        player = self.definePlayer(index)\n        bulletBounds = bullet.getBulletBounds()\n        #checks to see if player bullets hit the boss\n        for boss in self.bossGroup:\n            bossBounds = boss.getEnemyBounds()\n            if self.boundsIntersect(bulletBounds,bossBounds):\n                self.hitSound.play()\n                if self.gameMode1 == True:\n                    #enemy loses health\n                    boss.health -= self.playerPower\n                elif self.gameMode2 == True or self.multiplayerMode == True:\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playHitSound\"})\n                        self.player2.Send({\"action\": \"playHitSound\"})\n                    if index == 0:\n                        boss.health -= self.player1Power\n                    else:\n                        boss.health -= self.player2Power\n                #enemy dies if its health reaches 0\n                if boss.health <= 0:\n                    self.explosionGroup.add(Explosion(boss.x,\\\n                        boss.y, boss.width, boss.height))\n                    boss.kill()\n                    self.explodeSound.play()\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playExplodeSound\"})\n                        self.player2.Send({\"action\": \"playExplodeSound\"})\n                    #adds the score of the hit enemy\n                    self.totalScore += boss.score\n                    self.gameWon = 
True\n\n                player.bullets.remove(bullet)\n\n    def hitsPowerup(self,index=0):\n        player = self.definePlayer(index)\n        playerBounds = player.getPlayerBounds()\n        #checks to see if player hits a bombup\n        for bombup in self.bombupGroup:\n            bombupBounds = bombup.getPowerupBounds()\n            if self.boundsIntersect(playerBounds, bombupBounds):\n                #plays sound for collecting a bombup\n                self.bombupSound.play()\n                #player's bombs increase by 1\n                if self.gameMode1 == True:\n                    self.playerBombs += 1\n                elif self.gameMode2 == True or self.multiplayerMode == True:\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playBombupSound\"})\n                        self.player2.Send({\"action\": \"playBombupSound\"})\n                    if index == 0:\n                        self.player1Bombs += 1\n                    else:\n                        self.player2Bombs += 1\n                #deletes the bombup after a player gets it\n                bombup.kill()\n        #checks to see if player hits a lifeup\n        for lifeup in self.lifeupGroup:\n            lifeupBounds = lifeup.getPowerupBounds()\n            if self.boundsIntersect(playerBounds, lifeupBounds):\n                #plays sound for collecting a lifeup\n                self.lifeupSound.play()\n                #player's lives increase by 1\n                if self.gameMode1 == True:\n                    self.playerLives += 1\n                elif self.gameMode2 == True or self.multiplayerMode == True:\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playLifeupSound\"})\n                        self.player2.Send({\"action\": \"playLifeupSound\"})\n                    if index == 0:\n                        self.player1Lives += 1\n                    else:\n                        self.player2Lives += 1\n                # deletes the lifeup after a player gets it\n                lifeup.kill()\n        #checks to see if player hits a weaponup\n        for weaponup in self.weaponupGroup:\n            weaponupBounds = weaponup.getPowerupBounds()\n            if self.boundsIntersect(playerBounds, weaponupBounds):\n                #plays the sound for collecting a weaponup\n                self.weaponupSound.play()\n                #player's weapon level increases by 1\n                if self.gameMode1 == True:\n                    if self.playerWeaponLevel < 3:\n                        self.playerWeaponLevel += 1\n                elif self.gameMode2 == True or self.multiplayerMode == True:\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playWeaponupSound\"})\n                        self.player2.Send({\"action\": \"playWeaponupSound\"})\n                    if index == 0:\n                        if self.player1WeaponLevel < 3:\n                            self.player1WeaponLevel += 1\n                    else:\n                        if self.player2WeaponLevel < 3:\n                            self.player2WeaponLevel += 1\n                #deletes weaponup when a player gets it\n                weaponup.kill()\n\n    def playerMovement(self,index=0):\n        player = self.definePlayer(index)\n        if self.gameMode1 == True:\n            self.playerGroup.update(self.width, self.height)\n            self.movePlayer(player)\n            #fires bullets\n            if self.firing == True:\n                self.fireSound.play()\n                player.fireBullet(self.playerWeaponLevel)\n        elif self.gameMode2 == True or self.multiplayerMode == True:\n            #checks if player 1 fires a bullet\n            if index == 0:\n                self.player1Group.update(self.width, self.height)\n                if self.firing1 == True:\n                    self.fireSound.play()\n                    if self.multiplayerMode == True:\n                        #sends fireSound data to clients\n                        self.player1.Send({\"action\": \"playFireSound\"})\n                        self.player2.Send({\"action\": \"playFireSound\"})\n                    player.fireBullet(self.player1WeaponLevel)\n            #checks if player 2 fires a bullet\n            else:\n                self.player2Group.update(self.width, self.height)\n                if self.gameMode2 == True:\n                    self.movePlayer(player)\n                if self.firing2 == True:\n                    self.fireSound.play()\n                    if self.multiplayerMode == True:\n                        self.player1.Send({\"action\": \"playFireSound\"})\n                        self.player2.Send({\"action\": \"playFireSound\"})\n                    player.fireBullet(self.player2WeaponLevel)\n        for bullet in player.bullets:\n            #moves each bullet and removes it when it goes off screen\n            bullet.move()\n            if bullet.y < 0:\n                
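# player bullets travel upward, so y < 0 means off screen\n                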
player.bullets.remove(bullet)\n            #checks to see if bullets hit enemies\n            self.hitEnemy1(bullet, index)\n            self.hitEnemy2(bullet, index)\n            self.hitEnemy3(bullet, index)\n            self.hitBoss(bullet, index)\n        self.hitsPowerup(index)\n        player.bullets.update(self.width,self.height)\n\n    def hitPlayer(self, bullet, index=0):\n        player = self.definePlayer(index)\n        if player != None:\n            playerBounds = (player.x,player.y,player.x,player.y)\n            if isinstance(bullet, Bullet):\n                bulletBounds = bullet.getBulletBounds()\n            # boss can hit player in blitzMode\n            elif isinstance(bullet, Enemy):\n                bulletBounds = bullet.getEnemyBounds()\n            if player.isHit == False:\n                if self.boundsIntersect(bulletBounds,playerBounds):\n                    self.playerHitSound.play()\n                    if self.gameMode1 == True:\n                        #if the bullet hits you, you lose a life\n                        self.playerLives -= 1\n                        if self.playerLives > 0:\n                            player = Player(self.width / 2, self.height / 2, 0)\n                            self.playerGroup = pygame.sprite.Group(player)\n                            player.isHit = True\n                            #player loses power when hit\n                            self.losePower()\n                        else: self.isGameOver = True\n                    elif self.gameMode2 == True or self.multiplayerMode == True:\n                        if self.multiplayerMode == True:\n                            self.player1.Send({\"action\": \"playPlayerHitSound\"})\n                            self.player2.Send({\"action\": \"playPlayerHitSound\"})\n                        if index == 0:\n                            self.player1Lives -= 1\n                            player.kill()\n                            if self.player1Lives > 0:\n                                player1 = Player(self.width/4, self.height/2, 0)\n                                self.player1Group = pygame.sprite.Group(player1)\n                                player1.isHit = True\n                                #player loses power when hit\n                                self.losePower(index)\n                        else:\n                            self.player2Lives -= 1\n                            player.kill()\n                            if self.player2Lives > 0:\n                                player2 = Player(3*self.width/4, self.height/2,\\\n                                    1)\n                                self.player2Group = pygame.sprite.Group(player2)\n                                player2.isHit = True\n                                #player loses power when hit\n                                self.losePower(index)\n                        if self.player1Lives == 0 and self.player2Lives == 0:\n                            self.isGameOver = True\n\n    def enemy1Event(self):\n        self.enemy1Group.update(self.width,self.height)\n        self.enemy1Timer += 1\n        #spawns an enemy1 at a random x coordinate along the top\n        if self.enemy1Timer == 50:\n            self.enemy1Timer = 0\n            enemyX = random.randint(30,450)\n            enemyDir = random.randint(0,1)\n            self.enemy1Group.add(Enemy1(enemyX,0,enemyDir))\n        for enemy1 in self.enemy1Group:\n            enemy1.bulletTimer += 1\n            #enemy fires bullets at intervals\n            if enemy1.bulletTimer == 20:\n                enemy1.bulletTimer = 0\n                enemy1.fireBullet()\n            for bullet in enemy1.bullets:\n                #checks to see if a bullet hits the player\n                bullet.move()\n                #removes a bullet if it gets out of bounds\n                if bullet.y > self.height or bullet.x < 0 or \\\n                    bullet.x > self.width:\n                    enemy1.bullets.remove(bullet)\n                self.hitPlayer(bullet, 0)\n                if self.gameMode2 == True or self.multiplayerMode == True:\n                    self.hitPlayer(bullet, 1)\n            enemy1.bullets.update(self.width,self.height)\n\n    def enemy2Event(self):\n        self.enemy2Group.update(self.width,self.height)\n        self.enemy2Timer -= 1\n        #spawns an enemy2 at a random x coordinate along the top\n        if self.enemy2Timer == 0:\n            self.enemy2Timer = 100\n            enemyX = random.randint(30,450)\n            self.enemy2Group.add(Enemy2(enemyX,0))\n        for enemy2 in self.enemy2Group:\n            enemy2.bulletTimer += 1\n            #enemy fires bullets at intervals\n            if enemy2.bulletTimer == 50:\n                enemy2.bulletTimer = 0\n                enemy2.fireBullet()\n            for bullet in enemy2.bullets:\n                #checks to see if a bullet hits the player\n                bulletBounds = bullet.getBulletBounds()\n                bullet.move()\n                if bullet.y > self.height or bullet.x < 0 or \\\n                    bullet.x > self.width:\n                    enemy2.bullets.remove(bullet)\n                self.hitPlayer(bullet, 0)\n                if self.gameMode2 == True or 
self.multiplayerMode == True:\n                    self.hitPlayer(bullet, 1)\n            enemy2.bullets.update(self.width,self.height)\n\n    def enemy3Event(self):\n        self.enemy3Group.update(self.width,self.height)\n        self.enemy3Timer -= 1\n        #spawns an enemy3 from either the top left or the top right of screen\n        if self.enemy3Timer == 0:\n            self.enemy3Timer = 100\n            movingRight = random.randint(0,1)\n            #enemy will move right if it spawns on the left and vice versa\n            if movingRight:\n                self.enemy3Group.add(Enemy3(0,0,True))\n            else:\n                self.enemy3Group.add(Enemy3(self.width,0,False))\n        for enemy3 in self.enemy3Group:\n            enemy3.bulletTimer += 1\n            #enemy fires bullets at intervals\n            if enemy3.bulletTimer == 4:\n                enemy3.bulletTimer = 0\n                #enemy fires bullet at player\n                if self.gameMode1 == True:\n                    player = self.playerGroup.sprites()[0]\n                    enemy3.fireBullet(player.x, player.y)\n                #enemy randomly fires at both players\n                elif self.gameMode2 == True or self.multiplayerMode == True:\n                    if self.player1Lives > 0 and self.player2Lives>0:\n                        randomPlayer = random.randint(0,1)\n                    elif self.player1Lives > 0:\n                        randomPlayer = 0\n                    elif self.player2Lives > 0:\n                        randomPlayer = 1\n                    if randomPlayer == 0:\n                        player = self.player1Group.sprites()[0]\n                    else:\n                        player = self.player2Group.sprites()[0]\n                    enemy3.fireBullet(player.x, player.y)\n            for bullet in enemy3.bullets:\n                #checks to see if a bullet hits the player\n                bulletBounds = bullet.getBulletBounds()\n                bullet.move()\n                if bullet.y > self.height or bullet.x < 0 or \\\n                    bullet.x > self.width:\n                    enemy3.bullets.remove(bullet)\n                self.hitPlayer(bullet, 0)\n                if self.gameMode2 == True or self.multiplayerMode == True:\n                    self.hitPlayer(bullet, 1)\n            enemy3.bullets.update(self.width,self.height)\n\n    def bossEvent(self):\n        self.bossGroup.update(self.width,self.height)\n        self.bossTimer -= 1\n        #spawns the boss at the top center of the screen when the timer ends\n        if self.bossTimer == 0:\n            self.boss = Boss(self.width/2, 60)\n            self.bossGroup.add(self.boss)\n            #all other enemies are deleted when the boss is active\n            for enemy1 in self.enemy1Group:\n                enemy1.kill()\n            for enemy2 in self.enemy2Group:\n                enemy2.kill()\n            for enemy3 in self.enemy3Group:\n                enemy3.kill()\n        for boss in self.bossGroup:\n            #times the various bullets\n            if boss.bullet1Timer > 0:\n                boss.bullet1Timer -= 1\n            if boss.bullet2Timer > 0:\n                boss.bullet2Timer -= 1\n            if boss.bullet3Timer > 0:\n                boss.bullet3Timer -= 1\n            if boss.bullet4Timer > 0:\n                boss.bullet4Timer -= 1\n            # boss fires different bullets at different stages of health\n            if boss.health > 700:\n                if boss.bullet1Timer == 0:\n                    boss.bullet1Timer = 1\n                    if self.multiplayerMode == True:\n                        boss.bullet1Timer = 3\n                    boss.fireBullet1()\n            #below 400 health, it goes into frenzy and attacks the player\n            if boss.health < 400 and boss.blitzed == False:\n                boss.blitzMode = 1\n            if boss.health < 400 and boss.blitzMode == 0:\n                boss.y = 60\n                if boss.bullet4Timer == 0:\n                    boss.bullet4Timer = 50\n                    boss.fireBullet4()\n            # 2nd stage, moves left and right, shooting two different bullets\n            if boss.health <= 700 and boss.health >= 400:\n                if boss.bullet2Timer == 0:\n                    boss.bullet2Timer = 10\n                    boss.fireBullet2()\n            if boss.health <= 700 and boss.health >= 400:\n                if boss.bullet3Timer == 0:\n                    boss.bullet3Timer = 30\n                    boss.fireBullet3()\n            #boss collision is active\n            self.hitPlayer(boss, 0)\n            if self.gameMode2 == True or self.multiplayerMode == True:\n                self.hitPlayer(boss, 1)\n            for bullet in boss.bullets:\n                #checks to see if a bullet hits the player\n                bulletBounds = bullet.getBulletBounds()\n                bullet.move()\n                if bullet.y > self.height or bullet.x < 0 or 
\\\n bullet.x > self.width:\n bullet.kill()\n self.hitPlayer(bullet, 0)\n if self.gameMode2 == True or self.multiplayerMode == True:\n self.hitPlayer(bullet, 1)\n # the fourth type of boss bullet splits into 4 after some time\n if type(bullet) == BossBullet4:\n if bullet.timer <= 0:\n if bullet.size-7 > 0:\n for i in [1,3,5,7]:\n #the bullets' speeds increase after split\n newBullet = BossBullet4(bullet.x, bullet.y, \\\n bullet.size-7, i*math.pi/4, \\\n bullet.speed + 1)\n boss.bullets.add(newBullet)\n bullet.kill()\n boss.bullets.update(self.width,self.height)\n\n def losePower(self, index=0):\n if self.gameMode1 == True:\n #player goes down one power level when hit\n if self.playerPower == 3:\n self.enemyKillCount = 10\n else:\n self.enemyKillCount = 0\n if self.playerWeaponLevel > 1:\n self.playerWeaponLevel -= 1\n if self.gameMode2 == True or self.multiplayerMode == True:\n if index == 0:\n if self.player1Power == 3:\n self.player1EnemyKillCount = 10\n else:\n self.player1EnemyKillCount = 0\n if self.player1WeaponLevel > 1:\n self.player1WeaponLevel -= 1\n else:\n if self.player2Power == 3:\n self.player2EnemyKillCount = 10\n else:\n self.player2EnemyKillCount = 0\n if self.player2WeaponLevel > 1:\n self.player2WeaponLevel -= 1\n\n def abilityUsage(self):\n #slows down time if ability is used\n if self.abilityUsed == True:\n if self.abilityGauge > 0:\n #ability gauge/energy decreases\n self.abilityGauge -= 3\n self.abilityTimer = 30\n pygame.time.delay(100)\n #energy does not increase for a while after use\n if self.abilityTimer == 0:\n if self.abilityGauge < 100:\n self.abilityGauge += 1\n elif self.abilityTimer > 0:\n self.abilityTimer -= 1\n\n def bombUsage(self):\n #kills all enemies on screen when bomb is used\n if self.bombUsed == True:\n self.bombUseSound.play()\n if self.multiplayerMode == True:\n self.player1.Send({\"action\": \"playBombUseSound\"})\n self.player2.Send({\"action\": \"playBombUseSound\"})\n for enemy1 in self.enemy1Group:\n self.powerupAppear(enemy1)\n #makes explosion and plays sound for each enemy\n self.explosionGroup.add(Explosion(enemy1.x,\\\n enemy1.y, enemy1.width, enemy1.height))\n self.explodeSound.play()\n enemy1.kill()\n #adds the score of each enemy killed by the bomb\n self.totalScore += enemy1.score\n for enemy2 in self.enemy2Group:\n self.powerupAppear(enemy2)\n self.explosionGroup.add(Explosion(enemy2.x,\\\n enemy2.y, enemy2.width, enemy2.height))\n self.explodeSound.play()\n enemy2.kill()\n #adds the score of each enemy killed by the bomb\n self.totalScore += enemy2.score\n for enemy3 in self.enemy3Group:\n self.powerupAppear(enemy3)\n #centers the explosion on the enemy's body\n if enemy3.movingRight == True:\n self.explosionGroup.add(Explosion(enemy3.x+40,\\\n enemy3.y, 48, enemy3.height))\n else:\n self.explosionGroup.add(Explosion(enemy3.x-40,\\\n enemy3.y, 48, enemy3.height))\n self.explodeSound.play()\n enemy3.kill()\n #adds the score of each enemy killed by the bomb\n self.totalScore += enemy3.score\n for boss in self.bossGroup:\n boss.health -= 75\n boss.bullets = pygame.sprite.Group()\n #enemy dies if its health reaches 0\n if boss.health <= 0:\n self.explosionGroup.add(Explosion(boss.x,\\\n boss.y, boss.width, boss.height))\n boss.kill()\n self.explodeSound.play()\n if self.multiplayerMode == True:\n self.player1.Send({\"action\": \"playExplodeSound\"})\n self.player2.Send({\"action\": \"playExplodeSound\"})\n #adds the score of the hit enemy\n self.totalScore += boss.score\n self.gameWon = True\n self.bombUsed = False\n 
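# a timer of 0 starts the white-flash bomb animation drawn in redrawAll\n            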
self.bombAnimationTimer = 0\n\n def powerupEvent(self):\n #the powerups only remain for a temporary amount of time\n for bombup in self.bombupGroup:\n bombup.timer += 1\n if bombup.timer > 140:\n bombup.kill()\n #updates the sprite group\n self.bombupGroup.update(self.width, self.height)\n for lifeup in self.lifeupGroup:\n lifeup.timer += 1\n if lifeup.timer > 140:\n lifeup.kill()\n self.lifeupGroup.update(self.width, self.height)\n for weaponup in self.weaponupGroup:\n weaponup.timer += 1\n if weaponup.timer > 140:\n weaponup.kill()\n self.weaponupGroup.update(self.width, self.height)\n\n def explosionEvent(self):\n #explosion event ends after animated sprite finishes\n for explosion in self.explosionGroup:\n if explosion.index >= 18:\n explosion.kill()\n self.explosionGroup.update(self.width, self.height)\n\n def cloudEvent(self):\n #clouds continuously move down and wrap around to simulate flying\n for cloud in self.cloudGroup:\n cloud.move(self.height)\n self.cloudGroup.update(self.width, self.height)\n\n def powerupAppear(self,enemy):\n #power up randomly appears when enemies are killed\n powerupChance = random.randint(0,3)\n if powerupChance == 0:\n self.bombupGroup.add(Bombup(enemy.x, enemy.y))\n elif powerupChance == 1:\n self.lifeupGroup.add(Lifeup(enemy.x, enemy.y))\n elif powerupChance == 2:\n self.weaponupGroup.add(Weaponup(enemy.x, enemy.y))\n\n def updatePlayerHelper(self):\n #update player 1 & 2 separately so if one dies the other can still play\n if self.player1Lives > 0:\n player1 = self.player1Group.sprites()[0]\n #updates information to client side\n data = {\"action\": \"updateplayer1\", \"player1X\": player1.x, \\\n \"player1Y\": player1.y, \"player1Index\":player1.index,\\\n \"player1Timer\": player1.timer, \"player1Countdown\": \\\n player1.countdown, \"player1Hit\": player1.isHit, \"gameid\": \\\n self.gameid, \"type\": 1}\n self.player1.Send(data)\n self.player2.Send(data)\n #data is separate from player details so player disappears after death\n data = {\"action\": \"updateplayer1\", \"player1Lives\": self.player1Lives, \\\n \"player1Power\": self.player1Power, \"player1Bombs\": \\\n self.player1Bombs, \"player1Weapon\": self.player1WeaponLevel, \\\n \"gameid\": self.gameid, \"type\": 2}\n self.player1.Send(data)\n self.player2.Send(data)\n\n if self.player2Lives > 0:\n player2 = self.player2Group.sprites()[0]\n #updates information to client side\n data = {\"action\": \"updateplayer2\", \"player2X\": player2.x, \\\n \"player2Y\": player2.y, \"player2Index\":player2.index, \\\n \"player2Timer\": player2.timer, \"player2Countdown\": \\\n player2.countdown, \"player2Hit\": player2.isHit, \"gameid\": \\\n self.gameid, \"type\": 1}\n self.player1.Send(data)\n self.player2.Send(data)\n data = {\"action\": \"updateplayer2\", \"player2Lives\": self.player2Lives, \\\n \"player2Power\": self.player2Power, \"player2Bombs\": \\\n self.player2Bombs, \"player2Weapon\": self.player2WeaponLevel, \\\n \"gameid\": self.gameid, \"type\": 2}\n self.player1.Send(data)\n self.player2.Send(data)\n\n def updatePlayerBulletsHelper(self):\n #updates the players' bullets\n if self.player1Lives > 0:\n player1 = self.player1Group.sprites()[0]\n for bullet in player1.bullets:\n data = {\"action\": \"updatePlayerBullets\", \"x\":bullet.x,\\\n \"y\": bullet.y, \"size\": bullet.size, \"player\": 1}\n self.player1.Send(data)\n self.player2.Send(data)\n #separate for the two players so only updates if player is alive\n if self.player2Lives > 0:\n player2 = self.player2Group.sprites()[0]\n for 
bullet in player2.bullets:\n                data = {\"action\": \"updatePlayerBullets\", \"x\":bullet.x,\\\n                    \"y\": bullet.y, \"size\": bullet.size, \"player\": 2}\n                self.player1.Send(data)\n                self.player2.Send(data)\n\n    def updateEnemy1Helper(self):\n        #updates enemy 1 to client\n        for enemy1 in self.enemy1Group:\n            data = {\"action\": \"updateEnemies\", \"x\": enemy1.x, \\\n                \"y\": enemy1.y, \"health\": enemy1.health, \"bulletTimer\":\\\n                enemy1.bulletTimer, \"direction\": enemy1.direction, \\\n                \"enemyType\": 1}\n            self.player1.Send(data)\n            self.player2.Send(data)\n            #updates three different types of enemy1 bullets\n            for bullet in enemy1.bullets:\n                if type(bullet) == Enemy1BulletMid:\n                    bulletType = 1\n                elif type(bullet) == Enemy1BulletLeft:\n                    bulletType = 2\n                else:\n                    bulletType = 3\n                data = {\"action\": \"updateEnemy1Bullets\", \"x\": \\\n                    bullet.x, \"y\": bullet.y, \"size\": bullet.size, \\\n                    \"bulletType\": bulletType}\n                self.player1.Send(data)\n                self.player2.Send(data)\n\n    def updateEnemy2Helper(self):\n        #updates enemy2\n        for enemy2 in self.enemy2Group:\n            data = {\"action\": \"updateEnemies\", \"x\": enemy2.x, \\\n                \"y\": enemy2.y, \"health\": enemy2.health, \"bulletTimer\":\\\n                enemy2.bulletTimer, \"index\": enemy2.index, \"enemyType\": 2}\n            self.player1.Send(data)\n            self.player2.Send(data)\n            #updates enemy2 bullets\n            for bullet in enemy2.bullets:\n                data = {\"action\": \"updateEnemy2Bullets\", \"x\": \\\n                    bullet.x, \"y\": bullet.y, \"size\": bullet.size, \"angle\": \\\n                    bullet.angle}\n                self.player1.Send(data)\n                self.player2.Send(data)\n\n    def updateEnemy3Helper(self):\n        #updates enemy3 sprites\n        for enemy3 in self.enemy3Group:\n            data = {\"action\": \"updateEnemies\", \"x\": enemy3.x, \\\n                \"y\": enemy3.y, \"health\": enemy3.health, \"bulletTimer\":\\\n                enemy3.bulletTimer, \"movingRight\": enemy3.movingRight,\\\n                \"index\": enemy3.index, \"enemyType\": 3}\n            self.player1.Send(data)\n            self.player2.Send(data)\n            #updates enemy3 bullets\n            for bullet in enemy3.bullets:\n                data = {\"action\": \"updateEnemy3Bullets\", \"x\": \\\n                    bullet.x, \"y\": bullet.y, \"size\": bullet.size, \"xDistance\": \\\n                    bullet.xDistance, \"yDistance\": bullet.yDistance, \\\n                    \"timesMove\": bullet.timesMove, \"xMoveInterval\": \\\n                    bullet.xMoveInterval}\n                self.player1.Send(data)\n                self.player2.Send(data)\n\n    def updateBossHelper(self):\n        #updates the boss information to client\n        for boss in self.bossGroup:\n            data = {\"action\": \"updateEnemies\", \"x\": boss.x, \\\n                \"y\": boss.y, \"health\": boss.health, \"bullet1Timer\": \\\n                boss.bullet1Timer, \"bullet2Timer\": boss.bullet2Timer,\\\n                \"bullet3Timer\": boss.bullet3Timer, \"movingRight\": \\\n                boss.movingRight, \"index\": boss.index, \"delta\": \\\n                boss.delta, \"blitzMode\": boss.blitzMode, \"blitzed\": boss.blitzed,\\\n                \"direction\": boss.direction, \"enemyType\": 4}\n            self.player1.Send(data)\n            self.player2.Send(data)\n            #updates the four types of boss bullets\n            for bullet in boss.bullets:\n                if type(bullet) == BossBullet1:\n                    bulletType = 1\n                elif type(bullet) == BossBullet2:\n                    bulletType = 2\n                elif type(bullet) == BossBullet3:\n                    bulletType = 3\n                elif type(bullet) == BossBullet4:\n                    bulletType = 4\n                if bulletType == 1:\n                    data = {\"action\": \"updateBossBullets\", \"x\": \\\n                        bullet.x, \"y\": bullet.y, \"size\": bullet.size, \\\n                        \"bulletType\": bulletType, \"angle\": bullet.angle, \"delta\": \\\n                        bullet.delta}\n                    self.player1.Send(data)\n                    self.player2.Send(data)\n                if bulletType == 2:\n                    data = {\"action\": \"updateBossBullets\", \"x\": \\\n                    bullet.x, \"y\": bullet.y, \"size\": 
bullet.size, \\\n                    \"bulletType\": bulletType, \"dx\": bullet.dx}\n                    self.player1.Send(data)\n                    self.player2.Send(data)\n                if bulletType == 3:\n                    data = {\"action\": \"updateBossBullets\", \"x\": \\\n                        bullet.x, \"y\": bullet.y, \"size\": bullet.size, \\\n                        \"bulletType\": bulletType, \"angle\": bullet.angle}\n                    self.player1.Send(data)\n                    self.player2.Send(data)\n                if bulletType == 4:\n                    data = {\"action\": \"updateBossBullets\", \"x\": \\\n                        bullet.x, \"y\": bullet.y, \"size\": bullet.size, \\\n                        \"bulletType\": bulletType, \"angle\": bullet.angle, \\\n                        \"speed\": bullet.speed, \"timer\": bullet.timer}\n                    #sends the split-bullet data like the other bullet types\n                    self.player1.Send(data)\n                    self.player2.Send(data)\n\n    def updatePowerupsHelper(self):\n        #resets the powerup groups so they don't stack\n        self.player1.Send({\"action\": \"resetPowerups\"})\n        self.player2.Send({\"action\": \"resetPowerups\"})\n        #updates bombups, lifeups, and weaponups\n        for bombup in self.bombupGroup:\n            data = {\"action\": \"updatePowerups\", \"x\": bombup.x, \"y\": bombup.y, \\\n                \"timer\": bombup.timer, \"type\": 1}\n            self.player1.Send(data)\n            self.player2.Send(data)\n        for lifeup in self.lifeupGroup:\n            data = {\"action\": \"updatePowerups\", \"x\": lifeup.x, \"y\": lifeup.y, \\\n                \"timer\": lifeup.timer, \"type\": 2}\n            self.player1.Send(data)\n            self.player2.Send(data)\n        for weaponup in self.weaponupGroup:\n            data = {\"action\": \"updatePowerups\", \"x\": weaponup.x, \"y\": \\\n                weaponup.y, \"timer\": weaponup.timer, \"type\": 3}\n            self.player1.Send(data)\n            self.player2.Send(data)\n\n    def updateExplosionsHelper(self):\n        #resets explosions before update so they don't stack\n        self.player1.Send({\"action\": \"resetExplosions\"})\n        self.player2.Send({\"action\": \"resetExplosions\"})\n        #updates explosion information\n        for explosion in self.explosionGroup:\n            data = {\"action\": \"updateExplosions\", \"x\": explosion.x, \\\n                \"y\": explosion.y, \"width\": explosion.width, \"height\": \\\n                explosion.height, \"index\": explosion.index}\n            self.player1.Send(data)\n            self.player2.Send(data)\n\n    def updateGameInfo(self):\n        #updates other various game info to clients\n        data = {\"action\": \"updateGameInfo\", \"score\": self.totalScore, \\\n            \"gameover\": self.isGameOver, \"gamewon\": self.gameWon, \\\n            \"bombanimation\": self.bombAnimationTimer}\n        self.player1.Send(data)\n        self.player2.Send(data)\n\n    def timerFired(self, dt):\n        # game plays if it's not paused\n        if not self.isPaused:\n            if self.gameMode1 == True:\n                #if not game over and game not won\n                if self.isGameOver == False and self.gameWon == False:\n                    self.playerMovement()\n                    self.abilityUsage()\n                    self.bossEvent()\n                    if self.bossTimer > 0:\n                        self.enemy1Event()\n                        self.enemy2Event()\n                        self.enemy3Event()\n                    self.bombUsage()\n                    self.powerupEvent()\n                    self.explosionEvent()\n                    self.cloudEvent()\n                    #player power increases from enemies killed to a max of 3\n                    self.playerPower = 1 + self.enemyKillCount//10\n                    if self.playerPower >= 3:\n                        self.playerPower = 3\n                elif self.isGameOver == True or self.gameWon == True:\n                    # you can enter a name into the textbox at end of game\n                    if self.nameEnterDone == False:\n                        events = pygame.event.get()\n                        keyEntered = self.textbox.update(events)\n                        if keyEntered != None:\n                            self.nameEnterDone = True\n            elif self.gameMode2 == True or (self.multiplayerMode == True and \\\n                self.gameStarted == True):\n                if self.isGameOver == False and self.gameWon == False:\n                    if self.player1Lives > 0:\n                        self.playerMovement(0)\n                    if self.player2Lives > 0:\n                        self.playerMovement(1)\n                    self.bossEvent()\n                    if self.bossTimer > 0:\n                        self.enemy1Event()\n                        self.enemy2Event()\n                        self.enemy3Event()\n                    self.bombUsage()\n                    
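# powerups, explosions and clouds keep updating alongside combat\n                    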
self.powerupEvent()\n self.explosionEvent()\n self.cloudEvent()\n #player power increases from enemies killed up to a max of 3\n self.player1Power = 1 + self.player1EnemyKillCount//10\n self.player2Power = 1 + self.player2EnemyKillCount//10\n if self.player1Power >= 3:\n self.player1Power = 3\n if self.player2Power >= 3:\n self.player2Power = 3\n if self.multiplayerMode == True:\n #updates data to client\n self.updatePlayerHelper()\n self.updatePlayerBulletsHelper()\n #resets enemy sprites and bullet sprites\n self.player1.Send({\"action\": \"resetEnemies\"})\n self.player2.Send({\"action\": \"resetEnemies\"})\n self.player1.Send({\"action\": \"resetBullets\"})\n self.player2.Send({\"action\": \"resetBullets\"})\n self.updateEnemy1Helper()\n self.updateEnemy2Helper()\n self.updateEnemy3Helper()\n self.updateBossHelper()\n self.updatePowerupsHelper()\n self.updateExplosionsHelper()\n self.updateGameInfo()\n if self.bombAnimationTimer >= 0:\n self.bombAnimationTimer += 1\n if self.bombAnimationTimer == 30:\n self.bombAnimationTimer = -1\n\n def drawEnemies(self, screen):\n #draws all 3 enemy types\n self.enemy1Group.draw(screen)\n for enemy1 in self.enemy1Group:\n enemy1.bullets.draw(screen)\n self.enemy2Group.draw(screen)\n for enemy2 in self.enemy2Group:\n enemy2.bullets.draw(screen)\n self.enemy3Group.draw(screen)\n for enemy3 in self.enemy3Group:\n enemy3.bullets.draw(screen)\n self.bossGroup.draw(screen)\n for boss in self.bossGroup:\n boss.bullets.draw(screen)\n #draws boss health\n bossFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n bossMessage = bossFont.render(\"Boss: \",\\\n 1, (0,0,0))\n screen.blit(bossMessage, (15, 10))\n pygame.draw.rect(screen,(255,0,0),((60,10),\\\n (int(boss.health/10),15)))\n\n def drawPowerups(self, screen):\n #the powerups flash when they are about to disappear\n for bombup in self.bombupGroup:\n if bombup.timer < 100:\n self.bombupGroup.draw(screen)\n elif bombup.timer >= 100 and bombup.timer % 2 == 0:\n self.bombupGroup.draw(screen)\n for lifeup in self.lifeupGroup:\n if lifeup.timer < 100:\n self.lifeupGroup.draw(screen)\n elif lifeup.timer >= 100 and lifeup.timer % 2 == 0:\n self.lifeupGroup.draw(screen)\n for weaponup in self.weaponupGroup:\n if weaponup.timer < 100:\n self.weaponupGroup.draw(screen)\n elif weaponup.timer >= 100 and weaponup.timer % 2 == 0:\n self.weaponupGroup.draw(screen)\n\n def drawNormalGameScreen(self, screen):\n #draws background clouds\n self.cloudGroup.draw(screen)\n #draws the player, enemies and bullets\n player = self.playerGroup.sprites()[0]\n if player.countdown % 2 == 0:\n self.playerGroup.draw(screen)\n player.bullets.draw(screen)\n self.drawEnemies(screen)\n self.drawPowerups(screen)\n #draws explosions\n self.explosionGroup.draw(screen)\n #draws the score on bottom right\n scoreFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n scoreMessage = scoreFont.render(\"Score: %d\" % self.totalScore,\\\n 1, (255,255,255))\n screen.blit(scoreMessage, (self.width - 80, self.height - 40))\n #draws the player power level on bottom right\n powerFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n if self.playerPower < 3:\n powerMessage = powerFont.render(\"Power: %d\" % self.playerPower,\\\n 1, (255,255,255))\n else:\n powerMessage = powerFont.render(\"Power: MAX\",\\\n 1, (255,255,255))\n screen.blit(powerMessage, (self.width - 80, self.height - 60))\n #draws the lives on bottom left\n livesFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n livesMessage = livesFont.render(\"Lives: %d\" % 
self.playerLives,\\\n            1, (255,255,255))\n        screen.blit(livesMessage, (30, self.height - 60))\n        #draws the bombs on bottom left\n        bombsFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n        bombsMessage = bombsFont.render(\"Bombs: %d\" % self.playerBombs,\\\n            1, (255,255,255))\n        screen.blit(bombsMessage, (30, self.height - 40))\n\n        #draws the ability gauge on left of screen\n        pygame.draw.rect(screen,(0,0,0),((50,560),\\\n            (10,-100)))\n        pygame.draw.rect(screen,(255,255,0),((50,560),\\\n            (10,-self.abilityGauge)))\n\n        #draws special mouse pointer for game\n        #idea from Andy Shen\n        self.mouseImageIndex += 1\n        if self.mouseImageIndex >= len(self.mouseImages):\n            self.mouseImageIndex = 0\n        self.mouseImage = pygame.transform.scale(self.mouseImages\\\n            [self.mouseImageIndex].convert_alpha(),(24,24))\n        pygame.mouse.set_visible(False)\n        (x,y) = pygame.mouse.get_pos()\n        screen.blit(self.mouseImage, (x-12,y-12))\n\n        if self.isPaused == True:\n            pauseFont = pygame.font.Font(\"Audiowide.ttf\", 20)\n            pause = pauseFont.render(\"Paused\", 1, (255,255,255))\n            pausePos = pause.get_rect()\n            pausePos.centerx = screen.get_size()[0]/2\n            pausePos.centery = screen.get_size()[1]/2\n            screen.blit(pause, pausePos)\n\n\n    def drawPlayer1Details(self, screen):\n        #tells you that it's player 1\n        player1Font = pygame.font.Font(\"Audiowide.ttf\", 18, bold=True)\n        player1Font.set_underline(1)\n        player1Message = player1Font.render(\"Player 1\",\\\n            1, (255,255,255))\n        screen.blit(player1Message, (30, self.height - 110))\n        #draws player1 power level on bottom left\n        powerFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n        if self.player1Power < 3:\n            powerMessage = powerFont.render(\"Power: %d\" % self.player1Power,\\\n                1, (255,255,255))\n        else:\n            powerMessage = powerFont.render(\"Power: MAX\",\\\n                1, (255,255,255))\n        screen.blit(powerMessage, (30, self.height - 80))\n        #draws the lives on bottom left\n        livesFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n        livesMessage = livesFont.render(\"Lives: %d\" % self.player1Lives,\\\n            1, (255,255,255))\n        screen.blit(livesMessage, (30, self.height - 60))\n        #draws the bombs on bottom left\n        bombsFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n        bombsMessage = bombsFont.render(\"Bombs: %d\" % self.player1Bombs,\\\n            1, (255,255,255))\n        screen.blit(bombsMessage, (30, self.height - 40))\n\n    def drawPlayer2Details(self, screen):\n        #tells you that it's player 2\n        player2Font = pygame.font.Font(\"Audiowide.ttf\", 18, bold=True)\n        player2Font.set_underline(1)\n        player2Message = player2Font.render(\"Player 2\",\\\n            1, (255,255,255))\n        screen.blit(player2Message, (self.width-90, self.height - 110))\n        #draws the player power level on bottom right\n        powerFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n        if self.player2Power < 3:\n            powerMessage = powerFont.render(\"Power: %d\" % self.player2Power,\\\n                1, (255,255,255))\n        else:\n            powerMessage = powerFont.render(\"Power: MAX\",\\\n                1, (255,255,255))\n        screen.blit(powerMessage, (self.width - 80, self.height - 80))\n        #draws the lives on bottom right\n        livesFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n        livesMessage = livesFont.render(\"Lives: %d\" % self.player2Lives,\\\n            1, (255,255,255))\n        screen.blit(livesMessage, (self.width - 80, self.height - 60))\n        #draws the bombs on bottom right\n        bombsFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n        bombsMessage = bombsFont.render(\"Bombs: %d\" % self.player2Bombs,\\\n            1, (255,255,255))\n        screen.blit(bombsMessage, (self.width - 80, self.height - 
40))\n\n def drawTwoPlayerScreen(self, screen):\n #draws background clouds\n self.cloudGroup.draw(screen)\n #draws both players and their bullets\n if self.player1Lives > 0:\n player1 = self.player1Group.sprites()[0]\n if player1.countdown % 2 == 0:\n self.player1Group.draw(screen)\n player1.bullets.draw(screen)\n if self.player2Lives > 0:\n player2 = self.player2Group.sprites()[0]\n if player2.countdown % 2 == 0:\n self.player2Group.draw(screen)\n player2.bullets.draw(screen)\n #draws the enemies and powerups\n self.drawEnemies(screen)\n self.drawPowerups(screen)\n #draws explosions\n self.explosionGroup.draw(screen)\n #draws the score on top right\n scoreFont = pygame.font.Font(\"Audiowide.ttf\", 14, bold=True)\n scoreMessage = scoreFont.render(\"Score: %d\" % self.totalScore,\\\n 1, (0,0,0))\n scorePos = scoreMessage.get_rect()\n scorePos.top = 30\n scorePos.right = self.width-10\n screen.blit(scoreMessage, scorePos)\n self.drawPlayer1Details(screen)\n self.drawPlayer2Details(screen)\n \n #draws special mouse pointer for game\n #idea from Andy Shen\n self.mouseImageIndex += 1\n if self.mouseImageIndex >= len(self.mouseImages):\n self.mouseImageIndex = 0\n self.mouseImage = pygame.transform.scale(self.mouseImages\\\n [self.mouseImageIndex].convert_alpha(),(24,24))\n pygame.mouse.set_visible(False)\n (x,y) = pygame.mouse.get_pos()\n screen.blit(self.mouseImage, (x-12,y-12))\n\n #blits paused text\n if self.isPaused == True:\n pauseFont = pygame.font.Font(\"Audiowide.ttf\", 20)\n pause = pauseFont.render(\"Paused\", 1, (255,255,255))\n pausePos = pause.get_rect()\n pausePos.centerx = screen.get_size()[0]/2\n pausePos.centery = screen.get_size()[1]/2\n screen.blit(pause, pausePos)\n\n def drawGameOverScreen(self, screen):\n #draws game over screen\n gameOverFont = pygame.font.Font(\"Crushed.ttf\", 40, bold=True)\n gameOver = gameOverFont.render(\"Game Over!\", 1, (255,255,255))\n gameOverPos = gameOver.get_rect()\n gameOverPos.centerx = screen.get_size()[0]/2\n gameOverPos.centery = screen.get_size()[1]/2 - 60\n screen.blit(gameOver, gameOverPos)\n\n #score\n scoreFont = pygame.font.Font(\"Crushed.ttf\", 20)\n score = scoreFont.render(\"Score: %d\" % self.totalScore\\\n , 1, (255,255,255))\n scorePos = score.get_rect()\n scorePos.centerx = screen.get_size()[0]/2\n scorePos.centery = screen.get_size()[1]/2\n screen.blit(score, scorePos)\n\n #restart game message\n if self.gameMode1 == True:\n restartFont = pygame.font.Font(\"Crushed.ttf\", 13)\n submitName = restartFont.render(\"Press Enter Once to Submit Name\",\\\n 1, (255,255,255))\n submitNamePos = submitName.get_rect()\n submitNamePos.centerx = screen.get_size()[0]/2\n submitNamePos.centery = screen.get_size()[1]/2 + 200\n screen.blit(submitName, submitNamePos)\n restart = restartFont.render(\"Press Enter to Go Back to Main Menu\"\\\n , 1, (255,255,255))\n restartPos = restart.get_rect()\n restartPos.centerx = screen.get_size()[0]/2\n restartPos.centery = screen.get_size()[1]/2 + 225\n screen.blit(restart, restartPos)\n\n #Enter player name text box\n self.textbox.set_pos(screen.get_size()[0]/2, \\\n screen.get_size()[1]/2 + 40)\n self.textbox.draw(screen)\n elif self.gameMode2 == True:\n restartFont = pygame.font.Font(\"Crushed.ttf\", 13)\n restart = restartFont.render(\"Press Enter to Go Back to Main Menu\"\\\n , 1, (255,255,255))\n restartPos = restart.get_rect()\n restartPos.centerx = screen.get_size()[0]/2\n restartPos.centery = screen.get_size()[1]/2 + 225\n screen.blit(restart, restartPos)\n\n def drawGameWonScreen(self, 
screen):\n #draws game won screen\n gameWonFont = pygame.font.Font(\"Crushed.ttf\", 40)\n gameWon = gameWonFont.render(\"You Win!\", 1, (255,255,255))\n gameWonPos = gameWon.get_rect()\n gameWonPos.centerx = screen.get_size()[0]/2\n gameWonPos.centery = screen.get_size()[1]/2 - 60\n screen.blit(gameWon, gameWonPos)\n\n #score\n scoreFont = pygame.font.Font(\"Crushed.ttf\", 20)\n score = scoreFont.render(\"Score: %d\" % self.totalScore\\\n , 1, (255,255,255))\n scorePos = score.get_rect()\n scorePos.centerx = screen.get_size()[0]/2\n scorePos.centery = screen.get_size()[1]/2\n screen.blit(score, scorePos)\n\n #restart game message\n if self.gameMode1 == True:\n restartFont = pygame.font.Font(\"Crushed.ttf\", 13)\n submitName = restartFont.render(\"Press Enter Once to Submit Name\",\\\n 1, (255,255,255))\n submitNamePos = submitName.get_rect()\n submitNamePos.centerx = screen.get_size()[0]/2\n submitNamePos.centery = screen.get_size()[1]/2 + 200\n screen.blit(submitName, submitNamePos)\n restart = restartFont.render(\"Press Enter to Go Back to Main Menu\"\\\n , 1, (255,255,255))\n restartPos = restart.get_rect()\n restartPos.centerx = screen.get_size()[0]/2\n restartPos.centery = screen.get_size()[1]/2 + 225\n screen.blit(restart, restartPos)\n\n #Enter player name text box\n self.textbox.set_pos(screen.get_size()[0]/2, \\\n screen.get_size()[1]/2 + 40)\n self.textbox.draw(screen)\n elif self.gameMode2 == True:\n restartFont = pygame.font.Font(\"Crushed.ttf\", 13)\n restart = restartFont.render(\"Press Enter to Go Back to Main Menu\"\\\n , 1, (255,255,255))\n restartPos = restart.get_rect()\n restartPos.centerx = screen.get_size()[0]/2\n restartPos.centery = screen.get_size()[1]/2 + 225\n screen.blit(restart, restartPos)\n\n '''Created by Alistair Buxton found online\n http://archives.seul.org/pygame/users/Mar-2008/msg00538.html '''\n def DrawRoundRect(self, surface, color, rect, width, xr, yr):\n clip = surface.get_clip()\n \n # left and right\n surface.set_clip(clip.clip(rect.inflate(0, -yr*2)))\n pygame.draw.rect(surface, color, rect.inflate(1-width,0), width)\n\n # top and bottom\n surface.set_clip(clip.clip(rect.inflate(-xr*2, 0)))\n pygame.draw.rect(surface, color, rect.inflate(0,1-width), width)\n\n # top left corner\n surface.set_clip(clip.clip(rect.left, rect.top, xr, yr))\n pygame.draw.ellipse(surface, color, pygame.Rect(rect.left, rect.top, \\\n 2*xr, 2*yr), width)\n\n # top right corner\n surface.set_clip(clip.clip(rect.right-xr, rect.top, xr, yr))\n pygame.draw.ellipse(surface, color, pygame.Rect(rect.right-2*xr, \\\n rect.top, 2*xr, 2*yr), width)\n\n # bottom left\n surface.set_clip(clip.clip(rect.left, rect.bottom-yr, xr, yr))\n pygame.draw.ellipse(surface, color, pygame.Rect(rect.left, \\\n rect.bottom-2*yr, 2*xr, 2*yr), width)\n\n # bottom right\n surface.set_clip(clip.clip(rect.right-xr, rect.bottom-yr, xr, yr))\n pygame.draw.ellipse(surface, color, pygame.Rect(rect.right-2*xr, \\\n rect.bottom-2*yr, 2*xr, 2*yr), width)\n\n surface.set_clip(clip)\n\n def drawMainMenu(self, screen):\n #Game Title\n #Crushed font was created by Hypefonts on fontspace.com\n titleFont = pygame.font.Font('Crushed.ttf', 40)\n title1 = titleFont.render(\"Bullet\"\\\n , 1, (255,255,255))\n title1Pos = title1.get_rect()\n title1Pos.centerx = 240\n title1Pos.centery = 77\n screen.blit(title1, title1Pos)\n title2 = titleFont.render(\"Armageddon\"\\\n , 1, (255,255,255))\n title2Pos = title2.get_rect()\n title2Pos.centerx = 240\n title2Pos.centery = 140\n screen.blit(title2, title2Pos)\n #menu button 
outline\n        self.DrawRoundRect(screen,(255,255,255),pygame.Rect(137,197,206,66), \\\n            0, 32, 64)\n        self.DrawRoundRect(screen,(255,255,255),pygame.Rect(137,277,206,66), \\\n            0, 32, 64)\n        self.DrawRoundRect(screen,(255,255,255),pygame.Rect(137,357,206,66), \\\n            0, 32, 64)\n        self.DrawRoundRect(screen,(255,255,255),pygame.Rect(137,437,206,66), \\\n            0, 32, 64)\n        self.DrawRoundRect(screen,(255,255,255),pygame.Rect(137,517,206,66), \\\n            0, 32, 64)\n        #menu buttons\n        self.DrawRoundRect(screen,(0,0,0),pygame.Rect(140,200,200,60), \\\n            0, 64, 64)\n        self.DrawRoundRect(screen,(0,0,0),pygame.Rect(140,280,200,60), \\\n            0, 64, 64)\n        self.DrawRoundRect(screen,(0,0,0),pygame.Rect(140,360,200,60), \\\n            0, 64, 64)\n        self.DrawRoundRect(screen,(0,0,0),pygame.Rect(140,440,200,60), \\\n            0, 64, 64)\n        self.DrawRoundRect(screen,(0,0,0),pygame.Rect(140,520,200,60), \\\n            0, 64, 64)\n        #text on menu buttons\n        menuFont = pygame.font.Font(\"Crushed.ttf\", 14)\n        singlePlayer = menuFont.render(\"1 Player\"\\\n            , 1, (255,255,255))\n        singlePlayerPos = singlePlayer.get_rect()\n        singlePlayerPos.centerx = 240\n        singlePlayerPos.centery = 230\n        screen.blit(singlePlayer, singlePlayerPos)\n        twoPlayer = menuFont.render(\"2 Player\"\\\n            , 1, (255,255,255))\n        twoPlayerPos = twoPlayer.get_rect()\n        twoPlayerPos.centerx = 240\n        twoPlayerPos.centery = 310\n        screen.blit(twoPlayer, twoPlayerPos)\n        multiPlayer = menuFont.render(\"Server\"\\\n            , 1, (255,255,255))\n        multiPlayerPos = multiPlayer.get_rect()\n        multiPlayerPos.centerx = 240\n        multiPlayerPos.centery = 390\n        screen.blit(multiPlayer, multiPlayerPos)\n        instructions = menuFont.render(\"Instructions\"\\\n            , 1, (255,255,255))\n        instructionsPos = instructions.get_rect()\n        instructionsPos.centerx = 240\n        instructionsPos.centery = 470\n        screen.blit(instructions, instructionsPos)\n        highScores = menuFont.render(\"High Scores\"\\\n            , 1, (255,255,255))\n        highScoresPos = highScores.get_rect()\n        highScoresPos.centerx = 240\n        highScoresPos.centery = 550\n        screen.blit(highScores, highScoresPos)\n\n    def drawHighScores(self, screen):\n        highScoreNames = []\n        highScorePoints = []\n        scoresAndNames = []\n        #reads high scores from a file\n        with open(\"file.txt\", \"r\") as highscores_file:\n            for line in highscores_file:\n                scoresAndNames.append(line)\n        #sorts by score, numerically and highest first (a plain string sort\n        #would put \"9\" above \"10\")\n        scoresAndNames.sort(key=lambda entry: int(entry.split(\"/\")[0]),\n            reverse=True)\n        for i in scoresAndNames:\n            splitList = i.split(\"/\")\n            score = splitList[0]\n            name = splitList[1][:-1]\n            #adds the names and points to separate lists in order\n            highScorePoints.append(score)\n            highScoreNames.append(name)\n        scoreFont = pygame.font.Font(\"Audiowide.ttf\", 20)\n        #draws the top 9 high scores\n        highScoreTitleFont = pygame.font.Font(\"Audiowide.ttf\", 26)\n        highScoreTitleFont.set_underline(1)\n        highScoreName = highScoreTitleFont.render(\"Player Name\"\\\n            , 1, (255,255,255))\n        screen.blit(highScoreName, (30, 150))\n        highScore = highScoreTitleFont.render(\"Score\"\\\n            , 1, (255,255,255))\n        highScorePos = highScore.get_rect()\n        highScorePos.top = 150\n        highScorePos.right = self.width-25\n        screen.blit(highScore, highScorePos)\n        for i in range(9):\n            if i < (len(highScorePoints)):\n                scoreName = scoreFont.render(highScoreNames[i]\\\n                    , 1, (255,255,255))\n                screen.blit(scoreName, (30, 200 + 42*i))\n                score = scoreFont.render(str(highScorePoints[i])\\\n                    , 1, (255,255,255))\n                scorePos = score.get_rect()\n                scorePos.top = 200 + 42*i\n                scorePos.right = self.width-25\n                screen.blit(score, scorePos)\n        #return to main menu\n        returnMainMenuFont = 
pygame.font.Font(\"Audiowide.ttf\", 18, bold=True)\n returnMainMenu = returnMainMenuFont.render(\\\n \"Press Backspace to go back to Main Menu.\", \\\n 1, (255,255,255))\n screen.blit(returnMainMenu, (30, 600))\n\n def drawInstructions(self, screen):\n #blits lines of instructions onto the screen\n instructionsTitleFont = pygame.font.Font(\"Audiowide.ttf\", 18, bold=True)\n instructionsTitleFont.set_underline(1)\n instructionsTitle1 = instructionsTitleFont.render(\"Single Player Mode\"\\\n , 1, (255,255,255))\n screen.blit(instructionsTitle1, (20, 30))\n instructionsFont = pygame.font.Font(\"Audiowide.ttf\", 11)\n instructions1 = instructionsFont.render(\\\n \"Use either the mouse or the arrow keys to move the player.\", \\\n 1, (255,255,255))\n screen.blit(instructions1, (20, 60))\n instructions2 = instructionsFont.render(\\\n \"Press the left mouse button or the 'z' key to shoot.\", \\\n 1, (255,255,255))\n screen.blit(instructions2, (20, 90))\n instructions3 = instructionsFont.render(\\\n \"Press the right mouse button or the 'x' key to use a bomb and kill\",\\\n 1, (255,255,255))\n screen.blit(instructions3, (20, 120))\n instructions32 = instructionsFont.render(\"all enemies on screen\", \\\n 1, (255,255,255))\n screen.blit(instructions32, (20,135))\n instructions4 = instructionsFont.render(\\\n \"Press space or shift to use your ability to slow time. This drains\", \\\n 1, (255,255,255))\n screen.blit(instructions4, (20, 165))\n instructions42 = instructionsFont.render(\\\n \"your energy bar, which will slowly refill.\", 1, (255,255,255))\n screen.blit(instructions42, (20,180))\n instructions6 = instructionsFont.render(\\\n \"You start out with three lives and three bombs, but can get more\", \\\n 1, (255,255,255))\n screen.blit(instructions6, (20, 210))\n instructions62 = instructionsFont.render(\\\n \"from powerups, which drop after enemies die.\", 1, (255,255,255))\n screen.blit(instructions62, (20, 225))\n instructions7 = instructionsFont.render(\\\n \"The 'P' powerup will increase your weapon level so you can\",\\\n 1, (255,255,255))\n screen.blit(instructions7, (20, 255))\n instructions72 = instructionsFont.render(\\\n \"shoot more bullets at enemies.\", 1, (255,255,255))\n screen.blit(instructions72, (20, 270))\n instructions8 = instructionsFont.render(\\\n \"Your weapon also gains power when you kill a certain amount of\",\\\n 1, (255,255,255))\n screen.blit(instructions8, (20, 300))\n instructions82 = instructionsFont.render(\\\n \"enemies, and will do more damage.\", 1, (255,255,255))\n screen.blit(instructions82, (20, 315))\n instructionsTitle2 = instructionsTitleFont.render(\"Two Player Mode\"\\\n , 1, (255,255,255))\n screen.blit(instructionsTitle2, (20, 365))\n instructions10 = instructionsFont.render(\\\n \"Two player mode is similar to single player mode.\", \\\n 1, (255,255,255))\n screen.blit(instructions10, (20, 395))\n instructions11 = instructionsFont.render(\\\n \"However, the first player uses the mouse to move while the second\",\\\n 1, (255,255,255))\n screen.blit(instructions11, (20, 425))\n instructions112 = instructionsFont.render(\\\n \"uses the arrow keys.\", 1, (255,255,255))\n screen.blit(instructions112, (20, 440))\n instructions12 = instructionsFont.render(\\\n \"The first player shoots with the left mouse button and uses\", \\\n 1, (255,255,255))\n screen.blit(instructions12, (20, 470))\n instructions122 = instructionsFont.render(\\\n \"bombs with the right.\", 1, (255,255,255))\n screen.blit(instructions122, (20, 485))\n instructions13 = 
instructionsFont.render(\\\n \"The second player uses 'z' to shoot and 'x' to use bombs.\", \\\n 1, (255,255,255))\n screen.blit(instructions13, (20, 515))\n instructions15 = instructionsFont.render(\\\n \"Each player has separate lives, bombs, powers, and weapon\",\\\n 1, (255,255,255))\n screen.blit(instructions15, (20, 545))\n instructions152 = instructionsFont.render(\\\n \"levels, but share a total score.\", 1, (255,255,255))\n screen.blit(instructions152, (20, 560))\n instructionsTitleFont.set_underline(0)\n instructionsTitle3 = instructionsTitleFont.render(\\\n \"Press Backspace to go back to Main Menu.\", \\\n 1, (255,255,255))\n screen.blit(instructionsTitle3, (20, 600))\n\n def redrawAll(self, screen):\n if self.gameMode1 == True:\n if self.isGameOver == False and self.gameWon == False:\n self.drawNormalGameScreen(screen)\n #draws bomb animation\n if self.bombAnimationTimer >= 0:\n if self.bombAnimationTimer % 5 == 0:\n screen.fill((255,255,255))\n self.bombAnimationTimer += 1\n if self.bombAnimationTimer == 30:\n self.bombAnimationTimer = -1\n elif self.isGameOver == True:\n self.drawGameOverScreen(screen)\n else:\n self.drawGameWonScreen(screen)\n elif self.gameMode2 == True:\n if self.isGameOver == False and self.gameWon == False:\n self.drawTwoPlayerScreen(screen)\n #draws bomb animation\n if self.bombAnimationTimer >= 0:\n if self.bombAnimationTimer % 5 == 0:\n screen.fill((255,255,255))\n self.bombAnimationTimer += 1\n if self.bombAnimationTimer == 30:\n self.bombAnimationTimer = -1\n elif self.isGameOver == True:\n self.drawGameOverScreen(screen)\n else:\n self.drawGameWonScreen(screen)\n #draws the menu pages\n elif self.menuMode == True:\n #makes mouse visible\n pygame.mouse.set_visible(True)\n self.drawMainMenu(screen)\n elif self.instructionsMode == True:\n pygame.mouse.set_visible(True)\n self.drawInstructions(screen)\n elif self.highScoresMode == True:\n pygame.mouse.set_visible(True)\n self.drawHighScores(screen)\n self.screen = screen\n\n def isKeyPressed(self, key):\n ''' return whether a specific key is being held '''\n return self._keys.get(key, False)\n\n def __init__(self, player1=None, currentIndex=None, width=480, height=640, fps=15, title=\"Term Project\"):\n self.width = width\n self.height = height\n self.fps = fps\n self.title = title\n self.player1 = player1\n self.player2 = None\n self.gameid = currentIndex\n '''Menu background from \\\n http://topwalls.net/wallpapers/2012/11/Wallpaper-Rays-Color-\\\n Black-Background-640x480.jpg'''\n self.bg = pygame.image.load(\"images/mainmenu.jpg\")\n pygame.init()\n\n def run(self):\n clock = pygame.time.Clock()\n screen = pygame.display.set_mode((self.width, self.height))\n self.screen = screen\n # set the title of the window\n pygame.display.set_caption(self.title)\n\n # stores all the keys currently being held down\n self._keys = dict()\n\n # call game-specific initialization\n self.init()\n playing = True\n while playing:\n pygame.event.pump()\n time = clock.tick(self.fps)\n self.timerFired(time)\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\n self.mousePressedLeft(*(event.pos))\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:\n self.mousePressedRight(*(event.pos))\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n self.mouseReleased(*(event.pos))\n elif (event.type == pygame.MOUSEMOTION and\n event.buttons == (0, 0, 0)):\n self.mouseMotion(*(event.pos))\n elif (event.type == pygame.MOUSEMOTION and\n event.buttons[0] == 
1):\n self.mouseDrag(*(event.pos))\n elif event.type == pygame.KEYDOWN:\n self._keys[event.key] = True\n self.keyPressed(event.key, event.mod)\n elif event.type == pygame.KEYUP:\n self._keys[event.key] = False\n self.keyReleased(event.key, event.mod)\n elif event.type == pygame.QUIT:\n playing = False\n if self.gameMode1 == True or self.gameMode2 == True:\n '''Background image is from \n http://1.bp.blogspot.com/-iMMViPyVOnk/Ukr \\\n iYf7srMI/AAAAAAAACJs/70U17vjatQI/s640/P1010098.jpg'''\n self.bg = pygame.image.load(\"images/bg.jpg\")\n elif self.instructionsMode == True:\n '''Background image is from\n http://wallpaperswiki.org/wallpapers/2012/11/Wallpaper-Line-\\\n Node-Lights-Background-Abstract-640x480.jpg'''\n self.bg = pygame.image.load(\"images/instructions.jpg\")\n elif self.highScoresMode == True:\n '''Background image is from\n http://img.wallsus.com/download/20140228/abstraction,\\\n -abstraction,-background,-rays,-band,-band,-line,-line,-light,\\\n -lights-480x640.jpg'''\n self.bg = pygame.image.load(\"images/highscores.jpg\")\n elif self.menuMode == True:\n self.bg = pygame.image.load(\"images/mainmenu.jpg\")\n screen.blit(self.bg, (0, 0))\n if self.isGameOver == True:\n screen.fill((0,0,0))\n self.redrawAll(screen)\n pygame.display.flip()\n\n # pygame.quit()z\n\n def multiPlayerShoot(self, data, num):\n #player 1\n if num == 0:\n #server functions for shooting from client input\n if self.firing1 == True:\n self.firing1 = False\n else:\n self.firing1 = True\n #player 2\n else:\n if self.firing2 == True:\n self.firing2 = False\n else:\n self.firing2 = True\n\n def multiPlayerMove(self, data, num):\n #player 1\n if num == 0:\n #moves the player if he is alive\n if self.player1Lives > 0:\n player1 = self.player1Group.sprites()[0]\n player1.x = data[\"x\"]\n player1.y = data[\"y\"]\n #player 2\n else:\n if self.player2Lives > 0:\n player2 = self.player2Group.sprites()[0]\n player2.x = data[\"x\"]\n player2.y = data[\"y\"]\n\n def multiUseBomb(self, data, num):\n #player 1\n if num == 0:\n #if the player still has bombs, use one\n if self.player1Bombs > 0:\n self.bombUsed = True\n self.player1Bombs -= 1\n #player 2\n else:\n if self.player2Bombs > 0:\n self.bombUsed = True\n self.player2Bombs -= 1\n\n def multiMovePlayer(self, keyCode, player):\n #moves the player with keyboard\n if keyCode == pygame.K_LEFT:\n if player.x - player.width/2 > 0:\n player.x -= 10\n\n if keyCode == pygame.K_RIGHT:\n if player.x + player.width/2 < self.width:\n player.x += 10\n\n if keyCode == pygame.K_UP:\n if player.y - player.height/2 > 0:\n player.y -= 10\n\n if keyCode == pygame.K_DOWN:\n if player.y + player.height/2 < self.height:\n player.y += 10\n\n def multiKeyPressed(self, data, num):\n keyCode = data[\"keyCode\"]\n if num == 0:\n if self.player1Lives > 0:\n player1 = self.player1Group.sprites()[0]\n #alternative way of firing with z key\n if keyCode == pygame.K_z:\n self.firing1 = True\n #alternative way of using a bomb\n elif keyCode == pygame.K_x:\n if self.player1Bombs > 0:\n self.bombUsed = True\n self.player1Bombs -= 1\n elif keyCode == pygame.K_ESCAPE:\n self.init()\n self.multiMovePlayer(keyCode, player1)\n if self.isGameOver or self.gameWon == True:\n if keyCode == pygame.K_RETURN:\n self.init()\n else:\n if self.player2Lives > 0:\n player2 = self.player2Group.sprites()[0]\n #alternative way of firing with z key\n if keyCode == pygame.K_z:\n self.firing2 = True\n #alternative way of using a bomb\n elif keyCode == pygame.K_x:\n if self.player2Bombs > 0:\n self.bombUsed = True\n 
self.player2Bombs -= 1\n elif keyCode == pygame.K_ESCAPE:\n self.init()\n self.multiMovePlayer(keyCode, player2)\n if self.isGameOver or self.gameWon == True:\n if keyCode == pygame.K_RETURN:\n self.init()\n \n def multiKeyReleased(self, data, num):\n keyCode = data[\"keyCode\"]\n if num == 0:\n player1 = self.player1Group.sprites()[0]\n #alternative way of firing with z key\n if keyCode == pygame.K_z:\n self.firing1 = False\n else:\n player2 = self.player2Group.sprites()[0]\n #alternative way of firing with z key\n if keyCode == pygame.K_z:\n self.firing2 = False\n\ndef main():\n game = Game()\n game.run()\n\nif __name__ == '__main__':\n main()","repo_name":"lawrence914/15-112-Term-Project","sub_path":"Project-Source-and-Support-Files/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":99969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"21212117034","text":"import sys\n\nclass WeirdRecurrenceRelation():\n def __init__(self):\n self.cached = {}\n self.cached[1] = 1\n self.cached[3] = 3\n\n def get(self, n):\n if n in self.cached:\n return self.cached[n]\n if n % 2 == 0:\n results = self.get(n // 2)\n self.cached[n] = results\n return results\n elif n % 4 == 1:\n m = n // 4\n results = 2 * self.get(2*m + 1) - self.get(m)\n self.cached[n] = results\n return results\n elif n % 4 == 3:\n m = n // 4\n results = 3 * self.get(2*m + 1) - 2 * self.get(m)\n self.cached[n] = results\n return results\n\nclass WeirdRecurrenceSumRelation():\n def __init__(self):\n self.cached = {\n 0: 0,\n 1: 1,\n 2: 2,\n 3: 5,\n }\n \n def get(self, n):\n if n in self.cached:\n return self.cached[n]\n results = None\n q, r = divmod(n, 4)\n if r == 0:\n results = 6 * self.get(2*q) - 5 * self.get(q) - 3 * self.get(q - 1) - 1\n elif r == 1:\n results = 2 * self.get(2*q + 1) + 4 * self.get(2 * q) - 6 * self.get(q) - 2 * self.get(q - 1) - 1\n elif r == 2:\n results = 3 * self.get(2*q + 1) + 3 * self.get(2 * q) - 6 * self.get(q) - 2 * self.get(q - 1) - 1\n elif r == 3:\n results = 6 * self.get(2*q + 1) - 8 * self.get(q) - 1\n self.cached[n] = results\n return results\n\nclass Problem():\n def solve(self):\n relation = WeirdRecurrenceSumRelation()\n assert(relation.get(8) == 22)\n assert(relation.get(100) == 3604)\n print(relation.get(3**37) % 10**10)\n\ndef main():\n problem = Problem()\n problem.solve()\n \nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"syurskyi/Algorithms_and_Data_Structure","sub_path":"_algorithms_challenges/projecteuler/ProjectEuler-master(2)/ProjectEuler-master/463.py","file_name":"463.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"38"} +{"seq_id":"37838740630","text":"import numpy as np\nimport sys\nfrom pathlib import Path\n\npaths = [\n Path.cwd() / \"../../src/nn\",\n Path.cwd() / \"../..\",\n Path.cwd()\n]\n\nfor path in paths:\n sys.path.append(path.resolve().as_posix())\n\nprint(sys.path)\n","repo_name":"sampath017/researchai","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"39108270088","text":"# this is duplicated in snipppets of code from all over the web, credit to no one\n# in particular - to all those that have gone before me!\nfrom future.moves.urllib.request import urlopen\n\n\ndef shorten(aUrl):\n tinyurl = 
'http://tinyurl.com/api-create.php?url='\n    req = urlopen(tinyurl + aUrl)\n    data = req.read()\n\n    # should be a tiny url\n    return data\n","repo_name":"robweber/xbmcbackup","sub_path":"resources/lib/tinyurl.py","file_name":"tinyurl.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"38"}
+{"seq_id":"9763342447","text":"import random\r\nfrom random import randint\r\nimport pygame\r\nfrom pygame.draw import circle\r\npygame.init()\r\n\r\n#screen\r\nFPS = 50\r\nscreen = pygame.display.set_mode((900, 500))\r\n\r\n#create colors\r\ncolor_1 = (221, 160, 221)\r\ncolor_2 = (199, 20, 133)\r\ncolor_3 = (255, 165, 0)\r\ncolor_4 = (255, 255, 50)\r\ncolor_5 = (255, 180, 190)\r\ncolor_6 = (185, 85, 210)\r\ncolors = [color_1, color_2, color_3, color_4, color_5, color_6]\r\ncolor_screen = (35, 0, 90)\r\n\r\n#variable for ball\r\nballs = []\r\ndef new_ball():\r\n    x = randint(100, 800)\r\n    y = randint(100, 400)\r\n    r = randint(30, 50)\r\n    color = colors[randint(0, 5)]\r\n    dx = randint(-9, 9)\r\n    dy = randint(-9, 9)\r\n    newball = []\r\n    newball.append([x, y])\r\n    newball.append([dx, dy])\r\n    newball.append(r)\r\n    newball.append(color)\r\n    balls.append(newball)\r\n\r\n#balls motion\r\ndef move_balls(screen):\r\n    for ball in balls:\r\n        if ball[0][0]+ball[2] >= 900 or ball[0][0]-ball[2] <= 0:\r\n            ball[1][0] *= -1\r\n        if ball[0][1]+ball[2] >= 500 or ball[0][1]-ball[2] <= 0:\r\n            ball[1][1] *= -1\r\n        ball[0][0] += ball[1][0]\r\n        ball[0][1] += ball[1][1]\r\n        circle(screen, ball[3], (ball[0][0], ball[0][1]), ball[2])\r\n\r\n\r\npygame.display.update()\r\nclock = pygame.time.Clock()\r\nfinished = False\r\npoints = 0\r\n\r\nfor i in range(3):\r\n    new_ball()\r\n\r\nwhile not finished:\r\n    clock.tick(FPS)\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            finished = True\r\n        elif event.type == pygame.MOUSEBUTTONDOWN:\r\n            for ball in balls:\r\n                if ((event.pos[0] - ball[0][0]) ** 2 + (event.pos[1] - ball[0][1]) ** 2) <= ball[2] ** 2:\r\n                    print('Поймал!')\r\n                    balls.remove(ball)\r\n                    points += 1\r\n                    print('Вы заработали', points, 'очков')\r\n                    new_ball()\r\n                    break\r\n            else:\r\n                print('Мимо!')\r\n\r\n    move_balls(screen)\r\n\r\n    pygame.display.update()\r\n    screen.fill(color_screen)\r\n\r\npygame.quit()","repo_name":"AngelinaMB/infa_2","sub_path":"lab_4/lab_4.py","file_name":"lab_4.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"35992343399","text":"__all__ = [ 'Label' ]\n\n# cairo imports\nimport cairo\n\n# kaa.candy imports\nimport image\nimport core\n\nclass Label(image.CairoTexture):\n\n    def draw(self, cr):\n        \"\"\"\n        Render the cairo context\n        \"\"\"\n        fade = self.font.get_width(self.text) > self.width\n        # draw new text string\n        if self.color:\n            cr.set_source_rgba(*self.color.to_cairo())\n        cr.select_font_face(self.font.name, cairo.FONT_SLANT_NORMAL)\n        cr.set_font_size(self.font.size)\n        if fade and self.color:\n            s = cairo.LinearGradient(0, 0, self.width, 0)\n            c = self.color.to_cairo()\n            s.add_color_stop_rgba(0, *c)\n            # 50 pixel fading\n            s.add_color_stop_rgba(1 - (50.0 / self.width), *c)\n            s.add_color_stop_rgba(1, c[0], c[1], c[2], 0)\n            cr.set_source(s)\n        cr.move_to(0, cr.font_extents()[0])\n        cr.show_text(self.text)\n        return 
True\n","repo_name":"freevo/kaa-candy","sub_path":"src/backend/widgets/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"38"}
+{"seq_id":"18749236333","text":"#multithreading in python\r\nfrom threading import Thread\r\nfrom time import sleep\r\nclass Google(Thread):\r\n    def run(self):\r\n        for i in range(1,51):\r\n\r\n            print(\"Google task\",i)\r\n            sleep(1)\r\n\r\n\r\nclass Facebook(Thread):\r\n    def run(self):\r\n        for i in range(1,51):\r\n\r\n            print(\"Facebook task\",i)\r\n            sleep(1)\r\n\r\n\r\nclass LinkedIn(Thread):\r\n    def run(self):\r\n        for i in range(1,51):\r\n\r\n            print(\"LinkedIn task\",i)\r\n            sleep(1)\r\n\r\nthread1=Google()\r\nthread2=Facebook()\r\nthread3=LinkedIn()\r\nthread1.start()\r\nsleep(0.2)\r\nthread2.start()\r\nsleep(0.2)\r\nthread3.start()\r\nthread1.join()\r\nthread2.join()\r\nthread3.join()\r\nprint(\"bye\")","repo_name":"mukund7296/Python-Brushup","sub_path":"45 Multithreding.py","file_name":"45 Multithreding.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"27189399094","text":"\"\"\"# Q1. Augmentation Implementation\n## Implement augmentation by finishing train_tfm in the code with image size of your choice.\n## Directly copy the following block and paste it on GradeScope after you finish the code\n### Your train_tfm must be capable of producing 5+ different results when given an identical image multiple times.\n### Your train_tfm in the report can be different from train_tfm in your training code.\n\n\"\"\"\n\ntrain_tfm = transforms.Compose([\n    # Resize the image into a fixed shape (height = width = 128)\n    transforms.Resize((128, 128)),\n    # You need to add some transforms here.\n    transforms.ToTensor(),\n])\n\n\"\"\"# Q2. 
Residual Implementation\n![](https://i.imgur.com/GYsq1Ap.png)\n## Directly copy the following block and paste it on GradeScope after you finish the code\n\n\"\"\"\n\nfrom torch import nn\n\n\nclass Residual_Network(nn.Module):\n def __init__(self):\n super(Residual_Network, self).__init__()\n\n self.cnn_layer1 = nn.Sequential(\n nn.Conv2d(3, 64, 3, 1, 1),\n nn.BatchNorm2d(64),\n )\n\n self.cnn_layer2 = nn.Sequential(\n nn.Conv2d(64, 64, 3, 1, 1),\n nn.BatchNorm2d(64),\n )\n\n self.cnn_layer3 = nn.Sequential(\n nn.Conv2d(64, 128, 3, 2, 1),\n nn.BatchNorm2d(128),\n )\n\n self.cnn_layer4 = nn.Sequential(\n nn.Conv2d(128, 128, 3, 1, 1),\n nn.BatchNorm2d(128),\n )\n self.cnn_layer5 = nn.Sequential(\n nn.Conv2d(128, 256, 3, 2, 1),\n nn.BatchNorm2d(256),\n )\n self.cnn_layer6 = nn.Sequential(\n nn.Conv2d(256, 256, 3, 1, 1),\n nn.BatchNorm2d(256),\n )\n self.fc_layer = nn.Sequential(\n nn.Linear(256 * 32 * 32, 256),\n nn.ReLU(),\n nn.Linear(256, 11)\n )\n self.relu = nn.ReLU()\n\n def forward(self, x):\n # input (x): [batch_size, 3, 128, 128]\n # output: [batch_size, 11]\n\n # Extract features by convolutional layers.\n x1 = self.cnn_layer1(x)\n\n x1 = self.relu(x1)\n\n x2 = self.cnn_layer2(x1)\n\n x2 = self.relu(x2)\n\n x3 = self.cnn_layer3(x2)\n\n x3 = self.relu(x3)\n\n x4 = self.cnn_layer4(x3)\n\n x4 = self.relu(x4)\n\n x5 = self.cnn_layer5(x4)\n\n x5 = self.relu(x5)\n\n x6 = self.cnn_layer6(x5)\n\n x6 = self.relu(x6)\n\n # The extracted feature map must be flatten before going to fully-connected layers.\n xout = x6.flatten(1)\n\n # The features are transformed by fully-connected layers to obtain the final logits.\n xout = self.fc_layer(xout)\n return xout\n","repo_name":"lordleeber/ML2022-Spring-pytorch","sub_path":"HW03/others.py","file_name":"others.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"13218203686","text":"# Note: converted from .ipynb format\nfrom matplotlib import pylab as plt\nfrom sklearn import linear_model\nimport matplotlib.cm as cm\nfrom scipy import misc\nimport numpy as np\nimport scipy as sp\nget_ipython().magic(u'matplotlib inline')\n\nTRAIN_PATH = \"data/train.txt\"\nTEST_PATH = \"data/test.txt\"\n\ndef parse_data(data_set_path):\n data, labels = [], []\n for line in open(data_set_path):\n im = misc.imread(line.strip().split()[0])\n data.append(im.reshape(2500,))\n labels.append(line.strip().split()[1])\n data, labels = np.array(data, dtype=float), np.array(labels, dtype=int)\n return (data, labels)\n\ndef display_image(image):\n plt.imshow(image.reshape(50,50), cmap = cm.Greys_r)\n plt.show()\n\n# B. Download and parse the train and test data sets\ntrain_data, train_labels = parse_data(TRAIN_PATH)\ntest_data, test_labels = parse_data(TEST_PATH)\n\n# Display an aribitrary image from the data set\ndisplay_image(train_data[19, :])\n\n# C. Compute and display the average face\nsum_face = np.sum(train_data, axis=0)\ntotal_faces = len(train_data)\naverage_face = np.divide(sum_face, total_faces)\n\ndisplay_image(average_face)\n\n# D. Compute mean subtraction\ndef get_mean_sub(data_set, average_face):\n mean_sub = []\n for x in data_set:\n mean_sub.append(np.diff([x,average_face], axis=0).flatten())\n return mean_sub\n\ntrain_mean_sub = get_mean_sub(train_data, average_face)\ntest_mean_sub = get_mean_sub(test_data, average_face)\n\ndisplay_image(train_mean_sub[19]) # display arbitrary image\n\n# E. 
Compute Eigenfaces\nu, s, v = sp.linalg.svd(np.asmatrix(train_mean_sub))\n\n# Display first 10 Eigenfaces\nfor eig in v[:10]: display_image(eig)\n\ndef rank_r_approx(u, s, v, r):\n    sigma = sp.linalg.diagsvd(s, len(u), len(v)) # Reconstruct sigma from singular value\n    x = (u[:,:r]).dot(sigma[:r,:r]).dot(v[:r,:])\n    return x\n\nlow_rank_approx = {}\nfor r in range(1,201):\n    low_rank_approx[r] = rank_r_approx(u, s, v, r)\n\ndef compute_approx_error(x, xr):\n    return np.linalg.norm(np.subtract(x, xr))\n\napprox_error = {}\nfor r in range(1,201):\n    approx_error[r] = compute_approx_error(train_mean_sub, low_rank_approx[r])\n\ndef plot_approx_error(r, error):\n    plt.scatter(r, error, c='g', alpha=0.5)\n    plt.title('Rank r Approximation Error')\n    plt.xlabel('r')\n    plt.ylabel('Frobenius Norm')\n    plt.show()\n    return\n\nplot_approx_error(approx_error.keys(), approx_error.values())\n\n# G. Eigenface Feature\ndef eigdenface_feature(x, v, r):\n    vt = np.transpose(v[:r,:])\n    f = x.dot(vt)\n    return f\n\nf_train, f_test = {}, {}\n\nfor r in range(1,201):\n    f_train[r] = eigdenface_feature(np.asmatrix(train_mean_sub), v, r)\n    f_test[r] = eigdenface_feature(np.asmatrix(test_mean_sub), v, r)\n\ndef classify(f_train, train_labels, f_test, test_labels, r): \n    # Fit the model using logistic regression\n    lr = linear_model.LogisticRegression(random_state=1).fit(f_train, train_labels)\n    \n    # Predict using test set\n    predicted_values = lr.predict(f_test)\n    accuracy = (predicted_values == test_labels).sum() / float(len(test_labels))  \n    return accuracy\n\naccuracy = {}\nfor r in range(1,201):\n    accuracy[r] = classify(f_train[r], train_labels, f_test[r], test_labels, r)\n\ndef plot_prediction_accuracy(x, y):\n    plt.scatter(x, y, c='g', alpha=0.5)\n    plt.title('Logistic Regression')\n    plt.xlabel('r')\n    plt.ylabel('Prediction Accuracy')\n    plt.xlim(0,200)\n    plt.show()\n\nplot_prediction_accuracy(accuracy.keys(), accuracy.values())\n","repo_name":"dspeiser/cornell-tech","sub_path":"machine-learning/assignment-2/facial recognition/facial_recognition.py","file_name":"facial_recognition.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"33579642052","text":"# Tendencias e Innovación en Tecnología Agricola (TEA)\ndef calcularSalario (horas, tarifa):\n    HORAS_SEMANALES = 40\n    horas_extra = 0\n    if (horas > HORAS_SEMANALES): \n        horas_extra = horas - HORAS_SEMANALES\n        calculo = (HORAS_SEMANALES * tarifa) + (horas_extra * (tarifa*1.5))\n    else:\n        calculo = horas * tarifa\n    return calculo\n\ntry:\n    horas = int (input(\"Ingrese número de horas trabajadas: \"))\n    tarifa = float(input(\"Ingrese tarifa por horas:\"))\n    salario = calcularSalario (horas, tarifa)\n    print(\"Salario: \", salario)\n\nexcept:\n    print(\"Error, ingresar un valor numérico\")","repo_name":"ayleennunez/TEA","sub_path":"lab004/parte1/calculo_salario.py","file_name":"calculo_salario.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"22353317166","text":"import streamlit as st\nimport pandas as pd\nimport requests\nfrom pymongo import MongoClient\nimport gridfs\nfrom db_methods import get_similarities, get_movies\nimport pickle\n\n# movies = get_movies()\n# similarity = get_similarities()\n\n### Old Methods. 
Replaced by GridFS & MongoDB Find in Real Time\nmovies = pickle.load(open('processed_movie_list.pkl','rb'))\nsimilarity = pickle.load(open('similarity.pkl','rb'))\n\nst.header('MongoDB Movie Recommender System')\n\nmovie_list = movies['title'].values\nselected_movie = st.selectbox(\n \"Type or select a movie from the dropdown\",\n movie_list\n)\n\napiUrl = \"https://api.themoviedb.org/3/movie/{}?api_key=8265bd1679663a7ea12ac168da84d2e8&language=en-US\"\n\n\ndef fetch_movie_details(movie_id):\n url = apiUrl.format(movie_id)\n data = requests.get(url)\n data = data.json()\n # st.write(data)\n # poster_path = data['poster_path']\n return data\n\ndef fetch_poster(movie_id):\n url = apiUrl.format(movie_id)\n data = requests.get(url)\n data = data.json()\n poster_path = data['poster_path']\n # st.write(poster_path)\n full_path = \"https://image.tmdb.org/t/p/w500/\" + poster_path\n return full_path\n\ndef recommendMovie(movie):\n index = movies[movies['title'] == movie].index[0]\n distances = sorted(list(enumerate(similarity[index])), reverse=True, key=lambda x: x[1])\n recommended_movie_names = []\n recommended_movie_posters = []\n\n movie_details = fetch_movie_details(movies.iloc[distances[0][0]].movie_id)\n for i in distances[0:6]:\n # fetch the movie poster\n\n movie_id = movies.iloc[i[0]].movie_id\n recommended_movie_posters.append(fetch_poster(movie_id))\n recommended_movie_names.append(movies.iloc[i[0]].title)\n\n return recommended_movie_names,recommended_movie_posters,movie_details\n\nif st.button('Search Movie'):\n\n recommended_movie_names,recommended_movie_posters,movie_details = recommendMovie(selected_movie)\n\n colName, colPoster = st.columns([1, 2]) # 1 col take 1 part 2nd col takes 3 parts\n\n with colName:\n st.image(recommended_movie_posters[0])\n with colPoster:\n st.subheader(recommended_movie_names[0])\n genre = []\n for genres in movie_details['genres']:\n genre.append(genres['name'])\n st.caption(f\"Genre: {', '.join(genre)} | Released: {movie_details['release_date']}\")\n st.write(movie_details['overview'])\n st.text(f\"TMDB Rating: {round(movie_details['vote_average'], 1)}\")\n st.progress(float(movie_details['vote_average'])/10)\n\n st.markdown(\"\"\"
\"\"\", unsafe_allow_html=True)\n st.subheader(\"Similar to what you searched\")\n\n col1, col2, col3, col4, col5 = st.columns([5, 5, 5, 5, 5])\n\n\n\n with col1:\n st.image(recommended_movie_posters[1])\n st.markdown(recommended_movie_names[1])\n\n with col2:\n st.image(recommended_movie_posters[2])\n st.markdown(recommended_movie_names[2])\n\n with col3:\n st.image(recommended_movie_posters[3])\n st.markdown(recommended_movie_names[3])\n\n with col4:\n st.image(recommended_movie_posters[4])\n st.markdown(recommended_movie_names[4])\n\n with col5:\n st.image(recommended_movie_posters[5])\n st.markdown(recommended_movie_names[5])\n\n","repo_name":"Overdrive141/movie-recommender-system","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"8013099417","text":"#!/usr/bin/env python\n\n'''\nAMiGA library fo the microbial growth curve class.\n'''\n\n__author__ = \"Firas S Midani\"\n__email__ = \"midani@bcm.edu\"\n\n# TABLE OF CONTENTS (2 functions and 1 class with 13 sub-functions)\n\n# linearize\n# maxValueArg\n#\n# Curve (CLASS)\n# \t__init__\n# compute_mse\n# log_to_linear\n# describe\n# AreaUnderCurve\n# CarryingCapacity\n# MaxGrowthRate\n# MinGrowthRate\n# StationaryDelta\n# LagTime\n# data\n# sample\n# plot\n\nimport numpy as np\nimport pandas as pd\n\nfrom scipy.stats import norm\n\nfrom libs.diauxie import detectDiauxie\nfrom libs.utils import getValue\n\n\ndef linearize(arr,baseline,floor=True,logged=True):\n '''\n Converts arr from log space to linear space.\n\n Args:\n arr (numpy.ndarray): typically log OD measurmenets over time\n baseline (float): should be baseline OD at first time point\n floor (float): if you want to shift curves to start at zero, pass True, else False\n\n Returns:\n self.predicted_OD (numpy.ndarray): same shape as input\n '''\n\n if arr is None: return None\n\n # add ln OD(0) and exponentiate\n if logged: arr = np.exp(arr+np.log(baseline))\n elif not logged: arr = arr + baseline\n\n # subtract OD(0)\n if floor: arr = arr - arr[0] \n\n return arr\n\n\ndef maxValueArg(x,y):\n '''\n Find the maximum value (of y) and argument (of x) that maximizes it.\n\n Args:\n \tx (np.ndarray)\n \ty (np.ndarray)\n\n Returns:\n maxValue (float): x value corresponding to maximum value of y\n maxArg (float): maximum value of y\n '''\n\n assert x.shape[0] == y.shape[0]\n assert x.shape[1] == 1\n assert y.shape[1] == 1\n\n ind = np.where(y==y.max())[0][0]\n maxValue = y[ind,0]\n maxArg = float(x[ind][0])\n\n return maxValue, maxArg\n\n\nclass GrowthCurve(object):\n\n def __init__(self,x=None,y=None,y0=None,y1=None,y2=None,cov0=None,cov1=None,baseline=1.0,name=None,logged=True):\n '''\n Data structure for a growth curves. This is primarily used for computing \n growth curve parameters and characteistics or converting curve and its fit to \n real space. 
\n\n Args:\n x (numpy.ndarray): independent variable N x D, where N is number of measurements, and D is number of dimesions\n y (nump.ndaray): dependent variable Nx1, typically log OD (input to GP model)\n y0 (numpy.ndarray): dependent variable N x 1, typically log OD (outupt of GP model)\n y1 (numpy.ndarray): first-order derivative of y0, typically d/dt log OD\n y2 (numpy.ndarray): second-order derivative of y1, typically d^2/dt^2 log OD\n cov0 (numpy.ndarray): covariance matrix for dependent variable y0\n cov1 (numpy.ndarray): covariance matrix for first-order derivatie y1\n baseline (float): the OD at time zero for dependent variable, y\n '''\n\n # verify data types\n for arg,value in [('x',x),('y0',y0),('y1',y1)]:\n assert type(value) == np.ndarray, \"{} must be a numpy ndaray\".format(arg)\n\n assert x.shape[1] == 1, \"x must be one-dimensional\"\n assert y0.shape == y1.shape, \"y0 and y1 must have the same shape\"\n\n if y2 is not None:\n assert type(y2) == np.ndarray, \"y2 must be a numpy ndarray\"\n assert y0.shape == y1.shape == y2.shape, \"y2 must have the same shape as y0 and y1\"\n\n # define attributes\n self.name = name\n self.baseline = baseline \n self.logged = logged\n self.x = x\n self.y = y\n self.y0 = y0\n self.y1 = y1\n self.y2 = y2\n self.cov0 = cov0\n self.cov1 = cov1\n\n # derive linear transformations of select attributes\n self.log_to_linear()\n\n # compute all growth parameters or characteristics\n self.describe()\n\n def compute_mse(self,pooled=False):\n '''\n Computes Mean Squared Error\n '''\n\n if pooled:\n self_data = self.data()\n y = self_data.GP_Input.values\n y_hat = [ii for ii in self_data.GP_Output.values if ~np.isnan(ii)]\n y_hat = y_hat * int(len(y)/len(y_hat))\n else:\n y = self.linear_input_raised\n y_hat = self.linear_output_raised\n\n mse = (1./y.shape[0]) * sum((y-y_hat)**2)\n\n return mse\n\n\n def log_to_linear(self):\n '''\n Converts actual and predicted log OD from log space to linear space.\n '''\n\n self.linear_input = linearize(self.y,baseline=self.baseline,floor=True,logged=self.logged)\n self.linear_input_raised = linearize(self.y,baseline=self.baseline,floor=False,logged=self.logged)\n self.linear_output = linearize(self.y0,baseline=self.baseline,floor=True,logged=self.logged)\n self.linear_output_raised = linearize(self.y0,baseline=self.baseline,floor=False,logged=self.logged)\n\n\n def describe(self):\n\n dx_ratio_min = getValue('diauxie_ratio_min')\n dx_ratio_varb = getValue('diauxie_ratio_varb')\n\n self.AreaUnderCurve()\n self.CarryingCapacity()\n self.MaxGrowthRate()\n self.MinGrowthRate()\n self.LagTime()\n self.StationaryDelta()\n \n params = {'auc_lin':self.auc_lin,\n 'auc_log':self.auc_log,\n 'k_lin':self.K_lin,\n 'k_log':self.K_log,\n 't_k':self.t_K,\n 'gr':self.gr,\n 'dr':self.dr,\n 'td':self.td,\n 't_gr':self.t_gr,\n 't_dr':self.t_dr,\n 'death_lin':self.death_lin,\n 'death_log':self.death_log,\n 'lagC':self.lagC,\n 'lagP':self.lagP}\n\n if self.y2 is not None: \n\n dx = detectDiauxie(self.x,self.y0,self.y1,self.y2,self.cov0,self.cov1,\n thresh=dx_ratio_min,varb=dx_ratio_varb)\n\n # describe all phases\n df_dx = []\n for idx,row in dx.iterrows():\n t0,t1 = row['t_left'],row['t_right'] # indices\n t0,t1 = [np.where(self.x==ii)[0][0] for ii in [t0,t1]] # time at indices\n if (t0 == 0) and (t1==(len(self.x)-1)):\n dx_params = params\n dx_params['t0'] = row['t_left']\n dx_params['tf'] = row['t_right']\n df_dx.append(pd.DataFrame(dx_params,index=[idx]))\n else:\n curve = GrowthCurve(x=self.x[t0:t1],\n 
y0=self.y0[t0:t1]-self.y0[t0],\n y1=self.y1[t0:t1],\n cov0=self.cov0[t0:t1,t0:t1],\n cov1=self.cov1[t0:t1,t0:t1])\n dx_params = curve.params\n dx_params['t0'] = row['t_left']\n dx_params['tf'] = row['t_right']\n df_dx.append(pd.DataFrame(dx_params,index=[idx]))\n\n df_dx = pd.concat(df_dx,axis=0)\n df_dx.columns = ['dx_{}'.format(ii) for ii in df_dx.columns]\n\n params.update({'diauxie':[1 if dx.shape[0] > 1 else 0][0],'df_dx':df_dx})\n\n self.params = params\n\n\n def AreaUnderCurve(self):\n '''\n Computes the Area Under the Curve (AUC).\n '''\n\n dt = np.mean(self.x[1:,0]-self.x[:-1,0]) # time interval (int)\n\n mu_Log = self.y0 # get log or linear OD\n mu_Lin = self.linear_output # get log or linear OD\n\n D_Log = np.repeat(dt,mu_Log.shape[0]).T # area under each interval (np.ndarray), size is (x.shape[0],)\n D_Lin = np.repeat(dt,mu_Lin.shape[0]).T # area under each interval (np.ndarray), size is (x.shape[0],) \n\n self.auc_lin = np.dot(D_Lin,mu_Lin)[0] # cumulative sum of areas\n self.auc_log = np.dot(D_Log,mu_Log)[0] # cumulative sum of areas\n\n\n def CarryingCapacity(self):\n '''\n Computes the maximum carrying capacity.\n ''' \n\n #print(self.x)\n self.K_log, self.t_K = maxValueArg(self.x,self.y0)\n self.K_lin, self.t_K = maxValueArg(self.x,self.linear_output)\n\n\n def MaxGrowthRate(self):\n '''\n Computes the maximum specific growth rate (gr) and generation doubling Time (td).\n '''\n\n self.gr, self.t_gr = maxValueArg(self.x,self.y1)\n\n # compu\n if self.gr == 0: self.td = np.inf\n else: self.td = (np.log(2.0))/self.gr\n\n\n def MinGrowthRate(self,after_max=True):\n '''\n Computes the minimum of the derivative, which will often be the maximum death rate (dr). \n '''\n\n x_K = int(np.where(self.x[:,0]==self.t_K )[0]) # index (not time) at maximum growth\n\n mu = self.y1\n if after_max: mu = mu[x_K:,0]\n\n x_dr = np.where(mu==mu.min())[0][0]\n t_dr = float(self.x[x_dr+x_K][0])\n\n minGrowthRate = mu[x_dr]\n\n self.dr, self.t_dr = minGrowthRate, t_dr\n\n\n def StationaryDelta(self):\n \t'''\n \tComputes difference between carrying capcaity and final OD.\n \t'''\n\n \tif self.K_lin <= 0:\n \t\tself.death_log = 0\n \t\tself.death_lin = 0\n \telse: \n \t\tself.death_log = np.abs(self.y0[-1][0] - self.K_log)\n \t\tself.death_lin = np.abs(self.linear_output[-1][0] - self.K_lin)\n\n\n def LagTime(self):\n '''\n Computes the lag time either the classical definition or a probabilistic definition.\n The former defines the lag time as the intersection with the axis parallel to time \n of the tangent intersecting the derivative of the latent function at maximum growth. \n This tangent has slope m equivalent to the maximum of the derivative of the latent.\n The latter defines lag time as the time at which the 95-percent credible interval of \n the growth rate (i.e. derivative of latent) deviates from zero. 
\n\n Args:\n mode (str): either 'Classical' or 'Probabilistic\n threshold (float): Confidence Interval, used for probabilistic inference of lag time.\n '''\n\n x = self.x\n y0 = self.y0\n y1 = self.y1\n cov1 = self.cov1\n\n # CLASSICAL MODE\n\n t_gr = self.t_gr # time at maximal growth rate\n x_gr = int(np.where(x[:,0]==t_gr)[0]) # index at maximal growth rate\n\n m1 = y1[x_gr] # slope at maximal growth rate\n m0 = y0[x_gr] # log OD at maximal growth rate\n\n if m1 == 0: lagC = np.inf # no growth, then infinite lag\n else: lagC = (t_gr - (m0/m1))[0]\n\n # PROBABILISTIC MODE\n\n confidence = getValue('confidence_adapt_time')\n\n prob = np.array([norm.cdf(0,m,np.sqrt(v)) for m,v in zip(y1[:,0],np.diag(cov1))])\n\n ind = 0\n while (ind < prob.shape[0]) and (prob[ind] > confidence): ind += 1\n\n if ind == prob.shape[0]: lagP = np.inf\n else: lagP = float(self.x[ind][0])\n\n self.lagC = lagC\n self.lagP = lagP\n\n\n def data(self):\n '''\n Summarizes the object's data, including estimates of best fit for data and its derivative using GPs.\n\n Args:\n sample_id (varies): can possibly be float, int, or str\n Returns:\n df (pandas.DataFrame): each row is a specific time-point. Identifying columns is Time (and possibly Sample_ID).\n Data columns include\n + OD_Fit (gp model fit of original data)\n + OD_Derivative (gp model fit of derivative, insensitive to y-value, i.e. whether OD is centered)\n + GP_Input (input to gp.GP() object), this is usually log-transformed and log-baseline-subtracted\n + GP_Output (output of gp.GP().predict()), hence also log-trasnformed and log-baseline-subtracted\n + OD_Growth_Data (GP_Input but converted to real OD and centered at zero)\n + OD_Growth_Fit (GP_Output but converted to real OD and centered at zero) \n '''\n\n gp_time = self.x.ravel()\n gp_input = self.y.ravel()\n gp_output = self.y0.ravel()\n gp_derivative = self.y1.ravel()\n\n od_growth_data = self.linear_input.ravel()\n od_growth_fit = self.linear_output.ravel()\n od_fit = self.linear_output_raised.ravel()\n\n data = [gp_time,\n gp_input,\n gp_output,\n gp_derivative,\n od_growth_data,\n od_growth_fit,\n od_fit,\n ]\n\n labels = ['Time',\n 'GP_Input',\n 'GP_Output',\n 'GP_Derivative',\n 'OD_Growth_Data',\n 'OD_Growth_Fit',\n 'OD_Fit',]\n\n data = pd.DataFrame(data,index=labels).T\n\n if self.name is not None:\n sample_id = pd.DataFrame([self.name]*data.shape[0],columns=['Sample_ID'])\n data = sample_id.join(data)\n\n return data\n\n\n def sample(self):\n '''\n Sample the posterior distribution of the latent function and its derivative \n n times, estimate growth parametes for each sample, then summarize with \n mean and standard deviation. 
\n '''\n\n n = getValue('n_posterior_samples')\n\n samples0 = np.random.multivariate_normal(self.y0.ravel(),self.cov0,n)\n samples1 = np.random.multivariate_normal(self.y1.ravel(),self.cov1,n)\n\n list_params = []\n\n for ii,y0,y1 in zip(range(n),samples0,samples1):\n\n y0_ii = y0[:,np.newaxis]\n y1_ii = y1[:,np.newaxis]\n\n curve_ii = GrowthCurve(x=self.x,y=self.y,y0=y0_ii,y1=y1_ii,\n cov0=self.cov0,cov1=self.cov1)\n list_params.append(curve_ii.params)\n\n df_params = pd.DataFrame(list_params)\n df_params_avg = df_params.mean()\n df_params_std = df_params.std()\n\n df_params_avg.index = ['mean({})'.format(ii) for ii in df_params_avg.index]\n df_params_std.index = ['std({})'.format(ii) for ii in df_params_std.index]\n\n self.posterior = pd.concat([df_params_avg,df_params_std]).to_dict()\n\n return self\n\n\n def plot(self,ax_arg=None):\n\n if not ax_arg: fig,ax = plt.subplots(2,1,figsize=[6,8],sharex=True)\n else: ax = ax_arg\n\n t = self.x.ravel()\n y = self.y.ravel()\n y0 = self.y0.rave()\n y1 = self.y1.ravel()\n\n xmin = 0\n xmax = int(np.ceil(t[-1]))\n\n ax[0].plot(t,y,lw=5,color=(0,0,0,0.65))\n ax[0].plot(t,y0,lw=5,color=(1,0,0,0.65))\n ax[1].plot(t,y1,lw=5,color=(0,0,0,0.65))\n\n [ii.set(fontsize=20) for ii in ax[0].get_xticklabels()+ax[0].get_yticklabels()]\n [ii.set(fontsize=20) for ii in ax[1].get_xticklabels()+ax[1].get_yticklabels()]\n\n ylabel = getValue('hypo_plot_y_label')\n ax[1].set_xlabel('Time',fontsize=20)\n ax[0].set_ylabel(ylabel ,fontsize=20)\n ax[1].set_ylabel('d/dt {}'.format(ylabel),fontsize=20)\n\n ax[0].set_xlim([xmin,xmax])\n\n if not ax_arg: return fig,ax\n else: return ax\n\n","repo_name":"firasmidani/amiga","sub_path":"libs/curve.py","file_name":"curve.py","file_ext":"py","file_size_in_byte":15035,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"38"} +{"seq_id":"2359623160","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 1 15:01:38 2021\n\n@author: User\n\"\"\"\n# import numpy as np\n# # import scipy as sc\nfrom LIBRARY.rpi_car import rpi_movement\nfrom LIBRARY.rpi_car_mag_calibration import write_calibration_file\nfrom LIBRARY.rpi_telemetry import mb_telemetry\nfrom LIBRARY.car_iekf import car_iekf\nfrom time import time, sleep\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_data_n_labels(x, ys, title = '', xlabel = '',\n ylabel = '', legend =None):\n fig = plt.figure(figsize = (10,10))\n \n for y in ys:\n plt.plot(x,y)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.grid()\n \n if legend != None:\n plt.legend(legend)\n\ndef measure_bias_n_std(mb, points = 10):\n gxs,gys, gzs, axs, ays, azs = [],[],[],[],[],[]\n biases = np.empty((1, 6), dtype = 'double')\n stds = biases.copy()\n g = 9.780318\n print(\"Measuring bias and std, amount of poinst: \", points)\n for i in range(points):\n end = '\\t'\n print(i, end = end)\n mb.telemetry()\n gxs.append(mb.gyrox)\n gys.append(mb.gyroy)\n gzs.append(mb.gyroz)\n \n axs.append(mb.accx)\n ays.append(mb.accy)\n azs.append(mb.accz)\n \n print()\n biases[:, :3] = np.array([np.mean(gxs), np.mean(gys), np.mean(gzs)])\n biases[:, 3:] = np.array([np.mean(axs), np.mean(ays), np.mean(azs)])\n \n stds[:, :3] = np.array([np.std(gxs), np.std(gys), np.std(gzs)])\n stds[:, 3:] = np.array([np.std(axs), np.std(ays), np.std(azs)])\n return biases, stds\n\ncar = rpi_movement()\ncar.init()\nmb = mb_telemetry()\nmb.init_all()\nmb.telemetry()\n\n\ntoCalibrate = False \nattempts = 3\nif toCalibrate:\n write_calibration_file(car, auto = False, 
attempts = attempts)\n \n\nmb.read_mag_calibration_file()\nprint(\"Magnetometer caibration:\") \nprint(mb.magx_offset, mb.magx_scale, mb.magy_offset, mb.magy_scale, mb.magz_offset, mb.magz_scale)\n\ndt = time()\ng = 9.780318\n\n\nsdata = 'Time x y z'\n\n# obstacle_threshold = 15\nvoltage_threshold = 6.7\nuv_counter = 0\n\nts, ps = [], []\n\nbiases, stds = measure_bias_n_std(mb)\n\n \nmb.telemetry()\ngyros = np.array([mb.gyrox, mb.gyroy, mb.gyroz])\naccs = np.array([mb.accx, mb.accy, mb.accz])\nkf = car_iekf(gyros, accs, \n gyro_bias= biases[:,:3], acc_bias=biases[:,3:],\n gyro_std = stds[:, :3], acc_std = stds[:, 3:]) \n\n#print(\"P: \", kf.P)\n\nprint(sdata)\n\ntoMove = True\ncounter = 0\n\ntmp = []\ndata = []\nwhile(1):\n if counter > 50:\n break\n try:\n counter += 1\n dt = time() - dt\n _dt = dt\n mb.time += dt\n dt = time()\n\n \n \n mb.telemetry()\n gyros = np.array([mb.gyrox, mb.gyroy, mb.gyroz])\n accs = np.array([mb.accx, mb.accy, mb.accz])\n \n biases = np.concatenate((gyros, accs))\n tmp.append(biases)\n bss = np.array(tmp)\n for i in range(biases.shape[0]):\n biases[i] = np.mean(bss.T[i])\n \n #print(biases)\n gyro_bias = biases[:3]\n acc_bias = biases[3:]\n ts.append(round(mb.time, 3))\n \n kf.propagate(gyros, accs, _dt)\n kf.update(gyros, accs, gyro_bias, acc_bias, _dt)\n \n \n \n x ,y ,z = kf.p[0][0], kf.p[0][1], kf.p[0][2]\n vx, vy, vz = kf.v[0][0], kf.v[0][1], kf.v[0][2]\n ps.append([x,y,z])\n\n sdata = \"{:.4f} {:.3f} {:.3f} {:.3f}\".format(_dt, x, y, z)\n print(sdata)\n #print(mb.gyrox, mb.gyroy, mb.gyroz, mb.accx, mb.accy, mb.accz)\n data.append([mb.gyrox, mb.gyroy, mb.gyroz, mb.accx, mb.accy, mb.accz])\n# sleep(0.1)\n #print(kf.x)\n \n try: \n if(mb.motors_voltage < voltage_threshold):\n uv_counter += 1\n if uv_counter > 4:\n motors = car.stop()\n rul = car.turn_center()\n print(\"Undervoltage!!\")\n break \n elif (toMove):\n motors = car.move_forward()\n rul = car.turn_right()\n uv_counter = 0 \n except Exception:\n pass\n\n \n\n\n except KeyboardInterrupt:\n break\n# car.stop()\n# car.turn_center()\n# \n# x = [ps[i][0][0] for i in range(len(ts))]\n# y = [ps[i][0][1] for i in range(len(ts))]\n# z = [ps[i][0][2] for i in range(len(ts))]\n# \n# plt.plot(ts, x)\n# plt.plot(ts, y)\n# plt.plot(ts, z)\n\ncar.stop()\ncar.turn_center() \n\nxs = [ps[i][0] for i in range(len(ts))]\nys = [ps[i][1] for i in range(len(ts))]\nzs = [ps[i][2] for i in range(len(ts))]\n\nplt.plot(ts, xs)\nplt.plot(ts, ys)\nplt.plot(ts, zs)\nplt.legend(['x(t)','y(t)','z(t)'])\nplt.grid()","repo_name":"Owluska/RPi_car1.0_soft","sub_path":"backup/orientation_iekf.py","file_name":"orientation_iekf.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"24262205654","text":"#!/usr/bin/python3\n\nimport sys\nimport argparse\nimport subprocess\n\nclass Payload:\n\tparams = {\"url\":None,\n\t\"value\":None,\n\t\"cookie\":None,\n\t\"file\":None\n\t}\n\t\n\tdef __str__(self):\n\t\ts = \"\"\n\t\tprint(self.params)\n\t\tfor key in self.params:\n\t\t\tif self.params[key] != None:\n\t\t\t\ts += (key + \": \" + self.params[key] + \"\\n\")\n\t\t\telse:\n\t\t\t\ts += (key + \": None\\n\")\n\t\t\n\t\treturn s\n\n\ndef create_payload(args):\n\tpayload = Payload()\n\tpayload.params[\"url\"] = args.url\n\tpayload.params[\"cookie\"] = args.c\n\t\n\ttry:\n\t\tpayload.params[\"file\"] = open(args.payload_file, \"r\")\n\texcept FileNotFoundError as msg:\n\t\tprint(msg)\n\t\tsys.exit(1)\n\t\n\treturn payload\n\n\ndef 
attack(payload):\n\tcmd = [\"curl\", \"-s\"]\n\t\n\tif payload.params[\"cookie\"]:\n\t\tcmd.append(\"--cookie\")\n\t\tcmd.append(payload.params[\"cookie\"])\n\t\n\tfor item in payload.params[\"file\"].readlines():\n\t\titem = item.rstrip()\n\t\tcmd.append(payload.params[\"url\"]+item)\n\t\tp1 = subprocess.run(cmd, stdout=subprocess.PIPE)\n\t\tp2 = subprocess.run([\"wc\", \"-w\"], input=p1.stdout, stdout=subprocess.PIPE)\n\t\toutput = p2.stdout.decode().rstrip()\n\t\tprint(\" * \" + item + \": \" + output)\n\t\tcmd.pop()\n\n\nif __name__ == \"__main__\":\n\t\n\tparser = argparse.ArgumentParser(prog=sys.argv[0])\n\tparser.add_argument(\"-c\", metavar=\"cookie\", help='set header Cookie')\n\tparser.add_argument(\"url\", help=\"url to attack\")\n\tparser.add_argument(\"payload_file\", help=\"file that contains different values of the parameter LFI\")\n\t\n\targs = parser.parse_args()\n\tpayload = create_payload(args)\n\tattack(payload)\n\t\n\tpayload.params[\"file\"].close()\n\t\n","repo_name":"Carlosalpha1/CTF-tools","sub_path":"local-file-inclusion/lfi-detect.py","file_name":"lfi-detect.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"37458643729","text":"def in_range(x, y):\n global n\n return 0 <= x < n and 0 <= y < n\n\ndef dfs(i, j):\n global grid, cnt\n dx = [0, 0, -1, 1]\n dy = [-1, 1, 0, 0]\n for k in range(4):\n nx, ny = i + dx[k], j + dy[k]\n if in_range(nx, ny) and grid[nx][ny]:\n grid[nx][ny] = 0\n cnt += 1\n dfs(nx, ny)\n\nif __name__ == \"__main__\":\n n = int(input().rstrip())\n grid = [list(map(int, input().rstrip().split(\" \"))) for _ in range(n)]\n people_cnt = []\n for i in range(n):\n for j in range(n):\n if grid[i][j]:\n grid[i][j] = 0\n cnt = 1\n dfs(i, j)\n people_cnt.append(cnt)\n print(len(people_cnt))\n people_cnt.sort()\n for p in people_cnt:\n print(p)","repo_name":"chaeyeongyun/codetree-TILs","sub_path":"231205/마을 구분하기/seperate-village.py","file_name":"seperate-village.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"} +{"seq_id":"72507109229","text":"#!/usr/bin/python3\nimport json\nimport unittest\n\nimport pandas as pd\n\nimport app.calculators as calc\n\n\nclass TestMenus(unittest.TestCase):\n # pylint: disable=too-many-statements\n def test_process_menus(self):\n dtf = pd.DataFrame({\n 'index_date': [\"2011-12-15\", \"2016-09-04\", \"2016-09-05\", \"2016-09-06\", \"2016-09-07\"],\n 'date_col': [\"2011-12-15\", \"2016-09-04\", \"2016-09-05\", \"2016-09-06\", \"2016-09-07\"]})\n dtf.set_index('index_date', inplace=True)\n\n train_dtf = calc.add_feature_special_meals(dtf.copy(), \"date_col\", \"%Y-%m-%d\", \"tests/data\")\n\n dict_special_dishes = {}\n with open(\"tests/data/calculators/menus.json\") as f_in:\n dict_special_dishes = json.load(f_in)\n\n self.assertEqual(train_dtf.shape, (5, 2 + len(list(dict_special_dishes.keys()))))\n\n for col in list(dict_special_dishes.keys()):\n self.assertTrue(col in train_dtf)\n\n pd.testing.assert_frame_equal(pd.read_csv(\"tests/fixtures/menus_dataset.csv\", index_col=0), train_dtf)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"nantesmetropole/school_meal_forecast_xgboost","sub_path":"tests/calculators/test_process_menus.py","file_name":"test_process_menus.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"38"} 
+{"seq_id":"37983634885","text":"import numpy as np\nimport os\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.feature_selection import RFE\nfrom skopt.space import Integer, Real, Categorical\nfrom skopt import BayesSearchCV\nfrom sklearn.metrics import accuracy_score, f1_score, confusion_matrix\nfrom datetime import datetime\nfrom math import ceil\nfrom pdb import set_trace as st\nimport csv\nimport re\nfrom time import time\n\n\n#################################################################################################################################################\n### Meta parameters\ndata_folder = './data/hand_crafted_features' # Path where the features are saved in .npy format\nlabel_folder = './data/segmented_data_frames' # Path where the features are saved in .npy format\nresult_path = './' # Path where to save csv file of results\nimu_rgbd_synchronisation_path = '../test scripts/offsets_df.xlsx' # Path to the file containing the offsets\nestimations_save_path = '../test scripts/rf_estimations/' # Path where to save the estimations of the classifier\nclassification = 'rf' # 'rf' or 'mlp' | NOTE: MLP not compatible with RFE\n#nb_trees = 100 # Initial number of decision trees in the random forest for RFE\n#max_depth = 10 # Initial maximum depth for the random forest for RFE\n# Parameter for the Bayesian/grid search\nsearch_params = {}\nif classification == 'rf':\n search_params['n_estimators'] = (50,200) # Parameters to explore for number of trees\n search_params['max_depth'] = (15,50) # Parameters to explore for max depth\n search_params['max_features'] = ['sqrt','log2',None] # Parameters to explore for the mex number of features to consider when looking for the best split\n search_params['class_weight'] = [None] # Parameters to explore for class weights\nelif classification == 'mlp':\n search_params['hidden_layer_sizes'] = [(100),(500,100,10),(50,50,50,50,50)]\n search_params['activation'] = ['relu']\n search_params['alpha'] = [1e-4,1e-6] # Coefficient of the L2 regulariser\n search_params['learning_rate'] = ['invscaling']\n \nsubjects_to_consider = ['032a','033','035','040','041','042']\nfeatures_to_consider = ['imu', 'mccay', 'marchi', 'marchi_prep', 'chambers', 'chambers_prep'] # List of elements in {'imu', 'mccay', 'marchi', 'marchi_prep', 'chambers', 'chamber_prep'}\nrfe = True # Enable or disable RFE\nstep = 1 # RFE step size\nsave_ranking = '../test scripts/rfe_rankings/' # If not empty, save the ranking of features obtained by RFE for each subject\nnb_features_to_select = 20 # Number or proportion of features to select with RFE\n#################################################################################################################################################\n\n### Main\nif __name__ == '__main__':\n\n assert len(features_to_consider) >= 1\n # Create table of correspondencies for subject indices: (key, value) = (id_int, id_str)\n subject_idx_table = {}\n for index in subjects_to_consider:\n char_present = re.search('[a-zA-Z]', index)\n if char_present is None:\n subject_idx_table[int(index)] = index\n else:\n subject_idx_table[int(index[:char_present.start()])] = index\n\n nb_subjects = len(subjects_to_consider)\n if nb_subjects <2:\n print('Not enough subjects to consider for the cross-validation! 
        exit()\n    accuracies = np.zeros(nb_subjects,dtype=np.float32)\n    f1_scores = np.zeros(nb_subjects,dtype=np.float32)\n    subject_indices = np.zeros(nb_subjects,dtype=int)\n\n    # Load relevant labels\n    label_list = os.listdir(label_folder)\n    label_list = [e for e in label_list if 'Labels.npy' in e]\n    labels_to_keep = []\n    for label_name in label_list:\n        pos = label_name.find(' - ')\n        subject_index = int(label_name[pos+3:pos+6]) # Subject index encoded in a 3-digit number\n        if subject_index in subject_idx_table.keys():\n            labels_to_keep += [label_name]\n\n    # Load all relevant features\n    file_list = os.listdir(data_folder)\n    features_to_load = {}\n\n    if 'imu' in features_to_consider:\n        feature_file_list = [e for e in file_list if 'HandCraftedFeaturesIMU.npy' in e]\n        feature_to_keep = []\n        for feature_name in feature_file_list:\n            pos = feature_name.find(' - ')\n            subject_index = int(feature_name[pos+3:pos+6]) # Subject index encoded in a 3-digit number\n            if subject_index in subject_idx_table.keys():\n                feature_to_keep += [feature_name]\n        feature_to_keep.sort()\n        features_to_load['imu'] = feature_to_keep\n\n    # The Kinect-based feature sets only differ by their file suffix, so load them in a single loop\n    kinect_feature_suffixes = {\n        'mccay': 'mccay2019.npy',\n        'marchi': 'marchi2019.npy',\n        'marchi_prep': 'marchi2019_with_prepro.npy',\n        'chambers': 'chambers2020.npy',\n        'chambers_prep': 'chambers2020_with_prepro.npy',\n    }\n    for feature_key, suffix in kinect_feature_suffixes.items():\n        if feature_key not in features_to_consider:\n            continue\n        feature_file_list = [e for e in file_list if suffix in e]\n        feature_to_keep = []\n        for feature_name in feature_file_list:\n            pos = feature_name.find('_moto_')\n            if feature_name[pos-1].isdigit():\n                subject_index = int(feature_name[pos-3:pos]) # Subject index encoded in a 3-digit number\n            else: # e.g. '032a'\n                subject_index = int(feature_name[pos-4:pos-1])\n            if subject_index in subject_idx_table.keys():\n                feature_to_keep += [feature_name]\n        feature_to_keep.sort()\n        features_to_load[feature_key] = feature_to_keep\n\n    # Load IMU-Kinect synchronisation information\n    imu_kinect_sync = pd.read_excel(imu_rgbd_synchronisation_path)\n\n    # Load and synchronise the data and labels for all subjects\n    subject_data = {}\n\n    for subject_index in subject_idx_table.keys():\n\n        print('Preparing data and labels for subject %d ...' % subject_index)\n\n        # Get associated label file for the current subject\n        label_list = [e for e in labels_to_keep if str(subject_index).zfill(3)+' - Labels.npy' in e]\n        label_file_name = label_list[0]\n        labels = np.load(os.path.join(label_folder,label_file_name)) \n\n        # Retrieve data\n        data_dictionary = {}\n        for key in features_to_load.keys():\n            # Look for the correct file path to be loaded\n            paths = features_to_load[key]\n            #file_to_load = [e for e in paths if ' - '+str(subject_index).zfill(3)+' - ' in e or '_'+str(subject_index).zfill(3)+'_' in e]\n            file_to_load = [e for e in paths if ' - '+str(subject_index).zfill(3)+' - ' in e or '_'+subject_idx_table[subject_index]+'_' in e]\n            # ### DEBUG\n            # t = np.load(os.path.join(data_folder,file_to_load[0]),allow_pickle=True)\n            # if np.isnan(t).any():\n            #     st()\n            data_dictionary[key] = np.load(os.path.join(data_folder,file_to_load[0]),allow_pickle=True)\n\n        # NOTE: IMU and RGBD feature arrays do not have the same size, and need to be synchronised\n        # Look for the IMU and Kinect offset\n        # NOTE: Kinect recording not necessarily shorter than IMU/iPhone\n        b = imu_kinect_sync['Filename Kinect'].str.contains('_'+subject_idx_table[subject_index]+'.more')\n        subject_info = imu_kinect_sync.loc[b]\n        offset = subject_info['First synced timestamp Iphone'].iloc[0] # in ms\n        frame_offset = subject_info['Offset Frames'].iloc[0]\n\n        # Remove unaligned segments at the beginning of the recording\n        nb_frames_to_discard_begin = ceil(offset/1000)\n        kinect_features = [e for e in features_to_consider if 'imu' not in e]\n        if frame_offset < 0: # Kinect starts later than iPhone+IMU\n            if 'imu' in features_to_consider:\n                data_dictionary['imu'] = data_dictionary['imu'][nb_frames_to_discard_begin:]\n            labels = labels[nb_frames_to_discard_begin:]\n        elif frame_offset > 0: # iPhone+IMU starts later than Kinect\n            for key in kinect_features:\n                data_dictionary[key] = data_dictionary[key][nb_frames_to_discard_begin:]\n        # Remove unaligned segments at the end of the recording\n        if len(labels) > len(data_dictionary[kinect_features[0]]): # More IMU segments remaining than Kinect\n            nb_frames_to_discard_end = len(labels)-len(data_dictionary[kinect_features[0]])\n            if 'imu' in features_to_consider:\n                data_dictionary['imu'] = data_dictionary['imu'][:-nb_frames_to_discard_end]\n            labels = labels[:-nb_frames_to_discard_end]\n        elif len(labels) < len(data_dictionary[kinect_features[0]]): # More Kinect segments remaining than IMU\n
            nb_frames_to_discard_end = len(data_dictionary[kinect_features[0]])-len(labels)\n            for key in kinect_features:\n                data_dictionary[key] = data_dictionary[key][:-nb_frames_to_discard_end]\n\n        array_list = []\n        for feature in features_to_consider:\n            array_list += [data_dictionary[feature]]\n        data = np.concatenate(array_list,axis=1)\n        subject_data[subject_index] = (data,labels) \n    \n    # Perform Leave-One-Subject-Out cross validation\n    subject_counter = 0\n\n    for subject_index in subject_idx_table.keys():\n\n        print('Preparing the data for subject %d ...' % subject_index)\n        start = time()\n        # Get associated data and label files for the test subject\n        test_labels = subject_data[subject_index][1]\n        test_data = subject_data[subject_index][0]\n\n        # Build training set\n        # nb_train_examples = 0 # Compute the size of the training data for faster numpy initialisation\n        train_subjects = [e for e in subject_idx_table.keys()]\n        train_subjects.remove(subject_index)\n        train_data_list = []\n        train_label_list = []\n\n        for idx in train_subjects:\n            train_data_list += [subject_data[idx][0]]\n            train_label_list += [subject_data[idx][1]]\n\n        train_data = np.concatenate(train_data_list,axis=0)\n        train_labels = np.concatenate(train_label_list)\n        \n        # Remove invalid examples from the sets\n        train_idx_to_keep = [e!=-1 for e in train_labels]\n        test_idx_to_keep = [e!=-1 for e in test_labels]\n        train_data = train_data[train_idx_to_keep]\n        train_labels = train_labels[train_idx_to_keep]\n        test_data = test_data[test_idx_to_keep]\n        test_labels = test_labels[test_idx_to_keep]\n\n        # Randomly shuffle training labels and data in unison\n        random_permutation = np.random.permutation(len(train_labels))\n        train_data = train_data[random_permutation]\n        train_labels = train_labels[random_permutation]\n\n        # Replace any NaN value by 0\n        if np.isnan(train_data).any():\n            np.nan_to_num(train_data,copy=False,nan=0)\n        if np.isnan(test_data).any():\n            np.nan_to_num(test_data,copy=False,nan=0)\n\n        end = time()\n        print('Data prepared in %.2f seconds' % (end-start))\n\n        # Apply RFE\n        if rfe:\n            if classification == 'rf':\n                start = time()\n                rfe_classifier = RandomForestClassifier() \n                print('Applying RFE for subject %d ...' % subject_index)\n                selector = RFE(rfe_classifier,step=step,n_features_to_select=nb_features_to_select)\n                selector = selector.fit(train_data,train_labels)\n                if len(save_ranking) > 0:\n                    np.save(os.path.join(save_ranking,classification+'_rfe_ranking_'+str(subject_index).zfill(3)+'.npy'),selector.ranking_)\n                train_data = train_data[:,selector.support_]\n                test_data = test_data[:,selector.support_]\n                end = time()\n                print('RFE performed in %.2f seconds' % (end-start))\n            elif classification == 'mlp':\n                print('RFE not implemented for MLP classifier -> skipping this part')\n\n        # Bayesian hyper-parameter optimisation\n        start = time()\n        if classification == 'rf':\n            print('Starting Bayesian hyper-parameter optimisation for subject %d ...' % subject_index)\n            search = BayesSearchCV(estimator=RandomForestClassifier(),search_spaces=search_params)\n        elif classification == 'mlp':\n            print('Starting grid-search hyper-parameter optimisation for subject %d ...' % subject_index)\n            #search = BayesSearchCV(estimator=MLPClassifier(),search_spaces=search_params) # NOTE: BayesSearchCV does not work with tuple configurations (e.g. hidden_layer_sizes)\n
            search = GridSearchCV(estimator=MLPClassifier(),param_grid=search_params)\n        search.fit(train_data,train_labels)\n        best_params = search.best_params_\n        print(search.best_score_)\n        print(best_params)\n        end = time()\n        print('Hyper-parameter search performed in %.2f seconds' % (end-start))\n\n        # Train classifier with optimal parameters returned by the hyper-parameter search\n        start = time()\n        print('Training classifier for subject %d ...' % subject_index)\n        if classification == 'rf':\n            classifier = RandomForestClassifier(\n                n_estimators=best_params['n_estimators'],\n                max_depth=best_params['max_depth'],\n                class_weight=best_params['class_weight'],\n                max_features=best_params['max_features'])\n        elif classification == 'mlp':\n            classifier = MLPClassifier(\n                hidden_layer_sizes=best_params['hidden_layer_sizes'],\n                activation=best_params['activation'],\n                alpha=best_params['alpha'],\n                learning_rate=best_params['learning_rate'])\n\n        classifier.fit(train_data,train_labels)\n        end = time()\n        print('Classifier trained in %.2f seconds' % (end-start))\n\n        # Get classifier predictions and save them\n        estimations = classifier.predict(test_data)\n        np.save(os.path.join(estimations_save_path,classification+'_estimations_subject'+str(subject_index).zfill(3)+'.npy'),estimations)\n\n        # Compute evaluation metrics\n        current_accuracy = accuracy_score(test_labels,estimations)\n        current_af1 = f1_score(test_labels,estimations,average='macro')\n        accuracies[subject_counter] = 100*current_accuracy\n        f1_scores[subject_counter] = 100*current_af1\n        subject_indices[subject_counter] = subject_index # Keep track of the subject ID for the results file\n        conf_mat = confusion_matrix(test_labels, estimations)\n        print('    Accuracy = %.2f %%' % (100*current_accuracy))\n        print('    AF1 = %.2f %%' % (100*current_af1))\n        print(conf_mat)\n\n        subject_counter += 1\n\n    # Print results\n    print('')\n    print('######################################################################')\n    print('Average accuracy: %.2f +- %.2f %%' % (np.mean(accuracies),np.std(accuracies)))\n    print('Average AF1: %.2f +- %.2f %%' % (np.mean(f1_scores),np.std(f1_scores)))\n    print('######################################################################')\n    print('')\n\n    # Write results in csv file\n    current_time = datetime.now()\n    file_name = str(current_time.year)+str(current_time.month).zfill(2)+str(current_time.day).zfill(2)+'_'+str(current_time.hour).zfill(2)+\\\n        str(current_time.minute).zfill(2)+str(current_time.second).zfill(2)+' - IMU LOSOCV Results.csv'\n\n    with open(result_path+file_name,'w',newline='') as f:\n        writer = csv.writer(f)\n        writer.writerow(['Subject ID','Accuracy','F1 score'])\n        for idx in range(len(accuracies)):\n            writer.writerow([str(subject_indices[idx]).zfill(3),accuracies[idx],f1_scores[idx]])\n        writer.writerow(['Average',np.mean(accuracies),np.mean(f1_scores)])\n        writer.writerow(['Std',np.std(accuracies),np.std(f1_scores)])\n\n\n","repo_name":"Frederic-Li-Hanchen/ScreenFM-test-scripts","sub_path":"subject_classification/classification_IMU_video.py","file_name":"classification_IMU_video.py","file_ext":"py","file_size_in_byte":18311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"22743221837","text":"\"\"\"\nSimulate changes in variance in the dDR space for a variety of different conditions\n    modulating independent noise only\n    modulating shared noise only\n    each for a variety of different numbers of neurons (always keep trial n high)\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom charlieTools.dim_reduction import TDR\n
from charlieTools.plotting import compute_ellipse\n\nnp.random.seed(123)\n\nnNeurons = np.arange(1, 100, 2)\nNtrials = 1000\n\ncols = ['low_tot_var', 'low_tdr_var', 'high_tot_var', 'high_tdr_var']\nindRes = pd.DataFrame(index=nNeurons, columns=cols)\nsharRes = pd.DataFrame(index=nNeurons, columns=cols)\n\nfor Ndim in nNeurons:\n    var_ratio = 1.2 # pc1 has var_ratio times the amplitude of the dU-aligned noise dim\n\n    # simulated data\n    u1 = 4\n    u2 = 4\n    u = np.stack((np.random.poisson(u1, Ndim), np.random.poisson(u2, Ndim)))\n\n    # make two dimensional noise:\n    # one large dim ~orthogonal to dU and one smaller dim ~ parallel to dU\n    dU = u[[1], :] - u[[0], :]\n    dU = dU / np.linalg.norm(dU)\n\n    diff_cor = dU + np.random.normal(0, 0.001, dU.shape)\n    diff_cor = diff_cor / np.linalg.norm(diff_cor) * 2\n    pc1 = np.random.normal(0, 1, dU.shape)\n    pc1 = (pc1 / np.linalg.norm(pc1)) * 2 * var_ratio\n\n    noise_axis = pc1\n    evecs = np.concatenate((diff_cor, pc1), axis=0)\n    cov = evecs.T.dot(evecs)\n\n    # simulate full data matrix\n    _X = np.random.multivariate_normal(np.zeros(Ndim), cov, Ntrials)\n    X1 = _X + u[0, :]\n    X2 = _X + u[1, :]\n    X_raw = np.stack((X1, X2)).transpose([-1, 1, 0])\n\n    # add random noise to data matrix to make things behave well\n    X_raw += np.random.normal(0, 0.5, X_raw.shape)\n\n    # simulate four different datasets:\n    # one vs. two differ in strength of INDEPENDENT NOISE\n    # three vs. four differ in strength of SHARED NOISE (pc1 / pc2)\n\n\n    # ==================== modulate independent variance ======================\n    ind_noise = np.random.normal(0, 0.5, X_raw.shape)\n    x1_noise = ind_noise * 0.1\n    x2_noise = ind_noise * 5\n\n    # dataset one\n    X1 = X_raw + x1_noise\n    # dataset two\n    X2 = X_raw + x2_noise\n\n    # fit dDR to all data\n    xall = np.concatenate((X1, X2), axis=1)\n    tdr = TDR(tdr2_init=noise_axis)\n    tdr.fit(xall[:, :, 0].T, xall[:, :, 1].T)\n\n    x11 = X1[:,:,0].T.dot(tdr.weights.T)\n    x12 = X1[:,:,1].T.dot(tdr.weights.T)\n    x21 = X2[:,:,0].T.dot(tdr.weights.T)\n    x22 = X2[:,:,1].T.dot(tdr.weights.T)\n\n    # low var\n    low_tot_var = round(np.var(X1, axis=(1,2)).sum(), 2)\n    low_tdr_var = round(np.var(np.stack([x11, x12]), axis=(0, 1)).sum(), 2)\n\n    # high var\n    high_tot_var = round(np.var(X2, axis=(1,2)).sum(), 2)\n    high_tdr_var = round(np.var(np.stack([x21, x22]), axis=(0, 1)).sum(), 2)\n\n    indRes.at[Ndim, 'low_tot_var'] = low_tot_var\n    indRes.at[Ndim, 'low_tdr_var'] = low_tdr_var\n    indRes.at[Ndim, 'high_tot_var'] = high_tot_var\n    indRes.at[Ndim, 'high_tdr_var'] = high_tdr_var\n\n    # ===================== modulate shared variance ===========================\n    evecs = np.concatenate((diff_cor, pc1), axis=0)\n    cov = evecs.T.dot(evecs)\n    _X = np.random.multivariate_normal(np.zeros(Ndim), cov, Ntrials)\n    _X1 = _X + u[0, :]\n    _X2 = _X + u[1, :]\n    X1 = np.stack((_X1, _X2)).transpose([-1, 1, 0])\n    # add random noise to data matrix to make things behave well\n    X1 += np.random.normal(0, 0.5, X1.shape)\n\n    # double the noise axis amplitudes (i.e. four times the shared variance)\n    evecs = np.concatenate((diff_cor*2, pc1*2), axis=0)\n    cov = evecs.T.dot(evecs)\n    _X = np.random.multivariate_normal(np.zeros(Ndim), cov, Ntrials)\n    _X1 = _X + u[0, :]\n    _X2 = _X + u[1, :]\n    X2 = np.stack((_X1, _X2)).transpose([-1, 1, 0])\n    # add random noise to data matrix to make things behave well\n    X2 += np.random.normal(0, 0.5, X2.shape)\n\n    # fit dDR to all data\n    xall = np.concatenate((X1, X2), axis=1)\n    tdr = TDR(tdr2_init=noise_axis)\n    tdr.fit(xall[:, :, 0].T, xall[:, :, 1].T)\n\n    x11 = X1[:,:,0].T.dot(tdr.weights.T)\n    x12 = X1[:,:,1].T.dot(tdr.weights.T)\n    x21 = X2[:,:,0].T.dot(tdr.weights.T)\n
    x22 = X2[:,:,1].T.dot(tdr.weights.T)\n\n    # low var\n    low_tot_var = round(np.var(X1, axis=(1,2)).sum(), 2)\n    low_tdr_var = round(np.var(np.stack([x11, x12]), axis=(0, 1)).sum(), 2)\n\n    # high var\n    high_tot_var = round(np.var(X2, axis=(1,2)).sum(), 2)\n    high_tdr_var = round(np.var(np.stack([x21, x22]), axis=(0, 1)).sum(), 2)\n\n    sharRes.at[Ndim, 'low_tot_var'] = low_tot_var\n    sharRes.at[Ndim, 'low_tdr_var'] = low_tdr_var\n    sharRes.at[Ndim, 'high_tot_var'] = high_tot_var\n    sharRes.at[Ndim, 'high_tdr_var'] = high_tdr_var\n\n\n\nf, ax = plt.subplots(2, 1, figsize=(6, 8))\n\nax[0].set_title(r\"$\\Delta$ Total population variance\")\nax[0].plot(nNeurons, indRes['high_tot_var'] - indRes['low_tot_var'], label='Mod. independent noise')\nax[0].plot(nNeurons, sharRes['high_tot_var'] - sharRes['low_tot_var'], label='Mod. shared noise')\nax[0].legend()\n\nax[1].set_title(r\"$\\Delta dDR$ space variance\")\nax[1].plot(nNeurons, indRes['high_tdr_var'] - indRes['low_tdr_var'], label='Mod. independent noise')\nax[1].plot(nNeurons, sharRes['high_tdr_var'] - sharRes['low_tdr_var'], label='Mod. shared noise')\nax[1].legend()\n\nax[1].set_xlabel('Number of neurons')\n\nf.tight_layout()\n\nplt.show()","repo_name":"crheller/nat_pup_ms","sub_path":"simulated_decoding/ind_vs_shared_noise2.py","file_name":"ind_vs_shared_noise2.py","file_ext":"py","file_size_in_byte":5243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"37765957308","text":"from flask import Blueprint, request, jsonify\nfrom helpers import token_required\nfrom models import db, Storm, storm_schema, storms_schema\n\napi = Blueprint('api', __name__, url_prefix='/api')\n\n@api.route('/storms', methods=['POST'])\n@token_required\ndef add_storm(current_user_token):\n    type_storm = request.json['type_storm']\n    severity = request.json['severity']\n    date_happened = request.json['date_happened']\n    damage_cost = request.json['damage_cost']\n    user_token = current_user_token.token\n\n    storm = Storm(type_storm=type_storm, severity=severity, user_token=user_token, date_happened=date_happened, damage_cost=damage_cost)\n\n    db.session.add(storm)\n    db.session.commit()\n\n    response = storm_schema.dump(storm)\n    return jsonify(response)\n\n\n@api.route('/storms', methods = ['GET'])\n@token_required\ndef get_all_storms(current_user_token):\n    a_user = current_user_token.token\n    storms = Storm.query.filter_by(user_token = a_user).all()\n\n    response = storms_schema.dump(storms)\n    return jsonify(response)\n\n@api.route('/storms/<id>', methods = ['GET'])\n@token_required\ndef get_single_storm(current_user_token, id):\n    storm = Storm.query.get(id)\n\n    response = storm_schema.dump(storm)\n    return jsonify(response)\n\n@api.route('/storm/<id>', methods = ['POST', 'PUT'])\n@token_required\ndef update_storm_info(current_user_token, id):\n    storm = Storm.query.get(id)\n    storm.type_storm = request.json['type_storm']\n    storm.severity = request.json['severity']\n    storm.date_happened = request.json['date_happened']\n    storm.damage_cost = request.json['damage_cost']\n    storm.user_token = current_user_token.token\n\n    db.session.commit()\n\n    response = storm_schema.dump(storm)\n    return jsonify(response)\n\n@api.route('/storms/<id>', methods = ['DELETE'])\n@token_required\ndef delete_storm(current_user_token, id):\n    storm = Storm.query.get(id)\n\n    db.session.delete(storm)\n    db.session.commit()\n\n    response = storm_schema.dump(storm)\n
    return jsonify(response)","repo_name":"SJones108/FlaskWeather","sub_path":"app/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"39165747314","text":"import datetime\n\ndate = input('Please enter date DD/MM/YYYY: ') # Input date\nd1 = datetime.datetime.strptime(date, '%d/%m/%Y').date() # Convert date to datetime format\nd2 = datetime.datetime.strptime('15/10/2022', '%d/%m/%Y').date() # Required date in datetime format\nage = int(input('Please enter your age: ')) # Input age\nrisk = input('clinical risk group (true/false): ') # Input risk\n\n\nif age >= 18 and age <= 64 and risk == \"true\": # Check if age is between 18 and 64 and risk is true\n    print('You are eligible for a free flu jab') \nelif age >= 50 and age <= 64 and risk == \"false\" and d1 > d2: # Check if age is between 50 and 64 and risk is false and date is after 15/10/2022\n    print('You are eligible for a free flu jab')\nelif age >= 65: # Check if age is 65 or over\n    print('You are eligible for a free flu jab')\nelse:\n    print('You can get a Flu Jab for £15.99')","repo_name":"sarochey11/fornrp","sub_path":"Python/fluJab.py","file_name":"fluJab.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"38"}
+{"seq_id":"20675634041","text":"\"\"\"\n\nTests on Python 3 lambdas\n\n\"\"\"\nimport os\n\n\ndef lambda_simple():\n    \"\"\"\n    Simple lambda example with map\n\n    map(function_to_apply, list_of_inputs)\n\n    :return:\n    \"\"\"\n    items = [1, 2, 3, 4, 5]\n    squared = []\n    for i in items:\n        squared.append(i ** 2)\n\n    squared2 = list(map(lambda x: x ** 2, items))\n\n    print(squared == squared2)\n\n\ndef star(f):\n    \"\"\"\n    Return a lambda function that takes arguments with a *\n    The * is just Pythons way of telling a function\n    \"please use the elements of whatever follows as your arguments and not the thing itself!\"\n\n    :param f:\n    :return:\n    \"\"\"\n    return lambda args: f(*args)\n\n\ndef lambda_multiple_variable():\n    \"\"\"\n    Multiple variable lambda\n    \"\"\"\n    # When you want to parse two elements in a lambda\n    points = [(1, 2), (2, 3)]\n    result = list(map(lambda p: p[0] * p[0] + p[1] * p[1], points))\n    print(result)\n\n    result = list(map(star(lambda x, y: (x * x + y * y)), points))\n    print(result)\n\n\ndef lambda_2to3():\n
    \"\"\"\n    As tuple parameters are used by lambdas because of the single expression limitation, they must also be supported. This is done by having the expected sequence argument bound to a single parameter and then indexing on that parameter:\n\n    lambda (x, y): x + y\n\n    will be translated into:\n\n    lambda x_y: x_y[0] + x_y[1]\n    :return:\n    \"\"\"\n    # Small demonstration of the translated Python 3 form\n    add = lambda x_y: x_y[0] + x_y[1]\n    print(add((1, 2)))\n\n\ndef lambda_indexing():\n    \"\"\"\n    lambda with indexing, value multiplied by its index\n    \"\"\"\n    a = [1, 3, 5, 6, 8]\n    am = map(lambda i_el: i_el[0] * i_el[1], enumerate(a))\n    print(list(am))\n\n\ndef lambda_dictionary():\n    \"\"\"\n    Lambda in dictionary\n    \"\"\"\n    t = {'A': 2, 'B': 1}\n    ts = sorted(t.items(), key=lambda k_v: k_v[1])\n    print(ts)\n\n\ndef zip_instead():\n    \"\"\"\n    zip takes (in the simplest case) two lists and \"zips\" them: zip([1,2,3], [4,5,6]) will become [(1,4), (2,5), (3,6)].\n    So if you consider the outer list to be a matrix and the inner tuples to be the rows,\n    that's a transposition (ie., we turned the rows to columns).\n    \"\"\"\n    # Using zip instead of lambda\n    result = zip([1, 2, 3], [4, 5, 6])\n    print(list(result))\n    # The equivalent\n    lis = [[1, 2, 3], [4, 5, 6]]\n    result = zip(*lis)\n    print(list(result))\n\n\ndef zip_demo():\n    \"\"\"\n    How zip works\n\n    :return:\n    \"\"\"\n    coordinate = ['x', 'y', 'z']\n    value = [3, 4, 5, 0, 9]\n\n    result = zip(coordinate, value)\n    result_list = list(result)\n    print(result_list)\n\n    c, v = zip(*result_list)\n    print('c =', c)\n    print('v =', v)\n\n\ndef filter_in_dict(points):\n    \"\"\"\n    Return a copy of the dict without the entries whose value is None\n\n    :param points:\n    :return:\n    \"\"\"\n    return {k: v for k, v in points.items() if v is not None}\n\n\ndef lambda_in_a_map_and_a_dict():\n    \"\"\"\n    filter in a list of dict\n    \"\"\"\n    d = [{'test': None, 'dope': 'yeah'},\n         {'test': 2, 'dope': 'yahou'},\n         {'test': 3, 'dope': None}]\n\n    d_s = map(lambda x: filter_in_dict(x), d)\n\n    print(list(d_s))\n\n\ndef strip_inline_lambda():\n    languages = \"French, English, Spanish\"\n    split = ({x.strip() for x in languages.split(\",\")} if languages else None)\n    print(split)\n    empty_variable = os.getenv(\"NOTHING\")\n    split = ({x.strip() for x in empty_variable.split(\",\")} if empty_variable else {'English'})\n    print(split)\n\n\nclass Difference:\n    def __init__(self, list_of_numbers):\n        self.__elements = list_of_numbers\n        self.maximum_difference = 0\n\n    def compute_difference_fancy(self):\n        \"\"\"\n        :return: maximum difference between absolute of all possible subtraction between elements in the list\n        \"\"\"\n        import itertools\n        from functools import reduce\n        combinations = list(itertools.combinations(self.__elements, 2))\n        differences = list(map(lambda e: abs(e[0] - e[1]), combinations))\n        self.maximum_difference = reduce(lambda a, b: a if a > b else b, differences)\n\n    def compute_difference(self):\n        self.maximum_difference = max(self.__elements) - min(self.__elements)\n\n\nif __name__ == \"__main__\":\n    lambda_simple()\n    lambda_dictionary()\n    lambda_indexing()\n    lambda_multiple_variable()\n    zip_instead()\n    zip_demo()\n    strip_inline_lambda()\n","repo_name":"sylhare/Python","sub_path":"src/sandbox/Dev_lambda.py","file_name":"Dev_lambda.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"38"}
+{"seq_id":"17040201418","text":"from random import random\nfrom unittest import TestCase\n\nfrom entity import Entity\nfrom entitylist import EntityList\nfrom constants import T_METHOD_NONE\n\n\nclass TestEntity(TestCase):\n\n    def test_entity_scores_one(self):\n        \"\"\"Check that an Entity has a default 
score of 1.\"\"\"\n test_object = Entity(\"A\")\n self.assertEqual(1, test_object.score, \"Default Entity score is 1\")\n\n def test_entity_scores_is_settable(self):\n \"\"\"Check that an Entity score can be set.\"\"\"\n set_score = 99.3\n test_object = Entity(\"A\")\n test_object.score = set_score\n self.assertEqual(set_score, test_object.score,\n \"Entity score should be 99.3\")\n\n def test_entity_has_transformed_score_one(self):\n \"\"\"Check that an Entity has a default transformed score of 0.\"\"\"\n test_object = Entity(\"A\")\n self.assertEqual(0, test_object.transformed_score(),\n \"Default Entity transformed score is 0\")\n\n def test_entity_transformed_score_uses_adjusted_scores_dict(self):\n \"\"\"Check that an Entity transformed score is derived from the\n adjusted_scores dictionary.\"\"\"\n set_score = (random() * 1000) + 1.0\n transform_method = \"Bongo\"\n test_object = Entity(\"A\")\n test_object.adjusted_scores[transform_method] = set_score\n self.assertEqual(set_score,\n test_object.transformed_score(transform_method),\n \"Entity score should be %f\" % set_score)\n\n def test_entity_transformed_score_default_for_incorrect_method_name(self):\n \"\"\"Check that an Entity transformed score of zero is returned for a\n non-calculated method\"\"\"\n set_score = (random() * 1000) + 1.0\n transform_method = \"Bongo\"\n wrong_transform_method = \"wrong\"\n test_object = Entity(\"A\")\n test_object.adjusted_scores[transform_method] = set_score\n self.assertEqual(0,\n test_object.transformed_score(wrong_transform_method),\n \"Entity score should be %f\" % 0)\n\n def test_entity_transformed_score_defaults_to_key_none(self):\n \"\"\"Check that when we ask for a transformed_score with no specifier\n as to the method used, we get the value back from the dictionary\n using key T_METHOD_NONE\"\"\"\n set_score = (random() * 1000) + 1.0\n test_object = Entity(\"A\")\n test_object.adjusted_scores[T_METHOD_NONE] = set_score\n self.assertEqual(set_score,\n test_object.transformed_score(),\n \"Entity score should be %f\" % set_score)\n\n def test_entity_owned_list_is_empty_by_default(self):\n \"\"\"Check that Entity knows of no lists by default.\"\"\"\n test_object = Entity(\"A\")\n self.assertFalse(test_object.lists,\n \"Default List in an Entity should be empty\")\n\n def test_none_is_invalid_entity_name(self):\n \"\"\"Check that Entity throws an error with None as name.\"\"\"\n try:\n Entity(None)\n raise Exception(\"Should not get here because None is an invalid \"\n \"Entity name\")\n except AssertionError:\n pass\n\n def test_empty_string_is_invalid_entity_name(self):\n \"\"\"Check that Entity throws an error with empty string as name.\"\"\"\n try:\n Entity(\"\")\n raise Exception(\"Should not get here because empty string is an \"\n \"invalid Entity name\")\n except AssertionError:\n pass\n\n def test_different_object_with_same_name(self):\n \"\"\"Test that two Entity objects with the same name are different.\"\"\"\n test_object1 = Entity(\"A\")\n test_object2 = Entity(\"A\")\n self.assertTrue(test_object1 != test_object2)\n\n def test_calculate_new_score_drops_to_zero(self):\n \"\"\"\n Test that an Entity calculates its new score as zero by default.\n\n A default Entity has no lists to which it belongs thus its score is\n zero.\n \"\"\"\n test_object = Entity(\"BB\")\n test_object.calculate_new_score()\n self.assertEqual(0, test_object.score, \"Score should now be zero\")\n\n def test_calculate_new_score_sums_list_scores_correctly(self):\n \"\"\"\n Test that an Entity calculates its 
new score as the sum of the scores\n        of the lists that mention it.\n        \"\"\"\n        test_object = Entity(\"BB\")\n        list1 = EntityList()\n        list1.weight = 1.7\n        list1.append(test_object)\n\n        list2 = EntityList()\n        list2.weight = 9.3\n        list2.append(test_object)\n\n        list3 = EntityList()\n        list3.weight = 6.112\n        list3.append(test_object)\n\n        test_object.calculate_new_score()\n        self.assertAlmostEqual(\n            17.112,\n            test_object.score,\n            3,\n            \"Score should now be the sum of the list scores\")\n\n    def test_calculate_new_score_accounts_for_categories(self):\n        \"\"\"\n        Test that an Entity calculates its new score as the sum of the\n        scores of the lists that mention it, allowing for the category of\n        the list.\n        \"\"\"\n        test_object = Entity(\"BB\")\n        list1 = EntityList()\n        list1.category_name = \"type1\"\n        list1.weight = 1.7\n        list1.append(test_object)\n\n        list2 = EntityList()\n        list2.category_name = \"type2\"\n        list2.weight = 9.3\n        list2.append(test_object)\n\n        list3 = EntityList()\n        list3.category_name = \"type2\"\n        list3.weight = 6.112\n        list3.append(test_object)\n\n        test_object.calculate_new_score()\n        self.assertAlmostEqual(\n            11.0,\n            test_object.score,\n            1,\n            \"Score should now be the sum of the highest list scores in \"\n            \"each category {}\".format(test_object.score))\n\n    def test_calculate_new_score_accounts_for_categories_opposite_order(self):\n        \"\"\"\n        Test that an Entity calculates its new score as the sum of the\n        scores of the lists that mention it, regardless of the order of the\n        EntityList objects\n        \"\"\"\n        test_object = Entity(\"BB\")\n        list1 = EntityList()\n        list1.category_name = \"type1\"\n        list1.weight = 1.7\n        list1.append(test_object)\n\n        list2 = EntityList()\n        list2.category_name = \"type2\"\n        list2.weight = 6.112\n        list2.append(test_object)\n\n        list3 = EntityList()\n        list3.category_name = \"type2\"\n        list3.weight = 9.3\n        list3.append(test_object)\n\n        test_object.calculate_new_score()\n        self.assertAlmostEqual(\n            11.0,\n            test_object.score,\n            1,\n            \"Score should now be the sum of the highest list scores in \"\n            \"each category {}\".format(test_object.score))\n\n    def test_list_added_is_stored(self):\n        \"\"\"Test that when an Entity is added to an EntityList, the list is\n        stored in the Entity and can be retrieved\n        \"\"\"\n        test_object = Entity(\"CC\")\n        list1 = EntityList()\n        test_object.note_list(list1)\n\n        self.assertEqual(1, len(test_object.lists), \"Should be one entry in \"\n                                                    \"the list of lists\")\n        self.assertTrue(list1 in test_object.lists, \"List should be in the \"\n                                                    \"list of lists\")\n\n    def test_list_added_is_stored_only_once(self):\n        \"\"\"Test that when an Entity is added to an EntityList more than once,\n        the list is stored in the Entity and can be retrieved just once\n        \"\"\"\n        test_object = Entity(\"CC\")\n        list1 = EntityList()\n        test_object.note_list(list1)\n        test_object.note_list(list1)\n        test_object.note_list(list1)\n\n        self.assertEqual(1, len(test_object.lists), \"Should be one entry in \"\n                                                    \"the list of lists\")\n        self.assertTrue(list1 in test_object.lists, \"List should be in the \"\n                                                    \"list of lists\")\n\n    def test_entity_knows_contributing_list_components(self):\n        \"\"\"Check that an Entity knows which lists contributed what weights to\n        its overall score\"\"\"\n        test_object = Entity(\"DD\")\n        list1 = EntityList()\n        list1.category_name = \"type1\"\n        list1.weight = 4.3\n        list1.append(test_object)\n\n        list2 = EntityList()\n        list2.category_name = \"type2\"\n        list2.weight = 6.112\n        list2.append(test_object)\n\n        list3 = EntityList()\n
\"type2\"\n list3.weight = 9.3\n list3.append(test_object)\n\n unrelated_list = EntityList()\n unrelated_list.category_name = \"type1\"\n unrelated_list.weight = 99.42\n\n test_object.calculate_new_score()\n\n self.assertEqual(4.3, test_object.score_from_list(list1),\n \"Expected 4.3 from list1\")\n self.assertEqual(0.0, test_object.score_from_list(list2),\n \"Expected 0.0 from list2\")\n self.assertEqual(9.3, test_object.score_from_list(list3),\n \"Expected 9.3 from list3\")\n self.assertEqual(0.0, test_object.score_from_list(unrelated_list),\n \"Expected 0.0 from unrelated_list\")\n\n def test_entity_reports_raw_scores(self):\n \"\"\"Check that an Entity reports the scores from each list regardless\n of winning category\"\"\"\n test_object = Entity(\"DD\")\n list1 = EntityList()\n list1.category_name = \"type1\"\n list1.weight = 4.3\n list1.append(test_object)\n\n list2 = EntityList()\n list2.category_name = \"type2\"\n list2.weight = 6.112\n list2.append(test_object)\n\n list3 = EntityList()\n list3.category_name = \"type2\"\n list3.weight = 9.3\n list3.append(test_object)\n\n unrelated_list = EntityList()\n unrelated_list.category_name = \"type1\"\n unrelated_list.weight = 99.42\n\n test_object.calculate_new_score()\n\n self.assertEqual(4.3, test_object.raw_score_from_list(list1),\n \"Expected 4.3 from list1\")\n self.assertEqual(6.112, test_object.raw_score_from_list(list2),\n \"Expected 6.112 from list2\")\n self.assertEqual(9.3, test_object.raw_score_from_list(list3),\n \"Expected 9.3 from list3\")\n self.assertEqual(0.0, test_object.raw_score_from_list(unrelated_list),\n \"Expected 0.0 from unrelated_list\")\n\n def test_reset(self):\n \"\"\"Check that we can reset an Entity score and other parameters in\n readiness for another analysis run\"\"\"\n test_object = Entity(\"BB\")\n list1 = EntityList()\n list1.category_name = \"type1\"\n list1.weight = 1.7\n list1.append(test_object)\n\n list2 = EntityList()\n list2.category_name = \"type2\"\n list2.weight = 6.112\n list2.append(test_object)\n\n list3 = EntityList()\n list3.category_name = \"type2\"\n list3.weight = 9.3\n list3.append(test_object)\n\n test_object.calculate_new_score()\n self.assertAlmostEqual(\n 11.0,\n test_object.score,\n 1,\n \"Score should now be the sum of the highest list scores in\"\n \"each category {}\".format(test_object.score))\n\n test_object.reset()\n self.assertAlmostEqual(\n 1,\n test_object.score,\n 10,\n \"Score should now be reset to 1\")\n self.assertEqual(0.0, test_object.score_from_list(list1),\n \"Expected 4.3 from list1\")\n self.assertEqual(0.0, test_object.score_from_list(list2),\n \"Expected 0.0 from list2\")\n self.assertEqual(0.0, test_object.score_from_list(list3),\n \"Expected 9.3 from list3\")\n\n def test_sum_weights(self):\n \"\"\"Test that the sum_weights() function calculates correctly\"\"\"\n from random import randint, uniform\n weights = {}\n expected_sum = 0.0\n for category in ['cat1', 'cat2', 'cat3']:\n weights[category] = []\n count = randint(3, 6)\n for n in range(count):\n weights[category].append(uniform(0, 20))\n expected_sum += max(weights[category])\n test_object = Entity(\"ZZ\")\n self.assertEqual(expected_sum,\n test_object.sum_max_weights_per_category(weights))\n","repo_name":"baillielab/maic","sub_path":"test_entity.py","file_name":"test_entity.py","file_ext":"py","file_size_in_byte":12711,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"38"}