diff --git "a/3473.jsonl" "b/3473.jsonl" new file mode 100644--- /dev/null +++ "b/3473.jsonl" @@ -0,0 +1,677 @@ +{"seq_id":"31740509022","text":"import pandas as pd\n\n# valid small\nid_smiles = pd.read_csv('../full_drugbank/SMILESstrings.csv').values.tolist()\nid_seqs = pd.read_csv('../full_drugbank/biodrugs.csv').values.tolist()\n\nvalid_small_bio = set(list(pd.read_csv('drugs_valid_th30000_10.csv').iloc[:, 0]))\n\nres_small = []\nsmalls = set()\nfor i in id_smiles:\n if i[0] in valid_small_bio:\n res_small.append(i)\n smalls.add(i[0])\n\nres_bio = []\nbios = set()\nfor i in id_seqs:\n if i[0] in valid_small_bio:\n res_bio.append(i)\n bios.add(i[0])\n\nprint(len(res_small), len(res_bio))\npd.DataFrame(res_small).to_csv('final_SMILESstrings.csv', header=None, index=False)\npd.DataFrame(res_bio).to_csv('final_biodrugs.csv', header=None, index=False)\n\nprint(valid_small_bio.difference(set(bios)).difference(set(smalls)))\n\nss, sm, mm, _del = 0, 0, 0, 0\nres = []\nddis = pd.read_csv('ddi_valid_th30000_10_directed.csv').values.tolist()\nfor i in ddis:\n if i[0] in smalls and i[2] in smalls:\n ss += 1\n res.append(i)\n elif (i[0] in smalls and i[2] in bios) or (i[2] in smalls and i[0] in bios):\n sm += 1\n res.append(i)\n elif i[0] in bios and i[2] in bios:\n mm += 1\n res.append(i)\n else:\n _del += 1\nprint(ss, sm, mm, _del)\npd.DataFrame(res).to_csv('ddi---.csv', index=False, sep=',', header=None)\n\n# valid biotech\n\n# avg smiles len\n# avg amino acid seq len\n\n# avg small molecule nodes\n# avg small molecule edges\n\n# avg macro nodes\n# avg macro edges\n","repo_name":"ZillaRU/ChemBioTIP_old","sub_path":"data/final_db/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35206374959","text":"import math\nimport torch\n\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules import Module\n\nclass GraphConvolution(Module):\n \"\"\"\n Implementation of Graph Convolution\n\n - Attributes\n - in_features: int\n the size of input features, i.e |H^(l)|\n - out_features: int\n the size of output features, i.e |H^(l+1)|\n - bias: bool\n default as True\n - weight: Parameter\n trainable param in GC\n\n - Methods\n - reset_parameters(self)\n -forward(self, input, adj)\n - Feed forward func, adjacency matrix after transformation N(A)\n\n \"\"\"\n\n def __init__(self, in_features, out_features, bias=True):\n super(GraphConvolution, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.FloatTensor(in_features, out_features))\n if bias:\n self.bias = Parameter(torch.FloatTensor(out_features))\n else:\n self.register_parameter('bias',None)\n self.reset_parameters()\n \n def reset_parameters(self):\n stdv = 1. 
/ math.sqrt(self.weight.size(1))\n        self.weight.data.uniform_(-stdv,stdv)\n        if self.bias is not None:\n            self.bias.data.uniform_(-stdv,stdv)\n\n    def forward(self, input, adj):\n        # H * W\n        support = torch.mm(input, self.weight)\n        # N(A) * H * W\n        output = torch.spmm(adj, support) #sparse matrix mul\n        if self.bias is not None:\n            return output + self.bias\n        else:\n            return output\n\n    def __repr__(self):\n        return self.__class__.__name__ + '(' + str(self.in_features) + '->' + str(self.out_features) + ')'\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass GCN(Module):\n\n    \"\"\"\n    two layer GCN model\n    ...\n    Attributes\n    -----------\n    - n_feat: int\n    - n_hid: int\n    - n_class: int\n    - dropout: float\n\n    Methods\n    --------\n    - forward(self, x, adj)\n    \"\"\"\n\n    def __init__(self, n_feat, n_hid, n_class, dropout):\n        super(GCN, self).__init__()\n        self.gc1 = GraphConvolution(n_feat, n_hid)\n        self.gc2 = GraphConvolution(n_hid, n_class)\n        self.dropout = dropout\n\n    def forward(self, x, adj):\n        x = F.relu(self.gc1(x,adj))\n        x = F.dropout(x, self.dropout, training=self.training)\n        x = self.gc2(x, adj)\n        return F.log_softmax(x, dim=1)\n","repo_name":"rickywesker/implementNetinPytorch","sub_path":"GCN/GCN.py","file_name":"GCN.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30762933498","text":"import sys\r\n\r\ninput = sys.stdin.readline\r\n\r\n\r\ntc = int(input())\r\n\r\nanswers = []\r\n\r\nfor _ in range(tc):\r\n    arr = list(map(int, input().split()))\r\n    avg = sum(arr[1:]) / arr[0]\r\n    cnt = 0\r\n    for num in arr[1:]:\r\n        if num > avg:\r\n            cnt += 1\r\n    answers.append(\"%.3f\" % (100 * cnt / arr[0]))\r\n\r\nfor a in answers:\r\n    print(a + \"%\")\r\n","repo_name":"ng-lee/ProblemSolving","sub_path":"백준/Bronze/4344. 
평균은 넘겠지/평균은 넘겠지.py","file_name":"평균은 넘겠지.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13463562263","text":"\n\nclass RegisterArtistValidation:\n def validate_artist(data, fail):\n username = data.get('username', '')\n firstname = data.get('first_name', '')\n lastname = data.get('last_name', '')\n\n # Validate fields\n if firstname and len(firstname) <= 2:\n fail(dict(firstname='firstname must be more than two characters'))\n elif firstname and not firstname.isalpha():\n fail(dict(firstname='firstname must contain only alphabet'))\n\n if lastname and len(lastname) <= 2:\n fail(dict(lastname='lastname must contain at least two characters'))\n elif lastname and not lastname.isalpha():\n fail(dict(lastname='lastname must contain only alphabet'))\n\n if username and len(username) <= 2:\n fail(dict(username='username must contain at least 2 characters'))\n return True","repo_name":"AdejokeOgunyinka/cinch-API","sub_path":"project/app/validations/validate_artist.py","file_name":"validate_artist.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20829293479","text":"import json\nimport pytest\nfrom pages.base_page import BasePage\nfrom pages.home_page import HomePage\n\nCONFIG_PATH = \"F:\\\\Code\\\\Python\\\\SeleniumPy\\\\tests\\\\config.json\"\nSUPPORTED_BROWSERS = ['CHROME', 'FIREFOX']\n\n# https://blog.testproject.io/2019/07/16/read-config-files-in-python-selenium\n@pytest.fixture(scope='session')\ndef config():\n # Read the JSON config file and returns it as a parsed dict\n with open(CONFIG_PATH) as config_file:\n config = json.load(config_file)\n # Validate and return the browser choice from the config data\n if BasePage.KV_BROWSER not in config:\n raise Exception(f'The config file does not contain \"{BasePage.KV_BROWSER}\"')\n elif config[BasePage.KV_BROWSER] not in SUPPORTED_BROWSERS:\n raise Exception(f'\"{config[BasePage.KV_BROWSER]}\" is not a supported browser')\n # Validate and return other needed data\n if BasePage.KV_PLATFORM not in config:\n raise Exception(f'The config file does not contain \"{BasePage.KV_PLATFORM}\"')\n if BasePage.KV_HUB_URL not in config:\n raise Exception(f'The config file does not contain \"{BasePage.KV_HUB_URL}\"')\n if BasePage.KV_BASE_URL not in config:\n raise Exception(f'The config file does not contain \"{BasePage.KV_BASE_URL}\"')\n return config\n\n#https://www.py4u.net/discuss/146481: \"# Teardown : logic is guaranteed to run regardless of what happens during the tests.\"\n#However, saw that if run the node without a timeout, the chrome instance is not made available\n@pytest.fixture\ndef home_page(request, config):\n page = HomePage(config)\n request.cls.page = page\n yield\n page.close()","repo_name":"esther-86/SeleniumPy","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71668374968","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 11 20:09:42 2023\n\n@author: Jiayi Fan\n\"\"\"\n\n# imports\nfrom LA_crime_predictor import crime_db as cd\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import losses\nfrom tensorflow import keras\nfrom sklearn.preprocessing import LabelEncoder\n\n\n\n#Global 
Variables\nle_time = LabelEncoder()\nle_VictAge = LabelEncoder()\nle_risk = LabelEncoder()\nscalars = [\"Crime Period\", \"Vict Age Group\", \"LAT\", \"LON\"]\n\nscalars_input = keras.Input(\n shape = (len(scalars), ),\n name = \"scalars\",\n dtype = \"float64\"\n)\n\n\ndef Encode_Df(df):\n '''\n Takes in an unfiltered dataframe with categorical entries as input\n Select specific entries from the dataset and make them all numerical\n\n Parameters\n ----------\n df: pandas dataframe\n\n Returns\n -------\n a filtered pandas dataframe with all entries numerical\n '''\n #select following columns as trained inputs\n #X= df[[\"Crime Period\", \"Vict Age Group\", \"LAT\", \"LON\", \"Risk\"]]\n df[\"Crime Period\"] = le_time.fit_transform(df[\"Crime Period\"])\n df[\"Vict Age Group\"] = le_VictAge.fit_transform(df[\"Vict Age Group\"])\n df[\"Risk\"] = le_risk.fit_transform(df[\"Risk\"])\n return df\n\n\ndef Make_Data(df):\n '''\n Takes in a pandas dataframe and convert it to TensorFlow Dataset\n\n Parameters\n ----------\n df: pandas dataframe\n\n Returns\n -------\n a TensorFlow dataset\n '''\n\n data = tf.data.Dataset.from_tensor_slices(\n (\n #they wann be in the same model --> group\n {\n \"scalars\" : df[scalars]\n },\n {\n \"risk\" : df[[\"Risk\"]]\n }\n )\n )\n\n return data\n\n\n\ndef train_model(year_begin, year_end, target_month):\n '''\n Takes in a beginning year and end year as integers, opens a database connection,\n returns a trained NN model that fits the data.\n We will only look at data with the same month as the target_month.\n\n The Returned Model aims to predict the type of crime given \"Crime Period\", \"Vict Age Group\",\"LAT\", \"LON\" as input\n\n Parameters\n ----------\n year_begin : int\n the first year the user would like to query\n year_end : int\n the final year the user would like to query\n target_month: int\n the month the user would like to investigate\n\n Returns\n -------\n (Trained NN model, training history of the model)\n '''\n df = cd.query_years(year_begin, year_end)\n #get the month information and extract all the data that match the target_month\n df[\"Month\"] = df[\"DATE OCC\"].str[0:2].astype(int)\n train_df = df[df[\"Month\"] == target_month]\n #transform categorical variables to numericals ones\n filt_train_df = Encode_Df(train_df)\n data = Make_Data(filt_train_df)\n\n data = data.shuffle(buffer_size = len(data), reshuffle_each_iteration=False)\n\n train_size = int(0.9*len(data))\n val_size = int(0.1*len(data))\n #run SGD on sample of 100\n train = data.take(train_size).batch(50)\n val = data.skip(train_size).take(val_size).batch(50)\n\n \n #Define Layers of the Model\n scalar_features = layers.Reshape((len(scalars), 1), input_shape=(len(scalars),))(scalars_input)\n\n scalar_features = layers.Conv1D(filters = 18, kernel_size=3, activation='relu')(scalar_features)\n scalar_features = layers.BatchNormalization()(scalar_features)\n scalar_features = layers.MaxPooling1D(pool_size = 2, strides = 1, padding = \"valid\")(scalar_features)\n scalar_features = layers.Dropout(0.2)(scalar_features)\n\n\n scalar_features = layers.LSTM(32, return_sequences=True)(scalar_features)\n scalar_features = layers.LSTM(16)(scalar_features)\n\n scalar_features = layers.Dense(64, activation='relu')(scalar_features)\n scalar_features = layers.BatchNormalization()(scalar_features)\n scalar_features = layers.Dropout(0.2)(scalar_features)\n\n scalar_features = layers.Dense(3, activation='softmax', name = 'risk')(scalar_features)\n output = scalar_features\n\n #Establish the 
Model\n    model = keras.Model(\n        inputs = scalars_input,\n        outputs = output\n    )\n    #Compile the Model\n    model.compile(optimizer = \"adam\",\n                  loss = losses.SparseCategoricalCrossentropy(from_logits=False),\n                  metrics=['accuracy']\n                  )\n    #Fit the Model\n    history = model.fit(train,\n                        validation_data=val,\n                        epochs = 50,\n                        verbose = True)\n    \n    return model, history \n\n\n\n\ndef test_model(clf, target_year, target_month):\n    '''\n    Parameters\n    ----------\n    clf: a NN model that you just finished training by using train_model function\n    target_year: int, the year you would like your model to be tested on\n    target_month: int, the month you would like your model to be tested on\n\n    Note: target_month should be the same as the target month you trained the model on, \n    otherwise the result will not guaranteed to be indicative\n\n    Returns\n    -------\n    accuracy score when applied the model to the test data\n    '''\n\n    df = cd.query_years(target_year, target_year)\n\n    #get the month information and extract all the data that match the target_month\n    df[\"Month\"] = df[\"DATE OCC\"].str[0:2].astype(int)\n    test_df = df[df[\"Month\"] == target_month]\n\n    #transform categorical variables to numericals ones and obtain the test data\n    filt_test_df = Encode_Df(test_df)\n    test_data = Make_Data(filt_test_df)\n    #batch the test data\n    test = test_data.batch(100)\n    return clf.evaluate(test)","repo_name":"frankfan6969/Pic-16B-Project","sub_path":"LA_crime_predictor/NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":5597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18810816960","text":"from llm.qa_base import QABaseBrainPicking\nfrom routes.authorizations.brain_authorization import validate_brain_authorization\nfrom routes.authorizations.types import RoleEnum\nfrom routes.chat.interface import ChatInterface\n\nfrom repository.brain import get_brain_details\n\n\nclass BrainfulChat(ChatInterface):\n    def validate_authorization(self, user_id, brain_id):\n        if brain_id:\n            validate_brain_authorization(\n                brain_id=brain_id,\n                user_id=user_id,\n                required_roles=[RoleEnum.Viewer, RoleEnum.Editor, RoleEnum.Owner],\n            )\n\n    def get_openai_api_key(self, brain_id, user_id):\n        brain_details = get_brain_details(brain_id)\n        if brain_details:\n            return brain_details.openai_api_key\n\n    def get_answer_generator(\n        self,\n        brain_id,\n        chat_id,\n        model,\n        max_tokens,\n        temperature,\n        user_openai_api_key,\n        streaming,\n        prompt_id,\n    ):\n        return QABaseBrainPicking(\n            chat_id=chat_id,\n            model=model,\n            max_tokens=max_tokens,\n            temperature=temperature,\n            brain_id=brain_id,\n            user_openai_api_key=user_openai_api_key,\n            streaming=streaming,\n            prompt_id=prompt_id,\n        )\n","repo_name":"coachlou/quivr","sub_path":"backend/routes/chat/brainful_chat.py","file_name":"brainful_chat.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"42832447063","text":"from flask import Flask, jsonify, request, render_template, make_response, redirect, url_for\nfrom flask_jwt_extended import JWTManager, jwt_required, create_access_token, create_refresh_token, get_jwt_identity, verify_jwt_in_request\nimport pymongo\nfrom datetime import timedelta\n\ncliente = pymongo.MongoClient('localhost',\n                           port=27017,\n                           username='user',\n                           password='password') \n\ndb = cliente.get_database('API')\ncoleccion = db['usuarios verificados']\n\ndef convertir_a_json(documento):\n    if '_id' in documento:\n        documento['_id'] = 
str(documento['_id'])\n return documento\n\napppp = Flask(__name__)\napppp.config[\"JWT_SECRET_KEY\"] = \"Patata\" \njwt = JWTManager(apppp)\n\n@apppp.route('/cookies')\ndef cookie():\n respuesta = make_response('Añadiendo la cookie')\n respuesta.set_cookie('Nombre', 'Mi cookie')\n return redirect(url_for('vercookie'))\n\n@apppp.route('/cookies/vercookie')\ndef vercookie():\n Nombre = request.cookies.get('Nombre')\n if Nombre:\n return 'Este es el contenido de mi cookie: ' + Nombre\n else:\n respuesta = make_response('No se encontró la cookie')\n respuesta.delete_cookie('Nombre') \n return respuesta\n\n\n\n@apppp.route('/cookiesaceptar', methods=['GET', 'POST'])\ndef cookies():\n if request.method == 'POST':\n action = request.form.get('action')\n if action == 'accept':\n global cookies_accepted\n cookies_accepted = True\n return redirect(url_for('protected_page'))\n elif action == 'reject':\n return redirect(url_for('cookies_rejected'))\n\n return render_template('cookies.html')\n\n@apppp.route('/protected')\ndef protected_page():\n if 'cookies_accepted' in globals() and cookies_accepted:\n return \"¡Bienvenido a la página donde necesitas las cookies!\"\n else:\n return redirect(url_for('cookies_rejected'))\n\n@apppp.route('/cookies_rejected')\ndef cookies_rejected():\n return \"Debes activar las cookies para acceder a esta página.\"\n\nif __name__ == \"__main__\":\n apppp.run(debug=True, port=4000)","repo_name":"GabrielLuezas/Practicas","sub_path":"Otros/Pruebas/API/Cookies.py","file_name":"Cookies.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14104150861","text":"import numpy as np\r\nimport databricks.koalas as ks\r\nfrom pyspark.sql import SparkSession\r\nimport os\r\n#import sys\r\nfrom scipy.sparse import csr_matrix\r\nimport geopandas as gpd\r\nimport pickle\r\nfrom pathlib import Path\r\nimport argparse\r\nfrom pyspark.sql.functions import expr, to_timestamp, col, unix_timestamp, first, sum, count\r\nfrom pyspark.sql.types import *\r\nfrom pyspark.sql.types import LongType, IntegerType\r\n# importing module\r\nimport logging\r\n \r\n#\r\n\r\n# Datapath file CSV (dataset)\r\npathCSV = 'data/trips2323_new.csv'\r\n\r\n# Path ShapeFile\r\npathShapeF = 'data/shapefile/shapefile_2323.shp'\r\n\r\n# Path for results\r\npathRes = 'results/'\r\n\r\n# Skip division per 0\r\nnp.seterr(divide='ignore', invalid='ignore')\r\n\r\n\r\n\r\ndef preprocessing(df):\r\n #df_cache = df.spark.cache()\r\n df = df.rename(columns={'from_zone':'from_zone_fid', 'from_timedate':'from_timedate_gmt', 'to_zone':'to_zone_fid', 'to_timedate':'to_timedate_gmt'})\r\n\r\n df = df.dropna(subset=['to_zone_fid'])\r\n fill = {'from_zone_fid': -1}\r\n df = df.fillna(value=fill)\r\n\r\n\r\n # Initial Filter\r\n # (df.triptime_s < 3278)\r\n df = df.loc[ (df.tripdistance_m > 0) & (df.stoptime_s > 0) & (df.stoptime_s < 604800)]\r\n\r\n #df = df.sort_values(by=['from_timedate_gmt', 'to_timedate_gmt'])\r\n #df = df.reset_index(drop = True)\r\n\r\n df_spark = df.to_spark()\r\n\r\n df_spark = df_spark.withColumn(\"from_timedate_gmt\", expr(\"substring(from_timedate_gmt, 1, length(from_timedate_gmt)-3)\"))\r\n df_spark = df_spark.withColumn(\"to_timedate_gmt\", expr(\"substring(to_timedate_gmt, 1, length(to_timedate_gmt)-3)\"))\r\n #df_spark = df_spark.withColumn(\"from_timedate_gmt\", df_spark.from_timedate_gmt.substr(1, 6))\r\n\r\n df = df_spark.to_koalas()\r\n\r\n #df_cache.spark.unpersist()\r\n\r\n return df\r\n\r\ndef 
square(s):\r\n return s/60\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n spark = SparkSession.builder\\\r\n .master(\"local[8]\")\\\r\n .appName(\"Pyspark\")\\\r\n .config('spark.ui.port', '4050')\\\r\n .config('spark.executor.cores', \"100\")\\\r\n .getOrCreate()\r\n spark.catalog.clearCache()\r\n \r\n \r\n # Upload CSV datas\r\n df = ks.read_csv(pathCSV, sep=';', dtype = {'from_timedate':str, 'to_timedate':str})\r\n #df = df.spark.cache()\r\n df = df.spark.repartition(30)\r\n df = preprocessing(df)\r\n\r\n# print(df.head())\r\n# exit()\r\n\r\n print('-----------OUTPUT------------')\r\n\r\n #df_cache = df.spark.cache()\r\n df = df.rename(columns={'from_zone':'from_zone_fid', 'from_timedate':'from_timedate_gmt', 'to_zone':'to_zone_fid', 'to_timedate':'to_timedate_gmt'})\r\n\r\n df = df.dropna(subset=['to_zone_fid'])\r\n fill = {'from_zone_fid': -1}\r\n df = df.fillna(value=fill)\r\n# Initial Filter\r\n# (df.triptime_s < 3278)\r\n df = df.loc[ (df.tripdistance_m > 0) & (df.stoptime_s > 0) & (df.stoptime_s != 31422298)]\r\n df.stoptime_s = df.stoptime_s.apply(square)\r\n #test = df.stoptime_s.to_numpy()\r\n \r\n #ax = \r\n df.stoptime_s.plot.kde(bw_method=3) \r\n #fig = ax.get_figure()\r\n #fig.savefig('figure.png')\r\n #print(df.stoptime_s)#.plot.kde(bw_method=3) \r\n\r\n# Eliminare veicolo con filtro massimo e verificare se cambia qualcosa (anomalia o verità)\r\n# Altrimenti porre 1 settimana come filtro massimo\r\n# sistematicità nei parcheggi? ripetizione? dipendenza temporale \r\n# Heatmap?","repo_name":"mirkobrig24/Parking-Occupancy-Prediction","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"18450636716","text":"# coding: utf-8\n\n\n\nfrom keras.models import load_model\nimport cv2\nimport os\nimport sys\nimport numpy as np\n\nimg_shape = (64,64,3)\nlabel_name = [\"good\",\"paper\",\"scissors\",\"stone\"]\nnum_classes = len(label_name)\n\ndef imread(path): #注意 cv2是gbr!\n if img_shape[2] == 1:\n img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n else:\n img = cv2.imread(path)\n img = cv2.resize(img, (img_shape[1], img_shape[0]), interpolation=cv2.INTER_CUBIC)\n img = img/255\n img = np.reshape(img,(1,img_shape[0], img_shape[1],img_shape[2]))\n return img\n\nif len(sys.argv) < 2:\n print(\"python predict.py 檔名\")\n exit()\nif not os.path.isfile(sys.argv[1]):\n print(\"圖片不存在\")\n exit()\nimage = imread(sys.argv[1])\nmodel = load_model('my_model.h5')\nresult = model.predict(image)\n\nprint(\"結果\",label_name[np.argmax(result[0])])","repo_name":"neno12345/HO-HSIN-auto-crawler","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38055300161","text":"#!/usr/bin/env python3\n##########################\n#\n# Author: Andrej Panicek\n# Desc : Single class for generating datasets structure.\n#\n########################## \nimport os\nimport sys\nfrom PIL import Image\n\nclass DatasetGenerator():\n \"\"\"\n Generate structure of simple dataset in filesystem.\n Store images in it with proper ground truth.\n \"\"\"\n \n def __init__(self, dataset_root_path):\n self._gt_path = None\n self._img_path = None\n self._curr_index = None\n self._root_path = dataset_root_path\n self._init_dataset_dirs()\n \n def _init_dataset_dirs(self):\n \"\"\"\n Check proper structure of dataset dirs.\n \"\"\"\n self._gt_path = 
self._create_dir(\"gt\")\n self._img_path = self._create_dir(\"images\")\n img_count = self._current_count()\n\n def _create_dir(self, path):\n path = os.path.abspath(os.path.join(self._root_path, path))\n try:\n os.mkdir(path)\n except FileExistsError as err:\n pass\n except OSError as err:\n raise OSError(\"Not possible to create dir \\\" \", path, \"\\\"\" , file=sys.stderr)\n return path\n\n def _current_count(self):\n \"\"\"\n Set index for how many images dataset already contains, so we wont overwritte any of them. \n \"\"\"\n self._curr_index = len([image for image in os.listdir(self._img_path) if os.path.isfile(os.path.join(self._img_path, image))])\n\n def _gen_new_index(self):\n self._curr_index += 1\n return self._curr_index\n \n def add_image(self, image: Image, coords: list, sign_type: str):\n index = str(self._gen_new_index()) \n\n bbox = self._transform_coords(coords)\n self._store_image(index, image)\n self._store_data(index, bbox, sign_type)\n\n def _store_image(self, name, image):\n image_name = name + \".jpg\"\n path = os.path.join(self._img_path, image_name)\n image.save(path)\n \n def _store_data(self, name, bbox, sign_type):\n data_name = name + \".txt\"\n path = os.path.join(self._gt_path, data_name)\n\n with open(path, \"w\") as gt_data: \n for point in bbox:\n gt_data.write(str(int(point)))\n gt_data.write(\" \")\n\n gt_data.write(sign_type)\n \n def _transform_coords(self, coords: list):\n \"\"\"\n Transform coordinates from x_start, y_start, x_end, y_end \n to x_start, y_start, width, height.\n \"\"\"\n return [coords[0], coords[1],\n coords[2] - coords[0],\n coords[3] - coords[1]]\n\n \n\n","repo_name":"andrejPP/Pova--my-part","sub_path":"dataset_generator.py","file_name":"dataset_generator.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72084222328","text":"#!/usr/bin/python3\n\"\"\"\nNqueens module\n\"\"\"\nimport sys\n\n\ndef nqueens() -> None:\n \"\"\" Nqueens puzzle function representation \"\"\"\n\n N = sys.argv[1:]\n if len(N) != 1:\n print(\"Usage: nqueens N\")\n sys.exit(1)\n else:\n try:\n N = int(N[0])\n\n if N < 4:\n print(\"N must be at least 4\")\n sys.exit(1)\n else:\n board = [-1 for i in range(N)]\n solve_nqueens(N, board, 0)\n except ValueError:\n print(\"N must be a number\")\n sys.exit(1)\n\n\ndef is_safe(board, row, col):\n \"\"\"Check if it's safe to place a queen at the specified row and col.\"\"\"\n for i in range(col):\n if (board[i] == row or\n board[i] - i == row - col or\n board[i] + i == row + col):\n return False\n return True\n\n\ndef solve_nqueens(n, board, col):\n \"\"\"Use backtracking to solve N queens problem.\"\"\"\n if col >= n:\n print([[i, board[i]] for i in range(n)])\n return\n\n for i in range(n):\n if is_safe(board, i, col):\n board[col] = i\n solve_nqueens(n, board, col + 1)\n\n\nif __name__ == \"__main__\":\n nqueens()\n","repo_name":"Maneida/alx-interview","sub_path":"0x05-nqueens/0-nqueens.py","file_name":"0-nqueens.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10762872200","text":"from __future__ import annotations\n\nfrom typing import Any, Awaitable, Callable, Dict, Type\n\nfrom aiogram import BaseMiddleware\nfrom aiogram.types import Update\n\nfrom base_machine import BaseMachine\nfrom machines_manager import MachinesManager\n\nMACHINES = \"machines\"\nMACHINES_MANAGER = 
\"machines_manager\"\n\n\nclass TransitionsMiddleware(BaseMiddleware[Update]):\n machines: Dict[str, Type[BaseMachine]]\n\n def __init__(self, *machines: Type[BaseMachine]):\n self.machines = dict()\n if machines:\n for machine in machines:\n self.register_machine(machine)\n\n async def __call__(\n self,\n handler: Callable[[Update, Dict[str, Any]], Awaitable[Any]],\n event: Update,\n data: Dict[str, Any],\n ) -> Any:\n\n machine_manager = MachinesManager(registered_machines=self.machines, data=data)\n await machine_manager.create_user_machines()\n data[MACHINES_MANAGER] = machine_manager\n\n result = await handler(event, data)\n\n await machine_manager.update_machines_data()\n return result\n\n def register_machine(self, machine: Type[BaseMachine]):\n self.machines[machine.__name__] = machine\n","repo_name":"darksidecat/aiogram_transitions","sub_path":"transitions_middleware.py","file_name":"transitions_middleware.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"8770720188","text":"#Matthew Suriawinata\n#4/23/18\n#fwordDemo.py - print out words with f\n\n\nwords = input(\"Type in some wordsL \").split(\" \")\n\nfor item in words:\n if \"f\" in item or \"F\" in item:\n print(item)\n","repo_name":"mattsuri/unit5","sub_path":"fwordDemo.py","file_name":"fwordDemo.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30001259933","text":"import sys\nimport csv\n\ncsv.field_size_limit(sys.maxsize)\n\nclass Item:\n def __init__(self, chr=0, src='', feature='', start=0, end=0, score=0, strand='', frame='', attribute=''):\n self.chr = chr\n self.src = src\n self.feature = feature\n self.start = start\n self.end = end \n self.score = score\n self.strand = strand\n self.frame = frame\n self.attribute = attribute\n\nclass Group:\n def __init__(self, circRNA=0, exon_list=[]):\n self.circRNA = circRNA\n self.exon_list = exon_list\n\n#input = \"lung_annotation_org\"\n#output = \"lung_anno.gtf\"\n\ninput = sys.argv[1]\noutput = sys.argv[2]\n\ngroups = []\n\n#read each line, form a group of circRNA with exons and add to groups\nwith open(input, \"r\", encoding=\"utf8\") as isocirc_file:\n tsv_reader = csv.reader(isocirc_file, delimiter=\"\\t\")\n\n #Skip the first row, which is the header\n #next(tsv_reader)\n\n row_list = list(tsv_reader)\n print('before sort length',len(row_list))\n #print(row_list[0])\n\n for i in range(0,len(row_list)):\n row = row_list[i]\n converted_row = [int(ele) if ele.isdigit() else ele for ele in row] #convert int to int and str to str inside a row\n row_list[i] = converted_row\n #print(row)\n\n #print(row_list)\n\n bundle = []\n\n row = row_list[0]\n item = Item(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8])\n lst = row[8].split(\";\")\n prev_trans_id = lst[1]\n\n bundle.append(item)\n\n if len(row_list) == 1: #add this single instance in groups, for loop does not run\n grp = Group()\n exon = bundle[0]\n circ = Item(exon.chr, exon.src, \"circRNA\", bundle[0].start, bundle[len(bundle)-1].end, exon.score, exon.strand, exon.frame, exon.attribute)\n grp.circRNA = circ\n grp.exon_list = bundle.copy()\n #print(\"exon list size:\",len(grp.exon_list))\n groups.append(grp)\n\n for i in range(1,len(row_list)):\n row = row_list[i]\n item = Item(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8])\n lst = row[8].split(\";\")\n trans_id = lst[1]\n 
#print(trans_id)\n\n if trans_id != prev_trans_id:\n #add to groups\n grp = Group()\n exon = bundle[0]\n circ = Item(exon.chr, exon.src, \"circRNA\", bundle[0].start, bundle[len(bundle)-1].end, exon.score, exon.strand, exon.frame, exon.attribute)\n grp.circRNA = circ\n grp.exon_list = bundle.copy()\n #print(\"exon list size:\",len(grp.exon_list))\n groups.append(grp)\n\n bundle.clear()\n bundle.append(item)\n\n if trans_id == prev_trans_id:\n #print(item.attribute)\n bundle.append(item)\n\n prev_trans_id = trans_id\n \n if i == len(row_list) - 1:\n #add the last group\n grp = Group()\n exon = bundle[0]\n circ = Item(exon.chr, exon.src, \"circRNA\", bundle[0].start, bundle[len(bundle)-1].end, exon.score, exon.strand, exon.frame, exon.attribute)\n grp.circRNA = circ\n grp.exon_list = bundle.copy()\n #print(\"exon list size:\",len(grp.exon_list))\n groups.append(grp)\n\n# for i in range(0,len(groups)):\n# grp = groups[i]\n# print(\"size:\",len(grp.exon_list))\n\nprint(\"Number of distinct circRNAs:\",len(groups))\n\nwith open(output, \"w\") as f:\n\n for i in range(0,len(groups)):\n group = groups[i]\n circRNA = group.circRNA\n exon_list = group.exon_list\n\n f.write(str(circRNA.chr) + \"\\t\" + str(circRNA.src) + \"\\t\" + str(circRNA.feature) + \"\\t\" + str(circRNA.start) + \"\\t\" + str(circRNA.end) + \"\\t\" + str(circRNA.score) + \"\\t\" + str(circRNA.strand) + \"\\t\" + str(circRNA.frame) + \"\\t\" + str(circRNA.attribute) + \"\\n\")\n\n for j in range(0,len(exon_list)):\n exon = exon_list[j]\n\n f.write(str(exon.chr) + \"\\t\" + str(exon.src) + \"\\t\" + str(exon.feature) + \"\\t\" + str(exon.start) + \"\\t\" + str(exon.end) + \"\\t\" + str(exon.score) + \"\\t\" + str(exon.strand) + \"\\t\" + str(exon.frame) + \"\\t\" + str(exon.attribute) + \"\\n\")\n \n\n\n","repo_name":"Shao-Group/TERRACE-test","sub_path":"CIRI-full_scripts/convert_isocirc_gtf.py","file_name":"convert_isocirc_gtf.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9466410632","text":"# Rylan Hunter\r\n# ECEN 689 - Final Project Code\r\n\r\n# THIS IS A SUPPORTING FILE FOR THE SMART_ROCKETS PROJECT\r\n# It contains the classes \"DOT\", \"POPULATION\", \"GOAL\", \"OBSTACLE\", and \"BRAIN\"\r\n# Dot is the object moving around,\r\n# Brain is the 'DNA' of the dot, giving it direction,\r\n# Population is a collection of dots,\r\n# Goal is the target destination\r\n# Obstacles are what they sound like\r\n\r\nfrom PVector import PVector\r\nimport pygame\r\nimport math\r\nimport random\r\n\r\n# Define global parameters. Basically copy/paste from main.py. I couldn't figure out how to get main.py to share\r\n# these without passing them in as parameters, and got tired of doing so.\r\nSCREENWIDTH = 400\r\nSCREENHEIGHT = 400\r\n\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\n\r\n'''\r\nDot Class: \r\nA 'dot' has position, velocity, and acceleration vectors. \r\nThe 'Brain' is the directions that the dot follows. 
These apply forces (accelerations) to the dot.\r\nThe dot knows if it is dead, if it reached the goal, it's fitness, and whether or not it is the best.\r\n\r\nThe Dot can show itself, print its current pos/vel/accel to screen, move itself, \r\nupdate itself (test if done/collision), calculate its fitness, and clone itself\r\n'''\r\nclass Dot:\r\n def __init__(self, xx=SCREENWIDTH/2, yy=SCREENHEIGHT-10):\r\n self.pos = PVector(xx,yy)\r\n self.vel = PVector(0,0,5)\r\n self.acc = PVector(0,0)\r\n self.brain = Brain(400)\r\n self.dead = False\r\n self.fitness = 0\r\n self.reachedGoal = False\r\n self.isBest = False\r\n\r\n def show(self, SCREEN):\r\n dotRad = 2\r\n if self.isBest:\r\n dotRad = 4\r\n surf1 = pygame.Surface((dotRad*2,dotRad*2))\r\n surf1 = surf1.convert()\r\n pygame.draw.circle(surf1, GREEN, (dotRad-1,dotRad-1), dotRad)\r\n SCREEN.blit(surf1,(self.pos.x-dotRad,self.pos.y-dotRad))\r\n else:\r\n surf1 = pygame.Surface((dotRad*2,dotRad*2))\r\n surf1 = surf1.convert()\r\n pygame.draw.circle(surf1, WHITE, (dotRad-1,dotRad-1), dotRad)\r\n SCREEN.blit(surf1,(self.pos.x-dotRad,self.pos.y-dotRad))\r\n\r\n def print(self):\r\n arr = [self.pos.getVec(),self.vel.getVec(),self.acc.getVec()]\r\n print(arr)\r\n\r\n def move(self):\r\n if (len(self.brain.directions)>self.brain.step):\r\n self.acc = self.brain.directions[self.brain.step]\r\n self.brain.step += 1\r\n else:\r\n self.dead = True\r\n self.vel.add(self.acc)\r\n self.pos.add(self.vel)\r\n \r\n def update(self, SCREENWIDTH, SCREENHEIGHT,Goal,Obstacles):\r\n if (not self.dead) and (not self.reachedGoal):\r\n self.move()\r\n x = self.pos.x\r\n y = self.pos.y\r\n if (x < 2) or (y < 2) or (x > SCREENWIDTH-2) or (y > SCREENHEIGHT-2):\r\n self.dead = True\r\n elif (PVector.dist(self.pos,Goal.pos) < Goal.size):\r\n self.reachedGoal = True\r\n for obs in Obstacles:\r\n if obs.collision(self):\r\n self.dead = True\r\n\r\n def calculateFitness(self,Goal):\r\n if self.reachedGoal:\r\n self.fitness = 1.0/16.0 + 10000 / (self.brain.step * self.brain.step)\r\n else:\r\n distToGoal = PVector.dist(self.pos,Goal.pos)\r\n self.fitness = 1.0/(distToGoal * distToGoal)\r\n\r\n def gimmieBaby(self):\r\n baby = Dot()\r\n baby.brain = self.brain.clone()\r\n return baby\r\n\r\n'''\r\nBrain Class: \r\nA 'brain' contains the forces (acceleration vectors) applied to a dot. This is the \"DNA\" of the genetic algorithm\r\nIt can initialize itself, clone itself, and mutate itself\r\n'''\r\nclass Brain:\r\n def __init__(self, size=100):\r\n self.directions = []\r\n self.step = 0\r\n for i in range (size):\r\n ang = random.uniform(0, 2*math.pi)\r\n self.directions.append(PVector.fromAngle(ang))\r\n\r\n def randomize(self,size):\r\n for i in range (size):\r\n ang = random.uniform(0, 2*math.pi)\r\n self.directions.append(PVector.fromAngle(ang))\r\n\r\n def clone(self):\r\n clone = Brain(len(self.directions))\r\n clone.directions = self.directions.copy()\r\n return clone\r\n\r\n def mutate(self,mutationRate):\r\n for i in range(len(self.directions)):\r\n rand = random.random()\r\n if rand < mutationRate:\r\n randAng = random.uniform(0,2*math.pi)\r\n self.directions[i] = PVector.fromAngle(randAng)\r\n\r\n'''\r\nPopulation Class: \r\nA 'population' is a collection of dots. It can calculate the statistics of the dots, call update on the dots, etc. 
\r\n'''\r\nclass Population:\r\n def __init__(self, size=100, dotStartX=0, dotStartY=0):\r\n self.dots = []\r\n for i in range (size):\r\n self.dots.append(Dot(dotStartX,dotStartY))\r\n self.fitnessSum = 0\r\n self.generation = 1\r\n self.bestDot = 0\r\n self.bestSteps = len(self.dots[0].brain.directions)\r\n self.avgFitness = 0\r\n self.stdDevFitness = 0\r\n self.maxFitness = 0\r\n\r\n def show(self,SCREEN):\r\n for i in range (1,len(self.dots)):\r\n self.dots[i].show(SCREEN)\r\n self.dots[0].show(SCREEN)\r\n\r\n def update(self, SCREENWIDTH, SCREENHEIGHT,Goal,Obstacles):\r\n for i in range (len(self.dots)):\r\n if self.dots[i].brain.step > self.bestSteps:\r\n self.dots[i].dead = True\r\n else:\r\n self.dots[i].update(SCREENWIDTH,SCREENHEIGHT,Goal,Obstacles)\r\n\r\n def calculateAvgFitness(self):\r\n self.avgFitness = self.fitnessSum / len(self.dots)\r\n\r\n def calculateStdDevFitness(self):\r\n runningSum = 0\r\n for dot in self.dots:\r\n runningSum += math.pow(dot.fitness - self.avgFitness,2)\r\n self.stdDevFitness = math.sqrt(runningSum/len(self.dots))\r\n\r\n def printStats(self):\r\n print(\"Generation: \",self.generation)\r\n print(\"Best Fit: \",self.maxFitness)\r\n print(\"Best Steps: \",self.bestSteps)\r\n print(\"Mean Fit: \",self.avgFitness)\r\n print(\"StdDev Fit: \",self.stdDevFitness)\r\n print(\" \")\r\n\r\n def naturalSelection(self):\r\n # Print stats of the generation\r\n Population.setBestDot(self)\r\n Population.calculateFitnessSum(self)\r\n Population.calculateAvgFitness(self)\r\n Population.calculateStdDevFitness(self)\r\n Population.printStats(self)\r\n\r\n # Generate new dot list (next generation)\r\n newDots = []\r\n newDots.append(self.dots[self.bestDot].gimmieBaby())\r\n newDots[0].isBest = True\r\n for i in range(1,len(self.dots)):\r\n # Select Parent based on fitness\r\n parent = Population.selectParent(self)\r\n\r\n # Get baby from them\r\n baby = parent.gimmieBaby()\r\n newDots.append(baby)\r\n self.dots = newDots.copy()\r\n self.generation += 1\r\n\r\n def mutateDemBabies(self,mutationRate):\r\n for i in range(1,len(self.dots)):\r\n self.dots[i].brain.mutate(mutationRate)\r\n\r\n def setBestDot(self):\r\n maxScore = 0\r\n maxIdx = 0\r\n for i in range (len(self.dots)):\r\n if self.dots[i].fitness > maxScore:\r\n maxScore = self.dots[i].fitness\r\n maxIdx = i\r\n self.bestDot = maxIdx\r\n self.maxFitness = maxScore\r\n\r\n if self.dots[self.bestDot].reachedGoal:\r\n self.bestSteps = self.dots[self.bestDot].brain.step\r\n\r\n def calculateFitness(self,Goal):\r\n for i in range (len(self.dots)):\r\n self.dots[i].calculateFitness(Goal)\r\n\r\n def calculateFitnessSum(self):\r\n self.fitnessSum = 0\r\n for dot in self.dots:\r\n self.fitnessSum += dot.fitness\r\n\r\n def selectParent(self):\r\n rand = random.uniform(0,self.fitnessSum)\r\n runningSum = 0\r\n for dot in self.dots:\r\n runningSum += dot.fitness\r\n if runningSum > rand:\r\n return dot\r\n # Should never get to this point\r\n print (\"HALP YOU BROKE IT - natural selection & select parent\")\r\n return None\r\n\r\n def allDotsDead(self):\r\n for i in range (len(self.dots)):\r\n if (not self.dots[i].dead) and (not self.dots[i].reachedGoal):\r\n return False\r\n return True\r\n\r\n'''\r\nGoal Class: \r\nA 'goal' is a simple class that can draw itself, has a position, and size.\r\n'''\r\nclass Goal:\r\n def __init__(self, xx=200, yy=20):\r\n self.pos = PVector(xx,yy)\r\n self.size = 5\r\n\r\n def show(self, SCREEN):\r\n surf1 = pygame.Surface((self.size*2,self.size*2))\r\n surf1 = surf1.convert()\r\n 
pygame.draw.circle(surf1, RED, (self.size-1,self.size-1), self.size)\r\n SCREEN.blit(surf1,(self.pos.x-self.size,self.pos.y-self.size))\r\n\r\n'''\r\nObstacle Class: \r\nA 'obstacle' is a rectangle, similar to the goal. Given a dot, it can test if that dot has collided with itself.\r\n'''\r\nclass Obstacle:\r\n def __init__(self,x=0,y=0,width=0,height=0):\r\n self.x = x\r\n self.y = y\r\n self.width = width\r\n self.height = height\r\n\r\n def collision(self,dot):\r\n if (dot.pos.x > self.x) and (dot.pos.x < self.x + self.width) and (dot.pos.y > self.y) and (dot.pos.y < self.y + self.height):\r\n return True\r\n return False\r\n\r\n def show(self,SCREEN):\r\n surf1 = pygame.Surface((self.width,self.height))\r\n surf1 = surf1.convert()\r\n #pygame.draw.rect(surf1, BLUE, [0,0,self.width,self.height])\r\n surf1.fill(BLUE)\r\n SCREEN.blit(surf1,(self.x,self.y))\r\n","repo_name":"abhishekhanchate/Smart-Rockets-Game-using-Genetic-Algorithm","sub_path":"SmartRockets.py","file_name":"SmartRockets.py","file_ext":"py","file_size_in_byte":9677,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"24155669418","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport jsonfield.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('compatibility_test', '0002_auto_20160531_1913'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='voting',\n name='created',\n field=models.DateTimeField(auto_now_add=True, null=True),\n ),\n migrations.AddField(\n model_name='voting',\n name='documents',\n field=jsonfield.fields.JSONField(default=dict),\n ),\n migrations.AlterField(\n model_name='topic',\n name='mp_positions',\n field=jsonfield.fields.JSONField(default=dict),\n ),\n ]\n","repo_name":"ManoSeimas/manoseimas.lt","sub_path":"manoseimas/compatibility_test/migrations/0003_auto_20160601_0929.py","file_name":"0003_auto_20160601_0929.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"31863657338","text":"import unittest\n\nfrom io import StringIO\n\nfrom Bio import BiopythonDeprecationWarning\nfrom Bio import SeqIO\nfrom Bio.SeqIO.FastaIO import FastaIterator\nfrom Bio.SeqIO.FastaIO import FastaTwoLineParser\nfrom Bio.SeqIO.FastaIO import SimpleFastaParser\n\n\ndef title_to_ids(title):\n \"\"\"Convert a FASTA title line into the id, name, and description.\n\n This is just a quick-n-dirty implementation, and is definitely not meant\n to handle every FASTA title line case.\n \"\"\"\n # first split the id information from the description\n # the first item is the id info block, the rest is the description\n all_info = title.split(\" \")\n id_info = all_info[0]\n rest = all_info[1:]\n descr = \" \".join(rest)\n\n # now extract the ids from the id block\n # gi|5690369|gb|AF158246.1|AF158246\n id_info_items = id_info.split(\"|\")\n if len(id_info_items) >= 4:\n assert id_info_items[2] in [\"gb\", \"emb\", \"dbj\", \"pdb\"], title\n id = id_info_items[3] # the id with version info\n name = id_info_items[4] # the id without version info\n else:\n # Fallback:\n id = id_info_items[0]\n name = id_info_items[0]\n\n return id, name, descr\n\n\ndef read_title_and_seq(filename):\n \"\"\"Crude parser that gets the first record from a FASTA file.\"\"\"\n with open(filename) as handle:\n title = handle.readline().rstrip()\n assert title.startswith(\">\")\n seq = \"\"\n for line 
in handle:\n if line.startswith(\">\"):\n break\n seq += line.strip()\n return title[1:], seq\n\n\nclass Wrapping(unittest.TestCase):\n \"\"\"Tests for two-line-per-record FASTA variant.\"\"\"\n\n def test_fails(self):\n \"\"\"Test case which should fail.\"\"\"\n self.assertRaises(ValueError, SeqIO.read, \"Fasta/aster.pro\", \"fasta-2line\")\n\n def test_passes(self):\n \"\"\"Test case which should pass.\"\"\"\n expected = SeqIO.read(\"Fasta/aster.pro\", \"fasta\")\n\n record = SeqIO.read(\"Fasta/aster_no_wrap.pro\", \"fasta\")\n self.assertEqual(expected.id, record.id)\n self.assertEqual(expected.name, record.name)\n self.assertEqual(expected.description, record.description)\n self.assertEqual(expected.seq, record.seq)\n\n record = SeqIO.read(\"Fasta/aster_no_wrap.pro\", \"fasta-2line\")\n self.assertEqual(expected.id, record.id)\n self.assertEqual(expected.name, record.name)\n self.assertEqual(expected.description, record.description)\n self.assertEqual(expected.seq, record.seq)\n\n\nclass TitleFunctions(unittest.TestCase):\n \"\"\"Test using title functions.\"\"\"\n\n def simple_check(self, filename):\n \"\"\"Test parsing single record FASTA files.\"\"\"\n msg = f\"Test failure parsing file {filename}\"\n title, seq = read_title_and_seq(filename) # crude parser\n idn, name, descr = title_to_ids(title)\n # First check using Bio.SeqIO.FastaIO directly with title2ids function.\n # (DEPRECATED)\n with self.assertWarns(BiopythonDeprecationWarning):\n records = FastaIterator(filename, title2ids=title_to_ids)\n record = next(records)\n with self.assertRaises(StopIteration):\n next(records)\n self.assertEqual(record.id, idn, msg=msg)\n self.assertEqual(record.name, name, msg=msg)\n self.assertEqual(record.description, descr, msg=msg)\n self.assertEqual(record.seq, seq, msg=msg)\n # Now check using Bio.SeqIO (default settings)\n record = SeqIO.read(filename, \"fasta\")\n self.assertEqual(record.id, title.split()[0], msg=msg)\n self.assertEqual(record.name, title.split()[0], msg=msg)\n self.assertEqual(record.description, title, msg=msg)\n self.assertEqual(record.seq, seq, msg=msg)\n # Uncomment this for testing the methods are calling the right files:\n # print(\"{%s done}\" % filename)\n\n def multi_check(self, filename):\n \"\"\"Test parsing multi-record FASTA files.\"\"\"\n msg = f\"Test failure parsing file {filename}\"\n # title2ids is deprecated\n with self.assertWarns(BiopythonDeprecationWarning):\n re_titled = list(FastaIterator(filename, title2ids=title_to_ids))\n default = list(SeqIO.parse(filename, \"fasta\"))\n self.assertEqual(len(re_titled), len(default), msg=msg)\n for old, new in zip(default, re_titled):\n idn, name, descr = title_to_ids(old.description)\n self.assertEqual(new.id, idn, msg=msg)\n self.assertEqual(new.name, name, msg=msg)\n self.assertEqual(new.description, descr, msg=msg)\n self.assertEqual(new.seq, old.seq, msg=msg)\n # Uncomment this for testing the methods are calling the right files:\n # print(\"{%s done}\" % filename)\n\n def test_no_name(self):\n \"\"\"Test FASTA record with no identifier.\"\"\"\n handle = StringIO(\">\\nACGT\")\n record = SeqIO.read(handle, \"fasta\")\n handle.close()\n self.assertEqual(record.seq, \"ACGT\")\n self.assertEqual(\"\", record.id)\n self.assertEqual(\"\", record.name)\n self.assertEqual(\"\", record.description)\n\n def test_single_nucleic_files(self):\n \"\"\"Test Fasta files containing a single nucleotide sequence.\"\"\"\n paths = (\n \"Fasta/lupine.nu\",\n \"Fasta/elderberry.nu\",\n \"Fasta/phlox.nu\",\n 
\"Fasta/centaurea.nu\",\n \"Fasta/wisteria.nu\",\n \"Fasta/sweetpea.nu\",\n \"Fasta/lavender.nu\",\n \"Fasta/f001\",\n )\n for path in paths:\n self.simple_check(path)\n\n def test_multi_dna_files(self):\n \"\"\"Test Fasta files containing multiple nucleotide sequences.\"\"\"\n paths = (\"Quality/example.fasta\",)\n for path in paths:\n self.multi_check(path)\n\n def test_single_proteino_files(self):\n \"\"\"Test Fasta files containing a single protein sequence.\"\"\"\n paths = (\n \"Fasta/aster.pro\",\n \"Fasta/rosemary.pro\",\n \"Fasta/rose.pro\",\n \"Fasta/loveliesbleeding.pro\",\n )\n for path in paths:\n self.simple_check(path)\n\n def test_multi_protein_files(self):\n \"\"\"Test Fasta files containing multiple protein sequences.\"\"\"\n paths = (\"Fasta/f002\", \"Fasta/fa01\")\n for path in paths:\n self.multi_check(path)\n\n\nclass TestSimpleFastaParsers(unittest.TestCase):\n \"\"\"Test SimpleFastaParser and FastaTwoLineParser directly.\"\"\"\n\n # Regular cases input strings and outputs\n ins_two_line = [\">1\\nACGT\", \">1\\nACGT\", \">1\\nACGT\\n>2\\nACGT\"]\n outs_two_line = [[(\"1\", \"ACGT\")], [(\"1\", \"ACGT\")], [(\"1\", \"ACGT\"), (\"2\", \"ACGT\")]]\n\n ins_multiline = [\">1\\nACGT\\nACGT\", \">1\\nACGT\\nACGT\\n>2\\nACGT\\nACGT\"]\n outs_multiline = [[(\"1\", \"ACGTACGT\")], [(\"1\", \"ACGTACGT\"), (\"2\", \"ACGTACGT\")]]\n\n # Edge case input strings and outputs\n ins_two_line_edges = [\">\\nACGT\", \">1\\n\\n\", \">1>1\\n\\n>1\\n\\n\", \"\"]\n outs_two_line_edges = [[(\"\", \"ACGT\")], [(\"1\", \"\")], [(\"1>1\", \"\"), (\"1\", \"\")], []]\n\n ins_simple_edges = [\">1\", \">1\\n\\n\\n\", \">\\n>1\\n>2\"]\n outs_simple_edges = [[(\"1\", \"\")], [(\"1\", \"\")], [(\"\", \"\"), (\"1\", \"\"), (\"2\", \"\")]]\n\n def test_regular_SimpleFastaParser(self):\n \"\"\"Test regular SimpleFastaParser cases.\"\"\"\n for inp, out in zip(self.ins_two_line, self.outs_two_line):\n handle1 = StringIO(inp)\n handle2 = StringIO(inp + \"\\n\")\n self.assertEqual(list(SimpleFastaParser(handle1)), out)\n self.assertEqual(list(SimpleFastaParser(handle2)), out)\n for inp, out in zip(self.ins_multiline, self.outs_multiline):\n handle1 = StringIO(inp)\n handle2 = StringIO(inp + \"\\n\")\n self.assertEqual(list(SimpleFastaParser(handle1)), out)\n self.assertEqual(list(SimpleFastaParser(handle2)), out)\n\n def test_regular_FastaTwoLineParser(self):\n \"\"\"Test regular FastaTwoLineParser cases.\"\"\"\n for inp, out in zip(self.ins_two_line, self.outs_two_line):\n handle1 = StringIO(inp)\n handle2 = StringIO(inp + \"\\n\")\n self.assertEqual(list(FastaTwoLineParser(handle1)), out)\n self.assertEqual(list(FastaTwoLineParser(handle2)), out)\n\n def test_edgecases_SimpleFastaParser(self):\n \"\"\"Test SimpleFastaParser edge-cases.\"\"\"\n for inp, out in zip(self.ins_two_line_edges, self.outs_two_line_edges):\n handle = StringIO(inp)\n self.assertEqual(list(SimpleFastaParser(handle)), out)\n for inp, out in zip(self.ins_simple_edges, self.outs_simple_edges):\n handle = StringIO(inp)\n self.assertEqual(list(SimpleFastaParser(handle)), out)\n\n def test_edgecases_FastaTwoLineParser(self):\n \"\"\"Test FastaTwoLineParser edge-cases.\"\"\"\n for inp, out in zip(self.ins_two_line_edges, self.outs_two_line_edges):\n handle = StringIO(inp)\n self.assertEqual(list(FastaTwoLineParser(handle)), out)\n\n def test_exceptions_FastaTwoLineParser(self):\n \"\"\"Test FastaTwoLineParser exceptions.\"\"\"\n for inp in self.ins_multiline + self.ins_simple_edges:\n handle = StringIO(inp)\n with 
self.assertRaises(ValueError):\n list(FastaTwoLineParser(handle))\n\n\nif __name__ == \"__main__\":\n runner = unittest.TextTestRunner(verbosity=2)\n unittest.main(testRunner=runner)\n","repo_name":"biopython/biopython","sub_path":"Tests/test_SeqIO_FastaIO.py","file_name":"test_SeqIO_FastaIO.py","file_ext":"py","file_size_in_byte":9367,"program_lang":"python","lang":"en","doc_type":"code","stars":3852,"dataset":"github-code","pt":"77"} +{"seq_id":"17171113946","text":"menu = \"\"\"\n[d] Deposit\n[w] Withdraw\n[b] Balance\n[q] Quit\n\n=> \"\"\"\n\nbalance = 0\nlimit = 500\nstatement = \"\"\nnum_withdrawals = 0\nWITHDRAWAL_LIMIT = 3\n\nwhile True:\n option = input(menu)\n\n if option == \"d\":\n value = float(input(\"Enter the deposit amount: \"))\n if value <= 0:\n print(\"Operation failed! The value entered is invalid.\")\n continue\n\n balance += value\n statement += f\"Deposit: R$ {value:.2f}\\n\"\n\n elif option == \"w\":\n value = float(input(\"Enter the withdrawal amount: \"))\n\n if value > balance:\n print(\"Operation failed! You don't have sufficient balance.\")\n elif value > limit:\n print(\"Operation failed! The withdrawal amount exceeds the limit.\")\n elif num_withdrawals >= WITHDRAWAL_LIMIT:\n print(\"Operation failed! Maximum number of withdrawals exceeded.\")\n elif value <= 0:\n print(\"Operation failed! The value entered is invalid.\")\n else:\n balance -= value\n statement += f\"Withdrawal: R$ {value:.2f}\\n\"\n num_withdrawals += 1\n\n elif option == \"b\":\n print(\"\\n================ STATEMENT ================\")\n print(\"No transactions made.\" if not statement else statement)\n print(f\"\\nBalance: R$ {balance:.2f}\")\n print(\"============================================\")\n\n elif option == \"q\":\n break\n\n else:\n print(\"Invalid operation, please select the desired operation again.\")\n","repo_name":"lgustavopuga/Python-Banking-System","sub_path":"Bank-System.py","file_name":"Bank-System.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1388999610","text":"import datetime\n\nfrom django.shortcuts import render\nfrom .models import Ticket, Airport\n\n\ndef airtickets(request):\n airports = Airport.objects.all()\n companies = Ticket.objects.values_list('company', flat=True).distinct()\n agencies = Ticket.objects.values_list('agency', flat=True).distinct()\n\n if request.method == 'POST':\n search_param = {\n 'departurePoint': request.POST['departurePoint'],\n 'destinationPoint': request.POST['destinationPoint'],\n 'departureTime': request.POST['departureTime'],\n 'companies': request.POST.getlist('companies'),\n 'agencies': request.POST.getlist('agencies'),\n 'sort': request.POST['sort'],\n 'min_price': request.POST['min_price'],\n 'max_price': request.POST['max_price']\n }\n tickets = Ticket.objects.filter(\n departurePoint__code=search_param['departurePoint'][-4:-1],\n destinationPoint__code=search_param['destinationPoint'][-4:-1],\n departureTime__date=search_param['departureTime'],\n company__in=search_param['companies'],\n agency__in=search_param['agencies'],\n price__range=(search_param['min_price'], search_param['max_price'])\n ).order_by(search_param['sort'])\n else:\n tickets = Ticket.objects.filter(departureTime__date=datetime.date.today())\n search_param = {\n 'companies': companies,\n 'agencies': agencies,\n 'departureTime': datetime.date.today().__str__()\n }\n\n return render(request, 'airtickets/tickets.html', {\n 'tickets': tickets,\n 
'airports': airports,\n 'companies': companies,\n 'agencies': agencies,\n 'search_param': search_param\n })\n","repo_name":"archthorn/TSPP_CW","sub_path":"airtickets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4807198823","text":"'''\ngetting started with some general tests for the talklib module via Pytest.\n'''\nimport pytest\nfrom unittest.mock import patch\n\nfrom talklib import TLShow\nfrom .. import mock\nfrom ..mock import env_vars\n\n# this RSS feed chosen as test feed because it is reliably updated every day \n# (many times per day) and because the audio file is short/small!\n\nurl = 'https://feeds.npr.org/500005/podcast.xml'\n\n@pytest.fixture()\ndef template():\n with patch.dict('os.environ', env_vars):\n test = TLShow()\n test.show = 'Delete Me'\n test.show_filename = 'delete_me'\n test.url = url\n\n test.destinations = mock.mock_destinations()\n\n # disable notifications for testing. Need separate tests for these!\n test.notifications = False\n test.syslog_enable = False\n \n yield test\n\n mock.remove_destinations()\n\n# ---------- run ----------\n \ndef test_run(template: TLShow):\n '''implementation test with real audio. asserts that no exceptions are raised'''\n template.run()\n\ndef test_run2(template: TLShow):\n template.url = 'invalid_URL'\n with pytest.raises(Exception):\n template.run()\n\ndef test_run3(template: TLShow):\n '''assert an exception is raised when the URL is a valid URL but not an rss feed'''\n template.url = 'https://pnsne.ws/3mVuTax'\n with pytest.raises(Exception):\n template.run()\n\ndef test_run_bad_feed(template: TLShow):\n '''asserts an exception is raised for a non-updated feed'''\n non_updated_feed = 'https://www.pythonpodcast.com/rss' # hasn't been updated in a while.\n template.url = non_updated_feed\n with pytest.raises(Exception):\n with patch('builtins.input', return_value='y'):\n template.run()","repo_name":"talkinglibrary/talklib","sub_path":"src/tests/integration/test_rss.py","file_name":"test_rss.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"14110198029","text":"file_to_write = open('authors_no_tag_text_results.txt', 'w', encoding=\"utf8\", errors=\"replace\")\nimport os\nlist_of_possible_tags = ['FULL COURT', 'COURT OF APPEAL', 'COURT OF CRIMINAL APPEAL', 'FULL BENCH']\n\nwith open('authors_no_tag_text.txt', 'r', encoding=\"utf8\", errors=\"replace\") as list_of_files_to_read:\n lines_in_files_to_read = list_of_files_to_read.readlines()\n for file_in_list in lines_in_files_to_read:\n with open('d:\\\\txtutf\\\\' + file_in_list.strip(), encoding=\"utf8\", errors='ignore') as f:\n lines_found = ''\n lines = f.readlines()\n line_number = 0\n possible_tag_found = 0\n for line in lines:\n line_number = line_number + 1\n if line_number < 30:\n regularcase_line_stripped = line.strip()\n uppercase_line = line.upper()\n uppercase_line_stripped = uppercase_line.strip()\n for possible_tag in list_of_possible_tags:\n if possible_tag in uppercase_line_stripped:\n #if possible_tag_found == 1:\n lines_found = lines_found + '|' + uppercase_line.strip()\n #possible_tag_found = 0\n #if \"REASONS FOR JUDGMENT\" in uppercase_line_stripped:\n #possible_tag_found = 1\n file_to_write.write(file_in_list.strip() + lines_found + 
\"\\n\")","repo_name":"jamesdalmau/Download_from_Austlii","sub_path":"src/FindAuthorsNoTagText.py","file_name":"FindAuthorsNoTagText.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"34888043123","text":"from IrisLoader import *\nfrom HRLoader import *\nfrom KeplerLoader import *\n\ndef main():\n #add the last / for the directory path.\n path = ''# all CSVs need to be in same folder\n irisLoader(path)\n HRLoader(path)\n #KeplerFluxLoader(path)\n #dropAll()\n\ndef dropAll():\n dropIrisTables()\n dropHR()\n dropKeplerTables()\n print(\"done.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MarkTigchelaar/KaggleDatabaseLoader","sub_path":"src/DataBaseLoader.py","file_name":"DataBaseLoader.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9608170988","text":"import os\nfrom typing import Dict, List, Optional, Union\n\nimport numpy as np\nimport torch\nimport transformers\n\nfrom opencompass.models.base import BaseModel\nfrom opencompass.models.base_api import APITemplateParser\nfrom opencompass.registry import MODELS\nfrom opencompass.utils.logging import get_logger\nfrom opencompass.utils.prompt import PromptList\n\nPromptType = Union[PromptList, str]\n\n\nclass MultiTokenEOSCriteria(transformers.StoppingCriteria):\n \"\"\"Criteria to stop on the specified multi-token sequence.\"\"\"\n\n def __init__(\n self,\n sequence: str,\n tokenizer: transformers.PreTrainedTokenizer,\n batch_size: int,\n ):\n self.done_tracker = [False] * batch_size\n self.sequence = sequence\n self.sequence_ids = tokenizer.encode(sequence,\n add_special_tokens=False)\n self.sequence_id_len = len(self.sequence_ids)\n self.tokenizer = tokenizer\n\n def __call__(self, input_ids, scores, **kwargs) -> bool:\n # compare the last len(stop) tokens\n lookback_ids_batch = input_ids[:, -self.sequence_id_len:]\n lookback_tokens_batch = self.tokenizer.batch_decode(lookback_ids_batch)\n for i, done in enumerate(self.done_tracker):\n if done:\n continue\n self.done_tracker[i] = self.sequence in lookback_tokens_batch[i]\n return False not in self.done_tracker\n\n\n@MODELS.register_module()\nclass HuggingFace(BaseModel):\n \"\"\"Model wrapper around HuggingFace models.\n\n Args:\n path (str): The name or path to HuggingFace's model.\n hf_cache_dir: Set the cache dir to HF model cache dir. If None, it will\n use the env variable HF_MODEL_HUB. Defaults to None.\n max_seq_len (int): The maximum length of the input sequence. Defaults\n to 2048.\n tokenizer_path (str): The path to the tokenizer. Defaults to None.\n tokenizer_kwargs (dict): Keyword arguments for the tokenizer.\n Defaults to {}.\n peft_path (str, optional): The name or path to the HuggingFace's PEFT\n model. If None, the original model will not be converted to PEFT.\n Defaults to None.\n tokenizer_only (bool): If True, only the tokenizer will be initialized.\n Defaults to False.\n model_kwargs (dict): Keyword arguments for the model, used in loader.\n Defaults to dict(device_map='auto').\n meta_template (Dict, optional): The model's meta prompt\n template if needed, in case the requirement of injecting or\n wrapping of any meta instructions.\n extract_pred_after_decode (bool): Whether to extract the prediction\n string from the decoded output string, instead of extract the\n prediction tokens before decoding. 
Defaults to False.\n batch_padding (bool): If False, inference with be performed in for-loop\n without batch padding.\n pad_token_id (int): The id of the padding token. Defaults to None. Use\n (#vocab + pad_token_id) if get negative value.\n mode (str, optional): The method of input truncation when input length\n exceeds max_seq_len. 'mid' represents the part of input to\n truncate. Defaults to 'none'.\n use_fastchat_template (str, optional): Whether to use fastchat to get\n the conversation template. If True, fastchat needs to be\n implemented first. Defaults to False.\n end_str (str, optional): Whether to trim generated strings with end_str\n if the model has special ending strings that are not handled well.\n Defaults to None.\n\n Note:\n About ``extract_pred_after_decode``: Commonly, we should extract the\n the prediction tokens before decoding. But for some tokenizers using\n ``sentencepiece``, like LLaMA, this behavior may change the number of\n whitespaces, which is harmful for Python programming tasks.\n \"\"\"\n\n def __init__(self,\n path: str,\n hf_cache_dir: Optional[str] = None,\n max_seq_len: int = 2048,\n tokenizer_path: Optional[str] = None,\n tokenizer_kwargs: dict = dict(),\n peft_path: Optional[str] = None,\n tokenizer_only: bool = False,\n model_kwargs: dict = dict(device_map='auto'),\n generation_kwargs: dict = dict(),\n meta_template: Optional[Dict] = None,\n extract_pred_after_decode: bool = False,\n batch_padding: bool = False,\n pad_token_id: Optional[int] = None,\n mode: str = 'none',\n use_fastchat_template: bool = False,\n end_str: Optional[str] = None):\n super().__init__(path=path,\n max_seq_len=max_seq_len,\n tokenizer_only=tokenizer_only,\n meta_template=meta_template)\n from opencompass.utils.fileio import patch_hf_auto_model\n if hf_cache_dir is None:\n hf_cache_dir = os.getenv('HF_MODEL_HUB', None)\n patch_hf_auto_model(hf_cache_dir)\n self.logger = get_logger()\n self.pad_token_id = pad_token_id\n assert mode in ['none', 'mid']\n self.mode = mode\n self._load_tokenizer(path=path,\n tokenizer_path=tokenizer_path,\n tokenizer_kwargs=tokenizer_kwargs)\n self.batch_padding = batch_padding\n self.extract_pred_after_decode = extract_pred_after_decode\n if not tokenizer_only:\n self._load_model(path=path,\n model_kwargs=model_kwargs,\n peft_path=peft_path)\n self.generation_kwargs = generation_kwargs\n self.use_fastchat_template = use_fastchat_template\n self.end_str = end_str\n\n def _load_tokenizer(self, path: str, tokenizer_path: Optional[str],\n tokenizer_kwargs: dict):\n from transformers import AutoTokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(\n tokenizer_path if tokenizer_path else path, **tokenizer_kwargs)\n\n # A patch for some models without pad_token_id\n if self.pad_token_id is not None:\n if self.pad_token_id < 0:\n self.pad_token_id += self.tokenizer.vocab_size\n if self.tokenizer.pad_token_id is None:\n self.logger.debug(f'Using {self.pad_token_id} as pad_token_id')\n elif self.tokenizer.pad_token_id != self.pad_token_id:\n self.logger.warning(\n 'pad_token_id is not consistent with the tokenizer. 
Using '\n f'{self.pad_token_id} as pad_token_id')\n self.tokenizer.pad_token_id = self.pad_token_id\n elif self.tokenizer.pad_token_id is None:\n self.logger.warning('pad_token_id is not set for the tokenizer.')\n if self.tokenizer.eos_token is not None:\n self.logger.warning(\n f'Using eos_token_id {self.tokenizer.eos_token} '\n 'as pad_token_id.')\n self.tokenizer.pad_token = self.tokenizer.eos_token\n else:\n from transformers.generation import GenerationConfig\n gcfg = GenerationConfig.from_pretrained(path)\n\n if gcfg.pad_token_id is not None:\n self.logger.warning(\n f'Using pad_token_id {gcfg.pad_token_id} '\n 'as pad_token_id.')\n self.tokenizer.pad_token_id = gcfg.pad_token_id\n else:\n raise ValueError(\n 'pad_token_id is not set for this tokenizer. Try to '\n 'set pad_token_id via passing '\n '`pad_token_id={PAD_TOKEN_ID}` in model_cfg.')\n\n # A patch for llama when batch_padding = True\n if 'decapoda-research/llama' in path or \\\n (tokenizer_path and\n 'decapoda-research/llama' in tokenizer_path):\n self.logger.warning('We set new pad_token_id for LLaMA model')\n # keep consistent with official LLaMA repo\n # https://github.com/google/sentencepiece/blob/master/python/sentencepiece_python_module_example.ipynb # noqa\n self.tokenizer.bos_token = ''\n self.tokenizer.eos_token = ''\n self.tokenizer.pad_token_id = 0\n\n def _set_model_kwargs_torch_dtype(self, model_kwargs):\n if 'torch_dtype' not in model_kwargs:\n torch_dtype = torch.float16\n else:\n torch_dtype = {\n 'torch.float16': torch.float16,\n 'torch.bfloat16': torch.bfloat16,\n 'torch.float': torch.float,\n 'auto': 'auto',\n 'None': None\n }.get(model_kwargs['torch_dtype'])\n self.logger.debug(f'HF using torch_dtype: {torch_dtype}')\n if torch_dtype is not None:\n model_kwargs['torch_dtype'] = torch_dtype\n\n def _load_model(self,\n path: str,\n model_kwargs: dict,\n peft_path: Optional[str] = None):\n from transformers import AutoModel, AutoModelForCausalLM\n\n self._set_model_kwargs_torch_dtype(model_kwargs)\n try:\n self.model = AutoModelForCausalLM.from_pretrained(\n path, **model_kwargs)\n except ValueError:\n self.model = AutoModel.from_pretrained(path, **model_kwargs)\n\n if peft_path is not None:\n from peft import PeftModel\n self.model = PeftModel.from_pretrained(self.model,\n peft_path,\n is_trainable=False)\n self.model.eval()\n self.model.generation_config.do_sample = False\n\n # A patch for llama when batch_padding = True\n if 'decapoda-research/llama' in path:\n self.model.config.bos_token_id = 1\n self.model.config.eos_token_id = 2\n self.model.config.pad_token_id = self.tokenizer.pad_token_id\n\n def generate(self,\n inputs: List[str],\n max_out_len: int,\n stopping_criteria: List[str] = [],\n **kwargs) -> List[str]:\n \"\"\"Generate results given a list of inputs.\n\n Args:\n inputs (List[str]): A list of strings.\n max_out_len (int): The maximum length of the output.\n\n Returns:\n List[str]: A list of generated strings.\n \"\"\"\n generation_kwargs = kwargs.copy()\n generation_kwargs.update(self.generation_kwargs)\n if self.batch_padding and len(inputs) > 1:\n return self._batch_generate(inputs=inputs,\n max_out_len=max_out_len,\n **generation_kwargs)\n else:\n return sum(\n (self._single_generate(inputs=[input_],\n max_out_len=max_out_len,\n stopping_criteria=stopping_criteria,\n **generation_kwargs)\n for input_ in inputs), [])\n\n def _batch_generate(self, inputs: List[str], max_out_len: int,\n **kwargs) -> List[str]:\n \"\"\"Support for batch prompts inference.\n\n Args:\n inputs (List[str]): A 
list of strings.\n max_out_len (int): The maximum length of the output.\n\n Returns:\n List[str]: A list of generated strings.\n \"\"\"\n if self.extract_pred_after_decode:\n prompt_lens = [len(input_) for input_ in inputs]\n\n if self.use_fastchat_template:\n try:\n from fastchat.model import get_conversation_template\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\n 'Fastchat is not implemented. You can use '\n '\\'pip install \"fschat[model_worker,webui]\"\\' '\n 'to implement fastchat.')\n for i in range(len(inputs)):\n conv = get_conversation_template('vicuna')\n conv.append_message(conv.roles[0], inputs[i])\n conv.append_message(conv.roles[1], None)\n inputs[i] = conv.get_prompt()\n\n # step-1: tokenize the input with batch_encode_plus\n tokens = self.tokenizer.batch_encode_plus(inputs,\n padding=True,\n truncation=True,\n max_length=self.max_seq_len -\n max_out_len)\n tokens = {\n k: torch.tensor(np.array(tokens[k]), device=self.model.device)\n for k in tokens if k in ['input_ids', 'attention_mask']\n }\n\n # step-2: conduct model forward to generate output\n outputs = self.model.generate(**tokens,\n max_new_tokens=max_out_len,\n **kwargs)\n\n if not self.extract_pred_after_decode:\n outputs = outputs[:, tokens['input_ids'].shape[1]:]\n\n decodeds = self.tokenizer.batch_decode(outputs,\n skip_special_tokens=True)\n\n if self.extract_pred_after_decode:\n decodeds = [\n token[len_:] for token, len_ in zip(decodeds, prompt_lens)\n ]\n\n if self.end_str:\n decodeds = [token.split(self.end_str)[0] for token in decodeds]\n return decodeds\n\n def _single_generate(self,\n inputs: List[str],\n max_out_len: int,\n stopping_criteria: List[str] = [],\n **kwargs) -> List[str]:\n \"\"\"Support for single prompt inference.\n\n Args:\n inputs (List[str]): A list of strings.\n max_out_len (int): The maximum length of the output.\n\n Returns:\n List[str]: A list of generated strings.\n \"\"\"\n if self.extract_pred_after_decode:\n prompt_lens = [len(input_) for input_ in inputs]\n\n if self.use_fastchat_template:\n try:\n from fastchat.model import get_conversation_template\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\n 'Fastchat is not implemented. 
You can use '\n '\\'pip install \"fschat[model_worker,webui]\"\\' '\n 'to implement fastchat.')\n conv = get_conversation_template('vicuna')\n conv.append_message(conv.roles[0], inputs[0])\n conv.append_message(conv.roles[1], None)\n inputs = [conv.get_prompt()]\n\n if self.mode == 'mid':\n input_ids = self.tokenizer(inputs, truncation=False)['input_ids']\n input_ids = torch.tensor(input_ids, device=self.model.device)\n if len(input_ids[0]) > self.max_seq_len - max_out_len:\n half = int((self.max_seq_len - max_out_len) / 2)\n inputs = [\n self.tokenizer.decode(input_ids[0][:half],\n skip_special_tokens=True) +\n self.tokenizer.decode(input_ids[0][-half:],\n skip_special_tokens=True)\n ]\n\n input_ids = self.tokenizer(inputs,\n truncation=True,\n max_length=self.max_seq_len -\n max_out_len)['input_ids']\n input_ids = torch.tensor(input_ids, device=self.model.device)\n\n if stopping_criteria:\n # Construct huggingface stopping criteria\n stopping_criteria = stopping_criteria + [self.tokenizer.eos_token]\n stopping_criteria = transformers.StoppingCriteriaList([\n *[\n MultiTokenEOSCriteria(sequence, self.tokenizer,\n input_ids.shape[0])\n for sequence in stopping_criteria\n ],\n ])\n kwargs['stopping_criteria'] = stopping_criteria\n\n # To accommodate the PeftModel, parameters should be passed in\n # key-value format for generate.\n outputs = self.model.generate(input_ids=input_ids,\n max_new_tokens=max_out_len,\n **kwargs)\n\n if not self.extract_pred_after_decode:\n outputs = outputs[:, input_ids.shape[1]:]\n\n decodeds = self.tokenizer.batch_decode(outputs,\n skip_special_tokens=True)\n\n if self.extract_pred_after_decode:\n decodeds = [\n token[len_:] for token, len_ in zip(decodeds, prompt_lens)\n ]\n\n if self.end_str:\n decodeds = [token.split(self.end_str)[0] for token in decodeds]\n return decodeds\n\n def get_logits(self, inputs: List[str]):\n\n if self.batch_padding and len(inputs) > 1:\n # batch inference\n tokens = self.tokenizer(inputs,\n padding=True,\n truncation=True,\n max_length=self.max_seq_len)\n\n tokens = {\n k: torch.tensor(np.array(tokens[k]), device=self.model.device)\n for k in tokens if k in ['input_ids', 'attention_mask']\n }\n outputs = self.model(**tokens)\n\n else:\n input_ids = self.tokenizer(\n inputs,\n padding=False,\n truncation=True,\n max_length=self.max_seq_len)['input_ids']\n input_ids = torch.tensor(input_ids, device=self.model.device)\n tokens = {'input_ids': input_ids}\n\n outputs = self.model(input_ids)\n return outputs[0], {'tokens': tokens}\n\n def get_ppl(self,\n inputs: List[str],\n mask_length: Optional[List[int]] = None) -> List[float]:\n \"\"\"Get perplexity scores given a list of inputs.\n\n Args:\n inputs (List[str]): A list of strings.\n mask_length (Optional[List[int]]): A list of mask lengths. If\n provided, the perplexity scores will be calculated with the\n first mask_length[i] tokens masked out. 
It's okay to skip\n its implementation if advanced features in PPLInfernecer is\n not needed.\n\n Returns:\n List[float]: A list of perplexity scores.\n \"\"\"\n\n if self.batch_padding and len(inputs) > 1:\n assert self.tokenizer.pad_token\n return self._get_ppl(inputs, mask_length=mask_length)\n else:\n return np.concatenate([\n self._get_ppl(inputs=[text], mask_length=mask_length)\n for text in inputs\n ])\n\n def _get_ppl(self,\n inputs: List[str],\n mask_length: Optional[List[int]] = None) -> List[float]:\n \"\"\"Get perplexity scores given a list of inputs.\n\n Args:\n inputs (List[str]): A list of strings.\n mask_length (Optional[List[int]]): A list of mask lengths. If\n provided, the perplexity scores will be calculated with the\n first mask_length[i] tokens masked out. It's okay to skip\n its implementation if advanced features in PPLInfernecer is\n not needed.\n\n Returns:\n List[float]: A list of perplexity scores.\n \"\"\"\n\n outputs, inputs = self.get_logits(inputs)\n shift_logits = outputs[..., :-1, :].contiguous().float()\n\n shift_labels = inputs['tokens']['input_ids'][..., 1:].contiguous()\n\n loss_fct = torch.nn.CrossEntropyLoss(\n reduction='none', ignore_index=self.tokenizer.pad_token_id)\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1)).view(shift_labels.size())\n\n if mask_length is not None:\n mask = torch.zeros_like(shift_labels) # [batch,seqlen]\n for i in range(len(mask)):\n for j in range(mask_length[i] - 1, len(mask[i])):\n mask[i][j] = 1\n loss = loss * mask\n\n lens = (inputs['tokens']['input_ids'] !=\n self.tokenizer.pad_token_id).sum(-1).cpu().numpy()\n if mask_length is not None:\n lens -= np.array(mask_length)\n ce_loss = loss.sum(-1).cpu().detach().numpy() / lens\n return ce_loss\n\n def get_loglikelihood(\n self,\n inputs: List[str],\n conts: List[str],\n mask_length: Optional[List[int]] = None) -> List[float]:\n \"\"\"Get loglikelihood scores given a list of inputs.\n\n Args:\n inputs (List[str]): A list of strings.\n conts (List[str]): A list of strings: slices after the space.\n NOT SUPPORT mask_length YET!\n mask_length (Optional[List[int]]): A list of mask lengths. If\n provided, the perplexity scores will be calculated with the\n first mask_length[i] tokens masked out. 
It's okay to skip\n its implementation if advanced features in PPLInfernecer is\n not needed.\n\n Returns:\n List[float]: A list of loglikelihood scores.\n \"\"\"\n assert mask_length is None, 'Not support mask_length yet.'\n if self.batch_padding and len(inputs) > 1:\n raise NotImplementedError('Batch padding is not supported yet.')\n # assert self.tokenizer.pad_token\n # return self._get_loglikelihood(inputs, mask_length=mask_length)\n return np.array([\n self._get_loglikelihood(inputs=inputs[idx], conts=conts[idx])\n for idx in range(len(inputs))\n ])\n\n def _get_loglikelihood(self, inputs: str, conts: str) -> float:\n \"\"\"Get loglikelihood scores given input string and continuation string.\n\n Args:\n inputs (str): string.\n conts (str): strings: slices after the space.\n Returns:\n float: loglikelihood scores.\n \"\"\"\n\n input_ids = self.tokenizer(inputs,\n padding=False,\n truncation=True,\n max_length=self.max_seq_len)['input_ids']\n input_ids = torch.tensor(input_ids, device=self.model.device)\n context_ids = self.tokenizer(inputs.replace(conts, ''),\n padding=False,\n truncation=True,\n max_length=self.max_seq_len)['input_ids']\n cont_ids = input_ids[len(context_ids):]\n\n output = self.model(input_ids.unsqueeze(0))\n logits = output['logits'][:, :-1]\n logits = torch.nn.functional.log_softmax(logits, dim=-1)\n contlen = cont_ids.shape[0]\n logits = logits[:, -contlen:, :]\n # Reducing the dimension will lead to a wrong outcome\n logits_gather = torch.gather(\n logits, 2,\n cont_ids.unsqueeze(0).unsqueeze(-1)) # [1, seq]\n\n # Answer: sum the likelihood of each token in continuation\n answer = float(logits_gather.detach().cpu().sum())\n return answer\n\n def get_token_len(self, prompt: str) -> int:\n \"\"\"Get lengths of the tokenized strings.\n\n Args:\n prompt (str): Input string.\n\n Returns:\n int: Length of the input tokens\n \"\"\"\n return len(self.tokenizer.encode(prompt))\n\n\n@MODELS.register_module()\nclass HuggingFaceCausalLM(HuggingFace):\n \"\"\"Model wrapper around HuggingFace CausalLM.\n\n Args:\n path (str): The name or path to HuggingFace's model.\n hf_cache_dir: Set the cache dir to HF model cache dir. If None, it will\n use the env variable HF_MODEL_HUB. Defaults to None.\n max_seq_len (int): The maximum length of the input sequence. Defaults\n to 2048.\n tokenizer_path (str): The path to the tokenizer. Defaults to None.\n tokenizer_kwargs (dict): Keyword arguments for the tokenizer.\n Defaults to {}.\n peft_path (str, optional): The name or path to the HuggingFace's PEFT\n model. 
If None, the original model will not be converted to PEFT.\n Defaults to None.\n tokenizer_only (bool): If True, only the tokenizer will be initialized.\n Defaults to False.\n model_kwargs (dict): Keyword arguments for the model, used in loader.\n Defaults to dict(device_map='auto').\n meta_template (Dict, optional): The model's meta prompt\n template if needed, in case the requirement of injecting or\n wrapping of any meta instructions.\n batch_padding (bool): If False, inference with be performed in for-loop\n without batch padding.\n \"\"\"\n\n def _load_model(self,\n path: str,\n model_kwargs: dict,\n peft_path: Optional[str] = None):\n from transformers import AutoModelForCausalLM\n\n self._set_model_kwargs_torch_dtype(model_kwargs)\n self.model = AutoModelForCausalLM.from_pretrained(path, **model_kwargs)\n if peft_path is not None:\n from peft import PeftModel\n self.model = PeftModel.from_pretrained(self.model,\n peft_path,\n is_trainable=False)\n self.model.eval()\n self.model.generation_config.do_sample = False\n\n\nclass HuggingFaceChatGLM3(HuggingFace):\n \"\"\"Model wrapper around HuggingFace's ChatGLM3. Details available in\n `https://huggingface.co/THUDM/chatglm3-6b`.\n\n model.chat() is used for inference.\n \"\"\"\n\n def __init__(self,\n path: str,\n hf_cache_dir: Optional[str] = None,\n max_seq_len: int = 2048,\n tokenizer_path: Optional[str] = None,\n tokenizer_kwargs: dict = dict(),\n peft_path: Optional[str] = None,\n tokenizer_only: bool = False,\n model_kwargs: dict = dict(device_map='auto'),\n meta_template: Optional[Dict] = None,\n extract_pred_after_decode: bool = False,\n batch_padding: bool = False,\n pad_token_id: Optional[int] = None,\n mode: str = 'none',\n num_extra_tokens: int = 50):\n super().__init__(path=path,\n hf_cache_dir=hf_cache_dir,\n max_seq_len=max_seq_len,\n tokenizer_path=tokenizer_path,\n tokenizer_kwargs=tokenizer_kwargs,\n peft_path=peft_path,\n tokenizer_only=tokenizer_only,\n model_kwargs=model_kwargs,\n meta_template=meta_template,\n extract_pred_after_decode=extract_pred_after_decode,\n batch_padding=batch_padding,\n pad_token_id=pad_token_id,\n mode=mode)\n self.template_parser = APITemplateParser(meta_template)\n # used to compensate for #tokens occupied by sth like system prompt\n self.num_extra_tokens = num_extra_tokens\n\n def generate(self,\n inputs: List[str or PromptList],\n max_out_len: int = 512,\n temperature: float = 0.6,\n skip_overlength=False) -> str:\n \"\"\"Generate response from input prompt.\n\n Args:\n inputs (list): input prompt\n max_out_len (int): max output length\n temperature (float): temperature for sampling\n \"\"\"\n responses = []\n for _input in inputs:\n assert isinstance(_input, (str, PromptList))\n if isinstance(_input, str):\n history = [{'role': 'user', 'content': _input}]\n else:\n history = []\n for item in _input:\n msg = {\n 'content': item['prompt'],\n 'role': {\n 'HUMAN': 'user',\n 'BOT': 'assistant',\n 'SYSTEM': 'system',\n }[item['role'].upper()]\n }\n history.append(msg)\n user_content = history[-1]['content']\n history = history[:-1]\n\n if skip_overlength:\n # The model will report the following error\n # if the sequence length is greater than the maximum length:\n # \"Input length of input_ids is {INPUT_IDS},\n # but `max_length` is set to 8192.\n # This can lead to unexpected behavior.\n # You should consider increasing `max_new_tokens`.\"\n # The following hardcode can fix this exception.\n len_user_content = len(self.tokenizer.encode(user_content))\n if len_user_content > 8192:\n 
responses.append('')\n continue\n\n try:\n response, history = self.model.chat(self.tokenizer,\n user_content,\n history=history)\n # response will be dict sometime\n if isinstance(response, dict):\n response = response.get('content', '')\n responses.append(response)\n except Exception:\n responses.append('')\n return responses\n\n def get_token_len(self, prompt: str) -> int:\n return len(self.tokenizer.encode(prompt)) + self.num_extra_tokens\n","repo_name":"helloyongyang/opencompass","sub_path":"opencompass/models/huggingface.py","file_name":"huggingface.py","file_ext":"py","file_size_in_byte":30274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"2496050350","text":"import os\nimport tempfile\nfrom time import sleep\nimport boto3\n\nfrom tts.TTS import TTS\nfrom pydub import AudioSegment\nfrom contextlib import closing\n\n\nclass AWSPolly(TTS):\n def __init__(self):\n # Create a polly client using default settings\n self.client = boto3.client('polly')\n\n def text_to_speech(self, plain_text, output_filename):\n responses = []\n\n words = plain_text.split()\n words.reverse()\n current_seg = \"\"\n\n while len(words) > 0:\n word = words.pop()\n\n # If we have gone over the boundary, process the word\n if len(current_seg) + len(word) > 1500:\n # Push the last word back onto the stack and remove the word\n words.append(word)\n # Process the element\n responses.append(self.__synthesize_speech(current_seg))\n # Cleanup segment\n current_seg = \"\"\n else:\n current_seg = current_seg + \" \" + word\n\n # Catch any left over words\n if len(words) == 0 and len(current_seg) > 0:\n responses.append(self.__synthesize_speech(current_seg))\n\n # Merge all the responses\n self.__merge_responses(responses, output_filename)\n\n def __synthesize_speech(self, text_input):\n sleep(0.1)\n print(\"Sending request to AWS Polly. 
Size: {}\".format(len(text_input)))\n response = self.client.synthesize_speech(\n OutputFormat='mp3',\n SampleRate='22050',\n Text=text_input,\n TextType='text',\n VoiceId='Amy'\n )\n return response\n\n @staticmethod\n def __merge_responses(responses, output_filename):\n\n print(\"Saving the response to disk...\")\n\n # AudioSegments\n audio_segments = []\n\n # Save each response to disk in order\n with tempfile.TemporaryDirectory() as dirname:\n for response in responses:\n if \"AudioStream\" in response:\n with closing(response[\"AudioStream\"]) as stream:\n data = stream.read()\n temp_filename = os.path.join(dirname, 'tempmp3')\n fp = open(temp_filename, 'wb')\n fp.write(data)\n fp.close()\n audio_segments.append(AudioSegment.from_mp3(temp_filename))\n else:\n print(\"Bad/Empty response from AWS Polly\")\n\n # Merge all the responses into one\n if len(audio_segments) > 0:\n merged_audio = audio_segments[0]\n for index, item in enumerate(audio_segments):\n if index != 0:\n merged_audio = merged_audio + item\n\n # Save the merged audio\n merged_audio.export(output_filename, format=\"mp3\")\n else:\n print(\"No audio segments to process\")\n","repo_name":"martysweet/latex-to-speech","sub_path":"tts/AWSPolly.py","file_name":"AWSPolly.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"77"} +{"seq_id":"48336786823","text":"import unittest\n\nfrom beanmachine.ppl.inference.bmg_inference import BMGInference\nfrom torch import tensor\n\nfrom .testlib.conjugate_models import BetaBernoulliScaleHyperParameters\n\n\nclass BetaBernoulliWithScaledHPConjugateTest(unittest.TestCase):\n def test_beta_bernoulli_conjugate_graph(self) -> None:\n model = BetaBernoulliScaleHyperParameters(0.5, 1.5)\n queries = [model.theta()]\n observations = {\n model.y(0): tensor(0.0),\n model.y(1): tensor(0.0),\n model.y(2): tensor(1.0),\n model.y(3): tensor(0.0),\n }\n num_samples = 1000\n bmg = BMGInference()\n\n # This is the model after beta-bernoulli conjugate rewrite is done\n skip_optimizations = set()\n observed_bmg = bmg.to_dot(\n queries, observations, num_samples, skip_optimizations=skip_optimizations\n )\n expected_bmg = \"\"\"\ndigraph \"graph\" {\n N0[label=1.5];\n N1[label=6.5];\n N2[label=Beta];\n N3[label=Sample];\n N4[label=Query];\n N0 -> N2;\n N1 -> N2;\n N2 -> N3;\n N3 -> N4;\n}\n\"\"\"\n self.assertEqual(expected_bmg.strip(), observed_bmg.strip())\n","repo_name":"facebookresearch/beanmachine","sub_path":"tests/ppl/compiler/fix_beta_bernoulli_const_added_test.py","file_name":"fix_beta_bernoulli_const_added_test.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":259,"dataset":"github-code","pt":"77"} +{"seq_id":"71195575608","text":"from __future__ import print_function\n\nfrom datetime import datetime\nimport os\nimport sh\nimport sys\nimport tensorflow as tf\nfrom tensorflow import data\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.python.tools import freeze_graph\nfrom tensorflow.python import ops\nfrom tensorflow.tools.graph_transforms import TransformGraph\n\nfrom inference_test import inference_test, load_mnist_keras\nfrom optimize_graph import (run_experiment, get_graph_def_from_saved_model,\n describe_graph, get_size, get_metagraph, get_graph_def_from_file,\n convert_graph_def_to_saved_model, freeze_model, optimize_graph, TRANSFORMS)\n\nNUM_CLASSES = 10\nMODELS_LOCATION = 'models/mnist'\nMODEL_NAME = 
'keras_classifier'\n\n\ndef keras_model_fn(params):\n\n inputs = tf.keras.layers.Input(shape=(28, 28), name='input_image')\n input_layer = tf.keras.layers.Reshape(target_shape=(28, 28, 1), name='reshape')(inputs)\n\n # convolutional layers\n conv_inputs = input_layer\n for i in range(params.num_conv_layers):\n filters = params.init_filters * (2**i)\n conv = tf.keras.layers.Conv2D(kernel_size=3, filters=filters, strides=1, padding='SAME', activation='relu')(conv_inputs)\n max_pool = tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='SAME')(conv)\n batch_norm = tf.keras.layers.BatchNormalization()(max_pool)\n conv_inputs = batch_norm\n\n flatten = tf.keras.layers.Flatten(name='flatten')(conv_inputs)\n\n # fully-connected layers\n dense_inputs = flatten\n for i in range(len(params.hidden_units)):\n dense = tf.keras.layers.Dense(units=params.hidden_units[i], activation='relu')(dense_inputs)\n dropout = tf.keras.layers.Dropout(params.dropout)(dense)\n dense_inputs = dropout\n\n # softmax classifier\n logits = tf.keras.layers.Dense(units=NUM_CLASSES, name='logits')(dense_inputs)\n softmax = tf.keras.layers.Activation('softmax', name='softmax')(logits)\n\n # keras model\n model = tf.keras.models.Model(inputs, softmax)\n return model\n\n\ndef create_estimator_keras(params, run_config):\n\n keras_model = keras_model_fn(params)\n print(keras_model.summary())\n\n optimizer = tf.keras.optimizers.Adam(lr=params.learning_rate)\n keras_model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n mnist_classifier = tf.keras.estimator.model_to_estimator(\n keras_model=keras_model,\n config=run_config\n )\n\n return mnist_classifier\n\n\n#### Train and Export Model\n\ndef train_and_export_model(train_data, train_labels):\n model_dir = os.path.join(MODELS_LOCATION, MODEL_NAME)\n\n hparams = tf.contrib.training.HParams(\n batch_size=100,\n hidden_units=[512, 512],\n num_conv_layers=3,\n init_filters=64,\n dropout=0.2,\n max_training_steps=50,\n eval_throttle_secs=10,\n learning_rate=1e-3,\n debug=True\n )\n\n run_config = tf.estimator.RunConfig(\n tf_random_seed=19830610,\n save_checkpoints_steps=1000,\n keep_checkpoint_max=3,\n model_dir=model_dir\n )\n\n if tf.gfile.Exists(model_dir):\n print('Removing previous artifacts...')\n tf.gfile.DeleteRecursively(model_dir)\n\n os.makedirs(model_dir)\n\n estimator = run_experiment(hparams, train_data, train_labels, run_config, create_estimator_keras)\n\n def make_serving_input_receiver_fn():\n inputs = {'input_image': tf.placeholder(\n shape=[None,28,28], dtype=tf.float32, name='serving_input_image')}\n return tf.estimator.export.build_raw_serving_input_receiver_fn(inputs)\n\n export_dir = os.path.join(model_dir, 'export')\n\n if tf.gfile.Exists(export_dir):\n tf.gfile.DeleteRecursively(export_dir)\n\n estimator.export_savedmodel(\n export_dir_base=export_dir,\n serving_input_receiver_fn=make_serving_input_receiver_fn()\n )\n\n return export_dir\n\n\ndef setup_model():\n train_data, train_labels, eval_data, eval_labels = load_mnist_keras()\n export_dir = train_and_export_model(train_data, train_labels)\n return export_dir, eval_data\n\n\nNUM_TRIALS = 10\n\ndef main(args):\n if len(args) > 1 and args[1] == '--inference':\n export_dir = args[2]\n _, _, eval_data, _ = load_mnist_keras()\n\n total_load_time = 0.0\n total_serve_time = 0.0\n saved_model_dir = os.path.join(\n export_dir, [f for f in os.listdir(export_dir) if f.isdigit()][0])\n for i in range(0, NUM_TRIALS):\n load_time, serving_time = 
inference_test(saved_model_dir, eval_data, repeat=10000)\n total_load_time += load_time\n total_serve_time += serving_time\n\n print(\"****************************************\")\n print(\"*** Load time on original model: {:.2f}\".format(total_load_time / NUM_TRIALS))\n print(\"*** Serve time on original model: {:.2f}\".format(total_serve_time / NUM_TRIALS))\n print(\"****************************************\")\n\n total_load_time = 0.0\n total_serve_time = 0.0\n optimized_export_dir = os.path.join(export_dir, 'optimized')\n for i in range(0, NUM_TRIALS):\n load_time, serving_time = inference_test(optimized_export_dir, eval_data,\n signature='serving_default',\n repeat=10000)\n total_load_time += load_time\n total_serve_time += serving_time\n print(\"****************************************\")\n print(\"*** Load time on optimized model: {:.2f}\".format(total_load_time / NUM_TRIALS))\n print(\"*** Serve time on optimized model: {:.2f}\".format(total_serve_time / NUM_TRIALS))\n print(\"****************************************\")\n\n else:\n # generate and output original model\n export_dir, eval_data = setup_model()\n saved_model_dir = os.path.join(export_dir, os.listdir(export_dir)[-1])\n describe_graph(get_graph_def_from_saved_model(saved_model_dir))\n get_size(saved_model_dir, 'saved_model.pb')\n get_metagraph(saved_model_dir)\n\n # freeze model and describe it\n freeze_model(saved_model_dir, 'softmax/Softmax', 'frozen_model.pb')\n frozen_filepath = os.path.join(saved_model_dir, 'frozen_model.pb')\n describe_graph(get_graph_def_from_file(frozen_filepath))\n get_size(saved_model_dir, 'frozen_model.pb', include_vars=False)\n\n # optimize model and describe it\n optimize_graph(saved_model_dir, 'frozen_model.pb', TRANSFORMS, 'softmax/Softmax')\n optimized_filepath = os.path.join(saved_model_dir, 'optimized_model.pb')\n describe_graph(get_graph_def_from_file(optimized_filepath))\n get_size(saved_model_dir, 'optimized_model.pb', include_vars=False)\n\n # convert to saved model and output metagraph again\n optimized_export_dir = os.path.join(export_dir, 'optimized')\n convert_graph_def_to_saved_model(optimized_export_dir, optimized_filepath,\n 'softmax', 'softmax/Softmax:0')\n get_size(optimized_export_dir, 'saved_model.pb')\n get_metagraph(optimized_export_dir)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"GoogleCloudPlatform/tf-estimator-tutorials","sub_path":"00_Miscellaneous/model_optimisation/optimize_graph_keras.py","file_name":"optimize_graph_keras.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","stars":675,"dataset":"github-code","pt":"77"} +{"seq_id":"33464395589","text":"# import all necessary packages\nimport numpy as np\nimport pandas as pd\nfrom keras import Sequential\nfrom keras.layers import Input, BatchNormalization, LSTM, Dense, TimeDistributed, Masking, Dropout, Bidirectional, Concatenate \nfrom keras.models import Model\nfrom keras.losses import BinaryCrossentropy\nfrom keras.optimizers import Adam, Adadelta\nfrom keras.optimizers.schedules import ExponentialDecay\nfrom keras import backend as K \nimport pickle \nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import roc_curve, auc\nfrom keras.callbacks import EarlyStopping\nimport matplotlib.pyplot as plt\n\n#load dataset #(written by Seo Eun)\npkl_file = open('datas_nor.pkl', 'rb') 
\ndatas_nor = pickle.load(pkl_file) #normalized version\npkl_file.close()\n\n#pkl_file = open('datas.pkl', 'rb') \n#datas = pickle.load(pkl_file) #without normalization\n#pkl_file.close()\n\n# a function of presenting prediction outputs from model\n# written by Seo Eun and David\ndef calc_test_result(result, true_label): \n # input: (1) result: predicted one-hot coded labels\n # (2) true_label: true one-hot coded labels\n # output: print out Confucion Matrix, Classification Report, Accuracy rate, and AUC\n preds = np.argmax(result, axis=1)\n gt = np.argmax(true_label, axis=1)\n print(\"Confusion Matrix :\")\n print(confusion_matrix(gt, preds))\n print(\"Classification Report :\")\n print(classification_report(gt, preds))\n print(\"Accuracy \", accuracy_score(gt, preds))\n tpr, fpr, _ = roc_curve(gt, 1 - result[:, 0], pos_label=0)\n print(f\"AUC: {auc(fpr, tpr)}\")\n\n# a function of making a structure of bidirectional LSTM model based on audio data before compile it.\n# written by David and Seo Eun\ndef build_audio_model(output_dim, use_time_distribution=True, last_activation=\"relu\", return_sequences=True): \n # input: (1) output_dim: the number of units in output layer\n # (2) use_time_distribution: determine whether a model adds LSTM layers that return sequences rather than single values\n # (3) last_activation: determine an activation function of the last layer\n # (4) return_sequences: determine whether LSTM layers return sequences\n # output: (1) Audio_model: the structure of bi-LSTM model\n Audio_model = Sequential()\n Audio_model.add(Masking(mask_value =0,name='mask_audio'))\n Audio_model.add(Bidirectional(LSTM(20, activation='tanh', return_sequences = return_sequences, name='Bi-LSTM_audio')))\n Audio_model.add(Dropout(0.5,name='Dropout1_audio'))\n out_layer = Dense(output_dim,activation=last_activation,name='layer_2_audio')\n if use_time_distribution:\n out_layer = TimeDistributed(out_layer)\n Audio_model.add(out_layer)\n\n return Audio_model\n\n# a function of making a structure of bidirectional LSTM model based on text data before compile it.\n# written by David and Seo Eun\ndef build_text_model(output_dim, use_time_distribution=True, last_activation=\"relu\", return_sequences=True): \n # input: (1) output_dim: the number of units in output layer\n # (2) use_time_distribution: determine whether a model adds LSTM layers that return sequences rather than single values\n # (3) last_activation: determine an activation function of the last layer\n # (4) return_sequences: determine whether LSTM layers return sequences\n # output: (1) Text_model: the structure of bi-LSTM model\t\n Text_model = Sequential()\n Text_model.add(Masking(mask_value =0,name='mask_text'))\n Text_model.add(Bidirectional(LSTM(125, activation='tanh', return_sequences = return_sequences, name='Bi-LSTM_text')))\n Text_model.add(Dropout(0.5,name='Dropout1_text'))\n out_layer = Dense(output_dim,activation=last_activation,name='layer_2_text')\n if use_time_distribution:\n out_layer = TimeDistributed(out_layer)\n Text_model.add(out_layer)\n\n return Text_model\n\n# a function of unimodal bidirectional LSTM model (contruct a model, complie, fitting, and then evaluate)\n# written by Seo Eun and David \ndef unimodel(datas, mode, norm,nepoch, batch_size=5):\n # input: (1) datas: training and test dataset (should be a dictionary)\n # (2) mode: determine if unimodal is text-based ('text') or audio-based ('audio')\n # (3) norm: determine if dataset is normalized or not\n # (4) nepoch: the number of epochs\n #. 
(5) batch_size: determine the size of batch (default=5)\n # output: print classification performances from training and test dataset, and save a trained model.\n ################################################ \n # load data \n train_audio_data=datas['train_audio_data'].astype(np.float32)\n train_text_data=datas['train_text_data'].astype(np.float32)\n test_audio_data=datas['test_audio_data'].astype(np.float32)\n test_text_data=datas['test_text_data'].astype(np.float32)\n test_mask = datas['test_mask']\n train_mask = datas['train_mask']\n test_label = np.unique(datas['test_label'], axis=1).reshape(-1, 2)\n train_label = np.unique(datas['train_label'], axis=1).reshape(-1, 2)\n train_data = np.concatenate((train_audio_data,train_text_data), axis=2)\n test_data = np.concatenate((test_audio_data,test_text_data), axis=2)\n class_0_weight = 1.0 - ((train_label[:, 0] == 1).sum() / train_label.shape[0])\n class_1_weight = 1.0 - ((train_label[:, 1] == 1).sum() / train_label.shape[0])\n \n ################################################ \n if mode == 'audio':\n #audio\n in_audio = Input(shape=(train_audio_data.shape[1],train_audio_data.shape[2]),name='audio_input') #setting input \n Audio_model = build_audio_model(2, False, 'sigmoid',False) #construct audio model\n Audio_output = Audio_model(in_audio) #its output\n \n model = Model(in_audio, Audio_output) #make a model\n scheduler = ExponentialDecay(initial_learning_rate=0.0003, decay_steps=(nepoch // 10) * (train_audio_data.shape[0] // batch_size), decay_rate=0.9) #learning rate with exponential decay\n opt = Adam(learning_rate=scheduler) #select optimizer\n model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy']) #complie it\n history = model.fit(train_audio_data, train_label, #fitting model with 0.2 validation data from training data\n \t epochs=nepoch,\n \t batch_size=batch_size, \n \t shuffle=True,\n class_weight={ 0: class_0_weight, 1: class_1_weight },\n \t validation_split=0.2)\n model.save(mode+'_'+norm+'.h5') #save the trained model\n predicted_train = model.predict(train_audio_data) #prediction results of training data(Dim: 709*275*2)\n predicted_test = model.predict(test_audio_data) #prediction results of test data (Dim: 177*275*26)\n \n if mode == 'text':\n #text\n in_text = Input(shape=(train_text_data.shape[1],train_text_data.shape[2]),name='text_input') #setting input \n Text_model = build_text_model(2, False, 'sigmoid',False) #construct audio model\n Text_output = Text_model(in_text) #its output\n model = Model(in_text, Text_output) #make a model\n scheduler = ExponentialDecay(initial_learning_rate=0.0003, decay_steps=(nepoch // 10) * (train_text_data.shape[0] // batch_size), decay_rate=0.9) #learning rate with exponential decay\n opt = Adam(learning_rate=scheduler) #select optimizer\n model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy']) #complie it\n history = model.fit(train_text_data, train_label, #fitting model with 0.2 validation data from training data\n \t epochs=nepoch, \n \t batch_size=batch_size,\n \t shuffle=True,\n class_weight={ 0: class_0_weight, 1: class_1_weight },\n \t validation_split=0.2)\n model.save(mode+'.h5') #save the trained model\n predicted_train = model.predict(train_text_data) #prediction results of training data(Dim: #709*275*2)\n predicted_test = model.predict(test_text_data) #prediction results of test data (Dim: 177*275*26)\n \n # show predicted outcomes\n print('-----train result-----')\n calc_test_result(predicted_train, train_label)\n 
print('-----test result-----')\n calc_test_result(predicted_test, test_label)\n \n # save the final model\n output = open('result_'+mode+'.pkl', 'wb') \n pickle.dump({'pre_train':predicted_train,'pre_test':predicted_test}, output)\n output.close() \n\n# a function of multimodal bidirectional LSTM model (contruct a model, complie, fitting, and then evaluate)\n# written by Seo Eun, David and Dan\ndef multimodel(datas, mode, nepoch, batch_size=5): \n # input: (1) datas: training and test dataset (should be a dictionary)\n # (2) mode: determine if unimodal is text-based ('text') or audio-based ('audio') \n # (3) nepoch: the number of epochs\n #. (4) batch_size: determine the size of batch (default=5)\n # output: print classification performances from training and test dataset, and save a trained model.\n \n ################################################ \n # load data\n train_audio_data=datas['train_audio_data'].astype(np.float32)\n train_text_data=datas['train_text_data'].astype(np.float32)\n test_audio_data=datas['test_audio_data'].astype(np.float32)\n test_text_data=datas['test_text_data'].astype(np.float32)\n test_mask = datas['test_mask']\n train_mask = datas['train_mask']\n test_label = np.unique(datas['test_label'], axis=1).reshape(-1, 2)\n train_label = np.unique(datas['train_label'], axis=1).reshape(-1, 2)\n train_data = np.concatenate((train_audio_data,train_text_data), axis=2)\n test_data = np.concatenate((test_audio_data,test_text_data), axis=2) \n class_0_weight = 1.0 - ((train_label[:, 0] == 1).sum() / train_label.shape[0])\n class_1_weight = 1.0 - ((train_label[:, 1] == 1).sum() / train_label.shape[0])\n\n ################################################ (written by Seo Eun and David)\n #stage 1: extract unimodal features (contextual bi-LSTM) \n #audio\n in_audio = Input(shape=(train_audio_data.shape[1],train_audio_data.shape[2]),name='audio_input')\n Audio_model = build_audio_model(1)\n Audio_output = Audio_model(in_audio) \n #text\n in_text = Input(shape=(train_text_data.shape[1],train_text_data.shape[2]),name='text_input')\n Text_model = build_text_model(1) \n Text_output = Text_model(in_text)\n\n #merging audio and text from two outputs from audio and text\n merged = Concatenate(axis=2)([Audio_output,Text_output])\n \t \n ################################################ (written by Dan, Seo Eun and David)\n #stage 2: BiLSTM with a two-layer neural network \n Combined_model = Sequential() \n Combined_model.add(Bidirectional(LSTM(150, activation='tanh', dropout=0.5, name='Bi-LSTM_merged'))) #bi-LSTM model\n Combined_model.add(Dropout(0.5, name='Dropout_com1')) #dropout 0.5 rate\n Combined_model.add(Dense(2, activation='sigmoid', name='output')) #output layer\n output = Combined_model(merged) \n #\n ################################################ (written by Seo Eun)\n model = Model([in_audio,in_text], output) #model\n scheduler = ExponentialDecay(initial_learning_rate=0.0003, decay_steps=(nepoch // 10) * (train_audio_data.shape[0] // batch_size), decay_rate=0.9) #learning rate with exponential Decay\n opt = Adam(learning_rate=scheduler) #use Adam optimizer\n model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy']) #compile a model\n history = model.fit([train_audio_data, train_text_data], train_label, #fitting a model\n \t epochs=nepoch,\n \t batch_size=batch_size, \n \t shuffle=True,\n class_weight={ 0: class_0_weight, 1: class_1_weight },\n \t validation_split=0.2)\n model.save(mode+'.h5') #save the trained model\n predicted_train = 
model.predict([train_audio_data,train_text_data]) #prediction results of training data(Dim: #709*2)\n predicted_test = model.predict([test_audio_data,test_text_data]) #prediction results of test data(Dim: #177*2)\n\n # show predicted outcomes\n print('-----train result-----')\n calc_test_result(predicted_train, train_label)\n print('-----test result-----')\n calc_test_result(predicted_test, test_label)\n\n # scatter plot of results (written by Dan Weber)\n plt.scatter(range(180), predicted_test, c='r')\n plt.scatter(range(180), test_label, c='g')\n plt.title('Prediction Accuracy')\n plt.ylabel('test case label')\n plt.xlabel('test case data')\n plt.show()\n\n # Training and validation loss plot (Written by Dan Weber)\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show() \n \n # save the trained model\n output = open('result_'+mode+'.pkl', 'wb') \n pickle.dump({'pre_train':predicted_train,'pre_test':predicted_test}, output)\n output.close() \n \nif __name__==\"__main__\":\n\n print('----- multimodal -----')\n #multimodel(datas,'multimodal_no_normalized',50)\n multimodel(datas_nor,'multimodal_normalized', 50, 5)\n print('----- text -----')\n #unimodel(datas,'text','no_normalized',50)\n unimodel(datas_nor,'text','normalized',50, 5)\n print('----- audio -----')\n #unimodel(datas,'audio','no_normalized',50)\n unimodel(datas_nor,'audio','normalized',50, 5)\n","repo_name":"DavidCarlyn/vote_prediction","sub_path":"seoeun/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"72363508409","text":"#User function Template for python3\n\ndef wordBreak(line, dictionary):\n # Complete this function\n d={}\n for i in dictionary:\n d[i]=1 \n # dp=[-1]*len(line)\n # return helper(0,line,d,dp)\n return tabhelper(line,d)\n \ndef helper(pos,s,d,dp):\n # bc\n if pos == len(s):\n return True\n \n if dp[pos]!=-1:\n return dp[pos]\n \n temp=''\n for i in range(pos,len(s)):\n temp+=s[i]\n if d.get(temp)!=None and helper(i+1,s,d,dp)==True:\n dp[pos]=True\n return dp[pos]\n dp[pos]=False\n return dp[pos]\n \ndef tabhelper(s,d):\n n=len(s)\n dp=[False]*(n+1)\n # bc\n dp[n]=True\n for pos in range(n-1,-1,-1):\n temp=''\n for i in range(pos,len(s)):\n temp+=s[i]\n if d.get(temp)!=None and dp[i+1]==True:\n dp[pos]=True\n return dp[0]\n\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nif __name__ == '__main__':\n\ttest_case = int(input())\n\n\tfor _ in range(test_case):\n\t\tnumber_of_elements = int(input())\n\t\tdictionary = [word for word in input().strip().split()]\n\t\tline = input().strip()\n\t\tres = wordBreak(line, dictionary)\n\t\tif res:\n\t\t\tprint(1)\n\t\telse:\n\t\t\tprint(0)\n# } Driver Code Ends","repo_name":"CypherAk007/CP-DSA","sub_path":"Word Break - GFG/word-break.py","file_name":"word-break.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1161998649","text":"from rest_framework import serializers\r\nfrom apps.products.models import (\r\n Purchase,\r\n Sales,\r\n Warehouse,\r\n Brand,\r\n Category,\r\n SubCategory,\r\n Product,\r\n Unit,\r\n Barcode,\r\n PurchaseInvoice,\r\n SalesInvoice,\r\n Adjustment,\r\n)\r\nfrom apps.accounts.serializers import SupplierSerializer, 
UserSerializer\r\n\r\n\r\nclass WarehouseSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = Warehouse\r\n fields = (\r\n 'id',\r\n 'name',\r\n 'phone',\r\n 'email',\r\n )\r\n\r\n\r\nclass BrandSerializers(serializers.ModelSerializer):\r\n class Meta:\r\n model = Brand\r\n fields = (\r\n \"id\",\r\n \"brand_name\",\r\n \"brand_image\",\r\n )\r\n\r\n\r\nclass SubCategorySerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = SubCategory\r\n fields = (\r\n \"id\",\r\n \"name\",\r\n \"category\",\r\n )\r\n\r\n\r\nclass CategorySerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = Category\r\n fields = (\r\n \"id\",\r\n \"name\",\r\n )\r\n\r\n\r\nclass UnitSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = Unit\r\n fields = (\"id\", \"unit_name\", \"short_name\")\r\n\r\n\r\nclass ProductSerializer(serializers.ModelSerializer):\r\n # warehouse = WarehouseSerializer(many=True)\r\n class Meta:\r\n model = Product\r\n fields = (\r\n \"id\",\r\n \"product_name\",\r\n \"product_type\",\r\n \"product_code\",\r\n \"product_unit\",\r\n \"unit_price\",\r\n \"product_price\",\r\n \"product_tax\",\r\n \"tax_method\",\r\n \"discount\",\r\n \"stock_alert\",\r\n \"expense\",\r\n \"category\",\r\n \"brand\",\r\n \"warehouse\",\r\n \"barcode\",\r\n \"product_image\",\r\n \"featured\",\r\n \"price_difference_in_warehouse\",\r\n \"add_promotional_sale\",\r\n \"has_expiry_date\",\r\n \"has_multi_variant\",\r\n \"has_imie_code\",\r\n )\r\n\r\n def create(self, validated_data):\r\n warehouse_data = validated_data.pop(\r\n 'warehouse', []) # Extract warehouse data\r\n\r\n # Create the product instance with other fields\r\n product = Product.objects.create(**validated_data)\r\n\r\n for warehouse_info in warehouse_data:\r\n warehouse_id = warehouse_info.id\r\n\r\n try:\r\n warehouse_obj = Warehouse.objects.get(id=warehouse_id)\r\n product.warehouse.add(warehouse_obj)\r\n except Warehouse.DoesNotExist:\r\n pass\r\n\r\n return product\r\n\r\n\r\nclass BarcodeSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = Barcode\r\n fields = (\r\n \"information\",\r\n \"papersize\",\r\n )\r\n\r\n\r\nclass GETProductSerializer(serializers.ModelSerializer):\r\n brand = BrandSerializers()\r\n category = CategorySerializer()\r\n product_unit = UnitSerializer()\r\n user = UserSerializer()\r\n modified_by = UserSerializer()\r\n warehouse = WarehouseSerializer(many=True)\r\n\r\n class Meta:\r\n model = Product\r\n fields = \"__all__\"\r\n\r\n\r\nclass GetCategorySeralizer(serializers.ModelSerializer):\r\n user = UserSerializer()\r\n modified_by = UserSerializer()\r\n\r\n class Meta:\r\n model = Category\r\n fields = (\r\n \"user\",\r\n \"modified_by\",\r\n \"main_category\",\r\n \"sub_category\",\r\n )\r\n\r\n\r\nclass GetUnitSeralizer(serializers.ModelSerializer):\r\n created_by = UserSerializer()\r\n modified_by = UserSerializer()\r\n\r\n class Meta:\r\n model = Unit\r\n fields = (\r\n \"created_by\",\r\n \"modified_by\",\r\n \"unit_name\",\r\n \"short_name\",\r\n )\r\n\r\n\r\nclass GetBrandSeralizer(serializers.ModelSerializer):\r\n created_by = UserSerializer()\r\n modified_by = UserSerializer()\r\n\r\n class Meta:\r\n model = Brand\r\n fields = (\r\n \"created_by\",\r\n \"modified_by\",\r\n \"brand_name\",\r\n \"brand_image\",\r\n )\r\n\r\n\r\nclass BarcodeSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = Barcode\r\n fields = (\"id\", \"information\", \"papersize\")\r\n\r\n\r\nclass AdjustmentSerializer(serializers.ModelSerializer):\r\n 
quantity = serializers.IntegerField(write_only=True)\r\n\r\n class Meta:\r\n model = Adjustment\r\n fields = (\"id\", \"quantity\", \"warehouse\", \"product\", \"type\")\r\n\r\n def validate(self, data):\r\n product = data.get(\"product\")\r\n type = data.get(\"type\")\r\n quantity = data.get(\"quantity\")\r\n \r\n if type == \"Substraction\" and product.stock_alert < quantity:\r\n raise serializers.ValidationError(\r\n {product.product_name: \"Stock is less than quantity to be substracted.\"}\r\n )\r\n elif type == \"Addition\":\r\n product.stock_alert += int(quantity)\r\n else:\r\n product.stock_alert -= int(quantity)\r\n product.save()\r\n\r\n return data\r\n\r\n\r\nclass GETProductSerializer(serializers.ModelSerializer):\r\n brand = BrandSerializers()\r\n category = CategorySerializer()\r\n product_unit = UnitSerializer()\r\n created_by = UserSerializer()\r\n modified_by = UserSerializer()\r\n user = UserSerializer()\r\n warehouse = WarehouseSerializer(many=True)\r\n\r\n class Meta:\r\n model = Product\r\n fields = \"__all__\"\r\n\r\n\r\nclass GetBarcodeSerializer(serializers.ModelSerializer):\r\n information = ProductSerializer()\r\n\r\n class Meta:\r\n model = Barcode\r\n fields = \"__all__\"\r\n\r\n\r\nclass GetCategorySeralizer(serializers.ModelSerializer):\r\n created_by = UserSerializer()\r\n modified_by = UserSerializer()\r\n\r\n class Meta:\r\n model = Category\r\n fields = (\r\n \"created_by\",\r\n \"modified_by\",\r\n \"main_category\",\r\n \"sub_category\",\r\n )\r\n\r\n\r\nclass GetUnitSeralizer(serializers.ModelSerializer):\r\n created_by = UserSerializer()\r\n modified_by = UserSerializer()\r\n\r\n class Meta:\r\n model = Unit\r\n fields = (\"created_by\", \"modified_by\", \"unit_name\", \"short_name\")\r\n\r\n\r\nclass GetBrandSeralizer(serializers.ModelSerializer):\r\n created_by = UserSerializer()\r\n modified_by = UserSerializer()\r\n\r\n class Meta:\r\n model = Brand\r\n fields = (\r\n \"created_by\",\r\n \"modified_by\",\r\n \"brand_name\",\r\n \"brand_image\",\r\n )\r\n\r\n\r\nclass PurchaseSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = Purchase\r\n fields = [\r\n \"id\",\r\n \"warehouse\",\r\n \"supplier\",\r\n \"product\",\r\n \"order_tax\",\r\n \"order_discount\",\r\n \"shipping\",\r\n \"sales_status\",\r\n \"purchase_note\",\r\n ]\r\n\r\n\r\nclass GetPurachseSerializer(serializers.ModelSerializer):\r\n warehouse = WarehouseSerializer()\r\n supplier = SupplierSerializer()\r\n product = ProductSerializer(many=True)\r\n\r\n class Meta:\r\n model = Purchase\r\n fields = (\"warehouse\",\r\n \"supplier\",\r\n \"product\",\r\n \"order_tax\",\r\n \"order_discount\",\r\n \"shipping\",\r\n \"sales_status\",\r\n \"purchase_note\")\r\n\r\n\r\nclass SalesSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = Sales\r\n fields = [\r\n \"customer\",\r\n \"warehouse\",\r\n \"biller\",\r\n \"product\",\r\n \"sales_tax\",\r\n \"discount\",\r\n \"shipping\",\r\n \"sales_status\",\r\n \"payment_status\",\r\n \"sales_image\",\r\n \"sales_note\",\r\n \"staff_remark\",\r\n ]\r\n\r\n\r\nclass PurchaseInvoiceSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = PurchaseInvoice\r\n fields = ('warehouse', 'supplier', 'purchases')\r\n\r\nclass GetAdjustmentSeralizer(serializers.ModelSerializer):\r\n product=ProductSerializer()\r\n warehouse=WarehouseSerializer()\r\n class Meta:\r\n model = Adjustment\r\n fields = 
'__all__'","repo_name":"SaugatMgr/Inventory-System","sub_path":"apps/products/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":8283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21612591887","text":"'''\n # @ Author: Zhi Wu\n # @ Create Time: 1970-01-01 00:00:00\n # @ Modified by: Zhi Wu\n # @ Modified time: 2022-08-27 13:04:30\n # @ Description: RFMask backbone definition\n '''\n\nfrom torchvision.models.detection.backbone_utils import BackboneWithFPN, resnet\nfrom torchvision.ops import misc as misc_nn_ops\nfrom torchvision.ops.feature_pyramid_network import LastLevelMaxPool\n\n\ndef resnet_fpn_backbone(\n backbone_name,\n pretrained,\n norm_layer=misc_nn_ops.FrozenBatchNorm2d,\n trainable_layers=3,\n returned_layers=None,\n extra_blocks=None\n):\n backbone = resnet.__dict__[backbone_name](\n pretrained=pretrained,\n norm_layer=norm_layer)\n\n # select layers that wont be frozen\n assert 0 <= trainable_layers <= 5\n layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]\n if trainable_layers == 5:\n layers_to_train.append('bn1')\n for name, parameter in backbone.named_parameters():\n if all([not name.startswith(layer) for layer in layers_to_train]):\n parameter.requires_grad_(False)\n\n if extra_blocks is None:\n extra_blocks = LastLevelMaxPool()\n\n if returned_layers is None:\n returned_layers = [1, 2, 3, 4]\n assert min(returned_layers) > 0 and max(returned_layers) < 5\n return_layers = {f'layer{k}': str(v) for v, k in enumerate(returned_layers)}\n\n in_channels_stage2 = backbone.inplanes // 8\n in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]\n out_channels = 64\n return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)","repo_name":"wuzhiwyyx/RFMask-PUB","sub_path":"models/rfmask/backbone.py","file_name":"backbone.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71343246968","text":"\"\"\"Extends pandas DataFrame class for UPSERT (or INSERT OR UPDATE) data to some database\n\n This file can also be imported as a module and contains the following functions:\n\n * iterate_group - useful function for iterating throw list by group with 2 or more elements\n\n ...and class:\n * DataFrameDATA - child of pandas.DataFrame with re-defned 'to_sql' function\n\n\"\"\"\n\n__author__ = \"G. Golyshev\"\n__copyright__ = \"CMASF 2020\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"G. 
Golyshev\"\n__email__ = \"g.golyshev@forecast.ru\"\n__status__ = \"Production\"\n\n\nimport pandas as pd\nimport sqlalchemy as sa\nimport cmasf.serv as srv\n\n\n# def iterate_group(iterator, count):\n# itr = iter(iterator)\n# for i in range(0, len(iterator), count):\n# yield iterator[i:i + count]\n\nclass DataFrameDATA(pd.DataFrame):\n\n not_country=[]\n name=''\n\n\n @property\n def _constructor(self):\n return DataFrameDATA\n\n def to_sql(self, name, con, flavor='sqlite', schema=None, if_exists='fail', index=True,\n index_label=None, chunksize=100, dtype=None):\n \"\"\"Re-defined original function for UPSERT operations.\n The only differences are in the params:\n if_exists: it can be\n - 'replace' to delete the table in the database if it exists, create a new one and fill it with data from self dataframe;\n indexes of the dataframe will be indexes of the new table.\n - 'append', 'ignore' to append data not existing in the table, ignoring existing rows\n - 'update', 'upsert' to insert not existing data and update existing rows; existing data is found by\n the self dataframe indexes\n chunksize: write to the database in packages of 'chunksize' records\n \"\"\"\n def drop_table(strTName):\n meta = sa.MetaData(bind=con)\n try:\n tbl_ = sa.Table(strTName, meta, autoload=True, autoload_with=con)\n tbl_.drop(con, checkfirst=False)\n except Exception:\n pass\n\n def get_data_dict(strDateFormat='%Y-%m-%d'):\n lst_date = self.reset_index().select_dtypes(include='datetime')\n vals = self.reset_index().to_dict(orient='records')\n for v in vals:\n for d in lst_date:\n # use the passed-in format instead of a hardcoded one\n v[d] = v[d].strftime(strDateFormat)\n return vals\n\n def create_table(strTName, lstIndNames):\n\n def type_to_sqlA(lstName, sqType):\n l = len(lstName)\n return dict(zip(lstName, [sqType] * l))\n\n dctReplace={'int':sa.Integer, 'int64':sa.Integer, 'datetime64[ns]':sa.String,\n 'datetime':sa.String, 'float':sa.Float, 'object':sa.String}\n\n dct_trans = type_to_sqlA(self.select_dtypes(include='int').columns.tolist(), sa.Integer)\n dct_trans.update(type_to_sqlA(self.select_dtypes(include='int64').columns.tolist(), sa.Integer))\n dct_trans.update(type_to_sqlA(self.select_dtypes(include='datetime').columns.tolist(), sa.String))\n dct_trans.update(type_to_sqlA(self.select_dtypes(include='float').columns.tolist(), sa.Float))\n dct_trans.update(type_to_sqlA(self.select_dtypes(include='object').columns.tolist(), sa.String))\n\n try:\n dct_trans_indexes = {n: dctReplace[str(self.index.get_level_values(n).dtype)] for n in self.index.names}\n except AttributeError:\n dct_trans_indexes = {self.index.name: dctReplace[str(self.index.dtype)]}\n\n lstIndexes=[sa.Column(k, v, primary_key=True, nullable=False, autoincrement=False) for k, v in dct_trans_indexes.items()]\n lstDBCols=[sa.Column(k, v) for k, v in dct_trans.items()]\n\n columns=lstIndexes+lstDBCols\n\n metadata = sa.MetaData(bind=con)\n\n bname_t = sa.Table(strTName, metadata, *columns)\n metadata.create_all()\n\n def buff_insert(alch_table, insert_prefix, values, buff_size=chunksize):\n for i in srv.iterate_group(values, buff_size):\n inserter = alch_table.insert(prefixes=insert_prefix, values=i)\n con.execute(inserter)\n\n if if_exists == 'replace':\n drop_table(name)\n if_exists = 'fail'\n\n if not con.dialect.has_table(con, name):\n create_table(name, self.index.names)\n\n meta = sa.MetaData(bind=con)\n tbl_names = sa.Table(name, meta, autoload=True, autoload_with=con)\n\n vals = get_data_dict()\n\n inserter = None\n\n if flavor == 'mysql':\n if if_exists in ['append', 'ignore']:\n inserter = tbl_names.insert(prefixes=['IGNORE'], values=vals)\n elif 
if_exists in ['update', 'upsert']:\n ins_state = sa.dialects.mysql.insert(tbl_names).values(vals)\n inserter = ins_state.on_duplicate_key_update(Date=ins_state.inserted.Date)\n elif if_exists == 'fail':\n inserter = tbl_names.insert(values=vals)\n con.execute(inserter)\n\n if flavor == 'sqlite':\n if if_exists in ['append', 'ignore']:\n # inserter = tbl_names.insert(prefixes=['OR IGNORE'], values=vals)\n buff_insert(tbl_names, ['OR IGNORE'], vals, buff_size=chunksize)\n elif if_exists in ['update', 'upsert']:\n buff_insert(tbl_names, ['OR REPLACE'], vals, buff_size=chunksize)\n # inserter = tbl_names.insert(prefixes=['OR REPLACE'], values=vals)\n elif if_exists == 'fail':\n buff_insert(tbl_names, None, vals, buff_size=chunksize)","repo_name":"GeorgyGol/cmasf","sub_path":"cmasf/pandas_sql.py","file_name":"pandas_sql.py","file_ext":"py","file_size_in_byte":5637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"818800422","text":"import csv\nfrom catboost import CatBoostRegressor\nfrom sklearn.model_selection import train_test_split\n\n\ndef read_data():\n with open('position_evaluation.csv', 'r', newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n return [row for row in reader]\n\ndef main():\n # Initialize data\n data = read_data()\n print('Read {0} rows'.format(len(data)))\n\n labels = [row[0] for row in data]\n data = [row[1:] for row in data]\n\n train_data, test_data, train_labels, test_labels = train_test_split(data, labels, test_size=0.3)\n\n # Initialize CatBoostRegressor\n model = CatBoostRegressor(\n iterations=40,\n learning_rate=1,\n depth=8,\n logging_level=\"Verbose\")\n\n # Fit model\n model.fit(train_data, train_labels)\n\n score = model.score(test_data, test_labels)\n print(\"Score: {0}\".format(score))\n\n model.save_model(\"my_model.cbm\", \"cpp\")\n\n assert(False)\n\n # unreachable scratch code below, left from development\n\n train_data = [[1, 4, 5, 6],\n [4, 5, 6, 7],\n [30, 40, 50, 60]]\n\n eval_data = [[2, 4, 6, 8],\n [1, 4, 50, 60]]\n\n train_labels = [10, 20, 30]\n # Get predictions\n preds = model.predict(eval_data)\n\nif __name__ == '__main__':\n main()","repo_name":"maksimbulva/chess","sub_path":"learning/catboost_regression.py","file_name":"catboost_regression.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"5370277737","text":"from django.conf.urls import patterns, include, url\nfrom django.conf import settings\nfrom class_based_auth_views.views import LoginView, LogoutView\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom zunndubb.views import HomeView\n\nurlpatterns = patterns('',\n #url(r'^auth/', include('social_auth.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^movies/', include('apps.movies.urls')),\n url(r'^wishlist/', include('apps.wishlist.urls')),\n url(r'^groups/', include('apps.groups.urls')),\n url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),\n url(r'^~', include('apps.profiles.urls')),\n # url(r'^social/', include('social_auth.urls' )),\n url('', include('social.apps.django_app.urls', namespace='social')),\n url(r'^$', HomeView.as_view(), name='home'),\n)\n\n\n# Static file handling for dev env\nif settings.DEBUG:\n from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n urlpatterns += staticfiles_urlpatterns()\n urlpatterns += patterns('',\n (r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT, 
'show_indexes':True}),\n )\n\n\n\n# Autoload template tags see AUTOLOAD_TEMPLATETAGS in settings\nif hasattr(settings, 'AUTOLOAD_TEMPLATETAGS'):\n from django.template.loader import add_to_builtins\n for tag in settings.AUTOLOAD_TEMPLATETAGS:\n add_to_builtins(tag)\n\n","repo_name":"uzhare/zunndubb","sub_path":"zunndubb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"21332369089","text":"from tkinter import*\nfrom PIL import ImageTk, Image\n\nroot = Tk()\nroot.title(\"GROCERY BILLING SYSTEM\")\nroot.geometry(\"400x400\")\n\n# keep references to the widgets: Scale(...).pack() returns None, not the Scale\nvertical=Scale(root,from_=0, to=200)\nvertical.pack()\nhorizental=Scale(root, from_=0, to=400, orient=HORIZONTAL)\nhorizental.pack()\n\ndef slide():\n my_label=Label(root, text=horizental.get())\n my_label.pack()\n root.geometry(str(horizental.get()) + \"x\" + str(vertical.get()))\n\nmy_btn=Button(root,text=\"click me\", command=slide)\nmy_btn.pack()\n\nroot.mainloop()\n\n# the earlier problem (nothing happened when clicking \"click me\") was caused by calling\n# .pack() inline, which made vertical/horizental None and broke .get()","repo_name":"roshanprusty/Python","sub_path":"GUI/14sliders.py","file_name":"14sliders.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"41567522870","text":"# Code to create a waypoint mission for the DJI Matrice 300 RTK. It can be used to create a\n# KMZ file, which can be loaded into the DJI Pilot 2 application.\n#\n# The documentation that was used can be found at the following website:\n# https://developer.dji.com/doc/cloud-api-tutorial/en/specification/dji-wpml/template-kml.html\n\nimport time\nimport xml.etree.ElementTree as ET\nimport shutil\nimport os\nimport tempfile\nimport numpy as np\nimport copy\n\nfrom pyproj import Transformer\n\ndef export_formated_xml(ET_xml: ET.Element, filename = 'test.xml'):\n ''' Export the input ET_xml parameter to an xml file. The name of the file \n is given by the filename.'''\n tree = ET.ElementTree(ET_xml)\n ET.indent(tree, space=\"\\t\", level=0)\n tree.write(filename, encoding=\"utf-8\", xml_declaration=True)\n\n\ndef return_string(ET_element: ET.Element, encoding = 'unicode'):\n ''' Return a string of the ET_element parameter.'''\n # encoding can also be 'utf8'\n ET_string = ET.tostring(ET_element, encoding=encoding , method='xml')\n return ET_string\n\n\nclass dji_waypoint_mission():\n ''' For now only hover and yaw actions are supported.\n\n First the mission should be initialised by giving the __init__ arguments. For each point \n 'add_yaw_action' and 'add_hover_action' can be used to perform actions at a certain \n waypoint. The actions will be performed in the order the actions are added. The self.action_param_list\n parameter can be used as a quicker method to give the actions that should be performed. \n\n The build_action_group can be used to return an action group xml element. 
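(As a minimal illustration with hypothetical values: wp = dji_waypoint_mission(0, 4.233, 52.00); wp.add_hover_action(5); wp.build_action_group() then returns the populated 'wpml:actionGroup' element.) 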
This requires added \n actions.\n\n The build_waypoint_xml can be used to return a waypoint xml element with included action group element\n '''\n def __init__(\n self, \n point_id: int, \n longitude: float, \n latitude: float, \n height: float = 100, \n executeHeight: float = 100,\n useGlobalHeight: int = 1,\n ellipsoidHeight: float = 100,\n useGlobalSpeed: int = 1, \n waypointSpeed: float = 1, \n useGlobalHeadingParam: int = 1, \n useGlobalTurnParam: int = 1, \n useStraightLine: int = 1, \n waypointHeadingMode: str = 'followWayline',\n waypointHeadingAngle: int | None = None,\n waypointHeadingYawPathMode: str = 'followBadArc',\n waypointTurnMode: str = 'toPointAndPassWithContinuityCurvature',\n waypointTurnDampingDist: float = 0.2,\n gimbalPitchAngle: float = 0,\n ):\n '''\n Args:\n point_id: Waypoint index, this must be unique for a route. It must be \n a sequence number that monotonously and continuously increases \n from 0. Range [0, 65535]\n longitude: Longitude value.\n latitude: Latitude value.\n altitude: Wayline height in EGM96 or relative to take of height.\n useGlobalHeight: If the global height is used. Must be 0 or 1. False \n or True respectively.\n executeHeight: Execution altitude of waypoint. The reference is declared \n in wpml:executeHeightMode.\n useGlobalSpeed: If the global speed is used. Must be 0 or 1. False \n or True respectively.\n waypointSpeed: Waypoint flight speed in m/s. Overruled by useGlobalSpeed.\n useGlobalHeadingParam: Whether to use the global yaw mode parameter. Must \n be 0 or 1. False or True respectively.\n useGlobalTurnParam: If the global waypoint type should be used. 0 is do not\n use. 1 indicates use it.\n useStraightLine\n gimbalPitchAngle\n waypointTurnMode: Can be: 'coordinateTurn', 'toPointAndStopWithDiscontinuityCurvature',\n 'toPointAndStopWithContinuityCurvature', 'toPointAndPassWithContinuityCurvature'.\n waypointTurnDampingDist: WARNING: When this parameter is used by the code it \n should be smaller than the wayline length (distance between consecutive\n waypoints). So if 0.1 m is set the minimum distance between waypoints \n should be 0.1 meters. This is only the case.\n\n\n '''\n self.longitude = longitude\n self.latitude = latitude\n self.point_id = point_id\n self.height = height\n self.useGlobalHeight = useGlobalHeight\n self.useGlobalSpeed = useGlobalSpeed\n self.waypointSpeed = waypointSpeed\n self.useGlobalHeadingParam = useGlobalHeadingParam\n self.useGlobalTurnParam = useGlobalTurnParam\n self.useStraightLine = useStraightLine\n self.gimbalPitchAngle = gimbalPitchAngle\n self.ellipsoidHeight = ellipsoidHeight\n self.executeHeight = executeHeight\n self.waypointHeadingMode = waypointHeadingMode\n self.waypointHeadingAngle = waypointHeadingAngle\n self.waypointHeadingYawPathMode = waypointHeadingYawPathMode\n self.waypointTurnMode = waypointTurnMode\n self.waypointTurnDampingDist = waypointTurnDampingDist\n\n #Inititise parameters\n self.actionId = 0\n self.action_param_list = []\n\n # def new_method(self, gimbalPitchAngle):\n # self.gimbalPitchAngle = gimbalPitchAngle\n\n def add_yaw_action(self, aircraftHeading: float, aircraftPathMode: str = 'clockwise'):\n '''\n This function adds a yaw action to a waypoint.\n\n Args:\n aircraftHeading: heading to north in degrees. 
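(0 points north, 90 points east; clockwise positive) 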
[-180,180]\n aircraftPathMode: 'clockwise' or 'counterClockwise'\n \n '''\n #Get correct action id\n currentId = len(self.action_param_list)\n\n # Add parameters to the action parameter list\n params = ['rotateYaw', currentId, aircraftHeading, aircraftPathMode]\n self.action_param_list.append(params)\n\n def add_hover_action(self, hoverTime: float):\n ''' \n This function adds a hover action to a waypoint\n\n Args:\n hoverTime: Time to hover in seconds.'''\n\n #Get correct action id\n currentId = len(self.action_param_list)\n\n # Add parameters to the action parameter list\n params = ['hover', currentId, hoverTime]\n self.action_param_list.append(params)\n\n def yaw_action_xml(self, aircraftHeading: float, aircraftPathMode: str):\n heading = ET.Element('wpml:aircraftHeading')\n heading.text = str(aircraftHeading)\n pathmode = ET.Element('wpml:aircraftPathMode')\n pathmode.text = str(aircraftPathMode)\n return (heading,pathmode)\n\n def hover_action_xml(self, hoverTime: float):\n hovertime_xml = ET.Element('wpml:hoverTime')\n hovertime_xml.text = str(hoverTime)\n return hovertime_xml\n\n def kml_actions(self):\n '''\n actionActuatorFunc can be:\n rotateYaw\n hover\n '''\n actions = []\n for params in self.action_param_list:\n actionId = params[1]\n actionActuatorFunc = params[0]\n if actionActuatorFunc == 'hover':\n actionActuatorFuncParam_kml_xml = self.hover_action_xml(params[2])\n elif actionActuatorFunc == 'rotateYaw':\n actionActuatorFuncParam_kml_xml = self.yaw_action_xml(params[2], params[3])\n else:\n raise ValueError('The actionActuatorFunc {} was provided but does not exist. Pleas choose from [\"hover\",\"rotateYaw\"].'.format(actionActuatorFunc))\n \n action_xml = ET.Element('wpml:action')\n actionid = ET.SubElement(action_xml, 'wpml:actionId')\n actionid.text = str(actionId)\n actionac = ET.SubElement(action_xml, 'wpml:actionActuatorFunc')\n actionac.text = str(actionActuatorFunc)\n actionacparam = ET.SubElement(action_xml, 'wpml:actionActuatorFuncParam')\n \n if len(actionActuatorFuncParam_kml_xml) == 0:\n actionacparam.append(actionActuatorFuncParam_kml_xml)\n else:\n for element in actionActuatorFuncParam_kml_xml:\n actionacparam.append(element)\n\n actions.append(action_xml)\n return actions\n\n def build_waypointHeadingParam(self):\n waypoint_heading_param = ET.Element('wpml:waypointHeadingParam')\n mode = ET.SubElement(waypoint_heading_param, 'wpml:waypointHeadingMode')\n mode.text = str(self.waypointHeadingMode)\n if self.waypointHeadingAngle != None:\n angle = ET.SubElement(waypoint_heading_param, 'wpml:waypointHeadingAngle')\n angle.text = str(self.waypointHeadingAngle)\n path = ET.SubElement(waypoint_heading_param, 'wpml:waypointHeadingYawPathMode')\n path.text = str(self.waypointHeadingYawPathMode)\n return waypoint_heading_param\n\n def build_waypointTurnParam(self):\n waypoint_turn_param = ET.Element('wpml:waypointTurnParam')\n turn_mode = ET.SubElement(waypoint_turn_param, 'wpml:waypointTurnMode')\n turn_mode.text = str(self.waypointTurnMode)\n # It was chosen to always have a damping distance. 
It is set as a small value as it must \n # be larger than 0 but smaller than the maximum length of wayline segment.\n waypoint_damping_distance = ET.SubElement(waypoint_turn_param, 'wpml:waypointTurnDampingDist')\n waypoint_damping_distance.text = str(self.waypointTurnDampingDist)\n return waypoint_turn_param\n\n def build_action_group(self):\n ''' Returns an action group KML.\n '''\n actionGroupId = self.point_id\n actionGroupStartIndex = self.point_id\n actionGroupEndIndex = self.point_id\n actionGroupMode = 'sequence' # only option yet\n actionTriggerType = 'reachPoint' # other options not yet supported.\n actions = self.kml_actions()\n\n action_group_xml = ET.Element('wpml:actionGroup')\n action_group_id = ET.SubElement(action_group_xml, 'wpml:actionGroupId')\n action_group_id.text = str(actionGroupId)\n action_group_st = ET.SubElement(action_group_xml, 'wpml:actionGroupStartIndex')\n action_group_st.text = str(actionGroupStartIndex)\n action_group_ed = ET.SubElement(action_group_xml, 'wpml:actionGroupEndIndex')\n action_group_ed.text = str(actionGroupEndIndex)\n action_group_id = ET.SubElement(action_group_xml, 'wpml:actionGroupMode')\n action_group_id.text = str(actionGroupMode) \n action_group_t = ET.SubElement(action_group_xml, 'wpml:actionTrigger')\n action_group_tt = ET.SubElement(action_group_t, 'wpml:actionTriggerType')\n action_group_tt.text = str(actionTriggerType) \n\n for action in actions:\n action_group_xml.append(action)\n \n return action_group_xml\n\n\n def build_waypoint_xml(self):\n \n location = str(self.longitude) + ', ' + str(self.latitude)\n\n waypoint_xml = ET.Element('Placemark')\n \n point = ET.SubElement(waypoint_xml, 'Point')\n coords = ET.SubElement(point, 'coordinates')\n coords.text = str(location)\n\n index_xml = ET.SubElement(waypoint_xml, 'wpml:index')\n index_xml.text = str(self.point_id)\n use_global_height = ET.SubElement(waypoint_xml, 'wpml:useGlobalHeight')\n use_global_height.text = str(self.useGlobalHeight)\n ellip_h = ET.SubElement(waypoint_xml, 'wpml:ellipsoidHeight')\n ellip_h.text = str(self.ellipsoidHeight)\n height_xml = ET.SubElement(waypoint_xml, 'wpml:height') \n height_xml.text = str(self.height)\n execute_height = ET.SubElement(waypoint_xml, 'wpml:executeHeight')\n execute_height.text = str(self.executeHeight)\n gl_sp_xml = ET.SubElement(waypoint_xml, 'wpml:useGlobalSpeed')\n gl_sp_xml.text = str(self.useGlobalSpeed)\n waypoint_speed = ET.SubElement(waypoint_xml, 'wpml:waypointSpeed')\n waypoint_speed.text = str(self.waypointSpeed) \n gl_hprm_xml = ET.SubElement(waypoint_xml, 'wpml:useGlobalHeadingParam')\n gl_hprm_xml.text = str(self.useGlobalHeadingParam)\n # needs to be created with function build_waypointHeadingParam.\n waypointHeadingParam = self.build_waypointHeadingParam()\n waypoint_xml.append(waypointHeadingParam)\n gl_tprm_xml = ET.SubElement(waypoint_xml, 'wpml:useGlobalTurnParam')\n gl_tprm_xml.text = str(self.useGlobalTurnParam)\n # waypointTurnParam needs to be created with a function\n waypointTurnParam = self.build_waypointTurnParam()\n waypoint_xml.append(waypointTurnParam)\n use_straigt_line = ET.SubElement(waypoint_xml, 'wpml:useStraightLine')\n use_straigt_line.text = str(self.useStraightLine) \n gpa_xml = ET.SubElement(waypoint_xml, 'wpml:gimbalPitchAngle')\n gpa_xml.text = str(self.gimbalPitchAngle)\n\n # Only add actions if there is a filled action list.\n if self.action_param_list != []:\n action_group_element = self.build_action_group()\n waypoint_xml.append(action_group_element)\n\n return 
waypoint_xml\n\n\nclass dji_kmz():\n def __init__(self, \n waypoints_elements: list,\n takeOffSecurityHeight: float,\n autoFlightSpeed: float,\n globalHeight: float,\n globalTransitionalSpeed: float = 10,\n flyToWaylineMode: str = 'safely',\n finishAction: str = 'goHome', \n exitOnRCLost: str = 'executeLostAction',\n executeRCLostAction: str = 'goBack',\n takeOffRefPoint = None,\n droneEnumValue: int = 60,\n droneSubEnumValue: int | None = None,\n coordinateMode: str = 'WGS84',\n heightMode: str = 'relativeToStartPoint',\n templateId: int = 0,\n globalWaypointTurnMode: str = 'toPointAndPassWithContinuityCurvature',\n globalUseStraightLine: int | None = 1,\n waypointTurnDampingDist: float | None = 1,\n gimbalPitchMode: str = 'usePointSetting',\n ellipsoidHeight: float = 0,\n waypointHeadingMode: str = 'followWayline',\n waypointHeadingAngle: int | None = None,\n waypointHeadingYawPathMode: str = 'followBadArc',\n executeHeightMode: str = 'relativeToStartPoint',\n nameAutor: str = 'dji_kml_creator', ):\n '''\n Drone payloads are not yet supported. Keyarguments are set for M300RTK.\n So please double check for other drones.\n\n (gimbalPitchMode is not added as gimbals are not yet supported by the code.)\n\n Args:\n takeOffSecurityHeight: Altitude that the UAV climbs to after \n taking off. This value is relative to the ground level. \n Range [1.5, 1500].\n autoFlightSpeed: Global flight speed in m/s. Range [0, max target flight speed].\n height: Global route height. Can be EGM96 altitude, relative take-off point height or AGL relative ground height. Standard is relative to take-off point. This can be changed in the heightMode parameter. This Element is used in conjunction with \"wpml:ellipsoidHeight\".\n templateType: For now only the 'waypoint' template is supported.\n templateId: Id of the template. Range [0, 65535].\n globalTransitionalSpeed: Speed in m/s the aircraft flight to and from \n the first and last waypoint respectively. Also the speed\n used when the mission is interupted. Must be larger than 0.\n flyToWaylineMode: Can be 'safely' or 'pointToPoint'. \n finishAction: Can be 'goHome', 'autoLand' or 'goToFirstWaypoint'.\n exitOnRCLost: Can be 'goContinue' or 'executeLostAction'\n executeRCLostAction: Can be 'goBack', 'landing' or 'hover'. It \n is overruled by the the exitOnRCLost parameter.\n takeOffRefPoint: In the form x,y,z. If None is provided it is \n not used. \n droneEnumValue: Drone type. 60 for M300RTK and 67 for M30 series.\n droneSubEnumValue: drone sub type. Is not required for M300RTK.\n coordinateMode: Currently only WGS84 is supported\n heightMode: Can be 'EGM96', 'relativeToStartPoint' or 'aboveGroundLevel'.\n globalWaypointTurnMode: Can be 'coordinateTurn', 'toPointAndStopWithDiscontinuityCurvature','toPointAndStopWithDiscontinuityCurvature','toPointAndPassWithContinuityCurvature'\n globalUseStraightLine: Boolean 0 or 1.\n ellipsoidHeight: Gives the ellipsoidheight in meters. Is used in combination with \"wpml:height\", which are expressions of different elevation reference planes at the same location.\n waypointHeadingMode: Can be 'followWayline', 'manually', 'fixed'.\n waypointHeadingAngle: Required if \"wpml:waypointHeadingMode\" is \"smoothTransition\". 
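(e.g. a value of 45 would yaw the nose smoothly toward the northeast over the flight segment). 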
Is given in degrees.\n waypointHeadingYawPathMode: Can be 'clockwise', 'counterClockwise' or 'followBadArc' (shortest rotation).\n executeHeightMode: Can be 'WGS84' or 'relativeToStartPoint'.\n nameAutor: Name of the author in the created KML file.\n \n '''\n # self.payloadInfo = payloadInfo\n templateType = 'waypoint'\n\n self.waypoints_elements = waypoints_elements\n self.globalHeight = globalHeight\n self.takeOffSecurityHeight = takeOffSecurityHeight\n self.autoFlightSpeed = autoFlightSpeed\n self.executeRCLostAction = executeRCLostAction\n self.takeOffRefPoint = takeOffRefPoint\n self.nameAutor = nameAutor\n self.finishAction = finishAction\n self.exitOnRCLost = exitOnRCLost\n self.flyToWaylineMode = flyToWaylineMode\n self.globalTransitionalSpeed = globalTransitionalSpeed\n self.droneEnumValue = droneEnumValue\n self.droneSubEnumValue = droneSubEnumValue\n self.templateType = templateType\n self.templateId = templateId\n self.globalWaypointTurnMode = globalWaypointTurnMode\n self.coordinateMode = coordinateMode\n self.heightMode = heightMode\n self.globalUseStraightLine = globalUseStraightLine\n self.ellipsoidHeight = ellipsoidHeight\n self.waypointHeadingMode = waypointHeadingMode\n self.waypointHeadingAngle = waypointHeadingAngle\n self.waypointHeadingYawPathMode = waypointHeadingYawPathMode\n self.executeHeightMode = executeHeightMode\n self.waylineId = 0\n self.waypointTurnDampingDist = waypointTurnDampingDist\n self.gimbalPitchMode = gimbalPitchMode\n self.__check_input_types__()\n self.__check_required_rules__()\n\n def __check_input_types__(self):\n options = []\n ## Options for Template.kml settings\n # Options missionConfig\n options.append(\n (\n self.flyToWaylineMode,\n 'enum',\n 'safely',\n 'pointToPoint',\n ))\n options.append((\n self.finishAction,\n 'enum',\n 'goHome',\n 'autoLand',\n 'gotoFirstWaypoint',\n ))\n options.append((\n self.exitOnRCLost,\n 'enum',\n 'goContinue',\n 'executeLostAction',\n ))\n options.append((\n self.executeRCLostAction,\n 'enum',\n 'goBack',\n 'landing',\n 'hover',\n ))\n options.append((\n self.takeOffSecurityHeight,\n 'range',\n 1.5,\n 1500,\n ))\n options.append((\n self.globalTransitionalSpeed,\n 'range_not_equal_to_borders',\n 0,\n 99999999, # Must be larger than 0\n ))\n\n # Options Template information\n options.append((\n self.templateType,\n 'enum',\n 'waypoint',\n 'mapping2d',\n 'mapping3d',\n 'mappingStrip',\n ))\n options.append((\n self.templateId,\n 'range',\n 0,\n 65535,\n ))\n options.append((\n self.autoFlightSpeed,\n 'range_not_equal_to_borders',\n 0,\n 50, # max speed of the aircraft can be set higher when required.\n ))\n # Options Waypoint Template \n options.append((\n self.globalWaypointTurnMode, \n 'enum',\n 'coordinateTurn',\n 'toPointAndStopWithDiscontinuityCurvature',\n 'toPointAndStopWithContinuityCurvature',\n 'toPointAndPassWithContinuityCurvature',\n ))\n options.append((\n self.globalUseStraightLine,\n 'enum',\n 0,\n 1,\n ))\n options.append((\n self.gimbalPitchMode,\n 'enum',\n 'manual',\n 'usePointSetting',\n ))\n\n ## Options for Waylines.wpml settings\n # Mission Information already covered\n\n # Waylines Information\n options.append((\n self.executeHeightMode,\n 'enum',\n 'WGS84',\n 'relativeToStartPoint',\n ))\n options.append((\n self.waylineId,\n 'range',\n 0,\n 65535,\n )) \n\n ## Options for Common Elements settings\n # droneInfo\n options.append((\n self.droneEnumValue,\n 'enum',\n 60,\n 67,\n 77,\n ))\n options.append((\n self.droneSubEnumValue,\n 'enum',\n 0,\n 1,\n None,\n ))\n # As payload is not 
supported by the current code, payloadInfo is also not checked\n #waylineCoordinateSysParam\n options.append((\n self.coordinateMode,\n 'enum',\n 'WGS84',\n ))\n options.append((\n self.heightMode,\n 'enum',\n 'EGM96',\n 'relativeToStartPoint',\n 'aboveGroundLevel',\n 'realTimeFollowSurface', # only supported by Mavic 3 Enterprise series.\n ))\n # options.append(options_positioningType = ( #Only used to mark the positioningtype. Does not affect route execution.\n # self.positioningType,\n # 'enum',\n # 'GPS',\n # 'RTKBaseStation',\n # 'QianXun',\n # 'Custom',\n # ))\n # globalShootHeight, surfaceFollowModeEnable, surfaceRelativeHeight only available for template types mapping2d, mapping3d, mappingStrip\n # is not supported by this code.\n # is not yet supported by this code.\n # & \n options.append((\n self.waypointHeadingMode,\n 'enum',\n 'followWayline',\n 'manually',\n 'fixed',\n 'smoothTransition', # The target yaw angle for a waypoint is given by \"wpml:waypointHeadingAngle\" and transitions evenly to the target yaw angle of the next waypoint during the flight segment.\n ))\n if self.waypointHeadingAngle != None:\n options.append(( # this is not set in the documentation. But good to check to be sure.\n self.waypointHeadingAngle,\n 'range',\n -180,\n 180,\n ))\n options.append((\n self.waypointHeadingYawPathMode,\n 'enum',\n 'clockwise',\n 'counterClockwise',\n 'followBadArc',\n ))\n\n # should be given as input if required\n # options.append(options_waypointTurnMode = (\n # self.waypointTurnMode,\n # 'enum',\n # 'coordinateTurn',\n # 'toPointAndStopWithDiscontinuityCurvature',\n # 'toPointAndStopWithContinuityCurvature',\n # 'toPointAndPassWithContinuityCurvature',\n # ))\n # is given as input.\n # is given as input\n # is given as input\n # is given as input\n # is not supported yet\n # startRecord is not supported yet\n # stopRecord is not supported yet\n # focus is not supported yet\n # zoom is not supported yet\n # customDirName is not supported yet\n # gimbalRotate is not supported yet\n # rotateYaw is given as input\n # hover is given as input\n\n # to add globalTransitionalSpeed and globalSpeed larger than 0\n\n # Test if the correct parameters have been given as input.\n for options_param in options:\n if options_param[1] == 'enum':\n if options_param[0] not in options_param[2:]:\n raise ValueError(f'Not a possible value. Parameter was {options_param[0]}, but can only be one of the following arguments: {options_param[2:]}')\n elif options_param[1] == 'range':\n if not (options_param[0] >= options_param[2]) & (options_param[0] <= options_param[3]): \n raise ValueError(f'Not a possible value. Parameter was {options_param[0]}, but can only be between: {options_param[2:]}')\n elif options_param[1] == 'range_not_equal_to_borders':\n if not (options_param[0] > options_param[2]) & (options_param[0] < options_param[3]): \n raise ValueError(f'Not a possible value. 
Parameter was {options_param[0]}, but can only be between: {options_param[2:]}')\n\n def __check_required_rules__(self):\n # globalUseStraightLine\n if (self.globalWaypointTurnMode == \"toPointAndStopWithContinuityCurvature\") | (self.globalWaypointTurnMode == \"toPointAndPassWithContinuityCurvature\"):\n if (self.globalUseStraightLine != 0) and (self.globalUseStraightLine != 1) :\n raise ValueError('globalUseStraightLine cannot be None if \"wpml:globalWaypointTurnMode\" is set to \"toPointAndStopWithContinuityCurvature\" or \"toPointAndPassWithContinuityCurvature\".')\n\n if self.droneEnumValue == 67:\n if self.droneSubEnumValue == None:\n raise ValueError('This element is required when droneEnumValue is 67(M30 Series) and connot be None.')\n \n if self.waypointHeadingMode == \"smoothTransition\":\n if self.waypointHeadingAngle == None:\n raise ValueError('Required if \"wpml:waypointHeadingMode\" is \"smoothTransition\" and cannot be set to None in this case.')\n\n if self.globalWaypointTurnMode == \"coordinateTurn\":\n if self.waypointTurnDampingDist == None:\n raise ValueError('waypointTurnDampingDist is required when waypointTurnMode\" is \"coordinateTurn\", \"wpml:waypointTurnMode\" is \"toPointAndPassWithContinuityCurvature\" and \"wpml:useStraightLine\" is 1')\n if self.globalUseStraightLine == 1:\n if self.waypointTurnDampingDist == None:\n raise ValueError('waypointTurnDampingDist is required when waypointTurnMode\" is \"coordinateTurn\", \"wpml:waypointTurnMode\" is \"toPointAndPassWithContinuityCurvature\" and \"wpml:useStraightLine\" is 1')\n if self.globalWaypointTurnMode == \"toPointAndPassWithContinuityCurvature\":\n if self.waypointTurnDampingDist == None:\n raise ValueError('waypointTurnDampingDist is required when waypointTurnMode\" is \"coordinateTurn\", \"wpml:waypointTurnMode\" is \"toPointAndPassWithContinuityCurvature\" and \"wpml:useStraightLine\" is 1')\n # check if WaypointTurnMode in point \n\n def build_globalWaypointHeadingParam(self):\n global_waypoint_heading = ET.Element('wpml:globalWaypointHeadingParam')\n mode = ET.SubElement(global_waypoint_heading, 'wpml:waypointHeadingMode')\n mode.text = str(self.waypointHeadingMode)\n if self.waypointHeadingAngle != None:\n angle = ET.SubElement(global_waypoint_heading, 'wpml:waypointHeadingAngle')\n angle.text = str(self.waypointHeadingAngle)\n path = ET.SubElement(global_waypoint_heading, 'wpml:waypointHeadingYawPathMode')\n path.text = str(self.waypointHeadingYawPathMode)\n return global_waypoint_heading\n\n def build_waypoint_template(self):\n ''' This function adds all waypoint elements to 1 list.'''\n xml_template_list = []\n global_waypoint_turn_mode = ET.Element('wpml:globalWaypointTurnMode')\n global_waypoint_turn_mode.text = str(self.globalWaypointTurnMode)\n xml_template_list.append(global_waypoint_turn_mode)\n # global_waypoint_straightline is only added as it is marked as required.\n if self.globalUseStraightLine != None:\n global_waypoint_straightline = ET.Element('wpml:globalUseStraightLine')\n global_waypoint_straightline.text = str(self.globalUseStraightLine)\n xml_template_list.append(global_waypoint_straightline)\n gimbal_pitch_mode = ET.Element('wpml:gimbalPitchMode')\n gimbal_pitch_mode.text = str(self.gimbalPitchMode)\n xml_template_list.append(gimbal_pitch_mode) \n ellipsoid_height = ET.Element('wpml:ellipsoidHeight')\n ellipsoid_height.text = str(self.ellipsoidHeight)\n xml_template_list.append(ellipsoid_height) \n # Changed to globalHeight instead of height after not loading in the DJI pilot 2 
app\n global_height = ET.Element('wpml:globalHeight')\n global_height.text = str(self.globalHeight)\n xml_template_list.append(global_height) \n global_waypoint_heading_params = self.build_globalWaypointHeadingParam() \n xml_template_list.append(global_waypoint_heading_params) \n\n # Remove wpml:executeHeight from waypoint.kml file\n waypoints_elements_kml = []\n elements = copy.deepcopy(self.waypoints_elements)#[:]\n for element in elements:\n wpml_executeHeight = element.find('wpml:executeHeight')\n if wpml_executeHeight != None: \n element.remove(wpml_executeHeight)\n waypoints_elements_kml.append(element)\n\n # Add waypoints\n waypoints_xml_template_list = xml_template_list + waypoints_elements_kml\n\n return waypoints_xml_template_list \n\n def build_mission_config(self):\n mission_config = ET.Element('wpml:missionConfig')\n ftwlm = ET.SubElement(mission_config, 'wpml:flyToWaylineMode')\n ftwlm.text = str(self.flyToWaylineMode)\n finish_ac = ET.SubElement(mission_config, 'wpml:finishAction')\n finish_ac.text = str(self.finishAction)\n exit_rc_lost = ET.SubElement(mission_config, 'wpml:exitOnRCLost')\n exit_rc_lost.text = str(self.exitOnRCLost)\n ex_rc_lost_action = ET.SubElement(mission_config, 'wpml:executeRCLostAction')\n ex_rc_lost_action.text = str(self.executeRCLostAction) \n tofs = ET.SubElement(mission_config, 'wpml:takeOffSecurityHeight')\n tofs.text = str(self.takeOffSecurityHeight)\n gts = ET.SubElement(mission_config, 'wpml:globalTransitionalSpeed')\n gts.text = str(self.globalTransitionalSpeed)\n # Drone info\n drone_info = ET.SubElement(mission_config, 'wpml:droneInfo')\n drone_enum = ET.SubElement(drone_info, 'wpml:droneEnumValue')\n drone_enum.text = str(self.droneEnumValue)\n if self.droneSubEnumValue != None:\n drone_sub_enum = ET.SubElement(drone_info, 'wpml:droneSubEnumValue')\n drone_sub_enum.text = str(self.droneSubEnumValue)\n # Payload info\n ## is for now skipped as Yellowscan payload is not supported. 
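An illustrative, untested sketch of how it could be attached (element names assumed from the DJI WPML docs):\n # payload_info = ET.SubElement(mission_config, 'wpml:payloadInfo')\n # ET.SubElement(payload_info, 'wpml:payloadEnumValue').text = '0'\n # ET.SubElement(payload_info, 'wpml:payloadPositionIndex').text = '0'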
\n return mission_config\n\n def build_kml(self):\n kml_xml = ET.Element('kml')\n kml_xml.set('xmlns', \"http://www.opengis.net/kml/2.2\")\n kml_xml.set('xmlns:wpml', \"http://www.dji.com/wpmz/1.0.0\")\n document_xml = ET.SubElement(kml_xml, 'Document')\n\n # Add file creation information\n author = ET.SubElement(document_xml, 'wpml:author')\n author.text = str(self.nameAutor)\n createtime = ET.SubElement(document_xml, 'wpml:createTime')\n createtime.text = str(int( time.time_ns() / 1000 ))\n updatetime = ET.SubElement(document_xml, 'wpml:updateTime')\n updatetime.text = str(int( time.time_ns() / 1000 ))\n\n # Setup mission configuration, payload info is not added yet\n document_xml.append(self.build_mission_config())\n\n # Setup folder and template\n folder = ET.SubElement(document_xml, 'Folder')\n templatetype = ET.SubElement(folder, 'wpml:templateType')\n templatetype.text = str(self.templateType)\n template_id = ET.SubElement(folder, 'wpml:templateId')\n template_id.text = str(self.templateId)\n\n # Set the autoFlightSpeed\n global_speed = ET.SubElement(folder, 'wpml:autoFlightSpeed')\n global_speed.text = str(self.autoFlightSpeed)\n\n # Wayline coordinate system parameters\n coords_sys_params = ET.SubElement(folder, 'wpml:waylineCoordinateSysParam')\n coords_sys = ET.SubElement(coords_sys_params, 'wpml:coordinateMode')\n coords_sys.text = self.coordinateMode\n height_mode = ET.SubElement(coords_sys_params, 'wpml:heightMode')\n height_mode.text = self.heightMode\n # surfaceFollowModeEnable, globalShootHeight, surfaceRelativeHeight not \n # added yet as this cannot be used for waypoint missions.\n\n if self.templateType == 'waypoint':\n waypoint_xml_template_list = self.build_waypoint_template()\n for element in waypoint_xml_template_list:\n folder.append(element)\n return kml_xml \n\n def build_waylines_wpml(self):\n waylines_kml = ET.Element('kml')\n waylines_kml.set('xmlns',\"http://www.opengis.net/kml/2.2\")\n waylines_kml.set('xmlns:wpml',\"http://www.dji.com/wpmz/1.0.0\")\n document_xml = ET.SubElement(waylines_kml, 'Document')\n\n # Setup mission configuration, payload info is not added yet\n document_xml.append(self.build_mission_config())\n\n # Setup Waylines Information\n folder = ET.SubElement(document_xml, 'Folder')\n template_id = ET.SubElement(folder, 'wpml:templateId')\n template_id.text = str(self.templateId) \n # Create an increasing waylineID \n wayline_id = ET.SubElement(folder, 'wpml:waylineId')\n wayline_id.text = str(self.waylineId) \n self.waylineId +=1\n \n auto_flight_speed = ET.SubElement(folder, 'wpml:autoFlightSpeed')\n auto_flight_speed.text = str(self.autoFlightSpeed)\n execute_height_mode = ET.SubElement(folder, 'wpml:executeHeightMode')\n execute_height_mode.text = str(self.executeHeightMode)\n\n # Add placemark information.\n if self.templateType == 'waypoint':\n # Required to have different wayline and kml placemarks (self keeps these linked otherwise)\n wayline_elements = copy.deepcopy(self.waypoints_elements)\n for element in wayline_elements:\n # # The following elements should not be in the waylines.wpml file\n e = element\n wpml_height = e.find('wpml:height')\n wpml_Useglobalheight = e.find('wpml:useGlobalHeight')\n wpml_Ellispoidheight = e.find('wpml:ellipsoidHeight')\n wpml_Useglobalspeed = e.find('wpml:useGlobalSpeed')\n wpml_UseGlobalHeading = e.find('wpml:useGlobalHeading')\n wpml_useGlobalHeadingParam = e.find('wpml:useGlobalHeadingParam')\n # The following is required to be able to run this function\n # multiple times without producing 
errors.\n if wpml_height != None: \n e.remove(wpml_height)\n if wpml_Useglobalheight != None: \n e.remove(wpml_Useglobalheight)\n if wpml_Ellispoidheight != None: \n e.remove(wpml_Ellispoidheight)\n if wpml_Useglobalspeed != None: \n e.remove(wpml_Useglobalspeed)\n if wpml_UseGlobalHeading != None: \n e.remove(wpml_UseGlobalHeading)\n if wpml_useGlobalHeadingParam != None: \n e.remove(wpml_useGlobalHeadingParam)\n folder.append(e)\n\n # wpml:startActionGroup was not added as the documentation was not \n # clear about this parameter. And is not usable for the Matrice 300 RTK\n\n return waylines_kml\n\n def build_kmz(self, file = 'test.kmz'):\n # Make sure that a path can be provided.\n splitted_path = os.path.split(file)\n if splitted_path[0] == '':\n filename = file\n else:\n path_dir, filename = splitted_path\n \n kml_element = self.build_kml()\n waylines_wpml_element = self.build_waylines_wpml()\n\n # Make the kmz file in a temporary directory as files need to be\n # temporarily created to make a zip file. This zipfile is then\n # changed to the kmz file\n with tempfile.TemporaryDirectory(prefix = 'temp_kmz_1') as tmpdir_root:\n zip_path = os.path.join(tmpdir_root, 'zip_folder')\n wpmz_path = os.path.join(zip_path, 'wpmz')\n kml_path = os.path.join(wpmz_path, 'template.kml')\n wpml_path = os.path.join(wpmz_path, 'waylines.wpml')\n res_path = os.path.join(wpmz_path, 'res')\n \n # Create required folders\n os.mkdir(zip_path)\n os.mkdir(wpmz_path)\n os.mkdir(res_path)\n\n # Export xml files\n export_formated_xml(kml_element, kml_path)\n export_formated_xml(waylines_wpml_element, wpml_path)\n\n # Zip the file and move it to the final directory\n path_zip = os.path.join(tmpdir_root,filename) + '.zip'\n shutil.make_archive(\n os.path.join(tmpdir_root,filename),\n 'zip',\n zip_path,\n )\n\n shutil.move(path_zip, file)\n\ndef imu_callibration_j_turn(start_coordinate_epsg_local, epsg_local, epsg_kml, rotation, start_point_index, height, max_speed = 10, length = 30, turn_radius = 5):\n '''\n rotation = angle with the north-south line, in radians\n '''\n transformer = Transformer.from_crs(epsg_local,\n epsg_kml)\n x = np.zeros(6)\n y = np.zeros(6)\n \n # offsets along the bearing (assumed intent): dx = L*sin(rotation), dy = L*cos(rotation);\n # the y values previously reused the x start coordinate ([0]) by mistake\n x[0] = start_coordinate_epsg_local[0]\n x[1] = start_coordinate_epsg_local[0] + (length*np.sin(rotation))\n x[2] = start_coordinate_epsg_local[0]\n x[3] = start_coordinate_epsg_local[0] + (length*np.sin(rotation))\n x[4] = start_coordinate_epsg_local[0] + ((length+turn_radius)*np.sin(rotation))\n x[5] = start_coordinate_epsg_local[0] + ((length+turn_radius)*np.sin(rotation))\n\n y[0] = start_coordinate_epsg_local[1]\n y[1] = start_coordinate_epsg_local[1] + (length*np.cos(rotation))\n y[2] = start_coordinate_epsg_local[1]\n y[3] = start_coordinate_epsg_local[1] + (length*np.cos(rotation))\n y[4] = start_coordinate_epsg_local[1] + ((length+turn_radius)*np.cos(rotation))\n y[5] = start_coordinate_epsg_local[1] + ((length+turn_radius)*np.cos(rotation))\n\n # Transform to kml crs\n transformed_lat, transformed_lon = transformer.transform(x, y)\n \n point0 = dji_waypoint_mission(\n start_point_index, \n transformed_lon, \n transformed_lat, \n height = height,\n useGlobalHeight = 1,\n useGlobalSpeed = 1,\n useGlobalTurnParam = 0,\n waypointTurnMode = 'toPointAndStopWithDiscontinuityCurvature',\n useStraightLine = 1,\n # waypointTurnDampingDist = checked_waypointTurnDampingDist,\n gimbalPitchAngle = 0,\n )\n\n\n\nif __name__ == '__main__':\n # Example code\n # waypoint ids must be unique and increase monotonically from 0 (see class docstring)\n point1 = dji_waypoint_mission(0, 4.233, 52.00)\n point1.add_hover_action(5)\n 
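# actions run in the order they are added: hover for 5 s, then yaw\n 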
point1.add_yaw_action(-20)\n point1_xml = point1.build_waypoint_xml()\n point2 = dji_waypoint_mission(1, 4.233, 52.00)\n point2.add_hover_action(22)\n point2.add_yaw_action(-5)\n point2_xml = point2.build_waypoint_xml()\n\n test = dji_kmz(\n [point1_xml, point2_xml],\n 80,\n 5,\n 80,\n )\n\n kml_element = test.build_kml()\n waylines_wpml_element = test.build_waylines_wpml()\n test.build_kmz(\"data/test_dji_kmz_creator.kmz\")\n \n string_xml = return_string(point1_xml)\n print(string_xml)\n \n print('############### KML ####################')\n string_xml = return_string(kml_element)\n print(string_xml)\n\n\n print('############### WAYLINES ####################')\n string_xml = return_string(waylines_wpml_element)\n print(string_xml)\n\n \n","repo_name":"Marijn-22/dji_flight_planner","sub_path":"flightplanner/dji_kmz_creator/dji_kmz_creator_functions.py","file_name":"dji_kmz_creator_functions.py","file_ext":"py","file_size_in_byte":41051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"30519668728","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : p300圆圈中最后的数字.py\n@Time : 2018/12/18 14:41:56\n@Author : Jeyson\n@Version : 1.0\n@Contact : dllOoOllb@163.com\n@Desc : None\n'''\n\n\ndef the_last_num(num_li, m):\n \"\"\"Delete every m-th number and print it\"\"\"\n # index of the current number\n index = 0\n num_pos = m\n while num_li:\n if num_pos == 1:\n print(num_li.pop(index))\n num_pos = m\n else:\n num_pos -= 1\n index += 1\n \n if index == len(num_li):\n index = 0\n\n\ndef main():\n num_li = [0, 1, 2, 3, 4]\n the_last_num(num_li, 6)\n # print(num_li)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jingv/SwordToOffer","sub_path":"p300圆圈中最后的数字.py","file_name":"p300圆圈中最后的数字.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"15029163778","text":"\"\"\"\nDesign a class to find the kth largest element in a stream. 
Note that it is the kth largest element in the sorted order, not the kth distinct element.\n\nImplement KthLargest class:\n- KthLargest(int k, int[] nums) Initializes the object with the integer k and the stream of integers nums.\n- int add(int val) Appends the integer val to the stream and returns the element representing the kth largest element in the stream.\n \n\nExample 1:\nInput\n[\"KthLargest\", \"add\", \"add\", \"add\", \"add\", \"add\"]\n[[3, [4, 5, 8, 2]], [3], [5], [10], [9], [4]]\nOutput\n[null, 4, 5, 5, 8, 8]\n\nExplanation\nKthLargest kthLargest = new KthLargest(3, [4, 5, 8, 2]);\nkthLargest.add(3); // return 4\nkthLargest.add(5); // return 5\nkthLargest.add(10); // return 5\nkthLargest.add(9); // return 8\nkthLargest.add(4); // return 8\n \nConstraints:\n1 <= k <= 10**4\n0 <= nums.length <= 10**4\n-10**4 <= nums[i] <= 10**4\n-10**4 <= val <= 10**4\nAt most 10**4 calls will be made to add.\nIt is guaranteed that there will be at least k elements in the array when you search for the kth element.\n\nsource: https://leetcode.com/problems/kth-largest-element-in-a-stream/\n\"\"\"\n\nimport heapq\nfrom typing import List\n\nclass KthLargest:\n\n def __init__(self, k: int, nums: List[int]):\n # solution: minHeap of size k\n # first value in minHeap will always be the kth largest value\n heapq.heapify(nums)\n \n # pop the smallest value until heap is size k\n while len(nums) > k:\n heapq.heappop(nums)\n \n self.nums = nums\n self.k = k\n\n def add(self, val: int) -> int:\n # add new value to heap\n heapq.heappush(self.nums, val)\n \n # if heap is larger than k, pop the smallest value\n if len(self.nums) > self.k:\n heapq.heappop(self.nums)\n \n # find and return the kth largest value\n kth_largest = self.nums[0]\n return kth_largest\n\n# Your KthLargest object will be instantiated and called as such:\n# obj = KthLargest(k, nums)\n# param_1 = obj.add(val)\n\nif __name__ == \"__main__\":\n INPUTS = (\n # nums # expected\n ([[3, [4, 5, 8, 2]], [3], [5], [10], [9], [4]], [None, 4, 5, 5, 8, 8]),\n )\n for inputs, expected in INPUTS:\n k = inputs[0][0]\n nums = inputs[0][1]\n kthLargest = KthLargest(k=k, nums=nums)\n for i in range(len(inputs[1:])):\n val = inputs[1:][i][0]\n actual = kthLargest.add(val)\n assert actual == expected[i+1], (actual, expected[i+1])\n","repo_name":"aarondelgiudice/leetCode","sub_path":"python/kthLargestElementInAStream.py","file_name":"kthLargestElementInAStream.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"43209921271","text":"from euler import *\r\nfrom itertools import combinations\r\nfrom math import factorial\r\n\r\nprime = prime_sieve(10000)[1:]\r\n\r\n\r\ndef check_concatenate(alist):\r\n a = len(alist)\r\n count = 0\r\n for j in combinations(alist, 2):\r\n if is_prime(int(str(j[0]) + str(j[1]))) and is_prime(int(str(j[1]) + str(j[0]))):\r\n count += 1\r\n if count == factorial(a) / (2 * factorial(a - 2)):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef sub_check(i):\r\n for j in prime[prime.index(i):]:\r\n if check_concatenate([i, j]):\r\n results = [i, j]\r\n n = prime.index(j)\r\n while len(results) < 5:\r\n a = list(results)\r\n a.append(prime[n])\r\n if check_concatenate(a):\r\n results.append(prime[n])\r\n n += 1\r\n if n == len(prime):\r\n break\r\n if len(results) == 5:\r\n return sum(results)\r\n return False\r\n\r\nfor i in prime:\r\n a = sub_check(i)\r\n if a != False:\r\n print(a)\r\n break\r\n\r\n# result = []\r\n# for i in 
combinations(prime, 5):\r\n# count = 0\r\n# c = i[0]\r\n# for j in combinations(i, 2):\r\n# if is_prime(int(str(j[0]) + str(j[1]))) and is_prime(int(str(j[1]) + str(j[0]))):\r\n# count += 1\r\n# if count == 10:\r\n# print(i)\r\n# break\r\n","repo_name":"tonyyzy/ProjectEuler","sub_path":"51-75/60.py","file_name":"60.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29139787427","text":"import asyncio\nimport errno\nimport json\nimport logging\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom aiohttp import BasicAuth, client_exceptions, web\n\nfrom .aidboxpy import AsyncAidboxClient\nfrom .db import DBProxy\nfrom .handlers import routes\nfrom .sdk import SDK\nfrom .settings import Settings\nfrom fhirpy.base.exceptions import OperationOutcome\n\nlogger = logging.getLogger(\"aidbox_sdk\")\nTHIS_DIR = Path(__file__).parent\nBASE_DIR = THIS_DIR.parent\n\n\ndef setup_routes(app):\n app.add_routes(routes)\n\n\nasync def register_app(sdk: SDK, client: AsyncAidboxClient):\n app_manifest = sdk.build_manifest()\n \n try:\n # We create app directly using execute to avoid conversion\n await client.execute(\n f\"/App/{app_manifest['id']}\", method=\"put\", data=app_manifest)\n \n logger.info(\"Creating seeds and applying migrations\")\n await sdk.create_seed_resources(client)\n await sdk.apply_migrations(client)\n logger.info(\"Aidbox app successfully registered\")\n except OperationOutcome as error:\n logger.error(\n \"Error during the App registration: %s\", json.dumps(error, indent=2)\n )\n sys.exit(errno.EINTR)\n except (\n client_exceptions.ServerDisconnectedError,\n client_exceptions.ClientConnectionError,\n ):\n logger.error(\n \"Aidbox address is unreachable {}\".format(sdk.settings.APP_INIT_URL)\n )\n sys.exit(errno.EINTR)\n\n\nasync def init_client(settings: Settings):\n AidboxClient = settings.AIDBOX_CLIENT_CLASS\n basic_auth = BasicAuth(\n login=settings.APP_INIT_CLIENT_ID,\n password=settings.APP_INIT_CLIENT_SECRET,\n )\n\n return AidboxClient(\n \"{}\".format(settings.APP_INIT_URL), authorization=basic_auth.encode()\n )\n\n\nasync def init(app):\n app[\"client\"] = await init_client(app[\"settings\"])\n app[\"db\"] = DBProxy(app[\"settings\"])\n await app[\"db\"].initialize()\n await register_app(app[\"sdk\"], app[\"client\"])\n yield\n await app[\"db\"].deinitialize()\n\n\ndef create_app(sdk: SDK):\n app = web.Application()\n app.cleanup_ctx.append(init)\n app.update(\n settings=sdk.settings,\n sdk=sdk,\n )\n setup_routes(app)\n return app\n","repo_name":"Aidbox/aidbox-python-sdk","sub_path":"aidbox_python_sdk/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"} +{"seq_id":"36369991022","text":"# 최소직사각형\n# https://programmers.co.kr/learn/courses/30/lessons/86491\n\ndef solution(sizes):\n answer = 0\n tmp = []\n wmax, hmax = 0, 0\n\n for w, h in sizes:\n if w > h:\n w, h = h, w\n tmp.append([w, h])\n \n if wmax < w: wmax = w\n if hmax < h: hmax = h\n \n answer = wmax * hmax\n \n return answer\n","repo_name":"oleveloper/problem-solving","sub_path":"programmers/86491.py","file_name":"86491.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18040395629","text":"from sys import stdin\n\nstdin = open('input.txt', 'r')\ninput = stdin.readline\n\n# functions\ndef 
DFS(start, end, curr):\n global N, result\n \n # print(end, curr)\n \n for move in dirs[curr]:\n nstart = end\n nend = [end[0]+move['dir'][0], end[1]+move['dir'][1]]\n ncurr = move['next']\n if nend == [N, N]:\n result += 1\n # print('goal')\n return\n if 0 <= nend[0] < N and 0 <= nend[1] < N:\n if ncurr in ['hor', 'ver'] and House[nend[0]][nend[1]] == 0:\n DFS(nstart, nend, ncurr)\n elif ncurr == 'diag' and House[nend[0]][nend[1]] == 0 and House[nend[0]-1][nend[1]] == 0 and House[nend[0]][nend[1]-1] == 0:\n DFS(nstart, nend, ncurr)\n################################################################\n# inputs\nN = int(input())\nHouse = [list(map(int, input().split())) for _ in range(N)]\n\n# init\ndirs = {\n 'hor': [\n {'dir': [0, 1], 'next': 'hor'},\n {'dir': [1, 1], 'next': 'diag'}\n ],\n 'ver': [\n {'dir': [1, 0], 'next': 'ver'},\n {'dir': [1, 1], 'next': 'diag'}\n ],\n 'diag': [\n {'dir': [0, 1], 'next': 'hor'},\n {'dir': [1, 1], 'next': 'diag'},\n {'dir': [1, 0], 'next': 'ver'}\n ]\n}\nresult = 0\n\n# exec\nDFS([0, 0], [0, 1], 'hor')\nprint(result)","repo_name":"ririro93/algorithm_probs","sub_path":"Baekjoon/11th_week/17070.py","file_name":"17070.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23601992094","text":"#!/usr/bin/env python3\n\n############################################################\n# Doing seismicicty event selection #\n# #\n# Y.K. Liu @ 2021 June #\n############################################################\n#%%\n\nimport os\nimport pandas as pd\nimport matplotlib.dates as mdates\nfrom shapely.geometry import Point, Polygon\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime as dt\nfrom datetime import timedelta\nfrom geopy.distance import geodesic\nimport dataUtils as du\n\nplt.rcParams.update({'font.size': 20})\n\n\nclass pre_analysis():\n \"\"\"\n pre-analysis of the seismicity\n \"\"\"\n\n def __init__(self, in_json=None, catalog='default'):\n ## Opening JSON file for metadata\n self.meta = du.read_meta(in_json)\n self.catalog = catalog\n ## Create output pics folder\n if not os.path.exists(self.meta['PIC_DIR']):\n print('Make pic output dir %s' % self.meta['PIC_DIR'])\n os.makedirs(self.meta['PIC_DIR'])\n self.load_data(self.catalog)\n return\n\n\n def load_data(self, catalog):\n ## Read the plate boundary data from file; format=(latitude, longitude)\n if self.meta['PLOT_PLATE_BOUND'] == 'yes':\n self.trench = du.read_plate_bound(self.meta['PLATE_BOUND_FILE'], self.meta['PLATE_BOUND_NAME'])\n\n ## Read the slab model data from file; format=(latitude, longitude, depth, thickness)\n if (self.meta['SLAB_DEPTH'] is not None) and (self.meta['SLAB_THICK'] is not None):\n _, tmp = du.read_xyz(self.meta['SLAB_DEPTH'])\n _, tmp_thk = du.read_xyz(self.meta['SLAB_THICK'])\n self.slab = np.vstack((tmp[:,1], tmp[:,0]-360, tmp[:,2], tmp_thk[:,2])).T\n\n ## Read the earthquake catalog\n if catalog == 'default':\n catalog = self.meta['CATALOG']\n print('Reading the seismicity catalog %s' % catalog)\n cat = du.read_cat(catalog)\n\n if len(cat) == 7:\n self.evid, self.dtime, self.dtime_s, self.lat, self.lon, self.dep, self.mag = cat\n elif len(cat) == 9:\n self.evid, self.dtime, self.dtime_s, self.lat, self.lon, self.dep, self.mag, self.td, self.sd = cat\n elif len(cat) == 10:\n self.evid, self.dtime, self.dtime_s, self.lat, self.lon, self.dep, self.mag, self.td, self.sd, self.mc = cat\n return\n\n\n def plot_select_events(self, o, o_back=False, 
draw_poly=False, savename=False):\n # Define cursor clicking\n\n def onclick(event):\n click = event.xdata, event.ydata\n if None not in click: # clicking outside the plot area produces a coordinate of None, so we filter those out.\n print('x = {}, y = {}'.format(*click))\n coords.append(click)\n\n if draw_poly:\n print(' > Please specify a {} polygon'.format(draw_poly))\n coords = []\n\n plt.figure(figsize=[14,10])\n if o_back is not False:\n plt.scatter(self.lon[o_back], self.lat[o_back], s=(2.2**self.mag[o_back]), ec='grey', marker='o', linewidths=0.2, fc='lightgrey', alpha=0.3)\n sc =plt.scatter(self.lon[o], self.lat[o], s=(2.2**self.mag[o]), ec='k', marker='o', linewidths=0.4, c=self.dep[o], cmap='jet_r')\n cbar = plt.colorbar(sc)\n cbar.set_label('Depth [km]')\n plt.xlim(self.meta['EXTENT'][0], self.meta['EXTENT'][1])\n plt.ylim(self.meta['EXTENT'][2], self.meta['EXTENT'][3])\n plt.plot(self.trench[:-1,1], self.trench[:-1,0], '--', color='k', lw=2, clip_on=True)\n plt.plot(self.trench[1:,1], self.trench[1:,0], '--', color='k', lw=2, clip_on=True)\n plt.title('{} events (draw a polygon for {})'.format(np.sum(o), draw_poly))\n if draw_poly:\n plt.gca().figure.canvas.mpl_connect('button_press_event', onclick)\n plt.savefig('{}/{}.png'.format(self.meta['PIC_DIR'], draw_poly), bbox_inches='tight', dpi=300)\n if savename:\n plt.savefig('{}/{}.png'.format(self.meta['PIC_DIR'], savename), bbox_inches='tight', dpi=300)\n plt.show()\n if draw_poly:\n coords.append(coords[0])\n if len(coords) != 0:\n np.savetxt(self.meta[draw_poly], coords, delimiter=\", \")\n print('Polygon saved to file: {}'.format(self.meta[draw_poly]))\n return coords\n else:\n return\n\n\n def Mc_history(self):\n \"\"\"Plot the magnitude completeness history\"\"\"\n # first check the mc for every epoch (each year)\n print('Checking the history of completeness magnitude')\n self.starttime = dt.datetime.strptime(self.meta['STARTTIME'],'%Y%m%d')\n self.endtime = dt.datetime.strptime(self.meta['ENDTIME'] ,'%Y%m%d')\n o_time = (self.dtime >= self.starttime) * (self.dtime < self.endtime)\n n_yr = int((self.endtime - self.starttime).days/365.25)\n\n print('There are {} events between {} and {}'.format(np.sum(o_time), self.starttime.strftime(\"%Y%m%d\"), self.endtime.strftime(\"%Y%m%d\")))\n epochs, bin_sec, Mcs = du.epoch_Mc(self.mag[o_time], self.dtime[o_time], n_yr, plot='no')\n bin_day = bin_sec/86400.0\n\n # choose some colors for plotting\n fc1 = 'r'\n fc2 = 'lightskyblue'\n\n # now make the plot\n fig, ax1 = plt.subplots(figsize=[14,14])\n ax2 = ax1.twinx()\n ax1.scatter(self.dtime, self.mag+np.random.uniform(-0.05, 0.05, len(self.mag)), marker='o', fc='grey', s=10, zorder=0)\n ax1.scatter(self.dtime[o_time][epochs]+timedelta(days=bin_day/2), np.array(Mcs), s=100, c=fc1, ec='k')\n ax1.plot(self.dtime[o_time][epochs]+timedelta(days=bin_day/2), np.array(Mcs), lw=2, c=fc1 ,zorder=0)\n ax2.hist(self.dtime[o_time], bins=n_yr, fc=fc2, ec='k', alpha=0.6)\n ax2.xaxis.set_major_locator(mdates.AutoDateLocator())\n ax1.text(0.1, 0.8, 'Binning ~{:.0f} days'.format(bin_day), c=fc1, transform=ax2.transAxes)\n ax1.text(0.1, 0.9, 'Binning ~{:.0f} days'.format(bin_day), c=fc2, transform=ax2.transAxes)\n ax2.set_xlabel('Year')\n ax2.set_ylabel('# events', rotation=270)\n ax1.set_ylabel('Magnitude of \\ncompleteness', color=fc1, labelpad=50)\n ax1.set_yticks(np.arange(1,8,0.5))\n ax1.grid(True, alpha=0.8)\n fig.savefig('{}/McHistory.png'.format(self.meta[\"PIC_DIR\"]), dpi=300, bbox_inches='tight')\n return\n\n\n def update_time(self, 
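# Hedged sketch (editorial, not from the original repo): du.maxc_Mc used in the
# record above is an external helper; judging by its name it implements the
# standard "maximum curvature" completeness estimate. A minimal stand-in under
# that assumption -- modal magnitude bin plus the conventional +0.2 correction:
import numpy as np

def maxc_mc_sketch(mags, bin_width=0.1, correction=0.2):
    mags = np.asarray(mags, dtype=float)
    edges = np.arange(mags.min(), mags.max() + bin_width, bin_width)
    counts, edges = np.histogram(mags, bins=edges)
    return edges[np.argmax(counts)] + correction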
yyyymmdd):\n \"\"\" Change the starttime to a more recent and reliable period \"\"\"\n self.meta[\"STARTTIME\"] = yyyymmdd\n self.starttime = dt.datetime.strptime(self.meta['STARTTIME'],'%Y%m%d')\n self.o_time = (self.dtime >= self.starttime) * (self.dtime < self.endtime)\n return\n\n\n def magfreq_dist(self):\n \"\"\"Look at overall magnitude-frequency distribution\"\"\"\n guess_Mc = du.maxc_Mc(self.mag[self.o_time], plot='yes', save=self.meta[\"PIC_DIR\"], title='ori', range=[0,8.5])\n print('Estimated overall Mc = {:.2f}'.format(guess_Mc))\n\n self.Mc = du.maxc_Mc(self.mag[self.o_time], plot='yes', save=self.meta[\"PIC_DIR\"], title='final', Mc=self.meta[\"Mc\"], range=[0,8.5])\n print('Decided Mc = {:.2f}'.format(self.Mc))\n return\n\n\n def update_Mc(self, Mc=None):\n \"\"\"Update the event selection based on Mc\"\"\"\n if Mc is None:\n Mc = self.Mc\n self.o_mag = (self.mag>=Mc)\n o_out = self.o_time * self.o_mag\n print('There are {} events between {} and {}, Mag >= {}'.format(np.sum(o_out), self.starttime.strftime(\"%Y%m%d\"), self.endtime.strftime(\"%Y%m%d\"), Mc))\n return o_out\n\n\n def manual_select(self, o_in, key):\n \"\"\"Draw a polygon for further event subset\n Example:\n around the faultl;\n within a region near the subduction zone;\n \"\"\"\n if self.meta['SELECT_{}'.format(key)] == 'yes':\n polytype='POLYGON_{}'.format(key)\n if (self.meta['UPDATE_{}'.format(key)] == 'no') and (os.path.exists(self.meta[polytype])):\n print('Read the existing polygon file: {}'.format(self.meta[polytype]))\n coords = pd.read_csv(self.meta[polytype], dtype='float', header=None).to_numpy()\n else:\n coords = self.plot_select_events(o_in, draw_poly=polytype)\n poly = Polygon(coords)\n o_man = []\n print('Classifying points in/out of the {}'.format(polytype))\n for i in range(len(self.lat)):\n pp = Point(self.lon[i], self.lat[i])\n o_man.append(pp.within(poly))\n o_man = np.array(o_man)\n return o_man\n\n\n def include_roi(self, o_in, key='ROI'):\n print('Make a polygon to include events within the region of interest')\n self.o_roi = self.manual_select(o_in, key)\n return\n\n\n def exclude_arc(self, o_in, key='ARC'):\n min_depth = self.meta['ARC_EV_DEPTH']\n print('Make a polygon to exclude arc events; will exclude events shallower than {} km'.format(min_depth))\n o_arc = self.manual_select(o_in, key)\n self.o_arc = ~(np.array(o_arc)*(self.dep<=min_depth))\n return\n\n\n def get_trench_proj_distance(self, o_in):\n \"\"\" Calculate trench projection location \"\"\"\n print('Calculate trench projection location')\n lalo = np.vstack([self.lat[o_in], self.lon[o_in]]).T\n self.td = du.calc_trench_project(lalo, self.trench)[-1]\n return self.td\n\n\n def get_slab_shortest_distance(self, o_in):\n \"\"\" Calculate hypos from slab model distance \"\"\"\n print('Calculate hypos from slab model distance')\n lalod = np.vstack([self.lat[o_in], self.lon[o_in], self.dep[o_in]]).T\n self.sd = du.calc_slab_distance(lalod, self.slab)\n return self.sd\n\n\n def update_catalog(self, infile, outfile='default', ext='', o_in=None, append_info=[]):\n \"\"\" Generate a new catalog file \"\"\"\n if outfile == 'default':\n outname = self.meta['OUT_CATALOG']+ext+'.csv'\n else:\n outname = outfile+ext+'.csv'\n cat = du.read_cat(infile, fullFile=True)\n if o_in is None:\n idx = np.ones(len(cat), dtype=bool)\n else:\n idx = o_in\n\n # get columns to save to a new catalog\n n = np.sum(idx)\n if cat.shape[1] == 22:\n # USGS original format (22 columns)\n data = np.concatenate([cat[idx,:6], cat[idx,10].reshape(n,1), 
cat[idx,11].reshape(n,1)], axis=1)\n            head = 'time, latitude, longitude, depth, mag, magType, net, id'\n        elif cat.shape[1] == 10:\n            # format appended with trench dist & slab dist\n            data = np.array(cat[idx])\n            head = 'time, latitude, longitude, depth, mag, magType, net, id, td, sd'\n\n        # append additional info (e.g., trench proj distance, distance to slab model, segment Mc)\n        if len(append_info) != 0:\n            for key, val in append_info.items():\n                head += ', {}'.format(key)\n                val = np.round(val, 4)\n                data = np.concatenate([data, val.reshape(n,1)], axis=1)\n        np.savetxt(outname, data, fmt='%s', header=head, delimiter=\", \")\n        print('Final selected events saved to file: {}'.format(outname))\n        return\n\n\n    def final_plot(self, o_in):\n        ## Make a final plot showing the selected seismicity\n        if self.meta['SELECT_ROI'] or self.meta['SELECT_ARC']:\n            msg = ' >> Time {} to {}\n'.format(self.starttime.strftime(\"%Y%m%d\"), self.endtime.strftime(\"%Y%m%d\"))\n            msg += ' >> Mag >= {}\n'.format(self.Mc)\n            o_out = self.o_time * self.o_mag\n            if self.meta['SELECT_ROI']:\n                o_out *= self.o_roi\n                msg += ' >> Within the ROI polygon\n'\n            if self.meta['SELECT_ARC']:\n                o_out *= self.o_arc\n                msg += ' >> Excluded the ARC events <= {} km depth\n'.format(self.meta['ARC_EV_DEPTH'])\n            msg += ' >> Total {} events'.format(np.sum(o_out))\n            print(msg)\n\n            # Plot final selection\n            print('Plot the final events further selected by the polygon')\n            self.plot_select_events(o_out, o_back=o_in, savename='polygon_final')\n\n            # Save the new selected catalog to file\n            self.update_catalog(infile=self.meta['CATALOG'], ext='', o_in=o_out)\n        return o_out\n\n\n    def spatial_chunck_mc(self):\n        td = self.td - min(self.td)\n        chunk_size = 100 # analyze Mc in each chunk (km)\n        chunks = np.arange(min(td), max(td), chunk_size)\n        chunks[-1] += chunk_size\n\n        depth_groups = dict()\n        depth_groups['shallow'] = [0, 30]\n        depth_groups['interm'] = [30, 70]\n\n        Mc_arr = np.zeros(len(self.mag))\n        for key, val in depth_groups.items():\n            idx1 = (self.dep>=val[0]) * (self.dep<val[1])\n            for i in range(len(chunks)-1):\n                idx2 = (td>=chunks[i]) * (td<chunks[i+1])\n= prev_e:\n                # Take previous interval\n                # Need to make decision of removing curr interval\n                cnt_removal += 1\n                # Another way to do this is to take the minimum of prev end time and curr end time\n                # prev_e = min(prev_e,curr_e)\n            elif curr_s >= prev_e:\n                # Previous mistake: curr_s > prev_e (this does not take care of the case where prev and curr intervals are apart from each other - meaning there is a time gap between end of prev and start of curr interval)\n\n                # No overlap situation\n                # Update the prev_s and prev_e\n                prev_s = curr_s\n                prev_e = curr_e\n\n        return cnt_removal\n","repo_name":"TheLargePanda7/LC_training","sub_path":"medium/eraseOverlapIntervals.py","file_name":"eraseOverlapIntervals.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71351814969","text":"import random\nimport requests\nfrom typing import Optional\nfrom bs4 import BeautifulSoup\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import url_for\n\n\ndef scrap_book_cats(book_l):\n    # this function will scrap the book categories and return a list of categories\n    # get the book link\n    book_link = \"https://www.pdfdrive.com\"+book_l.book_download_link\n    # get the html\n    html = requests.get(book_link)\n    # parse the html\n    soup = BeautifulSoup(html.text, \"html.parser\")\n    # get the categories\n    a_tags = soup.find_all('div', class_='ebook-tags')[0].find_all('a')\n    # get the categories names\n    categories = [a.text for a in a_tags]\n    return
categories\n\n\ndef split_string(s, char_to_split):\n \"\"\"\n Split a string into unique substrings using the specified character.\n\n Args:\n s (str): The input string.\n char_to_split (str): The character to split the string by.\n\n Returns:\n list: A list of unique substrings obtained from the split operation, or an empty list if input is None.\n \"\"\"\n if s is not None:\n if char_to_split in s:\n substrings = s.split(char_to_split)\n # Use a set to eliminate duplicate substrings\n unique_substrings = list(set(substrings))\n return unique_substrings\n else:\n return [s]\n else:\n return [\"New\"]\n\n\n# Example usage:\n# input_string = \"sadsadasdasd\"\n# char_to_split_on = \",\"\n# result = split_string(input_string, char_to_split_on)\n# print()\n\ndef remove_duplicates(input_list):\n seen = set()\n unique_items = []\n\n for item in input_list:\n item_set = frozenset(item.items())\n\n if item_set not in seen:\n unique_items.append(item)\n seen.add(item_set)\n\n return unique_items\n\n\ndef random_books(model: SQLAlchemy, limit=8, param=None):\n # this function will return a list of random books\n # get the books\n Book = model\n num = limit\n if param:\n books = Book.query.filter_by(category_name=param).all()\n else:\n books = Book.query.all()\n # shuffle the books\n random.shuffle(books)\n # get the first 5 books\n if num == \"all\":\n return books\n else:\n return books[:num]\n\n\ndef make_json(data=None):\n \"\"\"\n Converts a list of book objects into a list of dictionaries containing book information.\n\n Args:\n data (list): A list of book objects.\n\n Returns:\n list: A list of dictionaries containing book information.\n \"\"\"\n cat_books = data\n\n return [\n {\n \"book_title\": book.book_title,\n \"book_img_link\": book.book_img_link,\n \"book_pages\": book.book_pages,\n \"year\": book.year,\n \"book_size\": book.book_size,\n \"book_description\": book.book_description,\n \"book_author\": book.book_author,\n \"book_download_link\": book.book_download_link,\n \"book_id\": book.book_id,\n \"category\": book.category,\n \"scrap_cat\": book.scrap_cat,\n \"date\": book.date,\n \"book_link\": url_for(\"book\", book=book.book_title.replace(\" \", \"-\") + \"-\" + book.book_id)\n\n }\n for book in cat_books\n ]\n\n","repo_name":"MohamedAbdElgni/pdf-drivez","sub_path":"pdf_d/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8113087817","text":"class Solution(object):\n def find132pattern(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n if len(nums) < 3:\n return False\n n1, stack = -float('inf'), [nums[-1]]\n for i in range(len(nums) - 2, -1, -1):\n if nums[i] < n1:\n return True\n if nums[i] > stack[-1]:\n while stack and nums[i] > stack[-1]:\n n1 = max(stack.pop(), n1)\n stack.append(nums[i])\n return False\n","repo_name":"icearith/Leetcode-Group-Discussion","sub_path":"Leetcode/456. 
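# Illustrative usage (not from the original repo) of the pure helpers defined
# in the pdf-drivez record above; no web or database access is needed for these:
assert sorted(split_string("a,b,a", ",")) == ["a", "b"]  # duplicates collapsed via set()
assert split_string("plain", ",") == ["plain"]           # separator absent: whole string back
assert split_string(None, ",") == ["New"]                # None input falls back to ["New"]

rows = [{"id": 1}, {"id": 1}, {"id": 2}]
assert remove_duplicates(rows) == [{"id": 1}, {"id": 2}]  # order-preserving dedupe of dicts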
132 Pattern/Arith/arith.py","file_name":"arith.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36140208457","text":"import torch\r\nfrom torch import nn\r\n\r\nfrom models.yolo_layer import YOLOHead\r\nfrom models.yolov3_spp import FPNNeck\r\nfrom models.yolov4 import CSPDarknet\r\n\r\n\r\nclass YOLOv4FPN(nn.Module):\r\n \"\"\"CSPDarknet + SPP + FPN + YOLO\"\"\"\r\n def __init__(self, class_num, image_size=416, anchors=None, anchor_masks=None):\r\n super().__init__()\r\n if anchors is None:\r\n # 默认在 image_size=608 的anchors\r\n if image_size == 608:\r\n anchors = [[12, 16], [19, 36], [40, 28],\r\n [36, 75], [76, 55], [72, 146],\r\n [142, 110], [192, 243], [459, 401]]\r\n elif image_size == 416:\r\n anchors = [[10, 13], [16, 30], [33, 23],\r\n [30, 61], [62, 45], [59, 119],\r\n [116, 90], [156, 198], [373, 326]]\r\n\r\n if anchor_masks is None:\r\n anchor_masks = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\r\n\r\n self.anchors = torch.tensor(anchors)\r\n self.anchor_masks = torch.tensor(anchor_masks)\r\n self.class_num = class_num\r\n self.image_size = image_size\r\n\r\n self.backbone = CSPDarknet()\r\n self.neck = FPNNeck(self.class_num)\r\n self.yolo_layers = self._create_yolo_head(3)\r\n\r\n # 此处需要将yolo_layers列表展开,作为yolo4的属性,才能设置他们训练与否\r\n self.yolo_0 = self.yolo_layers[0]\r\n self.yolo_1 = self.yolo_layers[1]\r\n self.yolo_2 = self.yolo_layers[2]\r\n\r\n def forward(self, x):\r\n x0, x1, x2 = self.backbone(x)\r\n out0, out1, out2 = self.neck(x0, x1, x2)\r\n out0 = self.yolo_0(out0, self.image_size)\r\n out1 = self.yolo_1(out1, self.image_size)\r\n out2 = self.yolo_2(out2, self.image_size)\r\n\r\n if not self.training:\r\n return torch.cat([out0, out1, out2], dim=1)\r\n\r\n return [out0, out1, out2]\r\n\r\n def _create_yolo_head(self, n=3):\r\n layers = []\r\n for i in range(n):\r\n layer = YOLOHead(self.anchors[self.anchor_masks[i]], self.class_num)\r\n layers.append(layer)\r\n return layers\r\n\r\n\r\nif __name__ == '__main__':\r\n x = torch.ones((1, 3, 608, 608)).to(torch.device(\"cuda\"))\r\n\r\n model = YOLOv4FPN(class_num=1, image_size=416).to(torch.device(\"cuda\"))\r\n\r\n # model.eval()\r\n\r\n out = model(x)\r\n\r\n # print(out.shape)\r\n\r\n print(out[0].shape)\r\n print(out[1].shape)\r\n print(out[2].shape)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"YY0628/Crystal_Plate_Detect","sub_path":"models/yolov4_fpn.py","file_name":"yolov4_fpn.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29311708139","text":"#Juego adivina mi número\nfrom random import randint\nb=randint(1,20)\ni=0\nprint(b)\nwhile i<5:\n a = eval(input(\"Adivina el numero: \"))\n if a==b:\n print(\"Adivinaste, mi número era\",b)\n break\n if a>b:\n print(\"mi número es menor\")\n else:\n print(\"mi número es mayor\")\n i += 1\nif i==5:\n print(\"No adivinaste, mi número era\",b)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej12/hito1_ej12_b90da7fff28ef58dc27349939cba15b1.py","file_name":"hito1_ej12_b90da7fff28ef58dc27349939cba15b1.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42707095640","text":"from clang.cindex import *\nfrom glud import *\nfrom glud.predicates import *\n\n\ndef fully_qualified(c):\n res = c.spelling\n c = c.semantic_parent\n while (c 
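# Illustrative checks (not from the original repo) for the "456. 132 Pattern"
# record above: it scans right-to-left with a decreasing stack, tracking in n1
# the largest valid "2" value seen so far.
s = Solution()
assert s.find132pattern([1, 2, 3, 4]) is False  # strictly increasing: no 132 pattern
assert s.find132pattern([3, 1, 4, 2]) is True   # (1, 4, 2) forms a 132 pattern
assert s.find132pattern([-1, 3, 2, 0]) is True  # (-1, 3, 2) forms a 132 pattern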
is not None) and (c.kind != CursorKind.TRANSLATION_UNIT):\n        res = c.spelling + '::' + res\n        c = c.semantic_parent\n    return res\n\n\ndef limit_indirections(n):\n    def _limit_indirections(t):\n        cnt = 0\n        while t.kind == TypeKind.POINTER:\n            t = t.get_pointee()\n            cnt += 1\n        return cnt > n\n    return _limit_indirections\n\n\ndef is_ref_to_ptr(t):\n    \"\"\"True if a Type refers to a T*&\n    \"\"\"\n    assert( type(t) == Type )\n    return t.kind == TypeKind.LVALUEREFERENCE and t.get_pointee().kind == TypeKind.POINTER\n\n\ndef is_bool_ptr(c):\n    \"\"\"Test if a cursor refers to a boolean pointer\n    \"\"\"\n    if not c.type.kind == TypeKind.POINTER:\n        return False\n    if not c.type.get_pointee().spelling == 'bool':\n        return False\n    return True\n\n\ndef is_anonymous_argument(c):\n    \"\"\"Test if one argument is anonymous (unnamed)\n\n    In the declaration `void f(int x, int);` the second argument is unnamed\n    \"\"\"\n    return c.spelling is None or c.spelling == ''\n\n\ndef has_any_anonymous_arguments(m):\n    return any(is_anonymous_argument(a) for a in m.get_arguments())\n\n\ndef dependent_types(m):\n    \"\"\"Yield all of the types the method depends on\n    \"\"\"\n    lst = [ m.result_type ]\n    for arg in m.get_arguments():\n        lst.append(arg.type)\n    return lst\n\n\ndef method_signature(m):\n    rt = m.result_type.get_canonical()\n    cls = m.semantic_parent.type.spelling\n    args = ', '.join(a.type.get_canonical().spelling for a in m.get_arguments() )\n    res = '%s (%s::*)(%s)' % (rt.spelling, cls, args)\n    if m.is_const_method():\n        return res + ' const'\n    return res\n\n\ndef is_overload(c):\n    it = iter_child_nodes(is_kind(CursorKind.CXX_METHOD), c.semantic_parent)\n    names = [ m.spelling for m in it if m.spelling == c.spelling ]\n    return len(names) > 1\n\n\ndef underlying_type(t):\n    \"\"\" Retrieve the simplest version of this type\n    \"\"\"\n    if t.kind == TypeKind.POINTER:\n        return underlying_type(t.get_pointee())\n    elif t.kind == TypeKind.LVALUEREFERENCE:\n        return underlying_type(t.get_pointee())\n    elif t.kind == TypeKind.TYPEDEF:\n        return underlying_type(t.get_canonical())\n    if t.kind == TypeKind.UNEXPOSED:\n        canonical = t.get_canonical()\n        if canonical == t:\n            return t\n        else:\n            return underlying_type(t.get_canonical())\n    else:\n        return t\n\n\ndef in_decl_set(decls, c):\n    \"\"\" True if the cursor is in a sequence of cursors\n    \"\"\"\n    for d in decls:\n        if c == d:\n            return True\n    return False\n\n\ndef is_resolved_type(decls, t):\n    \"\"\" True if a type exists in the context of a set of declarations\n    \"\"\"\n    ut = underlying_type(t)\n    if is_builtin(ut):\n        return True\n    elif in_decl_set(decls, ut.get_declaration()):\n        return True\n    return False\n\n\ndef is_resolved_method(m, decls):\n    \"\"\" True if all of a method's dependent types exist in a set of declarations\n    \"\"\"\n    if not is_resolved_type(decls, m.result_type):\n        return False\n    for a in m.get_arguments():\n        if not is_resolved_type(decls, a.type):\n            return False\n    return True\n","repo_name":"AndrewWalker/clast","sub_path":"clastgen/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"30644299054","text":"import json\nfrom flask import Flask, render_template, request, Response\n\napp = Flask(__name__)\n\nwith open('books.json') as json_data:\n    books_data = json.load(json_data)['books']\n\n@app.route('/books', methods=['GET'])\ndef books():\n    return render_template('books.html', list_data=books_data)\n\n@app.route('/books/<int:book_id>', methods=['GET'])\ndef
book_info(book_id):\n    book = next((b for b in books_data if b['id'] == book_id), None)\n    if book is None:\n        return render_template('Error.html')\n    else:\n        return render_template('books.html', list_data=[book])\n\n@app.route('/books/<int:book_id>/orders', methods=['GET'])\ndef sales(book_id):\n    book = next((b for b in books_data if b['id'] == book_id), None)\n    if book is None:\n        return render_template('Error.html')\n    else:\n        sale_details = book['orders']\n        return render_template('books.html', list_data=sale_details)\n\n@app.route('/books/<int:book_id>/orders/<int:order_id>', methods=['GET'])\ndef sale_info(book_id, order_id):\n    book = next((b for b in books_data if b['id'] == book_id), None)\n    if book is None:\n        return render_template('Error.html')\n    else:\n        order = next((s for s in book['orders'] if s['order_id'] == order_id), None)\n        if order is None:\n            return render_template('Error.html')\n        else:\n            return render_template('books.html', list_data=[order])\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=8050)\n","repo_name":"rohithbhogaraju95/Restfulwebservice-Assignment5","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71428182329","text":"import random\r\nimport torch\r\nimport torch.utils.data.dataset as Dataset\r\nimport torch.utils.data.dataloader as DataLoader\r\n\r\ndef datafn(expnum=10):\r\n    def to_two(n):\r\n        if n==0:\r\n            return ''\r\n        else:\r\n            return to_two(int(n/2))+str(n%2)\r\n    def to_four(n):\r\n        if n==0:\r\n            return ''\r\n        else:\r\n            return to_four(int(n/4))+str(n%4)\r\n\r\n    data=random.sample(range(0,31),expnum)\r\n    while (15 in data) or (20 in data):\r\n        if 15 in data:\r\n            data.remove(15)\r\n            data = data + random.sample(range(0, 31), 1)\r\n        if 20 in data:\r\n            data.remove(20)\r\n            data = data + random.sample(range(0, 31), 1)\r\n    input=[]\r\n    output=[]\r\n    for data_item in data:\r\n        # 转2进制\r\n        bin = to_two(data_item)\r\n        bin = (5 - len(bin)) * '0' + bin\r\n        bin = list(bin)\r\n        bin = list(map(int, bin))\r\n\r\n        # 转4进制\r\n        four = to_four(data_item)\r\n        four = (3 - len(four)) * '0' + four\r\n        four = list(four)\r\n        four = list(map(int, four))\r\n        input.append(bin)\r\n        output.append(four)\r\n    return input,output\r\n\r\n\r\nclass Trainer(object):\r\n\r\n    def __init__(self, model=None, criterion=None, optimizer=None, dataset=None, USE_CUDA=False):\r\n        self.model = model\r\n        self.criterion = criterion\r\n        self.optimizer = optimizer\r\n        self.dataset = dataset\r\n        self.iterations = 0\r\n        self.USE_CUDA = USE_CUDA\r\n\r\n    def run(self, epochs=1):\r\n        # 每一个epoch 就是一次train的过程\r\n        for i in range(1, epochs + 1):\r\n            self.train()\r\n        self.test()\r\n\r\n    def train(self):\r\n        # 从dataloader 中拿数据\r\n        for i, data in enumerate(self.dataset, self.iterations + 1):\r\n            batch_input, batch_target = data\r\n            input_var = batch_input\r\n            target_var = batch_target\r\n            if self.USE_CUDA:\r\n                input_var = input_var.cuda()\r\n                target_var = target_var.cuda()\r\n\r\n            # 每一次前馈就是一次函数闭包操作\r\n            def closure():\r\n                batch_output = self.model(input_var)\r\n                print(\"input_var:{} batch_output:{} batch_target:{}\".format(input_var,batch_output,batch_target))\r\n                loss = self.criterion(batch_output, target_var)\r\n                loss.backward()\r\n                return loss\r\n\r\n            # loss 返回,准备优化\r\n            self.optimizer.zero_grad()\r\n            self.optimizer.step(closure)\r\n        self.iterations += i\r\n\r\n    def test(self):\r\n        test_input1=[0,1,1,1,1]\r\n        test_input1=torch.Tensor(test_input1)\r\n        test_output1 = self.model(test_input1)\r\n        test_input2 = [1, 0,
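# Illustrative smoke test (not from the original repo) for the Flask book
# service above, using the built-in test client; it assumes books.json holds
# at least one book with id 1, mirroring the fields the handlers read:
def smoke_test():
    client = app.test_client()
    assert client.get("/books").status_code == 200
    assert client.get("/books/1").status_code == 200             # assumed existing book id
    assert client.get("/books/1/orders/999").status_code == 200  # unknown order renders Error.html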
1, 0, 0]\r\n test_input2 = torch.Tensor(test_input2)\r\n test_output2 = self.model(test_input2)\r\n print(\"测试如下:\")\r\n print(\"test_input1:{} test_output1:{}\".format(test_input1,test_output1))\r\n print(\"test_input2:{} test_output2:{}\".format(test_input2,test_output2))\r\n\r\n\r\n\r\n\r\nclass MyLayer(torch.nn.Module):\r\n def __init__(self, in_features, out_features, bias=True):\r\n super(MyLayer, self).__init__() # 和自定义模型一样,第一句话就是调用父类的构造函数\r\n self.in_features = in_features\r\n self.out_features = out_features\r\n self.weight = torch.nn.Parameter(torch.zeros(in_features, out_features)) # 由于weights是可以训练的,所以使用Parameter来定义\r\n if bias:\r\n self.bias = torch.nn.Parameter(torch.zeros(out_features)) # 由于bias是可以训练的,所以使用Parameter来定义\r\n else:\r\n self.register_parameter('bias', None)\r\n\r\n def forward(self, input):\r\n # input_ = torch.pow(input, 2) + self.bias\r\n y = torch.matmul(input, self.weight)+self.bias\r\n return y\r\n\r\nD_in,D_out=5,3\r\n\r\nclass MyNet(torch.nn.Module):\r\n\r\n def __init__(self):\r\n super(MyNet, self).__init__() # 第一句话,调用父类的构造函数\r\n self.mylayer1 = MyLayer(D_in, D_out)\r\n\r\n def forward(self, x):\r\n x = self.mylayer1(x)\r\n return x\r\n\r\nclass SubDataset(Dataset.Dataset):\r\n def __init__(self, Data, Label):\r\n self.Data = Data\r\n\r\n self.Label = Label\r\n\r\n # 返回数据集大小\r\n\r\n def __len__(self):\r\n return len(self.Data)\r\n\r\n # 得到数据内容和标签\r\n\r\n def __getitem__(self, index):\r\n data = torch.Tensor(self.Data[index])\r\n\r\n label = torch.Tensor(self.Label[index])\r\n\r\n return data, label\r\n\r\n\r\ndef builder_trainer():\r\n model=MyNet()\r\n criterion=torch.nn.MSELoss(reduction='sum')\r\n learning_rate = 1e-4\r\n optimizer=torch.optim.Adam(model.parameters(), lr=learning_rate)\r\n DTinput,DToutput=datafn()\r\n print(\"数据集如下所示:\")\r\n print(DTinput)\r\n print(DToutput)\r\n print(\"\\n\\n\")\r\n data=SubDataset(DTinput,DToutput)\r\n dataloader=DataLoader.DataLoader(data,batch_size=1,shuffle=False)\r\n trainer=Trainer(model=model,criterion=criterion,optimizer=optimizer,dataset=dataloader)\r\n return trainer\r\n\r\n\r\nif __name__=='__main__':\r\n trainer=builder_trainer()\r\n trainer.run(1)","repo_name":"xiaomin418/classtest","sub_path":"test_train.py","file_name":"test_train.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14926314326","text":"\"\"\"\nThis is the points module for wrapping the points portion in the API.\n\nSpecifically, this module holds classes for the following urls:\n ``/points/{point}``\\n\n ``/points/{point}/stations``\\n\n\nYou can find the API documentation here: https://www.weather.gov/documentation/services-web-api#/\n\"\"\"\nimport copy\n\nimport pint\nfrom shapely.geometry import Point as pt\nimport pandas as pd\nimport numpy as np\n\nfrom old_package.errors import ParameterTypeError\nimport old_package.utils as utils\n\n\nclass PointError(utils.ErrorObject):\n \"\"\"An Error object for the point endpoints.\n\n Attributes\n ----------\n type: str\n A URI reference (RFC3986) that identifies the problem type.\n\n title: str\n A short, human-readable summary of the problem type.\n\n status: int\n The HTTP status code (RFC7231, Section 6) generated by the origin server for this occurrence of the problem.\n Minimum: 100, Max 999\n\n detail: str\n A human-readable explanation specific to this occurrence of the problem.\n\n instance: string\n A URI reference that identifies the specific occurrence of the problem.\n\n 
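# Illustrative round-trip check (not from the original repo): datafn() above
# emits 5 binary digits and 3 base-4 digits per sample; both must decode to
# the same integer in 0..30.
def digits_to_int(digits, base):
    value = 0
    for d in digits:
        value = value * base + d
    return value

xs, ys = datafn(expnum=10)
for bits, quads in zip(xs, ys):
    assert digits_to_int(bits, 2) == digits_to_int(quads, 4)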
correlationId: str\n A unique identifier for the request, used for NWS debugging purposes.\n Please include this identifier with any correspondence to help the API maintainers investigate your issue.\n \"\"\"\n\n def __init__(self, response):\n super().__init__(response)\n\n\nclass BasePoint(utils.ObjectIterator):\n\n n_errors = 0\n has_any_request_errors = True\n\n def _validate(self, lat, lon):\n valid_lat = (isinstance(lat, int), isinstance(lat, float))\n if not any(valid_lat):\n raise ParameterTypeError(lat, \"int or float\")\n\n valid_lon = (isinstance(lon, int), isinstance(lon, float))\n if not any(valid_lon):\n raise ParameterTypeError(lon, \"int or float\")\n\n def to_dict(self) -> dict:\n \"\"\"Puts all attributes in a dictionary.\n\n Returns\n -------\n self.dict: dict\n A dictionary containing all attributes of the object.\n \"\"\"\n\n return self.__dict__\n\n def to_pint(self, unit_registry : pint.UnitRegistry) -> object:\n \"\"\"Returns a new self object with units using Pint.\n\n Parameters\n ----------\n unit_registry: pint.UnitRegistry\n Your unit registry used in your application.\n \"\"\"\n\n new_point_obj = copy.deepcopy(self)\n if hasattr(self, 'distance'):\n distance = self.distance['value'] * unit_registry.meter\n new_point_obj.distance = distance\n new_point_obj.series['distance'] = distance\n if hasattr(self, 'stations'):\n for station in self.stations:\n station_elevation = station.elevation['value'] * unit_registry.meter\n station.elevation = station_elevation\n station.series['elevation'] = station_elevation\n if hasattr(self, 'bearing'):\n bearing = self.bearing['value'] * unit_registry.degrees\n new_point_obj.bearing = bearing\n new_point_obj.series['bearing'] = bearing\n\n return new_point_obj\n\n\nclass Point(BasePoint):\n r\"\"\"A class used to hold information about the points types found from ``/points``.\n\n Attributes\n ----------\n bearing: dict or pint units\n The bearing of the point.\n\n city: str\n The city of the selected point.\n\n county: str\n The county code for the selected point.\n\n countyUrl: str\n The URL of the specific county.\n\n cwa: str\n The associated NWS county warning area.\n\n distance: dict or pint units\n\n fireWeatherZone: str\n The fire weather zone ID.\n\n fireWeatherZoneUrl: str\n The associated fire weather zone URL for this point.\n\n forecast: str\n The forecast URL for this specific grid point (See also: gridX and gridY).\n\n forecastGridData: str\n The URL for the associated grid data (See also: gridX and gridY).\n\n forecastHourly: str\n The URL for the hourly forecast for this grid point (See also: gridX and gridY).\n\n forecastOffice: str\n The URL for the associated NWS office.\n\n forecastZone: str\n The forecast zone ID.\n\n forecastZoneUrl: str\n The URL for the forecast zone.\n\n gridId: str\n The ID associated with the grid point.\n\n gridX: int\n The grid value (latitude).\n\n gridY: int\n The grid value (longitude).\n\n lat: int or float\n The requested latitude.\n\n lon: int or float\n The requested longitude.\n\n observationStations: str\n The URL for the observation stations associated with the grid point.\n\n radarStation: str\n The radar station of the given gridpoint.\n\n response_headers: requests.structures.CaseInsensitiveDict\n A dictionary containing the response headers.\n\n series: pd.Series\n A pandas series containing information about the point.\n\n state: str\n The state where this point is located in.\n\n timeZone: str\n The timezone of where this point is located at.\n \"\"\"\n\n def 
__repr__(self):\n return f\"Point object located at {self.lat}, {self.lon} ({self.city}, {self.state})\"\n\n def __init__(self, lat, lon, user_agent):\n\n self._validate(lat, lon)\n\n self.lat = round(lat, 4) # api puts it into 4 decimals anyways\n self.lon = round(lon, 4)\n\n response = utils.request(f\"https://api.weather.gov/points/{self.lat},{self.lon}\", headers=user_agent)\n self.response_headers = response.headers\n\n if not response.ok: # successful retrieval.\n self.points = PointError(response)\n self.n_errors += 1\n self.has_any_request_errors = True\n else:\n response = response.json()['properties']\n\n response['forecastZoneUrl'] = response['forecastZone']\n response['forecastZone'] = response['forecastZone'].split(\"/\")[-1]\n\n response['countyUrl'] = response['county']\n response['county'] = response['county'].split(\"/\")[-1]\n\n response['fireWeatherZoneUrl'] = response['fireWeatherZone']\n response['fireWeatherZone'] = response['fireWeatherZone'].split(\"/\")[-1]\n\n rloc_props = response['relativeLocation']['properties']\n response['city'] = rloc_props['city']\n response['state'] = rloc_props['state']\n response['bearing'] = rloc_props['bearing']\n response['distance'] = rloc_props['distance']\n\n response['series'] = pd.Series(data = response)\n\n for k, v in response.items():\n setattr(self, k, v)\n\n\nclass Station:\n r\"\"\"A class used to hold information about the points types found from ``/points``.\n\n Attributes\n ----------\n\n elevation: dict or pint\n The elevation above sea level of the station.\n\n stationIdentifier: str\n The 4 letter identifier of the station.\n\n name: str\n The human-like string representing the name of the station.\n\n timeZone: str\n The timezone where the station is located\n\n forecast: str\n The URL of the associated forecast for the station.\n\n county: str\n The URL of which county in which the station resides.\n\n fireWeatherZone: str\n The URL associted with the fire weather zone.\n \"\"\"\n\n def __init__(self, station_info_d):\n # Assumes that station_info_d is one individual station (0, 1, etc)\n attribute_d = {}\n geom = station_info_d['geometry']\n props = station_info_d['properties']\n\n # put in the lat/lon in shapely geom\n # have to put in shapely.geometry.Point otherwise it'll try and call point from above.\n point = pt([geom['coordinates'][0], geom['coordinates'][1]])\n attribute_d['point'] = point\n attribute_d.update(props)\n\n self.series = pd.Series(attribute_d)\n\n for k, v in attribute_d.items():\n setattr(self, k, v)\n\n def to_dict(self) -> dict:\n \"\"\"Returns a dictionary of the attributes.\"\"\"\n\n return self.__dict__\n\n\nclass PointStation(BasePoint):\n r\"\"\"A class used to hold information about the points types found from ``/points``.\n\n Attributes\n ----------\n response_headers : requests.structures.CaseInsensitiveDict\n A dictionary containing the response headers.\n\n \"\"\"\n\n def __init__(self, lat, lon, user_agent):\n\n self._validate(lat, lon)\n\n self.lat = round(lat, 4) # api puts it into 4 decimals anyways\n self.lon = round(lon, 4)\n\n response = utils.request(f\"https://api.weather.gov/points/{self.lat},{self.lon}/stations\", headers=user_agent)\n self.response_headers = response.headers\n\n if not response.ok: # successful retrieval.\n self.points = PointError(response)\n self.n_errors += 1\n self.has_any_request_errors = True\n else:\n response = response.json()\n self.stations = [Station(x) for x in response['features']]\n\n self._iterable = self.stations # make this object 
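# Illustrative construction (not from the original repo): Station above only
# needs a GeoJSON-like feature dict, so it can be exercised with hand-rolled
# data; the values below are made up, the field names mirror the parser.
feature = {
    "geometry": {"coordinates": [-97.0892, 36.6219]},  # lon, lat
    "properties": {
        "stationIdentifier": "KXYZ",  # hypothetical station id
        "name": "Example Mesonet Site",
        "elevation": {"value": 300.0, "unitCode": "unit:m"},
        "timeZone": "America/Chicago",
    },
}
station = Station(feature)
print(station.stationIdentifier, station.point)  # KXYZ POINT (-97.0892 36.6219)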
iterable.\n\n    def to_dataframe(self) -> pd.DataFrame:\n        r\"\"\"Converts all of the retrieved stations into a Pandas dataframe.\n\n        Returns\n        -------\n        pandas.DataFrame\n            A dataframe that contains the information (attributes) of all of the stations requested.\n        \"\"\"\n        df = pd.DataFrame()\n        for station in self.stations:\n            df = df.append(station.series, ignore_index=True)  # append the series.\n\n        df = df.fillna(value=np.nan)  # in case there is missing data\n        return df\n","repo_name":"WxBDM/nwsapy","sub_path":"old_package/points.py","file_name":"points.py","file_ext":"py","file_size_in_byte":9655,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"20512516270","text":"# The most common for-loop approach\r\ndef solution(arr, divisor):\r\n    answer = []\r\n    for n in arr:\r\n        if n % divisor == 0:\r\n            answer.append(n)\r\n\r\n    if not answer:\r\n        return [-1]\r\n    else:\r\n        return sorted(answer)\r\n\r\n\r\n# Using a LIST COMPREHENSION\r\ndef solution(arr, divisor):\r\n    return sorted([ num for num in arr if num % divisor == 0]) or [-1]\r\n\r\n# sorted([]) == [] is falsy (boolean False), so [-1] is returned instead.\r\n\r\n# A note on return:\r\n# with return A or B, if both A and B are truthy, A is returned (the first value)\r\n# but if one of A and B is a None/empty value (boolean False), the truthy value is returned instead\r\n# i.e., if only one of them is truthy, that truthy value is returned.\r\n\r\n\r\n\r\n\r\n\r\n# In Python, values separated by commas without parentheses form a tuple.\r\n# ex) 1,2 = (1,2)","repo_name":"suhyeok24/Programmers-For-Coding-Test","sub_path":"Programmers Lv.1/프로그래머스 lv1. 나누어 떨어지는 숫자 배열.py","file_name":"프로그래머스 lv1. 나누어 떨어지는 숫자 배열.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21063258804","text":"import csv\nimport unittest\nfrom time import sleep\nfrom automate_driver.automate_driver import AutomateDriver\nfrom model.connect_sql import ConnectSql\nfrom pages.base.base_page import BasePage\nfrom pages.login.log_in_page_read_csv import LogInPageReadCsv\nfrom pages.login.login_page import LoginPage\nfrom pages.organize_management.organize_management import OrganizeManagement\nfrom pages.organize_management.organize_management_read_csv import OrganizeManagementReadCsv\nfrom pages.role_management.role_management import RoleManagement\nfrom pages.role_management.role_management_read_csv import RoleManagementReadCsv\nfrom pages.user_center.user_center import UserCenter\nfrom pages.user_center.user_center_read_csv import UserCenterReadCsv\n\n\n\nclass TestCase04RoleManageEditRole(unittest.TestCase):\n    # 测试角色管理修改角色\n    def setUp(self):\n        self.driver = AutomateDriver()\n        self.base_url = self.driver.base_url\n        self.base_page = BasePage(self.driver, self.base_url)\n        self.login_page = LoginPage(self.driver, self.base_url)\n        self.user_center = UserCenter(self.driver, self.base_url)\n        self.role_management = RoleManagement(self.driver, self.base_url)\n        self.log_in_page_read_csv = LogInPageReadCsv()\n        self.user_center_read_csv = UserCenterReadCsv()\n        self.role_management_read_csv = RoleManagementReadCsv()\n        self.driver.set_window_max()\n        self.connect_sql = ConnectSql()\n        self.driver.wait(1)\n        self.driver.clear_cookies()\n        self.driver.wait(1)\n\n\n\n    def tearDown(self):\n        self.driver.quit_browser()\n\n    def test_edit_role(self):\n        # 通过csv测试修改角色功能\n\n        csv_file = self.role_management_read_csv.read_csv('edit_role_info.csv')\n        csv_data = csv.reader(csv_file)\n        for row in csv_data:\n            edit_role_info = {\n                \"account\": row[0],\n                \"password\": row[1],\n                \"search_key\": row[2],\n                \"edit_role_name\": row[3],\n
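# Illustrative checks (not from the original repo) for the Programmers
# "divisible numbers" solution above, using what I recall as the published
# examples (treat them as assumed, not authoritative):
assert solution([5, 9, 7, 10], 5) == [5, 10]
assert solution([2, 36, 1, 3], 1) == [1, 2, 3, 36]
assert solution([3, 2, 6], 10) == [-1]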
\"edit_role_description\": row[4],\n \"edit_role_limit\": row[5],\n }\n\n # 打开风控首页-登录页\n self.base_page.open_page()\n sleep(1)\n # 登录账号\n self.login_page.user_login(edit_role_info['account'],edit_role_info['password'])\n\n\n # 判断登录成功后招呼栏的用户名是否正确\n username = self.user_center.get_username()\n\n # 从数据库获取登录账号的用户名\n account_info = self.user_center.get_account_info_by_sql(edit_role_info['account'])\n print(account_info)\n account_name = account_info[1]\n\n self.assertEqual(account_name, username, '登录成功后招呼栏的用户名错误')\n\n # 点击进入角色管理\n self.role_management.click_role_manage()\n\n # 查找11111角色\n self.role_management.search_role(edit_role_info['search_key'])\n # 获取页面查找结果\n search_role_name_01 = self.role_management.get_search_result_one()\n # 查询数据库获取搜索结果\n role_info_00 = self.role_management.get_search_result_by_sql(edit_role_info['account'], edit_role_info['search_key'])\n role_name_00 = role_info_00[0]\n role_desc_00 = role_info_00[1]\n self.assertEqual(search_role_name_01,role_name_00)\n\n # 点击修改\n self.role_management.click_edit_role()\n # 切入内层frame\n self.role_management.switch_to_2_frame()\n # 获取当前显示的角色名称是否与未修改前一致\n current_role_name_01 = self.role_management.get_current_role_name()\n self.assertEqual(search_role_name_01,current_role_name_01)\n # 获取显示的角色描述是否与未修改前一致\n current_role_desc = self.role_management.get_current_role_desc()\n self.assertEqual(role_desc_00,current_role_desc)\n\n # 输入角色名称\n self.role_management.input_add_role_name(edit_role_info['edit_role_name'])\n # 输入角色描述\n self.role_management.input_add_role_description(edit_role_info['edit_role_description'])\n # 选择角色权限\n self.role_management.choose_add_role_limit(edit_role_info['edit_role_limit'])\n # 点击取消\n self.role_management.click_add_role_dismiss()\n # 数据库查询是否修改失败\n role_info_01 = self.role_management.get_search_result_by_sql(edit_role_info['account'],\n edit_role_info['search_key'])\n role_name_01 = role_info_01[0]\n role_desc_01 = role_info_01[1]\n self.assertEqual(role_name_00, role_name_01)\n self.assertEqual(role_desc_00, role_desc_01)\n\n\n\n # 点击修改\n self.role_management.click_edit_role()\n # 切入内层frame\n self.role_management.switch_to_2_frame()\n # 获取当前显示的角色名称是否与未修改前一致\n current_role_name_01 = self.role_management.get_current_role_name()\n self.assertEqual(search_role_name_01, current_role_name_01)\n # 获取显示的角色描述是否与未修改前一致\n current_role_desc = self.role_management.get_current_role_desc()\n self.assertEqual(role_desc_00, current_role_desc)\n\n # 输入角色名称\n self.role_management.input_add_role_name(edit_role_info['edit_role_name'])\n # 输入角色描述\n self.role_management.input_add_role_description(edit_role_info['edit_role_description'])\n # 选择角色权限\n self.role_management.choose_add_role_limit(edit_role_info['edit_role_limit'])\n # 点击关闭按钮\n self.role_management.click_add_role_close()\n # 数据库查询是否修改失败\n role_info_02 = self.role_management.get_search_result_by_sql(edit_role_info['account'],\n edit_role_info['search_key'])\n role_name_02 = role_info_02[0]\n role_desc_02 = role_info_02[1]\n self.assertEqual(role_name_00, role_name_02)\n self.assertEqual(role_desc_00, role_desc_02)\n\n\n\n\n # 点击修改\n self.role_management.click_edit_role()\n # 切入内层frame\n self.role_management.switch_to_2_frame()\n # 获取当前显示的角色名称是否与未修改前一致\n current_role_name_01 = self.role_management.get_current_role_name()\n self.assertEqual(search_role_name_01, current_role_name_01)\n # 获取显示的角色描述是否与未修改前一致\n current_role_desc = self.role_management.get_current_role_desc()\n self.assertEqual(role_desc_00, current_role_desc)\n\n # 输入角色名称\n 
self.role_management.input_add_role_name(edit_role_info['edit_role_name'])\n # 输入角色描述\n self.role_management.input_add_role_description(edit_role_info['edit_role_description'])\n # 选择角色权限\n self.role_management.choose_add_role_limit(edit_role_info['edit_role_limit'])\n # 点击保存按钮\n self.role_management.click_add_role_save()\n # 数据库查询是否修改成功\n role_info_03 = self.role_management.get_search_result_by_sql(edit_role_info['account'],\n edit_role_info['edit_role_name'])\n role_name_03 = role_info_03[0]\n role_desc_03 = role_info_03[1]\n self.assertNotEqual(role_name_00, role_name_03)\n self.assertNotEqual(role_desc_00, role_desc_03)\n\n # 跳出外层frame\n self.role_management.switch_to_default_content()\n\n # 退出登录\n self.user_center.logout()\n\n csv_file.close()\n\n\n","repo_name":"huangqiming123/risk_control_automate_test","sub_path":"testcases/role_management/test_case_04_role_manage_edit_role.py","file_name":"test_case_04_role_manage_edit_role.py","file_ext":"py","file_size_in_byte":8190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23783801172","text":"import sys\n\n\ndef get_min(li, limit):\n li.sort()\n if len(li) < limit:\n return 1\n return li[max(0, len(li)-limit)]\n\n\nif __name__ == \"__main__\":\n N, M = map(int, sys.stdin.readline().split())\n classes = []\n for _ in range(N):\n P, L = map(int, sys.stdin.readline().split())\n classes.append(get_min(list(map(int, sys.stdin.readline().split())), L))\n classes.sort()\n answer = 0\n for c in classes:\n if M-c < 0:\n break\n M -= c\n answer += 1\n print(answer)\n\n","repo_name":"pangpang-study/algorithm-coding-test-study","sub_path":"nyc/211116/baekjoon_12018_연세토토.py","file_name":"baekjoon_12018_연세토토.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"1671360844","text":"from PySide6.QtWidgets import (\n QVBoxLayout,\n QWidget,\n QPushButton,\n QTextEdit,\n QLabel,\n QFileDialog,\n)\nfrom PySide6.QtGui import QPalette, QColor\nfrom PySide6.QtCore import Qt\nimport base64\n\n\nclass CriptografarScreen(QWidget):\n def __init__(self, parent):\n super().__init__()\n self.parent = parent\n self.pastaPath = \"\"\n self.setMouseTracking(True)\n self.inicializadorUi()\n\n # Inicializador desta interface\n def inicializadorUi(self):\n layout = QVBoxLayout()\n\n style = \"\"\"\n font-size: 14px; \n font-weight: bold; \n \"\"\"\n\n label = QLabel(\"Digite o texto para ser criptografado:\")\n layout.addWidget(label)\n\n self.erroLabel = QLabel(\"\")\n self.erroLabel.setStyleSheet(style)\n layout.addWidget(self.erroLabel)\n\n self.textoEdit = QTextEdit()\n layout.addWidget(self.textoEdit)\n\n self.label2 = QLabel(\"\")\n # Deixa o texto clicavel\n self.label2.setTextInteractionFlags(\n Qt.TextSelectableByMouse | Qt.TextSelectableByKeyboard\n )\n self.label2.setStyleSheet(\"font-size: 14px;\")\n layout.addWidget(self.label2)\n\n openDirButton = QPushButton(\"Selecionar Diretório\")\n openDirButton.setFixedHeight(35)\n openDirButton.clicked.connect(self.openDiretorio)\n layout.addWidget(openDirButton)\n\n cripButton = QPushButton(\"Gerar arquivo criptografado\")\n cripButton.setFixedHeight(40)\n cripButton.clicked.connect(self.criptografar)\n layout.addWidget(cripButton)\n\n voltarButton = QPushButton(\"Voltar ao Menu\")\n voltarButton.clicked.connect(self.parent.showVoltarMenu)\n voltarButton.setFixedHeight(30)\n layout.addWidget(voltarButton)\n\n self.setLayout(layout)\n\n def 
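# Illustrative checks (not from the original repo) for get_min above: with a
# seat limit L it returns the L-th largest mileage, or 1 when seats remain.
assert get_min([36, 1, 125], 2) == 36  # 2nd largest after sorting
assert get_min([13, 99], 5) == 1       # fewer applicants than the limit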
openDiretorio(self):\n options = QFileDialog.Options()\n options |= QFileDialog.ShowDirsOnly\n\n self.pastaPath = QFileDialog.getExistingDirectory(\n self, \"Selecionar Diretório\", \"\", options=options\n )\n\n # Metodo de condição, codificação e criptografia\n def criptografar(self):\n texto = self.textoEdit.toPlainText()\n if not texto.strip():\n palette = QPalette()\n palette.setColor(QPalette.WindowText, QColor(\"red\"))\n self.erroLabel.setPalette(palette)\n self.erroLabel.setText(\"Digite algo para ser criptografado!!\")\n elif not self.pastaPath.strip():\n palette = QPalette()\n palette.setColor(QPalette.WindowText, QColor(\"red\"))\n self.erroLabel.setPalette(palette)\n self.erroLabel.setText(\"Escolha a pasta para salvar o arquivo!\")\n else:\n texto_codificado = base64.b64encode(texto.encode(\"utf-8\"))\n self.chave = texto_codificado.decode(\"utf-8\")\n\n self.erroLabel.setText(\"\")\n\n texto_crip = CriptografarScreen.cifra_de_vigenere(texto, self.chave)\n\n self.label2.setText(f\"Sua chave de criptografia é: {self.chave}\")\n\n arquivo = open(f\"{self.pastaPath}/codificado.txt\", \"w\")\n arquivo.write(texto_crip)\n arquivo.close()\n\n palette = QPalette()\n palette.setColor(QPalette.WindowText, QColor(\"green\"))\n self.erroLabel.setPalette(palette)\n self.erroLabel.setText(\"As informações foram criptografadas!\")\n\n # Metodo de criptografia\n def cifra_de_vigenere(texto, chave):\n resultado = []\n\n # Pega cada caractere do texto digitado\n for i in range(len(texto)):\n # Verifica se o caractere é uma letra\n if texto[i].isalpha():\n # Verifica se o caractere do texto é maiusculo ou minusculo e retorna o numero unicode\n texto_offset = ord(\"a\") if texto[i].islower() else ord(\"A\")\n # Verifica se o caractere da chave é maiusculo ou minusculo e retorna o numero unicode\n chave_offset = ord(\"a\") if chave[i % len(chave)].islower() else ord(\"A\")\n\n # Calcula a quantidade de deslocamento\n deslocamento = (\n # Pega o numero unicode do carectere\n ord(texto[i])\n - texto_offset\n \n \n + ord(chave[i % len(chave)])\n - chave_offset\n ) % 26\n # Ele soma o codigo unicode do caractere digitado e do deslocamento, depois converte em letra\n resultado.append(chr(deslocamento + texto_offset))\n else:\n resultado.append(texto[i])\n\n return \"\".join(resultado)\n","repo_name":"SrAbnerP/aps-criptografia","sub_path":"criptografar_screen.py","file_name":"criptografar_screen.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32675203318","text":"from typing import Iterable\n\nfrom lineapy.execution.inspect_function import FunctionInspector\nfrom lineapy.instrumentation.annotation_spec import (\n AllPositionalArgs,\n BoundSelfOfFunction,\n ExternalState,\n ImplicitDependencyValue,\n InspectFunctionSideEffect,\n KeywordArgument,\n MutatedValue,\n PositionalArg,\n Result,\n ValuePointer,\n ViewOfValues,\n)\nfrom lineapy.system_tracing._object_side_effect import (\n ImplicitDependencyObject,\n MutatedObject,\n ObjectSideEffect,\n ViewOfObjects,\n)\nfrom lineapy.system_tracing.function_call import FunctionCall\nfrom lineapy.utils.lineabuiltins import LINEA_BUILTINS\n\n\ndef function_calls_to_object_side_effects(\n function_inspector: FunctionInspector,\n function_calls: Iterable[FunctionCall],\n) -> Iterable[ObjectSideEffect]:\n \"\"\"\n Turn the function calls into the side effects in terms of the Python objects.\n For example, \"the object [1, 2, 3] was mutated.\"\n \"\"\"\n for fc 
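# Illustrative counterpart (not from the original repo) to cifra_de_vigenere
# above: the same index walk, but subtracting the key offset, so an
# encrypt/decrypt round trip returns the original text. The key below is a
# made-up placeholder.
def decifra_de_vigenere(texto, chave):
    resultado = []
    for i in range(len(texto)):
        if texto[i].isalpha():
            texto_offset = ord("a") if texto[i].islower() else ord("A")
            chave_offset = ord("a") if chave[i % len(chave)].islower() else ord("A")
            deslocamento = (
                ord(texto[i]) - texto_offset
                - (ord(chave[i % len(chave)]) - chave_offset)
            ) % 26
            resultado.append(chr(deslocamento + texto_offset))
        else:
            resultado.append(texto[i])
    return "".join(resultado)

cifrado = CriptografarScreen.cifra_de_vigenere("mensagem", "chavehipotetica")
assert decifra_de_vigenere(cifrado, "chavehipotetica") == "mensagem"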
in function_calls:\n for side_effect in function_inspector.inspect(\n fc.fn, fc.args, fc.kwargs, fc.res\n ):\n yield from to_object_side_effects(side_effect, fc)\n\n\ndef to_object_side_effects(\n side_effect: InspectFunctionSideEffect, function_call: FunctionCall\n) -> Iterable[ObjectSideEffect]:\n if isinstance(side_effect, ViewOfValues):\n yield ViewOfObjects(\n [\n o\n for p in side_effect.views\n for o in pointer_to_objects(p, function_call)\n ]\n )\n elif isinstance(side_effect, MutatedValue):\n for o in pointer_to_objects(side_effect.mutated_value, function_call):\n yield MutatedObject(o)\n elif isinstance(side_effect, ImplicitDependencyValue):\n for o in pointer_to_objects(side_effect.dependency, function_call):\n yield ImplicitDependencyObject(o)\n else:\n raise NotImplementedError()\n\n\ndef pointer_to_objects(\n pointer: ValuePointer, function_call: FunctionCall\n) -> Iterable[object]:\n \"\"\"\n Translate a pointer to the actual values, based on the values in the function call.\n \"\"\"\n if isinstance(pointer, PositionalArg):\n yield function_call.args[pointer.positional_argument_index]\n elif isinstance(pointer, KeywordArgument):\n yield function_call.kwargs[pointer.argument_keyword]\n elif isinstance(pointer, AllPositionalArgs):\n yield from function_call.args\n elif isinstance(pointer, BoundSelfOfFunction):\n if hasattr(function_call.fn, \"__self__\"):\n yield function_call.fn.__self__ # type: ignore\n else:\n yield from []\n elif isinstance(pointer, Result):\n yield function_call.res\n elif isinstance(pointer, ExternalState):\n yield LINEA_BUILTINS[pointer.external_state]\n else:\n raise NotImplementedError(str(type(pointer)))\n","repo_name":"LineaLabs/lineapy","sub_path":"lineapy/system_tracing/_function_calls_to_object_side_effects.py","file_name":"_function_calls_to_object_side_effects.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":634,"dataset":"github-code","pt":"77"} +{"seq_id":"72799194809","text":"def divide(x, y):\n if y == 0:\n raise ZeroDivisionError('division by zero')\n\n quotient = 0\n power = 32 # Assume 32-bit integer\n yPower = y << power # Initial y^d value is y^32\n remainder = x # Initial remainder is x\n while remainder >= y:\n while yPower > remainder:\n yPower >>= 1\n power -= 1\n quotient += 1 << power\n remainder -= yPower\n\n return quotient\n","repo_name":"LennyGonz/LeetCode-Questions","sub_path":"Daily-Coding-Problem/problem-88.py","file_name":"problem-88.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15703148730","text":"import pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\n\r\n# Importing file\r\nforestfires = pd.read_csv(\"forestfires.csv\")\r\nforestfires.head()\r\nforestfires.describe()\r\ncolnames = forestfires.columns\r\nprint(colnames)\r\n\r\n\r\n# Boxplot of size_category with different variables\r\nsns.boxplot(x= \"FFMC\", y= \"size_category\", data=forestfires, palette = \"hls\")\r\nsns.boxplot(x = \"DMC\", y= \"size_category\", data=forestfires, palette= \"hls\")\r\nsns.boxplot(x = \"DC\", y= \"size_category\", data=forestfires, palette= \"hls\")\r\nsns.boxplot(x = \"ISI\", y= \"size_category\", data=forestfires, palette= \"hls\")\r\nsns.boxplot(x = \"temp\", y= \"size_category\", data=forestfires, palette= \"hls\")\r\nsns.boxplot(x = \"RH\", y= \"size_category\", data=forestfires, palette= \"hls\")\r\nsns.boxplot(x = \"wind\", y= \"size_category\", 
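# Illustrative check (not from the original repo): the shift-based divide()
# above should agree with floor division for non-negative x and positive y
# (the routine assumes 32-bit-sized operands):
for x in (0, 1, 27, 1000, 123456):
    for y in (1, 3, 4, 7, 32):
        assert divide(x, y) == x // y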
data=forestfires, palette= \"hls\")\r\n\r\nforestfires['month'].unique()\r\nforestfires['day'].unique()\r\nforestfires['size_category'].unique()\r\n\r\n# Label encoding\r\nfrom sklearn import preprocessing\r\nlabel_encoder = preprocessing.LabelEncoder()\r\n\r\nforestfires['month']=label_encoder.fit_transform(forestfires['month'])\r\nforestfires['day']=label_encoder.fit_transform(forestfires['day'])\r\nforestfires['size_category']=label_encoder.fit_transform(forestfires['size_category'])\r\n\r\nforestfires['month'].unique()\r\nforestfires['day'].unique()\r\nforestfires['size_category'].unique()\r\n\r\n# Converting datatype to 'category'\r\nforestfires['month']=forestfires['month'].astype('category')\r\nforestfires['day']=forestfires['day'].astype('category')\r\nforestfires['size_category']=forestfires['size_category'].astype('category')\r\n\r\n\r\n# Importing 'Support vector machine' classifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nx = forestfires.drop('size_category', axis =1 )\r\ny = forestfires['size_category']\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3, random_state = 0)\r\n\r\nx_train.head()\r\ny_train.head()\r\nx_test.head()\r\ny_test.head()\r\n\r\n# Creatting SVM classification object \r\n# Kernel = linear\r\nmodel_linear = SVC(kernel = \"linear\")\r\nmodel_linear.fit(x_train, y_train)\r\npred_linear_test = model_linear.predict(x_test)\r\n\r\nnp.mean(pred_linear_test==y_test) #Accuracy = 96.79%\r\n\r\n# Kernel = poly\r\nmodel_poly = SVC(kernel = \"poly\")\r\nmodel_poly.fit(x_train, y_train)\r\npred_poly_test = model_poly.predict(x_test)\r\n\r\nnp.mean(pred_poly_test==y_test) #Accuracy = 75.64%\r\n\r\n# Kernel = rbf\r\nmodel_rbf = SVC(kernel = \"rbf\")\r\nmodel_rbf.fit(x_train, y_train)\r\npred_rbf_test = model_rbf.predict(x_test)\r\n\r\nnp.mean(pred_rbf_test==y_test) #Accuracy = 72.43%\r\n\r\n# Kernel = sigmoid\r\nmodel_sigmoid = SVC(kernel = \"sigmoid\")\r\nmodel_sigmoid.fit(x_train, y_train)\r\npred_sigmoid_test = model_sigmoid.predict(x_test)\r\n\r\nnp.mean(pred_sigmoid_test==y_test) #Accuracy = 71.15%\r\n\r\n# Linear kernel has the best accuracy\r\n","repo_name":"sandeepnjois/ds","sub_path":"forestfires.py","file_name":"forestfires.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9560130162","text":"# IMPORT LIBRARIES & FUNCTIONS\n# External Libraries\nimport streamlit as st\nfrom PIL import Image\nfrom transfer_it import style_transfer, device_info\nimport cv2\nimport numpy as np\n\n\nst.write(\"# Style Transfer\")\n\nst.write('''Welcome to our Style Transfer web app! With this app, you can upload your own content and style images, and generate a stylized output image using a transformer model. \n Simply upload your images, click the 'Transfer Style' button, and wait for the output image to be generated. 
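# Illustrative extension (not from the original repo): the kernel comparison
# above scores a single train/test split; cross-validating the same four
# kernels is less split-sensitive. The casts assume the label-encoded frame
# built earlier in the record.
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

for kernel in ("linear", "poly", "rbf", "sigmoid"):
    scores = cross_val_score(SVC(kernel=kernel), x.astype(float), y.astype(int), cv=5)
    print(kernel, round(scores.mean(), 4))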
You can also clear your inputs by clicking the 'Clear Inputs' button.''')\n\n\ncontent_path_default = \"demo/content.jpeg\"\nstyle_path_default = \"demo/style.jpeg\"\n\ncol1, col2 = st.columns(2)\n\n# Add file pickers for content and style images\ncontent_path = col1.file_uploader(\"Upload Content Image\", type=[\"jpg\", \"jpeg\", \"png\"])\nstyle_path = col2.file_uploader(\"Upload Style Image\", type=[\"jpg\", \"jpeg\", \"png\"])\n\n# Check if user has uploaded new images or using defaults\nif content_path is None:\n content_path = content_path_default\nif style_path is None:\n style_path = style_path_default\n\ncontent_image = Image.open(content_path)\ncol1.image(content_image, caption=\"Content Image\", use_column_width=True)\nstyle_image = Image.open(style_path)\ncol2.image(style_image, caption=\"Style Image\", use_column_width=True)\n\n# Add button to generate stylized output image\noutput_shown = False\n\ndef apply_filters(img_path, filter_name,alpha=0.5):\n if isinstance(img_path,str):\n img = cv2.imread(img_path)\n else:\n img = Image.open(img_path)\n img = img.save(\"output/up_content.jpg\")\n img = cv2.imread(\"output/up_content.jpg\")\n \n if filter_name == \"sobel\":\n # Apply Sobel filter\n grad_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)\n grad_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)\n abs_grad_x = cv2.convertScaleAbs(grad_x)\n abs_grad_y = cv2.convertScaleAbs(grad_y)\n filtered_img = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)\n output_path = \"output/pre/c_sobel.jpeg\"\n\n elif filter_name == \"gaussian\":\n # Apply Gaussian filter\n filtered_img = cv2.GaussianBlur(img, (5, 5), 0)\n output_path = \"output/pre/c_gaussian.jpeg\"\n\n elif filter_name == \"median\":\n filtered_img = cv2.medianBlur(img, 5)\n output_path = \"output/pre/c_median.jpeg\" \n\n elif filter_name == \"bilateral\":\n filtered_img = cv2.bilateralFilter(img, 9, 75, 75)\n output_path = \"output/pre/c_bilateral.jpeg\"\n\n elif filter_name == \"equalize\":\n # Apply histogram equalization\n b, g, r = cv2.split(img)\n b_eq = cv2.equalizeHist(b)\n g_eq = cv2.equalizeHist(g)\n r_eq = cv2.equalizeHist(r)\n filtered_img = cv2.merge([b_eq, g_eq, r_eq])\n output_path = \"output/pre/c_equalized.jpeg\"\n\n elif filter_name == \"edgedetection\":\n # Convert the image to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Apply the Canny edge detection filter\n edges = cv2.Canny(gray, 100, 200)\n # Create a mask for the edges\n mask = np.zeros_like(img)\n mask[edges != 0] = 255\n # Apply the mask to the original image\n filtered_img = cv2.bitwise_and(img, mask)\n output_path = \"output/pre/c_edgedetection.jpeg\"\n\n elif filter_name == \"sharpen\":\n # Define the kernel for the sharpen filter\n kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])\n filtered_img = cv2.filter2D(img, -1, kernel)\n output_path = \"output/pre/c_sharpen.jpeg\"\n else:\n raise ValueError(\"Invalid filter name. 
Valid options are 'sobel', 'gaussian', and 'equalize'.\")\n\n # Superimpose the filtered image on the original image\n output_img = cv2.addWeighted(img, alpha, filtered_img, 1 - alpha, 0)\n\n # Save the filtered image\n cv2.imwrite(output_path, output_img)\n\n return output_path\n\n\nif st.button(\"Transfer Style\"):\n with st.spinner(\"Transferring style\"):\n output_path = \"output/output.jpg\"\n\n col1, col2, col3= st.columns(3)\n style_transfer(content_path, style_path)\n output_image = Image.open(output_path)\n col1.image(output_image, caption=\"Original\", width=256)\n\n sobel_path = apply_filters(content_path,'sobel')\n style_transfer(sobel_path, style_path)\n output_image = Image.open(output_path)\n col2.image(output_image, caption=\"/w Sobel Filter\", width=256)\n\n gaussian_path= apply_filters(content_path,'gaussian')\n style_transfer(gaussian_path, style_path)\n output_image = Image.open(output_path)\n col3.image(output_image, caption=\"/w Gaussian Filter\", width=256)\n\n equalized_path = apply_filters(content_path,'equalize')\n style_transfer(equalized_path, style_path)\n output_image = Image.open(output_path)\n col1.image(output_image, caption=\"/w Histogram Equalization\", width=256)\n \n median_path = apply_filters(content_path,'median')\n style_transfer(median_path, style_path)\n output_image = Image.open(output_path)\n col2.image(output_image, caption=\"/w Median Filter\", width=256)\n\n bilateral_path = apply_filters(content_path,'bilateral')\n style_transfer(bilateral_path, style_path)\n output_image = Image.open(output_path)\n col3.image(output_image, caption=\"/w Bilateral Filter\", width=256)\n\n edgedetection_path = apply_filters(content_path,'edgedetection')\n style_transfer(edgedetection_path, style_path)\n output_image = Image.open(output_path)\n col1.image(output_image, caption=\"/w Edge Detection\", width=256)\n\n sharpen_path = apply_filters(content_path,'sharpen')\n style_transfer(sharpen_path, style_path)\n output_image = Image.open(output_path)\n col2.image(output_image, caption=\"/w Sharpen Filter\", width=256)\n print('Done') \n output_shown = True\n\n# Show \"Clear Inputs\" button if output is being displayed\nif output_shown:\n if st.button(\"Clear outputs\"):\n output_image = None\n output_shown = False\n\ndevice = device_info()\n\nif str(device) == \"cpu\":\n st.write(\"Inference might be slow as the model is running on a cpu instance.\")\n","repo_name":"GaganRajSingh/ImageStyler","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71447177849","text":"import os\r\n\r\nfrom telegram.ext import Updater, CallbackQueryHandler, MessageHandler, Filters\r\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\r\nimport cv2\r\n\r\nfrom utils import check_id_and_save_image, overlay\r\nfrom config import Config\r\n\r\nconfig = Config()\r\n\r\n\r\ndef mark(update, context):\r\n try:\r\n id = update.message.forward_from.id\r\n key = update.message.chat_id\r\n check_id_and_save_image(id, config.token, f'images/{key}_org.jpg')\r\n context.user_data[key] = {\r\n 'status': 'id',\r\n 'id': id,\r\n 'badge_id': None\r\n }\r\n print(id)\r\n\r\n text = 'بدج مورد نظر خود را انتخاب کنید.'\r\n buttons = [\r\n InlineKeyboardButton(text=badge['title'], callback_data=f'badge_{badge[\"id\"]}')\r\n for badge in config.badges\r\n ]\r\n keyboard = [buttons[i:i + 2] for i in range(0, len(buttons), 2)]\r\n\r\n 
update.effective_message.reply_text(text, reply_markup=InlineKeyboardMarkup(keyboard))\r\n\r\n    except Exception as e:\r\n        print(e)\r\n        context.bot.send_message(chat_id=update.effective_chat.id, text='اهدای بدج به این کاربر امکان‌پذیر نیست.')\r\n\r\n\r\ndef badge(update, context):\r\n    try:\r\n        key = update.effective_chat.id\r\n        badge_id = update.callback_query.data.replace('badge_', '')\r\n        context.user_data[key] = {\r\n            'status': 'badge',\r\n            'id': context.user_data[key]['id'],\r\n            'badge_id': badge_id\r\n        }\r\n\r\n        image = cv2.imread(f'images/{key}_org.jpg')\r\n        badge_img = cv2.imread('badges/badge.png', -1)\r\n        badge_img = badge_img[:, (badge_img.shape[1] // 4):(badge_img.shape[1] // 4 * 3)]\r\n        badge_img = cv2.resize(badge_img, None, fx=0.1, fy=0.1)\r\n        image = overlay(badge_img, image)\r\n\r\n        cv2.imwrite(f'images/{key}_edited.png', image)\r\n        context.bot.send_photo(chat_id=key, photo=open(f'images/{key}_edited.png', 'rb'))\r\n        print(context.user_data[key])\r\n    except Exception as e:\r\n        print(e)\r\n\r\n\r\nif __name__ == '__main__':\r\n    os.makedirs('images', exist_ok=True)\r\n\r\n    updater = Updater(config.token, use_context=True)\r\n\r\n    updater.dispatcher.add_handler(MessageHandler(Filters.text, mark))\r\n    updater.dispatcher.add_handler(CallbackQueryHandler(badge, pattern=r\"badge_\"))\r\n\r\n    updater.start_polling()\r\n    updater.idle()\r\n","repo_name":"bahar3474/telegram-badge-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17114459922","text":"from django.views.generic.base import TemplateResponseMixin, ContextMixin, View\n\nimport django_anysign\nimport pydocusign\n\n\nclass SignatureCallbackView(TemplateResponseMixin, ContextMixin, View):\n    \"\"\"Handle DocuSign's event notification.\n\n    This view can handle both recipient and envelope events.\n\n    \"\"\"\n    template_name = 'docusign/signature_callback.html'\n\n    @property\n    def docusign_parser(self):\n        \"\"\"Parser for DocuSign's request.\n\n        This is a shortcut property using a cache.\n        If you want to adapt the implementation, consider overriding\n        :meth:`get_docusign_parser`.\n\n        \"\"\"\n        try:\n            return self._docusign_parser\n        except AttributeError:\n            self._docusign_parser = self.get_docusign_parser()\n            return self._docusign_parser\n\n    def get_docusign_parser(self):\n        \"\"\"Extract, validate and return data from DocuSign's request.\"\"\"\n        parser = pydocusign.DocuSignCallbackParser(\n            xml_source=self.request.body)\n        return parser\n\n    @property\n    def envelope_status(self):\n        \"\"\"Envelope status, extracted from DocuSign input data.\"\"\"\n        return self.docusign_parser.envelope_status\n\n    def post(self, request, *args, **kwargs):\n        \"\"\"Route request to signature callback depending on status.\n\n        Trigger events for latest signer and signature events: calls\n        ``signature_{status}`` and ``signer_{status}`` methods.\n\n        \"\"\"\n        signature_event = self.docusign_parser.envelope_events[-1]\n        signer_events = []\n        if signature_event['status'] == pydocusign.Envelope.STATUS_SENT:\n            # If signature status is \"sent\" and all signers are \"sent\", then\n            # trigger \"sent\" event for signature and all signers.\n            if all(signer_event['status'] == 'sent'\n                    for signer_event in self.docusign_parser.recipient_events):\n                signer_events = self.docusign_parser.recipient_events\n            # Else, do not care about \"sent\" event for signature.\n            else:\n                signature_event = None\n                signer_events = 
[self.docusign_parser.recipient_events[-1]]\n else:\n signer_events = [self.docusign_parser.recipient_events[-1]]\n # Trigger signature event.\n if signature_event:\n callback = getattr(self,\n 'signature_{status}'.format(\n status=signature_event['status'].lower()))\n callback()\n # Trigger signer events.\n for signer_event in signer_events:\n callback = getattr(self,\n 'signer_{status}'.format(\n status=signer_event['status'].lower()))\n callback(signer_id=signer_event['recipient'])\n # Render view.\n context = self.get_context_data(**kwargs)\n return self.render_to_response(context)\n\n def get_context_data(self, **kwargs):\n \"\"\"Return context data.\n\n Updates default data with ``signature`` and ``signer``.\n\n \"\"\"\n data = super(SignatureCallbackView, self).get_context_data(**kwargs)\n data['signature'] = self.signature\n return data\n\n @property\n def signature(self):\n \"\"\"Signature model instance.\n\n This is a shortcut property using a cache.\n If you want to adapt the implementation, consider overriding\n :meth:`get_signature`.\n\n \"\"\"\n try:\n return self._signature\n except AttributeError:\n self._signature = self.get_signature()\n return self._signature\n\n def get_signature(self):\n Signature = django_anysign.get_signature_model()\n envelope_id = self.docusign_parser.envelope_id\n return Signature.objects.get(signature_backend_id=envelope_id)\n\n @property\n def signature_backend(self):\n \"\"\"Signature backend instance.\n\n This is a shortcut property using a cache.\n If you want to adapt the implementation, consider overriding\n :meth:`get_signature_backend`.\n\n \"\"\"\n try:\n return self._signature_backend\n except AttributeError:\n self._signature_backend = self.get_signature_backend()\n return self._signature_backend\n\n def get_signature_backend(self):\n \"\"\"Return signature backend instance.\n\n Default implementation calls signature instance's\n ``signature_backend``. 
Override this method if you want a custom\n backend initialization.\n\n \"\"\"\n return self.signature.signature_backend\n\n def update_signer(self, signer_id, status, status_datetime=None,\n message=u''):\n \"\"\"Update ``signer`` with ``status``.\n\n Additional ``status_datetime`` argument is the datetime mentioned by\n DocuSign.\n\n \"\"\"\n raise NotImplementedError()\n\n def update_signature(self, status, status_datetime=None):\n \"\"\" Update signature with ``datetime``.\n\n Additional ``status_datetime`` argument is the datetime mentioned by\n DocuSign.\n\n \"\"\"\n raise NotImplementedError()\n\n def signature_sent(self):\n \"\"\"Handle 'sent' status reported by DocuSign callback.\n\n Default implementation just calls :meth:`update_signer` with status.\n\n \"\"\"\n self.update_signature(\n status='sent',\n status_datetime=self.docusign_parser.envelope_status_datetime(\n 'Sent'))\n\n def signature_delivered(self):\n \"\"\"Handle 'delivered' status reported by DocuSign callback.\n\n Default implementation just calls :meth:`update_signer` with status.\n\n \"\"\"\n self.update_signature(\n status='delivered',\n status_datetime=self.docusign_parser.envelope_status_datetime(\n 'Delivered'))\n\n def signature_completed(self):\n \"\"\"Handle 'completed' status reported by DocuSign callback.\n\n Default implementation calls :meth:`update_signer` and\n :meth:`update_signature` with status.\n\n \"\"\"\n self.update_signature(\n status='completed',\n status_datetime=self.docusign_parser.envelope_status_datetime(\n 'Completed'))\n\n def signature_declined(self):\n \"\"\"Handle 'declined' status reported by DocuSign callback.\"\"\"\n self.update_signature(\n status='declined',\n status_datetime=self.docusign_parser.envelope_status_datetime(\n 'Declined'))\n\n def signer_sent(self, signer_id):\n \"\"\"Handle 'Sent' status reported by DocuSign for signer.\"\"\"\n recipient = self.docusign_parser.recipients[signer_id]\n self.update_signer(\n signer_id,\n status='sent',\n status_datetime=recipient['Sent'],\n )\n\n def signer_delivered(self, signer_id):\n \"\"\"Handle 'Delivered' status reported by DocuSign for signer.\"\"\"\n recipient = self.docusign_parser.recipients[signer_id]\n self.update_signer(\n signer_id,\n status='delivered',\n status_datetime=recipient['Delivered'],\n )\n\n def signer_signed(self, signer_id):\n \"\"\"Handle 'Signed' event reported by DocuSign for signer.\n\n Notice that recipient status is 'Completed' whereas event is 'Signed'.\n\n \"\"\"\n recipient = self.docusign_parser.recipients[signer_id]\n self.update_signer(\n signer_id,\n status='completed',\n status_datetime=recipient['Completed'],\n )\n\n def signer_declined(self, signer_id):\n \"\"\"Handle 'Declined' status reported by DocuSign for signer.\"\"\"\n recipient = self.docusign_parser.recipients[signer_id]\n self.update_signer(\n signer_id,\n status='declined',\n status_datetime=recipient['Declined'],\n message=recipient.get('DeclineReason', u''),\n )\n\n def signer_authenticationfailed(self, signer_id):\n \"\"\"Handle 'AuthenticationFailed' status for signer.\"\"\"\n recipient = self.docusign_parser.recipients[signer_id]\n self.update_signer(\n signer_id,\n status='authentication_failed',\n status_datetime=recipient['AuthenticationFailed'],\n )\n\n def signer_autoresponded(self, signer_id):\n \"\"\"Handle 'AutoResponded' status reported by DocuSign for signer.\"\"\"\n recipient = self.docusign_parser.recipients[signer_id]\n self.update_signer(\n signer_id,\n status='auto_responded',\n 
status_datetime=recipient['AutoResponded'],\n )\n","repo_name":"amir17688/google_data_p2","sub_path":"99366_views.py_C__Users_user_Desktop_data_2_data_google_data_novafloss_django-docusign_django_do.py","file_name":"99366_views.py_C__Users_user_Desktop_data_2_data_google_data_novafloss_django-docusign_django_do.py","file_ext":"py","file_size_in_byte":8685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13624101528","text":"import dateparser\nfrom datetime import datetime\n\nimport git\n\nstring = \"2022-12-14T03:07:03Z\"\nlastUpdateTime = dateparser.parse(string).replace(tzinfo=None)\nepochTime = datetime(1970, 1, 1)\n\nsecondsSinceEpochOfLastUpdate = (lastUpdateTime-epochTime).total_seconds()\n# print(lastUpdateTime < epochTime)\n\nrepo = git.Repo(\"..\\Addie-Box-Data\")\ntree = repo.tree()\nfor blob in tree:\n commit = next(repo.iter_commits(paths=blob.path, max_count=1))\n print(blob.path, commit.committed_date)","repo_name":"KorayL/AddieBox","sub_path":"timeTest.py","file_name":"timeTest.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34735872513","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\n\ndef plotElement(coord, *args):\n coord = np.concatenate((coord, coord[0:1]))\n plt.plot(coord[:, 0], coord[:, 1], *args)\n return\n\ndef plotGuassian(coord, *args):\n for gaussianPoints in coord:\n plt.plot(gaussianPoints[:, 0], gaussianPoints[:, 1], *args)\n for i, coorGaussian in enumerate(coord[0]):\n plt.text(x=coorGaussian[0], y=coorGaussian[1], s=str(i))\n return\n\n\ndef saveVTK(fileName, nodeCoord, node2Element, **kwargs):\n nodeNum = len(nodeCoord)\n elementNum = len(node2Element)\n with open(fileName, 'w') as f:\n f.write('# vtk DataFile Version 4.2\\n')\n f.write('vtk file generated by meshmagick on %s\\n' % time.strftime('%c'))\n f.write('ASCII\\n')\n f.write('DATASET UNSTRUCTURED_GRID\\n')\n f.write('\\n')\n f.write('POINTS %u float\\n' % nodeNum)\n for vertex in nodeCoord:\n f.write('%f %f %f\\n' % (vertex[0], vertex[1], 0.))\n f.write('\\n')\n f.write('CELLS %u %u\\n' % (elementNum, 5 * elementNum))\n for face in node2Element:\n if face[0] == face[-1]: # Triangle\n f.write('3 %u %u %u\\n' % (face[0], face[1], face[2]))\n else: # Quadrangle\n f.write('4 %u %u %u %u\\n' % (face[0], face[1], face[2], face[3]))\n\n f.write('\\n')\n f.write('CELL_TYPES %u\\n' % len(node2Element))\n for i in range(len(node2Element)):\n f.write('9\\n')\n\n # -----------------------------------------------------\n # writing the point data\n f.write('\\nPOINT_DATA %u\\n' % (nodeNum))\n # f.write(\"SCALARS test float\\nLOOKUP_TABLE default\\n\")\n # for uu in u:\n # f.write('%.8f\\n' % (uu[0]))\n for key in kwargs:\n temp = kwargs[key]\n f.write(\"VECTORS %s float\\n\" % key)\n if temp[0].shape == (2, 2):\n for uu in temp:\n f.write('%.8f %.8f %.8f\\n' % (uu[0, 0], uu[1, 1], uu[0, 1]))\n else:\n for uu in temp:\n f.write('%.8f %.8f %.8f\\n' % (uu[0], uu[1], 0.))\n return\n","repo_name":"guanshaoheng/pyfem","sub_path":"utils/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"426622586","text":"from setuptools import setup, find_packages\n\ntry:\n with open('README.md') as f:\n readme = f.read()\nexcept IOError:\n readme = ''\n\n\ndef _requires_from_file(filename):\n return 
open(filename).read().splitlines()\n\n\nsetup(\n    name='negima',\n    version='0.1.3',\n    url='https://github.com/cocodrips/negima',\n    author='cocodrips',\n    author_email='cocodrips@gmail.com',\n    description='Extract phrases in Japanese text '\n                'by using the part-of-speeches based rules you defined.',\n    python_requires='>=3.4',\n    install_requires=[\n        'mecab-python3>=0.7',\n        'pandas>=0.19',\n        'xlrd>=1.1.0'\n    ],\n    extras_require={\n        'dev': [\n            'pytest>=3',\n        ],\n    },\n    long_description=readme,\n    long_description_content_type='text/markdown',\n    license='MIT',\n    packages=find_packages(),\n    classifiers=[\n        'Programming Language :: Python :: 3',\n    ],\n)\n","repo_name":"cocodrips/negima","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"31208405572","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint, request, jsonify\nfrom datetime import datetime\nimport hashlib\nimport random\nimport base64\nimport requests\nimport json\nimport uuid\nfrom dock_first_blood.lib.register import verify_signature, decrypt_data\nfrom dock_first_blood.model.exceptions import return_function\nfrom dock_first_blood.model.db import User\nfrom dock_first_blood.model.config import blood_config\n\n\nblueprint = Blueprint('register', __name__, url_prefix='/first_blood/register')\n\n\n@blueprint.route('/phone', methods=['GET', 'POST'])\ndef phone():\n    # redis & mysql query do not have 'try'\n    json_content = request.values.get('content')\n    signature = request.values.get('signature', '')\n    sig_kv = request.values.get('sig_kv')\n    if not verify_signature(sig_kv, signature, json_content):\n        error_dict = return_function(400, 10000, 'signature error')\n        return jsonify(meta=error_dict)\n    content = json.loads(json_content)\n    phone_num = content.get('phone_num')\n    user = User.get_user(phone_num)\n    if user:\n        error_dict = return_function(400, 10001, '该手机号已注册')\n        return jsonify(meta=error_dict)\n    verify_code = random.randint(100000, 999999)\n    vendor_content = {\n        \"mobile\": phone_num,\n        \"code\": verify_code\n    }\n\n    # TODO: this should come from config\n    vendor_url = 'https://api.sms.jpush.cn/v1/codes'\n    vendor_access_key = 'xxx'\n    vendor_secret_access_key = 'xxx'\n    base64_auth_string = vendor_access_key + vendor_secret_access_key\n    headers = {\n        'Authorization': base64.b64encode(base64_auth_string.encode()).decode()\n    }\n    res = requests.post(vendor_url, data=vendor_content, headers=headers)\n    if json.loads(res.text).get('msg_id') is None:\n        error_dict = return_function(400, 10002, '验证码发送失败')\n        return jsonify(meta=error_dict)\n    # TODO: THE END\n\n    redis_client = blood_config.rds\n    redis_client.set(phone_num, verify_code)\n    redis_client.expire(phone_num, 90)\n    return jsonify(meta={'code': 200})\n\n\n@blueprint.route('/validate/code', methods=['GET', 'POST'])\ndef validate_code():\n    # redis & mysql query do not have 'try'\n    json_content = request.values.get('content')\n    signature = request.values.get('signature', '')\n    sig_kv = request.values.get('sig_kv')\n    if not verify_signature(sig_kv, signature, json_content):\n        error_dict = return_function(400, 10000, 'signature error')\n        return jsonify(meta=error_dict)\n    content = json.loads(json_content)\n    phone_num = content.get('phone_num')\n    verify_code = content.get('verify_code')\n    device_id = content.get('device_id')\n    redis_client = blood_config.rds\n    redis_verify_code = redis_client.get(phone_num).decode()\n    if redis_verify_code is None:\n        error_dict = 
return_function(400, 10003, '验证码过期')\n        return jsonify(meta=error_dict)\n    if redis_verify_code != verify_code:\n        error_dict = return_function(400, 10004, '验证码错误')\n        return jsonify(meta=error_dict)\n    user = User.get_user(phone_num)\n    create_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')  # current UTC time\n    temp = phone_num + verify_code + device_id + create_time\n    hash_new = hashlib.sha256('first_blood'.encode())\n    hash_new.update(temp.encode())\n    user_token = hash_new.hexdigest()\n    if user is None:\n        try:\n            new_user = User(phone_num=phone_num, device_id=device_id,\n                            create_time=create_time, user_token=user_token)\n            User.add_user(new_user)\n        except:\n            error_dict = return_function(500, 10005, '数据库新增错误')\n            return jsonify(meta=error_dict)\n    elif user.user_id is None:\n        try:\n            User.get_user(phone_num).update_user(dict(\n                device_id=device_id, create_time=create_time, user_token=user_token))\n        except:\n            error_dict = return_function(500, 10006, '数据库更新错误')\n            return jsonify(meta=error_dict)\n    else:\n        error_dict = return_function(400, 10007, '手机号已注册')\n        return jsonify(meta=error_dict)\n    return jsonify(meta={'code': 200}, data={'user_token': user_token})\n\n\n@blueprint.route('/register/user', methods=['GET', 'POST'])\ndef register_user():\n    phone_num = request.values.get('phone_num')\n    ciphertext = request.values.get('ciphertext')\n    user = User.get_user(phone_num)\n    if user is None:\n        error_dict = return_function(400, 10008, '数据库中无该手机号')\n        return jsonify(meta=error_dict)\n    if user.user_token is None:\n        error_dict = return_function(400, 10009, '数据库中无该手机号的token')\n        return jsonify(meta=error_dict)\n    secret_key = user.user_token\n    decrypt_dict = decrypt_data(secret_key, ciphertext)\n    user_password = decrypt_dict.get('password')\n    if user_password is None:\n        error_dict = return_function(400, 10010, '解密失败')\n        return jsonify(meta=error_dict)\n    user_id = str(uuid.uuid1()).replace('-', '')\n    last_login_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n    try:\n        User.get_user(phone_num).update_user(dict(\n            user_password=user_password, user_id=user_id, last_login_time=last_login_time))\n    except:\n        error_dict = return_function(500, 10011, '数据库更新uuid错误')\n        return jsonify(meta=error_dict)\n    return jsonify(meta={'code': 200}, data={'user_id': user_id})\n","repo_name":"onair1314/first_blood","sub_path":"dock_first_blood/view/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":5459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29447230335","text":"# Description\n# Given an array of integers, find how many unique pairs in the array sum to a specific target number. Please return the number of pairs.\n\n# Wechat reply 【587】 get the latest frequent interview questions . 
(wechat id : jiuzhang15)\n\n# Example\n# Example 1:\n\n# Input: nums = [1,1,2,45,46,46], target = 47\n# Output: 2\n# Explanation:\n\n# 1 + 46 = 47\n# 2 + 45 = 47\n# Example 2:\n\n# Input: nums = [1,1], target = 2\n# Output: 1\n# Explanation:\n# # 1 + 1 = 2\n\nclass Solution:\n \"\"\"\n @param nums: an array of integer\n @param target: An integer\n @return: An integer\n \"\"\"\n # solution 1: hashmap, if used = 1, not used 0, should check before add\n # def twoSum6(self, nums, target):\n # if not nums or len(nums) <= 1:\n # return 0\n # tracker = {}\n # counter = 0\n\n # for num in nums:\n # diff = target - num\n # if diff in tracker and tracker[diff] == 0:\n # counter += 1\n # tracker[diff] = 1\n # tracker[num] = 1\n # if diff not in tracker:\n # tracker[num] = 0\n\n # return counter\n\n\n # solution 2: two pointer, should sort first\n def twoSum6(self, nums, target):\n if not nums or len(nums) <= 1:\n return 0\n\n nums.sort()\n\n counter = 0\n left, right = 0, len(nums) - 1\n\n while left < right:\n two_sum = nums[left] + nums[right]\n if two_sum == target:\n counter += 1\n left += 1\n right -= 1\n # this comparision is done after update left, right pointer\n while left < right and nums[left] == nums[left - 1]:\n left += 1\n while left < right and nums[right] == nums[right + 1]:\n right -= 1\n elif two_sum < target:\n left += 1\n else:\n right -= 1\n\n return counter\n\n\n\n\n\n\n\n\n\n","repo_name":"jerry-jma/Data-Structure-and-Algorithm-Python3","sub_path":"Hash /Two Sum - Unique pairs.py","file_name":"Two Sum - Unique pairs.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4296650202","text":"# 치킨배달\nimport sys\nfrom itertools import combinations\nN,M = map(int,sys.stdin.readline().split())\nhouse = []\nchicken = []\ngraph = []\nfor i in range(N):\n l = list(map(int,sys.stdin.readline().split()))\n for j in range(N):\n if l[j]==2:\n chicken.append([i,j])\n if l[j]==1:\n house.append([i,j])\nc = list(combinations(chicken,M))\nresult = int(1e9)\nfor ch in c:\n temp_sum = 0\n for h in house:\n temp_min = int(1e9)\n for i in ch:\n k = abs(i[0]-h[0]) + abs(i[1]-h[1])\n if ktemp_sum:\n result = temp_sum\nprint(result)\n\n\n","repo_name":"park1997/BOJ_Solve_PBH","sub_path":"BOJ_15xxx/15686.py","file_name":"15686.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"37497017631","text":"import numpy as np\nimport pandas as pd\n\nfrom geneva_stroke_unit_preprocessing.utils import create_registry_case_identification_column\n\n\ndef restrict_to_patient_selection(variable_df: pd.DataFrame, patient_selection_path: str,\n restrict_to_event_period:bool = True, lower_bound_days:float = 26,\n upper_bound_days:float = 7, verbose: bool = False) -> pd.DataFrame:\n \"\"\"\n Restricts a dataframe to only the patients that are in the patient selection file and with sampling date corresponding to the event period.\n *Exclusion criteria for cases start date of EHR sampling* (if restrict_to_event_period is True):\n - EHR sampling start date needs to at most [lower_bound_days, default 26] days before stroke onset (so that totale sampled days periods includes 72h of stroke monitoring) [when stroke onset is not available, arrival date from registry is used]\n - EHR sampling start date should be at most [upper_bound_days, default 7] days after reference date in registry (stroke onset or arrival date, whichever 
is later)\n\n *Exclusion criteria for individual samples*\n - Samples occurring before the day of stroke onset should be excluded\n :param variable_df:\n :param patient_selection_path:\n :param restrict_to_event_period:\n :param lower_bound_days: maximum number of days that first sample of EHR data can be before stroke onset (ideally number of ehr sampling days - 4) to include 72h of stroke monitoring\n :param upper_bound_days: maximum number of days that first sample of EHR data can be after reference date in registry to include only acute stroke events (should generally by 7 days)\n :param verbose:\n :return:\n \"\"\"\n\n patient_selection_df = pd.read_csv(patient_selection_path, dtype=str)\n patient_selection_df['case_admission_id'] = create_registry_case_identification_column(patient_selection_df)\n\n restricted_to_selection_df = variable_df[\n variable_df['case_admission_id'].isin(patient_selection_df['case_admission_id'])]\n\n if verbose:\n print('Number of patients after selection:', len(restricted_to_selection_df['case_admission_id'].unique()))\n print('Number of patients not selected:', len(variable_df['case_admission_id'].unique()) - len(restricted_to_selection_df['case_admission_id'].unique()))\n print('Number of patients from selection that were not found:', len(patient_selection_df['case_admission_id'].unique()) - len(restricted_to_selection_df['case_admission_id'].unique()))\n\n\n # Registry and outcome data should not be restricted to the event period\n if not restrict_to_event_period:\n return restricted_to_selection_df\n else:\n # only EHR data is restricted to the event period\n # restrict case_admissions to those sampled within bounds of event [event date -lower_bound_days days, event date + upper_bound_days days]\n datatime_format = '%d.%m.%Y %H:%M'\n # find first sample_date for each case_admission id\n temp_df = restricted_to_selection_df.copy()\n temp_df['sample_date_dt'] = pd.to_datetime(temp_df['sample_date'], format=datatime_format)\n first_sample_date = temp_df.groupby('case_admission_id').sample_date_dt.min()\n temp_df.drop(columns=['sample_date_dt'], inplace=True)\n first_sample_date = first_sample_date.reset_index(level=0)\n first_sample_date.rename(columns={'sample_date_dt': 'first_sample_date'}, inplace=True)\n first_sample_date = first_sample_date.merge(patient_selection_df, on='case_admission_id', how='left')\n\n # LOWER BOUND: Applying lower bound of EHR sampling\n # set stroke onset date as reference (or Arrival date if no stroke onset date is available)\n first_sample_date['event_start_date_reference'] = first_sample_date['Stroke onset date'].fillna(\n first_sample_date['Arrival at hospital'])\n first_sample_date['registry_onset_to_first_sample_date_days'] = (\n pd.to_datetime(first_sample_date['first_sample_date'], format=datatime_format) - pd.to_datetime(\n first_sample_date['event_start_date_reference'], format='%Y%m%d')).dt.days\n cid_sampled_too_early = first_sample_date[first_sample_date['registry_onset_to_first_sample_date_days'] < -1 * lower_bound_days][\n 'case_admission_id'].unique()\n\n # UPPER BOUND: Applying upper bound of EHR sampling\n # set end of reference period to stroke onset or arrival at hospital, whichever is later\n # (if stroke onset not defined, arrival at hospital is used implicitly)\n first_sample_date['delta_onset_arrival'] = (\n pd.to_datetime(first_sample_date['Stroke onset date'], format='%Y%m%d') - pd.to_datetime(\n first_sample_date['Arrival at hospital'], format='%Y%m%d')).dt.total_seconds()\n 
first_sample_date['sampling_start_upper_bound_reference'] = first_sample_date \\\n .apply(lambda x: x['Stroke onset date'] if x['delta_onset_arrival'] > 0 else x['Arrival at hospital'], axis=1)\n first_sample_date['registry_upper_bound_to_first_sample_date_days'] = (\n pd.to_datetime(first_sample_date['first_sample_date'], format=datatime_format) - pd.to_datetime(\n first_sample_date['sampling_start_upper_bound_reference'], format='%Y%m%d')).dt.days\n cid_sampled_too_late = first_sample_date[first_sample_date['registry_upper_bound_to_first_sample_date_days'] > upper_bound_days][\n 'case_admission_id'].unique()\n\n # drop cid from temp_df if in cid_sampled_too_early or cid_sampled_too_late\n temp_df = temp_df[~temp_df['case_admission_id'].isin(cid_sampled_too_early)]\n temp_df = temp_df[~temp_df['case_admission_id'].isin(cid_sampled_too_late)]\n\n if verbose:\n print(f'Dropping {len(cid_sampled_too_early)} cases because sampling start date too early')\n print(f'Dropping {len(cid_sampled_too_late)} cases because sampling start date too late')\n print('Number of patients after selection:', len(temp_df['case_admission_id'].unique()))\n\n # Samples occurring before stroke onset should be excluded\n initial_columns = temp_df.columns\n # create duplicate of Arrival at hospital to avoid confusion with stroke registry data\n patient_selection_df['arrival_at_hospital_date'] = patient_selection_df['Arrival at hospital']\n temp_df = temp_df.merge(patient_selection_df[['case_admission_id', 'Stroke onset date', 'arrival_at_hospital_date', 'Time of symptom onset known']],\n on='case_admission_id', how='left')\n temp_df.loc[temp_df['Time of symptom onset known'] == 'no', 'Stroke onset date'] = np.nan\n temp_df['event_start_date_reference'] = temp_df['Stroke onset date'].fillna(temp_df['arrival_at_hospital_date'])\n temp_df['delta_sample_date_stroke_onset'] = (\n pd.to_datetime(temp_df['sample_date'], format=datatime_format) - pd.to_datetime(\n temp_df['event_start_date_reference'], format='%Y%m%d')).dt.days\n # drop rows with delta_sample_date_stroke_onset < 0\n temp_df = temp_df[temp_df['delta_sample_date_stroke_onset'] >= 0]\n temp_df = temp_df[initial_columns]\n\n return temp_df\n\n","repo_name":"JulianKlug/Stroke-Unit-Preprocessing","sub_path":"geneva_stroke_unit_preprocessing/patient_selection/restrict_to_patient_selection.py","file_name":"restrict_to_patient_selection.py","file_ext":"py","file_size_in_byte":7300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9059701393","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom ttkthemes import ThemedStyle\nimport requests\nimport io\nfrom PIL import Image, ImageTk\nimport threading # For handling image loading in a separate thread\n\n# Create a function to fetch cat images from the API\ndef fetch_cat_image(event=None):\n # Display loading message or animation\n loading_label.config(text=\"Loading...\")\n \n user_input = user_input_entry.get()\n url = f\"https://cataas.com/cat/cute/says/{user_input}\"\n \n # Create a separate thread to fetch and display the image\n def fetch_image():\n response = requests.get(url)\n if response.status_code == 200:\n image_data = response.content\n # Resize the image to a maximum size of 300px\n img = Image.open(io.BytesIO(image_data))\n img.thumbnail((300, 300))\n img = ImageTk.PhotoImage(img)\n cat_image_label.config(image=img)\n cat_image_label.image = img # Keep a reference\n loading_label.config(text=\"\") # Clear loading message\n else:\n 
loading_label.config(text=\"Error: Cat couldn't talk. Try again!\")\n\n    # Start the thread for image loading\n    image_thread = threading.Thread(target=fetch_image)\n    image_thread.start()\n\n# Create a function to reset the app\ndef reset_app():\n    user_input_entry.delete(0, tk.END)\n    cat_image_label.config(image=\"\")\n    cat_image_label.config(text=\"\")\n    user_input_entry.focus()\n\n# Create the main application window\napp = tk.Tk()\napp.title(\"Cute Cats Talk\")\napp.geometry(\"500x600\")\n\n# Apply the Radiance (Ubuntu) theme\nstyle = ThemedStyle(app)\nstyle.set_theme(\"radiance\")\n\n# Create a frame for padding and alignment\nframe = ttk.Frame(app)\nframe.pack(expand=True, fill=\"both\")\n\n# Create a label for the app name\napp_name_label = ttk.Label(frame, text=\"Cute Cats Talk\")\napp_name_label.pack(fill=\"x\", padx=10, pady=10)\n\n# Create an entry for user input\nuser_input_label = ttk.Label(frame, text=\"What does the cat say?\")\nuser_input_label.pack(anchor=\"w\", padx=10, pady=10)\nuser_input_entry = ttk.Entry(frame)\nuser_input_entry.pack(fill=\"x\", padx=10, pady=10)\n\n# Bind the <Return> event to the fetch_cat_image function\nuser_input_entry.bind(\"<Return>\", fetch_cat_image)\n\n# Create a button to fetch cat image\nfetch_button = ttk.Button(frame, text=\"Enter\", command=fetch_cat_image)\nfetch_button.pack(fill=\"x\", padx=10, pady=10)\n\n# Create a label for displaying loading message or animation\nloading_label = ttk.Label(frame, text=\"\")\nloading_label.pack(expand=True, fill=\"both\", padx=10, pady=10)\n\n# Create a label for the cat image\ncat_image_label = ttk.Label(frame, text=\"\")\ncat_image_label.pack(expand=True, fill=\"both\", padx=10, pady=10)\n\n# Create a button to reset the app\nreset_button = ttk.Button(frame, text=\"Reset\", command=reset_app)\nreset_button.pack(fill=\"x\", padx=10, pady=10)\n\napp.mainloop()\n","repo_name":"escalanteaj/Cute-Cats-Talk-App","sub_path":"CuteCatsTalk.py","file_name":"CuteCatsTalk.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"43146878362","text":"from flask import Flask, render_template, request, redirect, session\napp=Flask(__name__)\n\napp.secret_key = 'pssst'\n\n@app.route('/')\ndef index():\n    import random\n    if 'random' not in session:\n        session['random'] = random.randrange(1, 101)\n        print(session['random'])\n\n    return render_template('index.html', hidden ='d-none', success = 'd-none', hideform = '')\n\n@app.route('/guess', methods=['POST'])\ndef guess():\n    guess = int(request.form['guessbox'])\n    print(request.form)\n    if guess == int(session['random']):\n        print('correct')\n        random = session['random']\n        session.pop('random')\n        return render_template('index.html', success = 'd-block', random = random, hidden = 'd-none', hideform = 'd-none')\n    elif guess < int(session['random']):\n        print('too low')\n        return render_template('index.html', hidden='', wrong='Low', success = 'd-none', hideform = '')\n    elif guess > int(session['random']):\n        print('too high')\n        return render_template('index.html', hidden='', wrong='High', success = 'd-none', hideform = '')\n    \n    return redirect('/')\n\n\n\n\n\nif __name__==('__main__'):\n    app.run(debug=True)","repo_name":"c2truax/pythonRep","sub_path":"flask_fundamentals/Great_Number_Game/gr_numb_game.py","file_name":"gr_numb_game.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"12221990182","text":"from __future__ import division\nimport math\nimport sys\nimport os\nimport datetime\nimport random\nimport pygame\nfrom os import path\n\npygame.init()\npygame.mixer.init()\nimg_dir = path.join(path.dirname(__file__), 'images')\nscore = 0\n\ndef load_image_convert_alpha(filename):\n \"\"\"Словарь с картинками...\"\"\"\n return pygame.image.load(os.path.join('images', filename)).convert_alpha()\n\n\ndef load_sound(filename):\n \"\"\"Словарь со звуком\"\"\"\n return pygame.mixer.Sound(os.path.join('sounds', filename))\n\n\ndef draw_centered(surface1, surface2, position):\n \"\"\"Прорисовка поля для игры\"\"\"\n rect = surface1.get_rect()\n rect = rect.move(position[0] - rect.width // 2, position[1] - rect.height // 2)\n surface2.blit(surface1, rect)\n\n\ndef rotate_center(image, rect, angle):\n \"\"\"Поворот кораблика вокруг своей оси\"\"\"\n rotate_image = pygame.transform.rotate(image, angle)\n rotate_rect = rotate_image.get_rect(center=rect.center)\n return rotate_image, rotate_rect\n\n\ndef distance(p, q):\n \"\"\"Рассчет растояния между 2 точками(кораблик и метеор, метеор и пулька)\"\"\"\n return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)\n\n\nclass GameObject(object):\n \"\"\"Класс для создаия и генерации объектов\"\"\"\n\n def __init__(self, position, image, speed=0):\n self.image = image\n self.position = list(position[:])\n self.speed = speed\n\n def draw_on(self, screen):\n draw_centered(self.image, screen, self.position)\n\n def size(self):\n return max(self.image.get_height(), self.image.get_width())\n\n def radius(self):\n return self.image.get_width() / 2\n\n\nclass Spaceship(GameObject):\n def __init__(self, position):\n \"\"\"Инициализация объекта космического корабля с учетом его положения\"\"\"\n super(Spaceship, self).__init__(position, load_image_convert_alpha('zaca.png'))\n\n self.image_on = load_image_convert_alpha('zacaf.png')\n self.image_die = load_image_convert_alpha('bang.png')\n self.direction = [0, -1]\n self.is_throttle_on = False\n self.bang = False\n self.angle = 0\n\n # список для хранения ракет, выпущенных кораблем\n self.active_missiles = []\n\n def draw_on(self, screen):\n \"\"\"Прорисовка корыбля\"\"\"\n\n # Находится ли корабль в движении или нет\n if self.bang:\n new_image, rect = rotate_center(self.image_die, self.image_on.get_rect(), self.angle)\n\n if self.is_throttle_on:\n new_image, rect = rotate_center(self.image_on, self.image_on.get_rect(), self.angle)\n\n else:\n new_image, rect = rotate_center(self.image, self.image.get_rect(), self.angle)\n\n draw_centered(new_image, screen, self.position)\n\n def move(self):\n \"\"\"Стоит ли обновлять один кадр для объекта...\"\"\"\n\n # calculate the direction from the angle variable\n self.direction[0] = math.sin(-math.radians(self.angle))\n self.direction[1] = -math.cos(math.radians(self.angle))\n\n # calculate the position from the direction and speed\n self.position[0] += self.direction[0] * self.speed\n self.position[1] += self.direction[1] * self.speed\n\n def fire(self):\n \"\"\"Создание пули и выстрел\"\"\"\n\n # корректировка ракеты по углу наклона космического корабля\n # adjust [] используется для удержания положения точки откуда должна быть выпущена ракета\n adjust = [0, 0]\n adjust[0] = math.sin(-math.radians(self.angle)) * self.image.get_width()\n adjust[1] = -math.cos(math.radians(self.angle)) * self.image.get_height()\n\n # Создание новой ракеты\n new_missile = Missile((self.position[0] + adjust[0], self.position[1] + adjust[1] / 2), self.angle)\n 
self.active_missiles.append(new_missile)\n\n\nclass Missile(GameObject):\n \"\"\"Создание ракеты\"\"\"\n\n def __init__(self, position, angle, speed=15):\n super(Missile, self).__init__(position, load_image_convert_alpha('missile.png'))\n\n self.angle = angle\n self.direction = [0, 0]\n self.speed = speed\n\n def move(self):\n \"\"\"Движение ракеты\"\"\"\n\n # Вычисление направления по переменной угла(так проще)\n self.direction[0] = math.sin(-math.radians(self.angle))\n self.direction[1] = -math.cos(math.radians(self.angle))\n\n # Вычисление позиции согласно направлению и скорости\n self.position[0] += self.direction[0] * self.speed\n self.position[1] += self.direction[1] * self.speed\n\n\nclass Rock(GameObject):\n \"\"\"Метеориты\"\"\"\n\n def __init__(self, position, size, speed=4):\n \"\"\"Инициализация метеора: его размер и позиция\"\"\"\n\n # если размер допустим\n if size in {\"big\", \"normal\", \"small\"}:\n\n # загружаем изображение\n str_filename = \"rock-\" + str(size) + \".png\"\n super(Rock, self).__init__(position, load_image_convert_alpha(str_filename))\n self.size = size\n\n self.position = list(position)\n\n self.speed = speed\n\n # Создание рандомного направления\n if bool(random.getrandbits(1)):\n rand_x = random.random() * -1\n else:\n rand_x = random.random()\n\n if bool(random.getrandbits(1)):\n rand_y = random.random() * -1\n else:\n rand_y = random.random()\n\n self.direction = [rand_x, rand_y]\n\n def move(self):\n \"\"\"Движение метеора\"\"\"\n\n self.position[0] += self.direction[0] * self.speed\n self.position[1] += self.direction[1] * self.speed\n\n\nclass MyGame(object):\n # определение и инициализация состояний игры\n PLAYING, DYING, GAME_OVER, STARTING, WELCOME = range(5)\n\n # определение особых таких состояний\n REFRESH, START, RESTART = range(pygame.USEREVENT, pygame.USEREVENT + 3)\n\n def __init__(self):\n \"\"\"Новая игра\"\"\"\n\n self.counter = 0\n pygame.mixer.init()\n pygame.mixer.pre_init(44100, -16, 2, 2048)\n pygame.init()\n\n img_dir = path.join(path.dirname(__file__), 'images')\n\n # экран\n self.width = 800\n self.height = 600\n self.screen = pygame.display.set_mode((self.width, self.height))\n\n self.bg_color = pygame.image.load(path.join(img_dir, 'back.jpg')).convert()\n\n # Загружаем музычку\n self.soundtrack = load_sound('soundtrack1.ogg')\n self.soundtrack.set_volume(.3)\n\n # Спецэффекты\n self.die_sound = load_sound('die1.ogg')\n self.gameover_sound = load_sound('game_over.wav')\n self.missile_sound = load_sound('fire1.ogg')\n\n # шрифт(размер)\n self.big_font = pygame.font.SysFont(None, 100)\n self.medium_font = pygame.font.SysFont(None, 50)\n self.small_font = pygame.font.SysFont(None, 25)\n\n # ПИшем\n self.gameover_text = self.big_font.render('GAME OVER!', True, (255, 0, 0))\n self.gameover1_text = self.medium_font.render('Press Enter to Restart', True, (35, 107, 142))\n\n # Кол-во жизней\n self.lives_image = load_image_convert_alpha('zaca.png')\n\n # ставим фпс\n self.FPS = 30\n pygame.time.set_timer(self.REFRESH, 1000 // self.FPS)\n\n # Метры до смерти от метеоров\n self.death_distances = {\"big\": 90, \"normal\": 65, \"small\": 40}\n\n # нача��ьный экран\n self.do_welcome()\n\n # используется для контроля старта ракет\n # чтобы предотвратить запуск слишком большого количества ракет за короткое время\n self.fire_time = datetime.datetime.now()\n\n def do_welcome(self):\n \"\"\"Начальный экран\"\"\"\n\n self.state = MyGame.WELCOME\n\n # Лобби\n self.welcome_asteroids = self.big_font.render(\"Метеоритные войны\", True, (255, 215, 
0))\n self.welcome_desc = self.medium_font.render(\"Press Enter to begin!\", True, (35, 107, 142))\n self.soundtrack_menu = load_sound('menu.ogg')\n\n self.soundtrack_menu.set_volume(.3)\n\n\n def do_init(self):\n \"\"\"Рестарт миссии\"\"\"\n\n # держит метеоры\n self.rocks = []\n\n # минимальное расстояние от космического корабля при создании камней\n # это меняется в зависимости от сложности с течением времени.\n self.min_rock_distance = 350\n\n # начало игры\n self.start()\n\n # создать метеоры(4)\n for i in range(4):\n self.make_rock()\n\n # Yf,jh jxrjd b rjk-dj ;bpytq\n self.lives = 3\n self.score = 0\n\n # счетчик, используемый для подсчета секунд\n self.counter = 0\n\n def make_rock(self, size=\"big\", pos=None):\n \"\"\"Создать новый метеор\"\"\"\n\n # Минимальное кол-во очков для создания\n margin = 200\n\n if pos is None:\n # Случайная позиция камешка\n\n rand_x = random.randint(margin, self.width - margin)\n rand_y = random.randint(margin, self.height - margin)\n\n # пока координата слишком близка, отбросьте ее\n # и создать еще один\n while distance((rand_x, rand_y), self.spaceship.position) < \\\n self.min_rock_distance:\n # новая координата\n rand_x = random.randint(0, self.width)\n rand_y = random.randint(0, self.height)\n\n temp_rock = Rock((rand_x, rand_y), size)\n\n else:\n # Позиция через аргумент\n temp_rock = Rock(pos, size)\n\n # Добавление последнего к списку\n self.rocks.append(temp_rock)\n\n def start(self):\n \"\"\"Начало появлением героя\"\"\"\n self.spaceship = Spaceship((self.width // 2, self.height // 2))\n self.missiles = []\n\n # начало опенинга\n self.soundtrack.play(-1, 0, 1000)\n\n # Состояние игры - играет\n self.state = MyGame.PLAYING\n\n def run(self):\n \"\"\"Бесконечный цикл обработки событий\"\"\"\n running = True\n while running:\n event = pygame.event.wait()\n\n # Хочу выйти\n if event.type == pygame.QUIT:\n running = False\n\n # Состояние игры - презапуск\n elif event.type == MyGame.REFRESH:\n\n if self.state != MyGame.WELCOME:\n\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_SPACE]:\n new_time = datetime.datetime.now()\n if new_time - self.fire_time > \\\n datetime.timedelta(seconds=0.15):\n # между ними должно быть не менее 0,15 задержки\n # запуск каждой ракеты\n\n # запустить ракету\n self.spaceship.fire()\n\n # ПИУУУУУУ\n self.missile_sound.play()\n\n # Время выстрела\n self.fire_time = new_time\n\n if self.state == MyGame.PLAYING:\n # Продолжение игры\n\n if keys[pygame.K_RIGHT] or keys[pygame.K_d]:\n # при нажатии клавиши \"d\" или \"Стрелка вправо\" поворот\n # космического корабля по часовой стрелке на 10 градусов\n self.spaceship.angle -= 10\n self.spaceship.angle %= 360\n\n if keys[pygame.K_LEFT] or keys[pygame.K_a]:\n # при нажатии клавиши \"d\" или \"Стрелка вправо\" поворот\n # космического корабля против часовой стрелки на 10 градусов\n self.spaceship.angle += 10\n self.spaceship.angle %= 360\n\n if keys[pygame.K_UP] or keys[pygame.K_w]:\n # если нажата \"w\" или \"стрелка вверх\" ,\n # мы должны ускориться\n self.spaceship.is_throttle_on = True\n\n # расчет скорости\n if self.spaceship.speed < 20:\n self.spaceship.speed += 1\n else:\n # если клавиша дроссельной заслонки (\"d\" или \" up\")\n # не нажимается, притормози\n if self.spaceship.speed > 0:\n self.spaceship.speed -= 1\n self.spaceship.is_throttle_on = False\n\n # если на экране есть ракеты, обработайте их\n if len(self.spaceship.active_missiles) > 0:\n self.missiles_physics()\n\n # Физика метеоров\n if len(self.rocks) > 0:\n self.rocks_physics()\n\n # физика 
корабля\n self.physics()\n\n # нарисовать\n self.draw()\n\n # Воскрешение\n elif event.type == MyGame.START:\n pygame.time.set_timer(MyGame.START, 0) # обнуление таймера\n if self.lives < 1:\n self.game_over()\n else:\n self.rocks = []\n for i in range(4):\n self.make_rock()\n # начать заново\n self.start()\n\n # Перейти от гейм овера к игре\n elif event.type == MyGame.RESTART:\n pygame.time.set_timer(MyGame.RESTART, 0) # turn the timer off\n self.state = MyGame.STARTING\n\n # пользователь нажимает, чтобы начать новую игру\n elif event.type == pygame.MOUSEBUTTONDOWN and (self.state == MyGame.STARTING or\n self.state == MyGame.WELCOME):\n self.do_init()\n\n # Press Enter\n elif event.type == pygame.KEYDOWN \\\n and event.key == pygame.K_RETURN and (self.state == MyGame.STARTING or\n self.state == MyGame.WELCOME):\n self.do_init()\n\n def game_over(self):\n \"\"\"Смерть\"\"\"\n self.soundtrack.stop()\n self.state = MyGame.GAME_OVER\n\n self.gameover_sound.play()\n delay = int((self.gameover_sound.get_length() + 1) * 1000)\n pygame.time.set_timer(MyGame.RESTART, delay)\n\n\n def die(self):\n \"\"\"Потеря жизни\"\"\"\n self.soundtrack.stop()\n # Не начать пока не закончится смерть\n self.lives -= 1\n self.counter = 0\n self.state = MyGame.DYING\n\n self.die_sound.play()\n delay = int((self.die_sound.get_length() + 1) * 1000)\n pygame.time.set_timer(MyGame.START, delay)\n\n def physics(self):\n \"\"\"Физика жизни\"\"\"\n\n if self.state == MyGame.PLAYING:\n # вызрв функции движения\n self.spaceship.move()\n\n \"\"\"План осуществить анти-выход за границы\"\"\"\n\n def missiles_physics(self):\n \"\"\"Физика пулек\"\"\"\n\n # Активные пульки\n if len(self.spaceship.active_missiles) > 0:\n for missile in self.spaceship.active_missiles:\n # Их движение\n missile.move()\n\n # проверьте столкновение с каждым камнем\n for rock in self.rocks:\n if rock.size == \"big\":\n # если ракета попадет в Большой Камень, уничтожьте его,\n # сделайте два камня среднего размера и дайте 20 баллов\n if distance(missile.position, rock.position) < 80:\n self.rocks.remove(rock)\n if missile in self.spaceship.active_missiles:\n self.spaceship.active_missiles.remove(missile)\n self.make_rock(\"normal\", (rock.position[0] + 10, rock.position[1]))\n self.make_rock(\"normal\", (rock.position[0] - 10, rock.position[1]))\n self.score += 20\n\n elif rock.size == \"normal\":\n # если ракета попадет в камень среднего размера, уничтожьте его,\n # сделайте два небольших камня и дайте 50 баллов\n if distance(missile.position, rock.position) < 55:\n self.rocks.remove(rock)\n if missile in self.spaceship.active_missiles:\n self.spaceship.active_missiles.remove(missile)\n self.make_rock(\"small\", (rock.position[0] + 10, rock.position[1]))\n self.make_rock(\"small\", (rock.position[0] - 10, rock.position[1]))\n self.score += 50\n else:\n # если ракета попадет в небольшой камень, уничтожьте его,\n # сделайте один большой камень, если есть менее 10 камней\n # на экране, и дать 100 баллов\n if distance(missile.position, rock.position) < 30:\n self.rocks.remove(rock)\n if missile in self.spaceship.active_missiles:\n self.spaceship.active_missiles.remove(missile)\n\n if len(self.rocks) < 10:\n self.make_rock()\n\n self.score += 100\n\n def rocks_physics(self):\n \"\"\"Физика камня\"\"\"\n\n # Проверка камней\n if len(self.rocks) > 0:\n\n for rock in self.rocks:\n # Движение камня\n rock.move()\n\n # смерть от метеора\n if distance(rock.position, self.spaceship.position) < self.death_distances[rock.size]:\n self.die()\n\n # если камень 
выходит из экрана, и их меньше, чем 10\n # создайте новый камень с тем же размером\n elif distance(rock.position, (self.width / 2, self.height / 2)) > \\\n math.sqrt((self.width / 2) ** 2 + (self.height / 2) ** 2):\n\n self.rocks.remove(rock)\n if len(self.rocks) < 10:\n self.make_rock(rock.size)\n\n def draw(self):\n \"\"\"Обновление дисплея\"\"\"\n # все, что мы рисуем сейчас, находится в буфере, который не отображается\n self.screen.blit(self.bg_color, (0, 0))\n\n # если нас нет на экране приветствия\n if self.state != MyGame.WELCOME:\n\n # Рисуем кораблик\n self.spaceship.draw_on(self.screen)\n\n # При отсутствии актив-пулек\n if len(self.spaceship.active_missiles) > 0:\n for missile in self.spaceship.active_missiles:\n missile.draw_on(self.screen)\n\n # Метеоры... Снова они\n if len(self.rocks) > 0:\n for rock in self.rocks:\n rock.draw_on(self.screen)\n\n # При состоянии игры - играет\n if self.state == MyGame.PLAYING:\n\n # Счетчик += 1\n self.counter += 1\n\n if self.counter == 20 * self.FPS:\n\n # Повышение сложности(20 секунд без смерти)\n\n if len(self.rocks) < 15:\n # Новый камушек\n self.make_rock()\n\n # Минимальное расстояния для создания\n if self.min_rock_distance < 200:\n self.min_rock_distance -= 50\n\n # Обнуление Счетчика\n\n # дисплэй Счетчика\n scores_text = self.medium_font.render(str(self.score), True, (0, 155, 0))\n draw_centered(scores_text, self.screen,\n (self.width - scores_text.get_width(), scores_text.get_height() + 10))\n\n # если игра окончена, выведите текст игры на экран\n if self.state == MyGame.GAME_OVER or self.state == MyGame.STARTING:\n draw_centered(self.gameover_text, self.screen, (self.width // 2, self.height // 2 -\n self.gameover_text.get_height()))\n\n draw_centered(self.gameover1_text, self.screen, (self.width // 2, self.height // 2 +\n self.gameover_text.get_height()))\n\n # Дисплей жизни\n for i in range(self.lives):\n draw_centered(self.lives_image, self.screen, (self.lives_image.get_width() * i * 1.2 + 40,\n self.lives_image.get_height() // 2))\n\n else:\n # Приветствие\n draw_centered(self.welcome_asteroids, self.screen, (self.width // 2, self.height // 2 -\n self.welcome_asteroids.get_height()))\n\n draw_centered(self.welcome_desc, self.screen, (self.width // 2, self.height // 2 +\n self.welcome_desc.get_height()))\n\n pygame.display.flip()\n\n\nMyGame().run()\npygame.quit()\nsys.exit()\n","repo_name":"BlackExperience69/Yandex-Lyceum","sub_path":"zmeyka.py","file_name":"zmeyka.py","file_ext":"py","file_size_in_byte":24143,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31356083924","text":"import csv\nimport datetime\nimport typing\n\nimport rclpy\nfrom rcl_interfaces.msg import Parameter as ParameterMsg\nfrom rcl_interfaces.msg import ParameterValue\nfrom rcl_interfaces.srv import GetParameters, ListParameters, SetParameters\nfrom rclpy.parameter import Parameter\n\nfrom mavros_msgs.msg import ParamEvent\nfrom mavros_msgs.srv import ParamPull, ParamSetV2\n\nfrom .base import PARAMETERS_QOS, PluginModule, SubscriptionCallable, cached_property\nfrom .utils import (\n call_get_parameters,\n call_list_parameters,\n call_set_parameters_check_and_raise,\n parameter_from_parameter_value,\n)\n\n\nclass ParamFile:\n \"\"\"Base class for param file parsers.\"\"\"\n\n parameters: typing.Optional[typing.Dict[str, Parameter]] = None\n stamp: typing.Optional[datetime.datetime] = None\n tgt_system: int = 1\n tgt_component: int = 1\n\n def load(self, file_: typing.TextIO) -> 
\"ParamFile\":\n \"\"\"Load Parameters from a file.\"\"\"\n raise NotImplementedError\n\n def save(self, file_: typing.TextIO):\n \"\"\"Write Parameters to a file.\"\"\"\n raise NotImplementedError\n\n\nclass MavProxyParam(ParamFile):\n \"\"\"Parse MavProxy parm file.\"\"\"\n\n class CSVDialect(csv.Dialect):\n delimiter = \" \"\n doublequote = False\n skipinitialspace = True\n lineterminator = \"\\r\\n\"\n quoting = csv.QUOTE_NONE\n escapechar = \"\"\n\n def _parse_param_file(self, file_: typing.TextIO):\n def to_numeric(x):\n return float(x) if \".\" in x else int(x)\n\n for data in csv.reader(file_, self.CSVDialect):\n if data[0].startswith(\"#\"):\n continue # skip comments\n\n if len(data) != 2:\n raise ValueError(\"wrong field count\")\n\n yield Parameter(data[0].strip(), value=to_numeric(data[1]))\n\n def load(self, file_: typing.TextIO) -> ParamFile:\n self.parameters = {p.name: p for p in self._parse_param_file(file_)}\n return self\n\n def save(self, file_: typing.TextIO):\n if self.stamp is None:\n self.stamp = datetime.datetime.now()\n\n writer = csv.writer(file_, self.CSVDialect)\n file_.write(\n f\"\"\"#NOTE: {self.stamp.strftime(\"%d.%m.%Y %T\")}{self.CSVDialect.lineterminator}\"\"\"\n )\n for k, p in self.parameters.items():\n writer.writerow((p.name, p.value))\n\n\nclass MissionPlannerParam(MavProxyParam):\n \"\"\"Parse MissionPlanner param file.\"\"\"\n\n class CSVDialect(csv.Dialect):\n delimiter = \",\"\n doublequote = False\n skipinitialspace = True\n lineterminator = \"\\r\\n\"\n quoting = csv.QUOTE_NONE\n escapechar = \"\"\n\n\nclass QGroundControlParam(ParamFile):\n \"\"\"Parse QGC param file.\"\"\"\n\n class CSVDialect(csv.Dialect):\n delimiter = \"\\t\"\n doublequote = False\n skipinitialspace = True\n lineterminator = \"\\n\"\n quoting = csv.QUOTE_NONE\n escapechar = \"\"\n\n def _parse_param_file(self, file_: typing.TextIO):\n def to_numeric(x):\n return float(x) if \".\" in x else int(x)\n\n for data in csv.reader(file_, self.CSVDialect):\n if data[0].startswith(\"#\"):\n continue # skip comments\n\n if len(data) != 5:\n raise ValueError(\"wrong field count\")\n\n yield Parameter(data[2].strip(), value=to_numeric(data[3]))\n\n def load(self, file_: typing.TextIO) -> ParamFile:\n self.parameters = {p.name: p for p in self._parse_param_file(file_)}\n return self\n\n def save(self, file_: typing.TextIO):\n def to_type(x):\n if isinstance(x, float):\n return 9 # REAL32\n elif isinstance(x, int):\n return 6 # INT32\n else:\n raise ValueError(f\"unknown type: {type(x):r}\")\n\n if self.stamp is None:\n self.stamp = datetime.datetime.now()\n\n writer = csv.writer(file_, self.CSVDialect)\n writer.writerow((f\"\"\"# NOTE: {self.stamp.strftime(\"%d.%m.%Y %T\")}\"\"\",))\n writer.writerow(\n (\n f\"# Onboard parameters saved by \"\n f\"mavparam for ({self.tgt_system}.{self.tgt_component})\",\n )\n )\n writer.writerow((\"# MAV ID\", \"COMPONENT ID\", \"PARAM NAME\", \"VALUE\", \"(TYPE)\"))\n for k, p in self.parameters.items():\n writer.writerow(\n (\n self.tgt_system,\n self.tgt_component,\n p.name,\n p.value,\n to_type(p.value),\n )\n )\n\n\nclass ParamPlugin(PluginModule):\n \"\"\"Parameter plugin interface.\"\"\"\n\n timeout_sec: float = 5.0\n _parameters = None\n _event_sub = None\n\n @cached_property\n def cli_list_parameters(self) -> rclpy.node.Client:\n \"\"\"Client for ListParameters service.\"\"\"\n return self.create_client(ListParameters, (\"param\", \"list_parameters\"))\n\n @cached_property\n def cli_get_parameters(self) -> rclpy.node.Client:\n \"\"\"Client 
for GetParameters service.\"\"\"\n return self.create_client(GetParameters, (\"param\", \"get_parameters\"))\n\n @cached_property\n def cli_set_parameters(self) -> rclpy.node.Client:\n \"\"\"Client for SetParameters service.\"\"\"\n return self.create_client(SetParameters, (\"param\", \"set_parameters\"))\n\n @cached_property\n def cli_pull(self) -> rclpy.node.Client:\n \"\"\"Client for ParamPull service.\"\"\"\n return self.create_client(ParamPull, (\"param\", \"pull\"))\n\n @cached_property\n def cli_set(self) -> rclpy.node.Client:\n \"\"\"Client for ParamSetV2 service.\"\"\"\n return self.create_client(ParamSetV2, (\"param\", \"set\"))\n\n def subscribe_events(\n self,\n callback: SubscriptionCallable,\n qos_profile: rclpy.qos.QoSProfile = PARAMETERS_QOS,\n ) -> rclpy.node.Subscription:\n \"\"\"Subscribe to parameter events.\"\"\"\n return self.create_subscription(\n ParamEvent, (\"param\", \"event\"), callback, qos_profile\n )\n\n def call_pull(self, *, force_pull: bool = False) -> ParamPull.Response:\n \"\"\"Do a call to ParamPull service.\"\"\"\n lg = self.get_logger()\n\n req = ParamPull.Request(force_pull=force_pull)\n resp = self.cli_pull.call(req)\n lg.debug(f\"pull result: {resp}\")\n return resp\n\n @property\n def values(self) -> \"ParamDict\":\n \"\"\"Provide current state of parameters and allows to change them.\"\"\"\n if self._parameters is not None:\n return self._parameters\n\n pm = ParamDict()\n pm._pm = self\n\n # 1. subscribe for parameter updates\n self._event_sub = self.subscribe_events(pm._event_handler)\n self._parameters = pm\n\n # 2. pull parameters, if it isn't yet done\n # we'll get bunch of events\n self.call_pull()\n\n # 3. if too small events come, request whole list\n if len(pm) < 10:\n names = call_list_parameters(\n node=self._node, client=self.cli_list_parameters\n )\n for k, v in call_get_parameters(\n node=self._node, client=self.cli_get_parameters, names=names\n ).items():\n pm.setdefault(k, v)\n\n return pm\n\n\nclass ParamDict(dict):\n \"\"\"\n ParamDict wrapper.\n\n That class holds states of parameters\n and allows to upload new items.\n \"\"\"\n\n class NoSet:\n \"\"\"Wrapper to mark values we do not want to send set request for.\"\"\"\n\n value: Parameter\n\n def __init__(self, p):\n self.value = p\n\n _pm: \"ParamPlugin\" = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __getitem__(self, key: str) -> Parameter:\n return super().__getitem__(key)\n\n def __setitem__(self, key: str, value):\n do_call_set, value = self._set_item(key, value)\n if do_call_set:\n call_set_parameters_check_and_raise(\n node=self._pm._node,\n client=self._pm.cli_set_parameters,\n parameters=[value],\n )\n\n def _set_item(self, key: str, value) -> (bool, Parameter):\n is_no_set = False\n if isinstance(value, ParamDict.NoSet):\n is_no_set = True\n value = value.value\n\n if isinstance(value, Parameter):\n pass\n elif isinstance(value, ParameterValue):\n value = parameter_from_parameter_value(key, value)\n elif isinstance(value, ParameterMsg):\n value = Parameter.from_parameter_msg(value)\n else:\n value = Parameter(name=key, value=value)\n\n assert key == value.name\n\n do_call_set = not is_no_set and self.get(key, Parameter(name=key)) != value\n super().__setitem__(key, value)\n return do_call_set, value\n\n def __getattr__(self, key: str):\n try:\n return object.__getattribute__(self, key)\n except AttributeError:\n try:\n return self[key]\n except KeyError:\n raise AttributeError(key)\n\n def __setattr__(self, key: str, 
value):\n try:\n object.__getattribute__(self, key)\n except AttributeError:\n try:\n self[key] = value\n except Exception as ex:\n raise AttributeError(f\"{key}: {ex}\")\n else:\n object.__setattr__(self, key, value)\n\n def __delattr__(self, key: str):\n try:\n object.__getattribute__(self, key)\n except AttributeError:\n try:\n del self[key]\n except KeyError:\n raise AttributeError(key)\n else:\n object.__delattr__(self, key)\n\n def update(self, *args, **kwargs):\n keys_to_set = []\n for k, v in dict(*args, **kwargs).items():\n do_call_set, _ = self._set_item(k, v)\n if do_call_set:\n keys_to_set.append(k)\n\n if keys_to_set:\n call_set_parameters_check_and_raise(\n node=self._pm._node,\n client=self._pm.cli_set_parameters,\n parameters=[self[k] for k in keys_to_set],\n )\n\n def setdefault(self, key: str, value=None):\n if key not in self:\n self[key] = ParamDict.NoSet(value)\n\n def _event_handler(self, msg: ParamEvent):\n self[msg.param_id] = parameter_from_parameter_value(msg.param_id, msg.value)\n","repo_name":"mavlink/mavros","sub_path":"mavros/mavros/param.py","file_name":"param.py","file_ext":"py","file_size_in_byte":10459,"program_lang":"python","lang":"en","doc_type":"code","stars":770,"dataset":"github-code","pt":"77"} +{"seq_id":"42756510938","text":"from __future__ import print_function\nimport os\n\nwhile True:\n def clear():\n os.system('cls')\n\n board = [x for x in range(1, 10)]\n\n def display_board():\n for i in range(0, 9, 3):\n print(board[i], board[i + 1], board[i + 2], sep=\" | \")\n\n display_board()\n\n def check_win():\n for i in range(0, 9, 3):\n if board[i] == board[i + 1] == board[i + 2]:\n return board[i]\n for i in range(3):\n if board[i] == board[i + 3] == board[i + 6]:\n return board[i]\n if board[0] == board[4] == board[8]:\n return board[0]\n if board[2] == board[4] == board[6]:\n return board[2]\n\n pos_range = [i for i in range(1, 10)]\n\n x = 0\n while x <= 8:\n try:\n if x % 2 == 0:\n print(\"Turn of, x\")\n else:\n print(\"Turn of, O\")\n try:\n pos = int(input('Enter your position (1-9) ')) - 1\n except SyntaxError:\n pass\n clear()\n if (pos + 1) in pos_range:\n if board[pos] == 'X' or board[pos] == 'O':\n print('Already in use Ha ha!\\n')\n else:\n if x % 2 == 0:\n board[pos] = 'X'\n else:\n board[pos] = 'O'\n x += 1\n else:\n print('You are out of the range fool!\\n')\n except NameError:\n clear()\n print('What you are trying to do fool!\\n')\n display_board()\n check_winner = check_win()\n if check_winner:\n print('you are winner ', check_winner)\n break\n if not check_win():\n print(\"This tie Good Game\")\n os.system(\"pause & cls\")\n print(\"\\nLet's Play again!\\n\")\n","repo_name":"ShailMurtaza/Tic-tac-toe-CLI","sub_path":"tic-tac-toe.py","file_name":"tic-tac-toe.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15981770793","text":"# See tests for a more comprehensive complementary table\r\nSIMPLE_COMPLEMENTS_STR = \"\"\"#Reduced table with bases A, G, C, T\r\n Base\tComplementary Base\r\n A\tT\r\n T\tA\r\n G\tC\r\n C\tG\r\n\"\"\"\r\n\r\n# Recommended helper function\r\ndef _clean_sequence(sequence, str_table=SIMPLE_COMPLEMENTS_STR):\r\n \"\"\"\r\n Receives a DNA sequence and a str_table that defines valid (and\r\n complementary) bases\r\n Returns all sequences converted to upper case and remove invalid\r\n characters\r\n t!t%ttttAACCG --> TTTTTTAACCG\r\n \"\"\"\r\n clean_seq = ''\r\n compl_dict = {}\r\n for line in 
str_table.splitlines()[2:]:\r\n base = line.split()[0]\r\n comp = line.split()[-1]\r\n compl_dict[base] = comp\r\n for base in sequence:\r\n base = base.upper()\r\n if base in compl_dict.keys():\r\n clean_seq += base\r\n return clean_seq, compl_dict \r\n\r\ndef reverse(sequence, str_table=SIMPLE_COMPLEMENTS_STR):\r\n \"\"\"\r\n Receives a DNA sequence and a str_table that defines valid (and\r\n complementary) bases\r\n Returns a reversed string of sequence while removing all characters\r\n not found in str_table characters\r\n e.g. t!t%ttttAACCG --> GCCAATTTTTT\r\n \"\"\"\r\n clean_seq, compl_dict = _clean_sequence(sequence, str_table)\r\n return clean_seq[::-1]\r\n\r\n\r\n\r\ndef complement(sequence, str_table=SIMPLE_COMPLEMENTS_STR):\r\n \"\"\"\r\n Receives a DNA sequence and a str_table that defines valid (and\r\n complementary) bases\r\n Returns a string containing complementary bases as defined in\r\n str_table while removing non input_sequence characters\r\n e.g. t!t%ttttAACCG --> AAAAAATTGGC\r\n \"\"\"\r\n clean_seq, compl_dict = _clean_sequence(sequence, str_table)\r\n complement = ''\r\n for base in clean_seq:\r\n complement += compl_dict[base]\r\n return complement\r\n\r\n\r\ndef reverse_complement(sequence, str_table=SIMPLE_COMPLEMENTS_STR):\r\n \"\"\"\r\n Receives a DNA sequence and a str_table that defines valid (and\r\n complementary) bases\r\n Returns a string containing complementary bases as defined in str_table\r\n while removing non input_sequence characters\r\n e.g. t!t%ttttAACCG --> CGGTTAAAAAA\r\n \"\"\"\r\n clean_seq, compl_dict = _clean_sequence(sequence, str_table)\r\n return complement(reverse(clean_seq, str_table), str_table)\r\n\r\nCOMPLEMENTS_STR = \"\"\"# Full table with ambigous bases\r\n Base\tName\tBases Represented\tComplementary Base\r\n A\tAdenine\tA\tT\r\n T\tThymidine\tT \tA\r\n U\tUridine(RNA only)\tU\tA\r\n G\tGuanidine\tG\tC\r\n C\tCytidine\tC\tG\r\n Y\tpYrimidine\tC T\tR\r\n R\tpuRine\tA G\tY\r\n S\tStrong(3Hbonds)\tG C\tS\r\n W\tWeak(2Hbonds)\tA T\tW\r\n K\tKeto\tT/U G\tM\r\n M\taMino\tA C\tK\r\n B\tnot A\tC G T\tV\r\n D\tnot C\tA G T\tH\r\n H\tnot G\tA C T\tD\r\n V\tnot T/U\tA C G\tB\r\n N\tUnknown\tA C G T\tN\r\n\"\"\"\r\n\r\n\r\nAMBIGOUS_DIRTY = \"AGB Vnc gRy Tvv V\"\r\n# # (\"TAC WSA YBG KGK DVN YRS TGG TAC TAA TGC CTA AGT GAC CGG CAG CAA AAT GTT\"\r\n# # \" GCA GCA CTG ACC CTT TTG GGA CCG CAA TGG GTT GAA TTA GCG GAA CGT CGT GT\"\r\n# # \"A GGG GGA AAG CGG TCG ACC GCA TTA TCG CTT CTC CGG GCG TGG CTA GCG GGA A\"\r\n# # \"GG GTT GTC AAC GCG TCG GAC TTA CCG CTT ACC GCG AAA CGG ACC AAA GGC CGT \"\r\n# # \"GGT CTT CGC CAC GGC CTT TCG ACC GAC CTC ACG CTA GAA GGA\")\r\n\r\n# print('rev_comp: ', reverse_complement(AMBIGOUS_DIRTY, COMPLEMENTS_STR))\r\nprint('comp: ',complement(AMBIGOUS_DIRTY, COMPLEMENTS_STR))\r\n#print('reverse: ',reverse(AMBIGOUS_DIRTY, COMPLEMENTS_STR))","repo_name":"pyrrhull/bitesofpy","sub_path":"259/reverse_complement.py","file_name":"reverse_complement.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24653244149","text":"import numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\nimport os\nimport time\nfrom multiprocessing.connection import wait\nimport scipy.spatial as spatial\nimport scipy.cluster as cluster\nfrom collections import defaultdict\nfrom operator import itemgetter\n\ndef auto_canny(image, sigma=0.33):\n \"\"\"\n Canny edge detection with automatic thresholds.\n \"\"\"\n # compute the median 
of the single channel pixel intensities\n v = np.median(image)\n \n # apply automatic Canny edge detection using the computed median\n lower = int(max(160, (1.0 - sigma) * v))\n upper = int(min(300, (1.0 + sigma) * v))\n edged = cv.Canny(image, lower, upper)\n \n # return the edged image\n return edged\ndef hough_line(edges, min_line_length=100, max_line_gap=10):\n lines = cv.HoughLines(edges, 1, np.pi / 180, 110, min_line_length, max_line_gap)\n lines = np.reshape(lines, (-1, 2))\n return lines\n\ndef reduce_line(lines):\n newlines=[]\n for rho, theta in lines:\n if (theta>0.52 and theta<1.04) or (theta>2.09 and theta<2.61) or (theta>3.66 and theta<4.18) or (theta>5.23 and theta<5.75):\n pass\n else:\n newlines.append([rho,theta])\n # print(np.array(newlines))\n # print(len(np.array(newlines)))\n return np.array(newlines)\ndef show_line(lines,image):\n for rho, theta in lines:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 4000*(-b))\n y1 = int(y0 + 4000*(a))\n x2 = int(x0 - 4000*(-b))\n y2 = int(y0 - 4000*(a))\n cv.line(image,(x1,y1),(x2,y2),(0,0,255),2)\n\ndef h_v_lines(lines):\n h_lines, v_lines = [], []\n for rho, theta in lines:\n if theta < np.pi / 4 or theta > np.pi - np.pi / 4:\n v_lines.append([rho, theta])\n else:\n h_lines.append([rho, theta])\n return h_lines, v_lines\n\ndef intersections(h, v):\n \"\"\"\n Given lists of horizontal and vertical lines in (rho, theta) form, returns list\n of (x, y) intersection points.\n \"\"\"\n points = []\n for d1, a1 in h:\n for d2, a2 in v:\n A = np.array([[np.cos(a1), np.sin(a1)], [np.cos(a2), np.sin(a2)]])\n b = np.array([d1, d2])\n point = np.linalg.solve(A, b)\n points.append(point)\n return np.array(points)\n\ndef cluster_points(points):\n dists = spatial.distance.pdist(points)\n single_linkage = cluster.hierarchy.single(dists)\n flat_clusters = cluster.hierarchy.fcluster(single_linkage, 15, 'distance')\n cluster_dict = defaultdict(list)\n for i in range(len(flat_clusters)):\n cluster_dict[flat_clusters[i]].append(points[i])\n cluster_values = cluster_dict.values()\n clusters = map(lambda arr: (np.mean(np.array(arr)[:, 0]), np.mean(np.array(arr)[:, 1])), cluster_values)\n #for i in range(len(clusters)) :\n #clusters[i]=list(clusters[i])\n #a=clusters[i][0]\n #b=clusters[i][1]\n #clusters[i].append(cv.norm((int(a),int(b)),(0,int(b)), normType=cv.NORM_L2))\n #clusters[i]=tuple(clusters[i])\n return sorted(list(clusters), key=lambda k: [k[1],k[0]])\n\ndef clean_point(points):\n over_point=[]\n points=np.array(points,dtype=int)\n num_point=int(len(points))\n if num_point==90 or num_point==99:\n points=np.reshape(points,(int(num_point/9),9,2))\n for i in range(0,2) :\n #print(points[i][0][1],points[i+1][0][1])\n #if cv.norm(points[i][0],points[i+1][0], normType=cv.NORM_L2) <30 :\n if abs(points[i][0][1]-points[i+1][0][1])<30:\n over_point.append(i+1)\n print(over_point)\n for i in range(int(num_point/9)-1,int(num_point/9)-3,-1) : \n # print(points[i][0][1],points[i-1][0][1])\n #if cv.norm(points[i][0],points[i-1][0], normType=cv.NORM_L2) <30 :\n if abs(points[i][0][1]-points[i-1][0][1])<30:\n over_point.append(i-1)\n print(over_point)\n x=0 \n for j in over_point : \n points=np.delete(points,j-x,axis=0)\n x+=1\n elif num_point==81 :\n points=np.reshape(points,(int(len(points)/9),9,2)) \n else :\n print(\"error\")\n print(num_point)\n temp = np.zeros(shape=(9,9,2),dtype=int)\n for i in range(0,9) :\n x=sorted(tuple(points[i]),key=itemgetter(0))\n for j in range(0,9) :\n temp[i][j]=x[j]\n return temp\n\ndef 
clean_point2(points):\n    over_point=[]\n    # points=np.array(points,dtype=int)\n    num_point=len(points)\n    print('num_point',num_point)\n    # print(num_point,'p001',points.sort())\n    if num_point==99:\n        points = points[9:90] \n        points=np.array(points,dtype=int)\n        # print('p1',points)\n        points=np.reshape(points,(int(len(points)/9),9,2))\n    else :\n        print(\"error\")\n        print(num_point)\n    # temp = np.zeros(shape=(9,9,2),dtype=int)\n    # for i in range(0,9) :\n    #     x=sorted(tuple(points[i]),key=itemgetter(0))\n    #     for j in range(0,9) :\n    #         temp[i][j]=x[j]\n    # print('p0',points)\n    # print('p0sort',points.sort())\n    \n    # print('p2',points)\n    return points\n\ndef get_box(points,img):\n    box=np.zeros(shape=(8,8,4,2),dtype=int)\n    picture=np.float32([[0,0],[124,0],[124,124],[0,124]])\n    for i in range(0,8) :\n        for j in range(0,8) :\n            box[i][j][0]=points[i][j]\n            box[i][j][1]=points[i][j+1]\n            box[i][j][2]=points[i+1][j+1]\n            box[i][j][3]=points[i+1][j]\n            ######### crop the image\n            my_box=np.float32([box[i][j][0],box[i][j][1],box[i][j][2],box[i][j][3]])\n            my_picture=cv.getPerspectiveTransform(my_box,picture)\n            img_output=cv.warpPerspective(img,my_picture,(124,124))\n            #cv.imwrite(img_name+'_'+str(i)+str(j)+\".png\",img_output)\n            path = 'main/klui_tempbox/img'\n            cv.imwrite(os.path.join(path , str(i)+str(j)+'.png'),img_output)\n\ndef take_picture(port):\n    cap = cv.VideoCapture(port,cv.CAP_DSHOW)\n    fourcc = cv.VideoWriter_fourcc(*'XVID')\n    cap.set(cv.CAP_PROP_FRAME_WIDTH, 900)\n    cap.set(cv.CAP_PROP_FRAME_HEIGHT, 900)\n    w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))\n    h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))\n\n    time.sleep(2) \n    ret, frame = cap.read()\n    frame = frame[0:0+h, 100:100+730] \n    #frame=cv.imread(\"A1.jpg\")\n    (h,w) = frame.shape[:2]\n    center = (w//2,h//2)\n    M = cv.getRotationMatrix2D(center,0,1.0)\n    rotated = cv.warpAffine(frame,M,(w,h),borderMode=cv.BORDER_REPLICATE)\n    cap.release()\n    cv.destroyAllWindows()\n    rotated=cv.resize(rotated, (640,640))\n    return rotated\n\n\"\"\"\nraw = take_picture(0)\n# cv.imshow('frame',test)\n# cv.waitKey(0)\nimg = raw\n\nimg_blur=cv.blur(img,(3,3),0)\ngray_blur_kuy = cv.bilateralFilter(img, 9, 75, 75)\ngray_blur_kuy = cv.medianBlur(gray_blur_kuy, 11)\n\n# cv.imshow('blur', gray_blur_kuy)\n# cv.waitKey(0)\n# cv.destroyAllWindows()\n\nmask=cv.subtract(img,gray_blur_kuy)\n# cv.imshow('mask', mask)\n# cv.waitKey(0)\n# cv.destroyAllWindows()\n\nfinal=cv.add(img_blur,1*mask)\n# cv.imshow('final', final)\n# cv.waitKey(0)\n# cv.destroyAllWindows()\n\nfinal = cv.cvtColor(final,cv.COLOR_BGR2GRAY)\ncv.imshow('final2', final)\ncv.waitKey(0)\ncv.destroyAllWindows()\n\nedges=auto_canny(final)\ncv.imshow('edges', edges)\ncv.waitKey(0)\ncv.destroyAllWindows()\n\nlines = hough_line(edges)\n# print(lines)\n# print(len(lines))\nlines = reduce_line(lines)\nliness=img.copy()\nshow_line(lines,liness)\ncv.imshow('lines', liness)\ncv.waitKey(0)\ncv.destroyAllWindows()\n\nh_lines, v_lines = h_v_lines(lines)\npoints = intersections(h_lines, v_lines )\npoints = cluster_points(points)\nprint('img shape',img.shape)\n# print(len(points),points,(points[1][0],points[1][1]))\n# img_copy = img\n# for i in range(len(points)) :\n#     cv.circle(img_copy,(int(points[i][0]),int(points[i][1])),5,(255, 0, 0),-1)\n# cv.imshow('points', img_copy)\n# cv.waitKey(0)\n# cv.destroyAllWindows()\n# cv.imshow('img', img)\n# cv.waitKey(0)\n# cv.destroyAllWindows()\nimg2=img\n\npoints=clean_point2(points)\nprint('shape point',points.shape)\n# img_copy = img\n# for i in range(len(points)) :\n#     for j in range(len(points)) :\n#         
cv.circle(img_copy,(int(points[i][j][0]),int(points[i][j][1])),5,(0, 255, 0),-1)\n# cv.imshow('points2', img_copy)\n# cv.waitKey(0)\n# cv.destroyAllWindows()\n\n# box=get_box(points,img)\nbox=np.zeros(shape=(8,8,4,2),dtype=int)\n# print(box)\n# print(points)\n# print('...........................................................')\ntestpoint = np.sort(points,axis=1)\nprint('shape testpoint',testpoint.shape)\n# print(testpoint.sort(axis=1))\n# print(points)\npicture=np.float32([[0,0],[124,0],[124,124],[0,124]])\nprint('crop picture')\nfor i in range(0,8) :\n    for j in range(0,8) :\n        box[i][j][0]=testpoint[i][j]\n        box[i][j][1]=testpoint[i][j+1]\n        box[i][j][2]=testpoint[i+1][j+1]\n        box[i][j][3]=testpoint[i+1][j]\n        ######### crop the image\n        my_box=np.float32([box[i][j][0],box[i][j][1],box[i][j][2],box[i][j][3]])\n        my_picture=cv.getPerspectiveTransform(my_box,picture)\n        img_output=cv.warpPerspective(img2,my_picture,(124,124))\n        #cv.imwrite(img_name+'_'+str(i)+str(j)+\".png\",img_output)\n        # path = 'main/klui_crop_dataset1'\n        path = 'kluiUI/klui_tempbox/img'\n        # cv.imwrite(os.path.join(path , '30'+'_'+str(i)+str(j)+'.png'),img_output)\n        cv.imwrite(os.path.join(path , str(i)+str(j)+'.png'),img_output)\n\n\"\"\"\n\n\n\n\n","repo_name":"tammapon/ChessRobot","sub_path":"highlevel_system/kluiUI/test_klui/fullchess.py","file_name":"fullchess.py","file_ext":"py","file_size_in_byte":9505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"69993000890","text":"#!/usr/bin/python -x\n\nimport sys\nfrom datetime import datetime\n\nimport os, fnmatch\nfrom shutil import move\nimport copy\nimport time\nimport datetime\nimport numpy as np\nfrom scipy.optimize import linprog\nfrom scipy.interpolate import interp1d\nfrom scipy.interpolate import make_interp_spline, BSpline\nfrom scipy import interpolate\nfrom numpy.linalg import solve\nfrom fractions import Fraction\nfrom difflib import SequenceMatcher\n\n#from PIL import Image\nfrom scipy import misc\nfrom scipy import ndimage as ndi\nimport math\nimport glob\nfrom scipy.optimize import curve_fit\nfrom scipy.constants import physical_constants\nfrom scipy.optimize import brentq\nfrom scipy.integrate import cumtrapz, trapz, simps\nfrom pymatgen.core import Structure\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom dfttk.analysis.ywutils import get_expt, formula2composition, get_melting_temperature\n\nimport re\nimport json\nimport subprocess\nfrom shutil import copyfile\n\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\nfrom difflib import SequenceMatcher\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\nimport platform\n\ndef pngplot(cmd):\n    item = [s for s in cmd.split() if s!=\"\"]\n    cmdfile = item[1].replace(\";\",\"\")\n    with open(cmdfile,\"r\") as f:\n        lines = f.readlines()\n    pngfile = cmdfile.replace(\".plt\",\"_png.plt\")\n\n    with open (pngfile,\"w\") as f:\n        for line in lines:\n            if line.startswith(\"set terminal\"):\n                f.write('{}\\n'.format(\"set terminal png font Times_Roman 96 size 4096,3072 linewidth 7\"))\n            elif line.startswith(\"set encoding\"):\n                f.write('{}\\n'.format(\"set encoding utf8\"))\n            elif line.startswith(\"set output\"):\n                f.write('{}\\n'.format(line.replace(\".eps\",\".png\")))\n                if cmdfile == \"vdos.plt\":\n                    f.write('{}\\n'.format('set xlabel \"Phonon frequency (THz)\"'))\n                    f.write('{}\\n'.format('set ylabel \"Phonon DOS (1/THz/cell)\"'))\n                elif 
cmdfile == \"vdis.plt\":\n f.write('{}\\n'.format('set xlabel \"Direction\"'))\n f.write('{}\\n'.format('set ylabel \"Phonon frequncy (THz)\"'))\n else:\n f.write('{}'.format(line))\n return \"gnuplot \"+cmdfile, \"gnuplot \"+pngfile\n\ndef plot(cmd):\n cmd0, cmd1 = pngplot(cmd)\n #print(\"ssssssss\",cmd0,cmd1)\n output0 = subprocess.run(cmd0, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n universal_newlines=True)\n print(output0)\n output1 = subprocess.run(cmd1, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n universal_newlines=True)\n print(output1)\n\n\n#from elements import elements\n\nMM_of_Elements = {'H': 1.00794, 'He': 4.002602, 'Li': 6.941, 'Be': 9.012182, 'B': 10.811, 'C': 12.0107, 'N': 14.0067,\n 'O': 15.9994, 'F': 18.9984032, 'Ne': 20.1797, 'Na': 22.98976928, 'Mg': 24.305, 'Al': 26.9815386,\n 'Si': 28.0855, 'P': 30.973762, 'S': 32.065, 'Cl': 35.453, 'Ar': 39.948, 'K': 39.0983, 'Ca': 40.078,\n 'Sc': 44.955912, 'Ti': 47.867, 'V': 50.9415, 'Cr': 51.9961, 'Mn': 54.938045,\n 'Fe': 55.845, 'Co': 58.933195, 'Ni': 58.6934, 'Cu': 63.546, 'Zn': 65.409, 'Ga': 69.723, 'Ge': 72.64,\n 'As': 74.9216, 'Se': 78.96, 'Br': 79.904, 'Kr': 83.798, 'Rb': 85.4678, 'Sr': 87.62, 'Y': 88.90585,\n 'Zr': 91.224, 'Nb': 92.90638, 'Mo': 95.94, 'Tc': 98.9063, 'Ru': 101.07, 'Rh': 102.9055, 'Pd': 106.42,\n 'Ag': 107.8682, 'Cd': 112.411, 'In': 114.818, 'Sn': 118.71, 'Sb': 121.760, 'Te': 127.6,\n 'I': 126.90447, 'Xe': 131.293, 'Cs': 132.9054519, 'Ba': 137.327, 'La': 138.90547, 'Ce': 140.116,\n 'Pr': 140.90465, 'Nd': 144.242, 'Pm': 146.9151, 'Sm': 150.36, 'Eu': 151.964, 'Gd': 157.25,\n 'Tb': 158.92535, 'Dy': 162.5, 'Ho': 164.93032, 'Er': 167.259, 'Tm': 168.93421, 'Yb': 173.04,\n 'Lu': 174.967, 'Hf': 178.49, 'Ta': 180.9479, 'W': 183.84, 'Re': 186.207, 'Os': 190.23, 'Ir': 192.217,\n 'Pt': 195.084, 'Au': 196.966569, 'Hg': 200.59, 'Tl': 204.3833, 'Pb': 207.2, 'Bi': 208.9804,\n 'Po': 208.9824, 'At': 209.9871, 'Rn': 222.0176, 'Fr': 223.0197, 'Ra': 226.0254, 'Ac': 227.0278,\n 'Th': 232.03806, 'Pa': 231.03588, 'U': 238.02891, 'Np': 237.0482, 'Pu': 244.0642, 'Am': 243.0614,\n 'Cm': 247.0703, 'Bk': 247.0703, 'Cf': 251.0796, 'Es': 252.0829, 'Fm': 257.0951, 'Md': 258.0951,\n 'No': 259.1009, 'Lr': 262, 'Rf': 267, 'Db': 268, 'Sg': 271, 'Bh': 270, 'Hs': 269, 'Mt': 278,\n 'Ds': 281, 'Rg': 281, 'Cn': 285, 'Nh': 284, 'Fl': 289, 'Mc': 289, 'Lv': 292, 'Ts': 294, 'Og': 294,\n 'ZERO': 0}\n\nperiodictable = MM_of_Elements.keys() #\"\"\" list of all elements from the periodic table\"\"\"\n\n\nfrom math import atan2,degrees\n#Label line with line2D label data\n#get from https://github.com/cphyc/matplotlib-label-lines\ndef labelLine(line,x,label=None,align=True,**kwargs):\n\n ax = line.axes\n xdata = line.get_xdata()\n ydata = line.get_ydata()\n\n if (x < xdata[0]) or (x > xdata[-1]):\n print('x label location is outside data range!')\n return\n\n #Find corresponding y co-ordinate and angle of the line\n ip = 1\n for i in range(len(xdata)):\n if x < xdata[i]:\n ip = i\n break\n\n y = ydata[ip-1] + (ydata[ip]-ydata[ip-1])*(x-xdata[ip-1])/(xdata[ip]-xdata[ip-1])\n\n if not label:\n label = line.get_label()\n\n if align:\n #Compute the slope\n dx = xdata[ip] - xdata[ip-1]\n dy = ydata[ip] - ydata[ip-1]\n ang = degrees(atan2(dy,dx))\n\n #Transform to screen co-ordinates\n pt = np.array([x,y]).reshape((1,2))\n trans_angle = ax.transData.transform_angles(np.array((ang,)),pt)[0]\n\n else:\n trans_angle = 0\n\n #Set a bunch of keyword arguments\n if 'color' not in kwargs:\n kwargs['color'] = line.get_color()\n\n if 
('horizontalalignment' not in kwargs) and ('ha' not in kwargs):\n kwargs['ha'] = 'center'\n\n if ('verticalalignment' not in kwargs) and ('va' not in kwargs):\n kwargs['va'] = 'center'\n\n if 'backgroundcolor' not in kwargs:\n kwargs['backgroundcolor'] = ax.get_facecolor()\n\n if 'clip_on' not in kwargs:\n kwargs['clip_on'] = True\n\n if 'zorder' not in kwargs:\n kwargs['zorder'] = 2.5\n\n ax.text(x,y,label,rotation=trans_angle,**kwargs)\n\n\n#get from https://github.com/cphyc/matplotlib-label-lines\ndef labelLines(lines,align=True,xvals=None,**kwargs):\n\n ax = lines[0].axes\n labLines = []\n labels = []\n\n #Take only the lines which have labels other than the default ones\n for line in lines:\n label = line.get_label()\n if \"_line\" not in label:\n labLines.append(line)\n labels.append(label)\n\n if xvals is None:\n xmin,xmax = ax.get_xlim()\n xvals = np.linspace(xmin,xmax,len(labLines)+2)[1:-1]\n\n for line,x,label in zip(labLines,xvals,labels):\n labelLine(line,x,label,align,**kwargs)\n\n\n\"\"\"SGTE fitting using\nT - temperature\na - fitting parameters\n\"\"\"\ndef SGTE(T,a):\n fval = a[0]+a[1]*T\n if len(a) > 2:\n fval += a[2]*T*np.log(T)\n if len(a) > 3:\n fval += a[3]*T*T\n if len(a) > 4:\n fval += a[4]*T*T*T\n if len(a) > 5:\n fval += a[5]/T\n return(fval)\n\n\n\"\"\"SGTE fitting with two parameters\"\"\"\ndef SGTE2(T, a, b):\n return (SGTE(T, [a,b]))\n\n\n\"\"\"SGTE fitting with three parameters\"\"\"\ndef SGTE3(T, a, b, c):\n return (SGTE(T, [a,b,c]))\n\n\n\"\"\"SGTE fitting with four parameters\"\"\"\ndef SGTE4(T, a, b, c, d):\n return (SGTE(T, [a,b,c,d]))\n\n\n\"\"\"SGTE fitting with five parameters\"\"\"\ndef SGTE5(T, a, b, c, d, e):\n return (SGTE(T, [a,b,c,d,e]))\n\n\n\"\"\"SGTE fitting with six parameters\"\"\"\ndef SGTE6(T, a, b, c, d, e, f):\n return (SGTE(T, [a,b,c,d,e,f]))\n\n\n\"\"\"SGTE fitting for heat capacity one parameter\"\"\"\ndef SGTEC1(T,a):\n return C_SGTE(T,[a])\n\n\n\"\"\"SGTE fitting for heat capacity two parameters\"\"\"\ndef SGTEC2(T,a,b):\n return C_SGTE(T,[a,b])\n\n\n\"\"\"SGTE fitting for heat capacity three parameters\"\"\"\ndef SGTEC3(T,a,b,c):\n return C_SGTE(T,[a,b,c])\n\n\n\"\"\"SGTE fitting for heat capacity four parameters\"\"\"\ndef SGTEC4(T,a,b,c,d):\n return C_SGTE(T,[a,b,c,d])\n\n\n\"\"\"SGTE fitting for heat capacity\"\"\"\ndef C_SGTE(T,a):\n fval = 0\n if len(a) > 0:\n fval += a[0]\n if len(a) > 1:\n fval += a[1]*T\n if len(a) > 2:\n fval += a[2]*T*T\n if len(a) > 3:\n fval += a[3]/T/T\n return(fval)\n\n\ndef SGTES(T,f):\n s = 0.0\n if len(f)>0:\n s += f[0]\n if len(f)>1:\n s += f[1]*np.log(T)\n if len(f)>2:\n s += f[2]*T\n if len(f)>3:\n s += f[3]*T*T\n if len(f)>4:\n s += f[4]/T/T\n return s\n\ndef SGTEH(T,f):\n h = 0.0\n if len(f)>0:\n h += f[0]\n if len(f)>1:\n h += f[1]*T\n if len(f)>2:\n h += f[2]*T*T\n if len(f)>3:\n h += f[3]*T*T*T\n if len(f)>4:\n h += f[4]/T\n return h\n\ndef SGTEC(T,f):\n s = 0.0\n if len(f)>0:\n s += f[0]\n if len(f)>1:\n s += f[1]*T\n if len(f)>2:\n s += f[2]*T*T\n if len(f)>3:\n s += f[3]/T/T\n return s\n\n\ndef CSGTEfit(f, x, y):\n popt,pcov = curve_fit(f, x, y)\n z = C_SGTE(x,popt)\n ferror=math.sqrt(((z-y)**2).sum()/len(z))\n return(popt,ferror)\n\n\ndef fitStoichiometricCp(x,y, thr=0.001):\n f,ferror = CSGTEfit(SGTEC2, x, y)\n if ferror > thr:\n f,ferror = CSGTEfit(SGTEC3, x, y)\n if ferror > thr:\n f,ferror = CSGTEfit(SGTEC4, x, y)\n return f,ferror\n\n\ndef H_SGTE(T,c):\n h = 0.\n if len(c)>0:\n h += c[0]*T\n if len(c)>1:\n h += c[1]/2*T*T\n if len(c)>2:\n h += c[2]/3*T*T*T\n if len(c)>3:\n h += 
-c[3]/T\n return h\n\ndef fitStoichiometricH(x,y,c):\n zz = H_SGTE(x,c)\n h = (y - zz).sum()/len(y)\n ferror=math.sqrt(((h+zz-y)**2).sum()/len(zz))\n h = [h]\n if len(c)>0:\n h.append(c[0])\n if len(c)>1:\n h.append(c[1]/2)\n if len(c)>2:\n h.append(c[2]/3)\n if len(c)>3:\n h.append(-c[3])\n return h,ferror\n\n\ndef S_SGTE(T,c):\n s = 0.\n if len(c)>0:\n s += c[0]+c[0]*np.log(T)\n if len(c)>1:\n s += c[1]*T\n if len(c)>2:\n s += c[2]/2*T*T\n if len(c)>3:\n s += -c[3]/2/T/T\n return s\n\ndef fitStoichiometricS(x,y,c):\n zz = S_SGTE(x,c)\n b = (y - zz).sum()/len(y)\n ferror=math.sqrt(((b+zz-y)**2).sum()/len(zz))\n s = []\n if len(c)>0:\n s.append(b+c[0])\n s.append(c[0])\n if len(c)>1:\n s.append(c[1])\n if len(c)>2:\n s.append(c[2]/2)\n if len(c)>3:\n s.append(-c[3]/2)\n return s,ferror\n\ndef fitStoichiometric(x,y, thr=1.0):\n f,ferror = SGTEfit(SGTE2, x, y)\n if ferror > thr:\n f,ferror = SGTEfit(SGTE3, x, y)\n if ferror > thr:\n f,ferror = SGTEfit(SGTE4, x, y)\n if ferror > thr:\n f,ferror = SGTEfit(SGTE5, x, y)\n if ferror > thr:\n f,ferror = SGTEfit(SGTE6, x, y)\n return f,ferror\n\ndef SGTEfit(f, x, y):\n popt,pcov = curve_fit(f, x, y)\n z = SGTE(x,popt)\n ferror=math.sqrt(((z-y)**2).sum()/len(z))\n return(popt,ferror)\n\ndef outexpressionG(f0):\n out = \"\"\n for i,f in enumerate(f0):\n if i==0:\n out += ' {:+g}'.format(f)\n elif i==1:\n out += ' {:+g}*T'.format(f)\n elif i==2:\n out += ' {:+g}*T*log(T)'.format(f)\n elif i==3:\n out += ' {:+g}*T*T'.format(f)\n elif i==4:\n out += ' {:+g}*T*T*T'.format(f)\n elif i==5:\n out += ' {:+g}/T'.format(f)\n return out\n\ndef outexpressionS(f0):\n out = \"\"\n for i,f in enumerate(f0):\n if i==0:\n out += ' {:+g}'.format(f)\n elif i==1:\n out += ' {:+g}*log(T)'.format(f)\n elif i==2:\n out += ' {:+g}*T'.format(f)\n elif i==3:\n out += ' {:+g}*T*T'.format(f)\n elif i==4:\n out += ' {:+g}/T/T'.format(f)\n return out\n\ndef outexpressionH(f0):\n out = \"\"\n for i,f in enumerate(f0):\n if i==0:\n out += ' {:+g}'.format(f)\n elif i==1:\n out += ' {:+g}*T'.format(f)\n elif i==2:\n out += ' {:+g}*T*T'.format(f)\n elif i==3:\n out += ' {:+g}*T*T*T'.format(f)\n elif i==4:\n out += ' {:+g}/T'.format(f)\n return out\n\ndef outexpressionCp(f0):\n out = \"\"\n for i,f in enumerate(f0):\n if i==0:\n out += ' {:+g}'.format(f)\n elif i==1:\n out += ' {:+g}*T'.format(f)\n elif i==2:\n out += ' {:+g}*T*T'.format(f)\n elif i==3:\n out += ' {:+g}/T/T'.format(f)\n return out\n\n\ndef proStoichiometricG():\n #try:\n x = zthermo.get(\"temperature (K)\")\n y = zthermo.get(\"Gibbs energy (eV/atom)\")\n H298 = threcord.get(\"H298.15 (J/mol-atom)\")\n x = np.array(list(map(float, x)))\n y = np.array(list(map(float, y)))*eVtoJ - H298\n i0 = 0\n for i,T in enumerate(x):\n if T < T0:\n i0 = i\n ifit0 = i0-15\n ifit0 = max(ifit0,0)\n\n f,ferror = fitStoichiometric(x[ifit0:],y[ifit0:])\n gout = 'G(T) =' + outexpressionG(f)\n #print(gout)\n s = []\n h = []\n c = []\n if len(f) >0:\n h.append(f[0])\n if len(f) >1:\n s.append(-f[1])\n if len(f) >2:\n s = []\n s.append(-f[1]-f[2])\n s.append(-f[2])\n h.append(-f[2])\n c.append(-f[2])\n if len(f) >3:\n s.append(-2.0*f[3])\n h.append(-f[3])\n c.append(-2.0*f[3])\n if len(f) >4:\n s.append(-3.0*f[4])\n h.append(-2.0*f[4])\n c.append(-6.0*f[4])\n if len(f) >5:\n s.append(f[5])\n h.append(2.0*f[5])\n c.append(-2.0*f[5])\n sout = 'S(T) =' + outexpressionS(s)\n hout = 'H(T) =' + outexpressionH(h)\n cout = 'Cp(T) =' + outexpressionCp(c)\n \"\"\"\n print (sout)\n print (hout)\n print (cout)\n \"\"\"\n uncertanty = {}\n 
SGTErec.update({\"G-H298.15 (J/mol-atom)\":gout})\n SGTErec.update({\"H-H298.15 (J/mol-atom)\":hout})\n SGTErec.update({\"S (J/mol-atom/K)\":sout})\n SGTErec.update({\"Cp (J/mol-atom/K)\":cout})\n SGTErec.update({\"fitting uncertainty\":round(ferror,1)})\n return(f,h,s,c,x[i0:])\n\n\ndef proStoichiometricCp():\n #try:\n uncertanty = {}\n x = zthermo.get(\"temperature (K)\")\n y = zthermo.get(\"Cp (J/mol-atom/K)\")\n H298 = threcord.get(\"H298.15 (J/mol-atom)\")\n x = np.array(list(map(float, x)))\n y = np.array(list(map(float, y)))\n i0 = 0\n for i,T in enumerate(x):\n if T < T0:\n i0 = i\n ifit0 = i0\n ifit0 = max(ifit0,0)\n #print(\"xxxxxxxx=\",x[ifit0:],T0)\n c,cerror = fitStoichiometricCp(x[ifit0:],y[ifit0:])\n\n y = zthermo.get(\"enthalpy (J/mol-atom)\")\n y = np.array(list(map(float, y))) - H298\n h,herror = fitStoichiometricH(x[ifit0:],y[ifit0:],c)\n\n y = zthermo.get(\"entropy (J/mol-atom/K)\")\n y = np.array(list(map(float, y)))\n s,serror = fitStoichiometricS(x[ifit0:],y[ifit0:],c)\n\n f = [h[0]]\n if len(s) >0:\n f.append(-s[0]+c[0])\n f.append(-c[0])\n if len(c) >1:\n f.append(-c[1]/2)\n if len(c) >2:\n f.append(-c[2]/6)\n if len(c) >3:\n f.append(-c[3]/2)\n gout = 'G(T) =' + outexpressionG(f)\n #print (gout)\n\n SGTErec.update({\"T\":[x[ifit0], x[-1]]})\n SGTErec.update({\"Cp (J/mol-atom/K)\":[outexpressionCp(c),{\"error\":round(cerror,2)}]})\n SGTErec.update({\"H-H298.15 (J/mol-atom)\":[outexpressionH(h),{\"error\":round(herror,2)}]})\n SGTErec.update({\"S (J/mol-atom/K)\":[outexpressionS(s),{\"error\":round(serror,2)}]})\n SGTErec.update({\"G-H298.15 (J/mol-atom)\":[outexpressionG(f),{\"error\":round(herror,2)}]})\n return(f,h,s,c,x[i0:])\n\n\nclass thermoplot:\n def __init__(self, folder,thermodynamicproperty,x,y,reflin=None, yzero=None,fitted=None,xT=None,xlabel=\"T (K)\", lp=False,\n ylabel=None, ytext=None, xlim=None, elonly=None, expt=None, CoT=False, label=None, single=False, plottitle=None):\n\n plt.rc('font', size=24)\n self.fig,self.ax=plt.subplots()\n self.fig.set_size_inches(12,9)\n self.ax.yaxis.set_ticks_position('both')\n self.cwd = os.getcwd()\n os.chdir( folder )\n\n self.folder = folder\n self.thermodynamicproperty = thermodynamicproperty\n self.x = np.array(x)\n self.y = np.array(y)\n self.reflin = reflin\n self.yzero = yzero\n self.fitted = fitted\n self.xT = xT\n self.ytext = ytext\n self.elonly = elonly\n self.expt = expt\n self.CoT = CoT\n self.single = single\n if plottitle is not None:\n self.plottitle = plottitle.split('_')[0]\n else:\n self.plottitle = \"DFTTK\"\n\n self._xlabel = xlabel\n self.lp = lp\n self._ylabel = thermodynamicproperty\n if ylabel!=None: self._ylabel = ylabel\n self._label = self.thermodynamicproperty\n if label!=None: self._label = label\n self.fname = self.thermodynamicproperty.split('(')[0].strip().replace(' ','_')+\".png\"\n\n self.ax.set_xlim([0,np.array(list(map(float,x))).max()])\n self.plot_xlim = np.array(list(map(float,x))).max()\n self.xlim = xlim\n if xlim!=None:\n try: self.ax.set_xlim([0,xlim])\n except: self.ax.set_xlim(xlim)\n\n if self.thermodynamicproperty==\"0 K total energies (eV/atom)\": self.plot_EV()\n elif self.thermodynamicproperty==\"Helmholtz energy (eV/atom)\": self.plot_Helmholtz_energy_v0()\n elif self.thermodynamicproperty==\"Helmholtz energy analysis (eV/atom)\": self.plot_Helmholtz_energy_v1()\n elif self.thermodynamicproperty.lower()==\"Effective charge carrier concentration ($e/cm^{3}$)\".lower():\n self.plot_Effective_charge_carrier_concentration()\n elif 
self.thermodynamicproperty.lower()==\"Electron DOS (States/Atom/eV)\".lower(): self.plot_Electron_DOS()\n elif self.thermodynamicproperty.lower()==\"Bulk modulus (GPa)\".lower(): self.plot_Bulk_modulus()\n elif self.thermodynamicproperty.lower()==\"LTC analysis (1/K)\".lower(): self.plot_LTC_analysis()\n elif self.thermodynamicproperty==\"Gamma point phonons\": self.plot_Gamma_point_phonons()\n elif self.thermodynamicproperty.lower()!=\"heat capacities (J/mol-atom/K)\".lower(): self.plot_default()\n else: self.plot_Heat_Capacity()\n\n if self.plottitle!=None: plt.title(self.plottitle)\n plt.xlabel(self._xlabel)\n plt.ylabel(self._ylabel)\n self.ax.legend(loc=0, prop={'size': 24})\n #plt.legend(loc=0, prop={'size': 24})\n\n self.fig.savefig(self.fname,bbox_inches='tight')\n plt.close(self.fig)\n\n os.chdir( self.cwd )\n head,tail = os.path.split(folder)\n figures.update({self.thermodynamicproperty:os.path.join(tail,self.fname)})\n\n\n def plot_EV(self):\n self._xlabel = \"atomic volume ($\\AA^3$)\"\n self._ylabel = \"0 K total energies (eV/atom)\"\n self.ax.set_xlim([min(self.x)*0.95,max(self.x)*1.05])\n if self.lp:\n self.ax.plot(self.x, self.y, marker='o', markersize=12,\n color='r', linestyle='-', label=self._label)\n else:\n self.ax.plot(self.x, self.y, fillstyle='none', marker='o', markersize=12,\n color='k', linestyle='None', label=self._label)\n xnew = np.linspace(min(self.x)*0.95,max(self.x)*1.05, 300)\n from dfttk.pythelec import BMvol4, BMvol, alt_curve_fit\n f2, pcov = alt_curve_fit(BMvol4, self.x, self.y)\n ynew = BMvol(xnew, f2)\n self.ax.plot(xnew,ynew,'-',linewidth=1,color='b', label=\"BMvol4\")\n\n\n def plot_Helmholtz_energy_v0(self):\n self.fig.set_size_inches(12,11)\n self._xlabel = \"Atomic volume ($\\AA^3$)\"\n plt.ylabel(\"Helmholtz energy (eV/atom)\")\n self.ax.plot(self.x, self.y, marker='o', markersize=4, color='k', linestyle=':')\n self.ax.set_xlim([min(self.x)*0.95,max(self.x)*1.05])\n fd = 0.05*(max(self.y)-min(self.y))\n self.ax.set_ylim([min(self.y)-fd,max(self.y)+2*fd])\n v,xx,t,o,f = self.reflin\n for i,T in enumerate(t):\n #self.ax.plot(v, o[i], fillstyle='none', marker='+', mew=2, markersize=12, color='b', linestyle='None')\n self.ax.plot(v, o[i], fillstyle='none', marker='+', markersize=12, color='b', linestyle='None')\n self.ax.plot(xx, f[i], color='b', linestyle='-')\n for i, l1 in enumerate(plt.gca().get_lines()):\n if i==0:\n x0 = 0.5*(min(self.x)+max(self.x))\n labelLine(l1,x0,label=r'$V_{eq}$',align = True)\n else:\n ii = (i-2)//4\n if (ii*4+2!=i): continue\n x0 = 0.90*0.95*min(self.x)+0.10*1.05*max(self.x)\n labelLine(l1,x0,label=r'${} K$'.format(int(t[ii*2])),align = True)\n\n\n def plot_Helmholtz_energy_v1(self):\n self.fig.set_size_inches(12,11)\n self._xlabel = \"Atomic volume ($\\AA^3$)\"\n plt.ylabel(\"Helmholtz energy (eV/atom)\")\n self.ax.plot(self.x, self.y, marker='o', markersize=4, color='k', linestyle=':')\n self.ax.set_xlim([min(self.x)*0.95,max(self.x)*1.05])\n fd = 0.05*(max(self.y)-min(self.y))\n self.ax.set_ylim([min(self.y)-fd,max(self.y)+2*fd])\n v,xx,t,o,f = self.reflin\n for i,T in enumerate(t):\n self.ax.plot(v, o[i], fillstyle='none', marker='+', mew=2, markersize=12, color='r', linestyle='None')\n #spl = make_interp_spline(v, o[i], k=3) # type: BSpline\n spl = interp1d(v, o[i])\n power_smooth = spl(xx)\n self.ax.plot(xx, power_smooth, color='r', linestyle=':')\n self.ax.plot(xx, f[i], color='b', linestyle='-')\n for i, l1 in enumerate(plt.gca().get_lines()):\n if i==0:\n x0 = 0.5*(min(self.x)+max(self.x))\n 
labelLine(l1,x0,label=r'$V_{eq}$',align = True)\n else:\n ii = (i-3)//6\n if (ii*6+3!=i): continue\n x0 = 0.90*0.95*min(self.x)+0.10*1.05*max(self.x)\n labelLine(l1,x0,label=r'${} K$'.format(int(t[ii*2])),align = True)\n\n\n def plot_Effective_charge_carrier_concentration(self):\n self.ax.set_yscale('symlog')\n yy = self.y[self.x>0]\n xx = self.x[self.x>0]\n self.ax.plot(xx,yy,'-',linewidth=2,color='b', label=self._label)\n if self.xlim!=None:\n self.fname = self.thermodynamicproperty.split('(')[0].strip().replace(' ','_')+'_'+str(int(self.xlim))+\".png\"\n\n\n def plot_Electron_DOS(self):\n self.ax.plot(self.x,self.y,'-',linewidth=2,color='b', label=self._label)\n self.fname = self.thermodynamicproperty.split('(')[0].strip().replace(' ','_')+'_'+str(str(-self.xlim[0]))+\"eV.png\"\n\n\n def plot_Bulk_modulus(self):\n self.ax.plot(self.x,self.reflin,'--',linewidth=2,color='k', label=self._label+\",$B_s$\")\n self.ax.plot(self.x,self.y,'-',linewidth=2,color='b', label=self._label+\",$B_T$\")\n plot_expt(self.expt, 'bulk modulus', self.ax, xlim=self.xlim)\n\n\n def plot_LTC_analysis(self):\n self.ax.ticklabel_format(axis='y',style='sci',scilimits=(-2,4))\n self.ax.plot(self.x,self.y,'-',linewidth=2,color='b', label=\"dfttk\")\n self.ax.plot(self.x,self.reflin,'--',linewidth=2,color='k', label=\"splev\")\n\n\n def plot_Gamma_point_phonons(self):\n if self.reflin is not None:\n self.ax.plot(self.x,self.reflin,':',linewidth=1,color='k')\n self.ax.plot(self.x,self.y,'-',linewidth=2,color='b', label=self._label)\n self.ax.set_xlim([min(self.x)*1.05,max(self.x)*1.05])\n xx0 = np.array(self.ytext[0])\n yy0 = np.array(self.ytext[1])\n ss0 = self.ytext[2]\n for i in range (len(ss0)):\n self.ax.text(xx0[i], yy0[i], ss0[i], color='r', rotation=90,\n horizontalalignment='left', verticalalignment='bottom')\n\n\n def plot_default(self):\n if self.thermodynamicproperty.split('(')[0].strip()==\"Debye temperature\":\n self.y=self.y[self.x>0]\n self.x=self.x[self.x>0]\n if self.yzero != None:\n y0 = np.nanmin(np.array(list(map(float,self.y))))\n y1 = np.nanmax(np.array(list(map(float,self.y))))\n self.ax.set_ylim([min(0.0, y0),y1*1.05])\n self.ax.ticklabel_format(axis='y',style='sci',scilimits=(-2,4))\n if self.reflin is not None:\n self.ax.plot(self.x,self.reflin,':',linewidth=1,color='k')\n self.ax.plot(self.x,self.y,'-',linewidth=2,color='b', label=self._label)\n if self.fitted!=None:\n self.ax.plot(self.xT[::5],self.fitted[::5],'--',fillstyle='none', marker='o', markersize=12,\n linewidth=2,color='k', label=\"fitted\")\n if self.xlim!=None:\n self.ax.set_xlim([0.0,self.xlim])\n self.ax.set_ylim([0.98*min(self.y),1.02*max(self.y)])\n self.fname = self.thermodynamicproperty.split('(')[0].strip().replace(' ','_')+'_'+str(self.xlim)+\".png\"\n if self.thermodynamicproperty==\"LTC (1/K)\":\n plot_expt(self.expt, 'linear thermal expansion', self.ax, xlim=self.plot_xlim)\n elif self.thermodynamicproperty==\"Entropy (J/mol-atom/K)\":\n plot_expt(self.expt, 'entropy', self.ax, xlim=self.plot_xlim)\n elif self.thermodynamicproperty==\"Enthalpy-H298 (J/mol-atom)\":\n plot_expt(self.expt, 'enthalpy', self.ax, xlim=self.plot_xlim)\n elif self.thermodynamicproperty==\"Lorenz number ($WΩK^{−2}$)\":\n self.ax.set_ylim([min(2.e-8, np.array(self.y).min()),max(3.e-8,np.array(self.y).max())])\n\n\n def plot_Heat_Capacity(self):\n if self.fitted!=None:\n y = np.array(self.y)\n y0,y1,y2 = y[:,0], y[:,1], y[:,2]\n self.ax.set_ylim([0.0,np.array(list(map(float,y0))).max()*1.05])\n self.ax.plot(self.x,y0,'-',linewidth=2,color='b', 
label=self._label+\",$C_p$\")\n self.ax.plot(self.xT[::5],self.fitted[::5],'--',fillstyle='none', marker='o', markersize=12,\n linewidth=2,color='k', label=\"fitted\")\n self.ax.plot(self.x,y1,'--',linewidth=2,color='black', label=\"$C_v$\")\n self.ax.plot(self.x,y2,':',linewidth=2,color='g', label=\"$C_{v,ion}$\")\n y2 = np.array(list(map(float,y1))) - np.array(list(map(float,y2)))\n self.ax.plot(self.x,y2,'-.',linewidth=2,color='r', label=\"$C_{el}$\")\n self.fname = self.thermodynamicproperty.split('(')[0].strip().replace(' ','_')+\"_fitted.png\"\n else:\n x = np.array(self.x)\n y = np.array(self.y)\n y0,y1 = y[:,0], y[:,1]\n y2 = y0 - y1\n if self.CoT:\n y0 = y0[x>0]\n y = y2[x>0]\n x = x[x>0]\n if self.elonly!=None:\n self._xlabel = \"$T (K)$\"\n self._ylabel = \"$C_el/T$ (J/mol-atom/K/K)\"\n y = y[x<=self.elonly*1.2]\n x = x[x<=self.elonly*1.2]\n self.ax.set_xlim([0.0,self.elonly])\n self.ax.plot(x,y/x,'-.',linewidth=2,color='r', label=self._label+\",$C_{el}/T$\")\n ymax = plot_expt(self.expt, 'electronic heat capacity', self.ax, CoT=self.CoT, xlim=self.elonly)\n self.ax.set_ylim([0.0,max(ymax,(y/x).max())*1.1])\n self.fname = self.thermodynamicproperty.split('(')[0].strip().replace(' ','_')+\\\n '_'+str(self.elonly)+'_el_oT'\".png\"\n elif self.xlim!=None:\n self._xlabel = \"$T^2 (K^2)$\"\n self._ylabel = \"$C/T$ (J/mol-atom/K/K)\"\n y = y0/x\n x = x*x\n y = y[x 1.e-2:\n self.ax.plot(x,y2,'-.',linewidth=2,color='r', label=self._label+\",$C_{el}$\")\n plot_expt(self.expt, 'electronic heat capacity', self.ax, xlim=self.xlim)\n \"\"\"\n self.fname = self.thermodynamicproperty.split('(')[0].strip().replace(' ','_')+'_'+str(self.xlim)+\".png\"\n elif self.elonly!=None:\n self.ax.set_xlim([0.0,self.elonly])\n y2 = y2[x<=self.xlim*1.1]\n x = x[x<=self.xlim*1.1]\n self.ax.plot(x,y2,'-.',linewidth=2,color='r', label=self._label+\",$C_{el}$\")\n plot_expt(self.expt, 'electronic heat capacity', self.ax, xlim=self.elonly)\n self.fname = self.thermodynamicproperty.split('(')[0].strip().replace(' ','_')+'_'+str(self.elonly)+'_el.png'\n else:\n if self.single:\n self.ax.plot(x,y0,'-',linewidth=2,color='b', label=self._label+\",$C_{v,lat+el}$\")\n self.ax.plot(x,y1,'--',linewidth=2,color='black', label=\"$C_{v,lat}$\")\n else:\n self.ax.plot(x,y0,'-',linewidth=2,color='b', label=self._label+\",$C_{p,lat+el}$\")\n self.ax.plot(x,y1,'--',linewidth=2,color='black', label=\"$C_{p,lat}$\")\n plot_expt(self.expt, 'heat capacity', self.ax, xlim=self.plot_xlim)\n \"\"\"\n if y2.max() > 1.e-2:\n self.ax.plot(x,y2,'-.',linewidth=2,color='r', label=self._label+\",$C_{el}$\")\n plot_expt(self.expt, 'electronic heat capacity', self.ax, xlim=self.plot_xlim)\n \"\"\"\n self.fname = self.thermodynamicproperty.split('(')[0].strip().replace(' ','_')+\".png\"\n\n plt.gca().set_ylim(bottom=0)\n\n\ndef plot_expt (expt, prp, ax, CoT=False, xlim=None):\n #global mindex\n mindex = 0\n ymax = 0.0\n if expt!=None:\n for rec in expt:\n if prp!=rec['property']: continue\n try:\n xval = np.array(rec['T'])\n yval = np.array(rec['val'])\n except:\n try:\n lines = np.array(rec['data'])\n xval = lines[0::2]\n yval = lines[1::2]\n except:\n continue\n Author = rec['Author']\n Unit = rec['Unit']\n natom = rec['natom']\n yval /= natom\n\n if Unit=='mJ/K' : yval /= 1000.\n elif Unit=='cal/K' : yval *= 4.184\n\n if CoT:\n if prp!='electronic heat capacity':\n xx = xval[xval>0]*xval[xval>0]\n yy = yval[xval>0]/xval[xval>0]\n yy = yy[xx0]\n yy = yval[xval>0]/xval[xval>0]\n yy = yy[xx0:\n if Author.startswith('Andersson(CALPHAD)'):\n 
ax.plot(xx,yy, marker='o', color='b',markersize=10, mew=2,\n linestyle='None', fillstyle='none', label=Author.split(',')[0])\n elif Author.startswith('Andersson(CALPHAD)') or \\\n Author.startswith('Chase(JANAF)'):\n ax.plot(xx,yy, marker='s', color='k',markersize=8, mew=2,\n linestyle='None', fillstyle='none', label=Author.split(',')[0])\n \"\"\"\n try:\n ax.plot(xx,yy, marker=markers[mindex%len(markers)], markersize=8, mew=2,\n linestyle='None', fillstyle='none', label=Author.split(',')[0])\n except:\n ax.plot(xx,yy, marker=markers[mindex%len(markers)], markersize=8,\n linestyle='None', label=Author.split(',')[0])\n \"\"\"\n else:\n ax.plot(xx,yy, marker=markers[mindex%len(markers)], markersize=8,\n linestyle='None', label=Author.split(',')[0])\n ymax = max(yy.max(), ymax)\n mindex += 1\n return ymax\n\ndef myjsonout(data,fp,indent=\"\",comma=\"\"):\n\t#print (data)\n\tmj = ''\n\tif (isinstance(data,dict)):\n\t\tfp.write('{}\\n'.format('{'))\n\t\t\t#sys.stdout.write('\\n{}{}\\n'.format(indent, '{'))\n\t\tnkey = 0\n\t\tfor key in sorted(set(data.keys())):\n\t\t\tnkey += 1\n\t\t\tif nkey!=len(data):\n\t\t\t\tcomma1 = \",\"\n\t\t\telse:\n\t\t\t\tcomma1 = \"\"\n\t\t\tval = data[key]\n\t\t\tjval = json.dumps(val)\n\t\t\tjkey = json.dumps(key)\n\t\t\t#print (val)\n\t\t\tif (isinstance(val,dict)):\n\t\t\t\tfp.write('{}{}: '.format(indent+\" \",jkey))\n\t\t\t\tmyjsonout(val,fp,indent+\" \",comma1)\n\t\t\telif (isinstance(val,tuple)):\n\t\t\t\t#print (val)\n\t\t\t\tout = list(val)\n\t\t\t\t#print(out)\n\t\t\t\tfp.write('{}{}: {}{}\\n'.format(indent + \" \", jkey, out, comma1))\n\t\t\telif (isinstance(val,str)):\n\t\t\t\tif (indent == \"\"):\n\t\t\t\t\tfp.write('{}{}: {}{}\\n'.format(indent + \" \", jkey, jval, comma1))\n\t\t\t\telse:\n\t\t\t\t\tfp.write('{}{}: {}{}\\n'.format(indent + \" \", jkey, jval, comma1))\n\t\t\telse:\n\t\t\t\tif (indent==\"\"):\n\t\t\t\t\tfp.write('{}{}: {}{}\\n'.format(indent + \" \", jkey, jval, comma1))\n\t\t\t\telse:\n\t\t\t\t\tfp.write('{}{}: {}{}\\n'.format(indent + \" \", jkey, jval, comma1))\n\n\t\t\t\t#print(val)\n\t\t\t\t\"\"\"\n\t\t\t\tif (nkey!=len(data)):\n\t\t\t\t\tsys.stdout.write('{}{}: {},\\n'.format(indent+\" \", key, val))\n\t\t\t\telse:\n\t\t\t\t\tsys.stdout.write('{}{}: {}\\n'.format(indent+\" \", key, val))\n\t\t\t\t\"\"\"\n\t\tif comma==',':\n\t\t\tfp.write('{}{}{}\\n\\n'.format(indent,'}', comma))\n\t\telse:\n\t\t\tfp.write('{}{}{}\\n'.format(indent, '}', comma))\n\n\ndef Myjsonout(data,out):\n if (isinstance(data,dict)):\n myjsonout(data, out, indent=\"\", comma=\"\")\n elif (isinstance(data,list)):\n out.write(\"[\\n\")\n for j,rec in enumerate(data):\n if j!=len(data)-1: myjsonout(rec, out, indent=\"\", comma=\",\")\n else: myjsonout(rec, out, indent=\"\", comma=\"\")\n out.write(\"]\\n\")\n\n\ndef similar(pp,pall):\n known = [\"L12\", \"delta\", \"D022\", \"Gamma\"]\n ii = -1\n for o in known:\n if pp.find(o)>-1:\n pname = o\n ii = 0\n break\n if ii == -1:\n return \"unknown\"\n\n s = 0.0\n for i,p in enumerate(pall):\n snew = SequenceMatcher ( None, pname, p ).ratio()\n if snew > s:\n ii = i\n s = snew\n print (pp, \"= \", pall[ii], \" by \", s)\n if s > 0.5:\n return pall[ii]\n else:\n return \"unknown\"\n\n\ndef isfloat(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n\ndef formula2elist(formula):\n formula = formula.replace(\" \",'').replace(\"-\",'').replace(\",\",'')\n newc = \"\"\n \"\"\"Follow the convention, elemental symbol must start from capital letter\"\"\"\n for c in formula:\n if c in 
\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\n newc = newc + '|'\n newc = newc + c\n els = newc.split('|')\n els = [k for k in els if k != '']\n\n \"\"\"now get the composition for each element\"\"\"\n ele = []\n com = []\n for el in els:\n newel = \"\"\n newcc = \"\"\n for c in el:\n if c.isalpha():\n newel = newel + c\n else:\n newcc = newcc + c\n\n if (newel not in periodictable):\n raise ValueError('\"'+newel+'\" is not an element! your formula is wrong!')\n ele.append(newel)\n\n if (len(newcc)!=0):\n if (isfloat(newcc)):\n com.append(float(newcc))\n else:\n raise ValueError('\"'+newcc+'\" is not an element! your formula is wrong!')\n else:\n com.append(1.0)\n com = np.array(list(map(float,com)))\n com = com/sum(com)\n #sorted the sequence and merge the duplicate\n elist = sorted(set(ele))\n clist = np.zeros(len(elist), dtype=float)\n for j,el in enumerate(ele):\n ix = elist.index(el)\n clist[ix] += com[j]\n\n return elist\n\ndef prety_formulaO(longphasename):\n puc = longphasename.split('|')[-1]\n _els,_nat=formula2composition(puc)\n\ndef prety_formula(_els,_nat):\n els = sorted(set(_els))\n nat = np.zeros(len(els),dtype=int)\n for i,el in enumerate(_els):\n ix = els.index(el)\n nat[ix] += _nat[i]\n\n Nd = min(nat)\n for i in range(Nd,0,-1):\n out = True\n for j in range(len(nat)):\n if ((nat[j]//i)*i!=nat[j]):\n out = False\n break\n if out:\n break\n form = \"\"\n for j,el in enumerate(els):\n ix = nat[j]//i\n form = form+el\n if ix!=1:\n form = form+str(ix)\n return form\n\n\ndef Genergy(thermofile,dir0):\n tmelt = 9999.\n ele = threcord.get(\"Elements\")\n if ele!=None:\n if len(ele)==1:\n tmelt = ELEMENTS[ele[0]].tmelt\n\n folder = os.path.join(dir0,\"figures\")\n if not os.path.exists(folder):\n os.mkdir(folder)\n\n tmp = [s for s in thermofile.split('/') if s!=\"\"]\n tmp[-1] = 'vdos_Cij'\n vdos_e_Cij = '/'.join(tmp)\n #print(\"Cij\",vdos_e_Cij)\n if os.path.exists(vdos_e_Cij) :\n vdos_e_Cij = np.loadtxt(vdos_e_Cij, comments=\"#\", dtype=np.float)\n ij = 0\n for i in range(1,7):\n for j in range(i,7):\n ij = ij + 2\n if abs(vdos_e_Cij[:,ij]).max() > 1.0:\n thermoplot(folder,\"C\"+\"_\"+str(i)+\"_\"+str(j),list(vdos_e_Cij[:,0]),list(vdos_e_Cij[:,ij]),yzero=0.0)\n thermoplot(folder,\"B_v\",list(vdos_e_Cij[:,0]),list(vdos_e_Cij[:,43]),yzero=0.0)\n thermoplot(folder,\"G_v\",list(vdos_e_Cij[:,0]),list(vdos_e_Cij[:,44]),yzero=0.0)\n thermoplot(folder,\"E_v\",list(vdos_e_Cij[:,0]),list(vdos_e_Cij[:,45]),yzero=0.0)\n sys.exit()\n\n\n thermo = np.loadtxt(thermofile, comments=\"#\", dtype=np.float)\n thermo[np.isnan(thermo)] = 0.0\n for i,cp in enumerate(thermo[:,6]):\n if cp > CpMax: break\n elif thermo[i,0] > tmelt: break\n\n thermo = thermo[0:i,:]\n\n Vstack=interpolate.splrep(thermo[:,0], thermo[:,1])\n V298 = float(interpolate.splev(T0, Vstack))\n Hstack=interpolate.splrep(thermo[:,0], thermo[:,4])\n H298 = float(interpolate.splev(T0, Hstack))\n threcord.update({\"H298.15 (J/mol-atom)\":round(H298,4)})\n Sstack=interpolate.splrep(thermo[:,0], thermo[:,3])\n S298 = float(interpolate.splev(T0, Sstack))\n threcord.update({\"S298.15 (J/mol-atom/K)\":round(S298,6)})\n\n zthermo.update({\"temperature (K)\":list(thermo[:,0])})\n zthermo.update({\"atomic volume ($\\AA^3$)\":list(thermo[:,1])})\n thermoplot(folder,\"atomic volume ($\\AA^3$)\",list(thermo[:,0]),list(thermo[:,1]))\n zthermo.update({\"Gibbs energy (eV/atom)\":list(thermo[:,2])})\n zthermo.update({\"enthalpy (J/mol-atom)\":list(thermo[:,4])})\n zthermo.update({\"entropy (J/mol-atom/K)\":list(thermo[:,3])})\n zthermo.update({\"Cp 
(J/mol-atom/K)\":list(thermo[:,6])})\n\n if fitCp:\n g,h,s,c,x=proStoichiometricCp()\n else:\n g,h,s,c,x=proStoichiometricG()\n\n threcord.update({\"SGTE fitting\":SGTErec})\n thermoplot(folder,\"Gibbs energy-H298 (J/mol-atom)\",list(thermo[:,0]),list(thermo[:,2]*eVtoJ-H298),fitted=list(SGTE(x,g)), xT=list(x))\n thermoplot(folder,\"enthalpy-H298 (J/mol-atom)\",list(thermo[:,0]),list(thermo[:,4]-H298), fitted=list(SGTEH(x,h)), xT=list(x))\n #thermoplot(folder,\"enthalpy-H298 (J/mol-atom)\",list(thermo[:,0]),list(thermo[:,4]-H298), fitted=list(SGTE(x,g)+x*SGTES(x,s)), xT=list(x))\n thermoplot(folder,\"entropy (J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,3]),yzero=0.0, fitted=list(SGTES(x,s)), xT=list(x))\n\n zthermo.update({\"LTC (1/K)\":list(thermo[:,5])})\n thermoplot(folder,\"LTC (1/K)\",list(thermo[:,0]),list(thermo[:,5]),yzero=0.0)\n zthermo.update({\"Cv (J/mol-atom/K)\":list(thermo[:,14])})\n zthermo.update({\"Cv,ion (J/mol-atom/K)\":list(thermo[:,7])})\n Cele = [round(c,6) for c in thermo[:,14]-thermo[:,7]]\n zthermo.update({\"Cele (J/mol-atom/K)\":Cele})\n ncols = [6,14,7]\n thermoplot(folder,\"heat capacities ((J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]),fitted=list(SGTEC(x,c)), xT=list(x))\n ncols = [6,8]\n thermoplot(folder,\"heat capacities ((J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xT=list(x), expt=expt)\n thermoplot(folder,\"heat capacities ((J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xT=list(x), xlim=300,expt=expt)\n thermoplot(folder,\"heat capacities ((J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xT=list(x), xlim=70,expt=expt)\n thermoplot(folder,\"heat capacities ((J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xT=list(x), xlim=100,expt=expt, CoT=True)\n thermoplot(folder,\"heat capacities ((J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xT=list(x), xlim=1000,expt=expt, CoT=True)\n thermoplot(folder,\"heat capacities ((J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xT=list(x), xlim=10000,expt=expt, CoT=True)\n thermoplot(folder,\"heat capacities ((J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xT=list(x), elonly=300, expt=expt)\n thermoplot(folder,\"heat capacities ((J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xT=list(x), elonly=300, expt=expt, CoT=True)\n thermoplot(folder,\"heat capacities ((J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xT=list(x), elonly=70, expt=expt)\n thermoplot(folder,\"heat capacities ((J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xT=list(x), elonly=70, expt=expt, CoT=True)\n zthermo.update({\"Debye temperature (K)\":list(thermo[:,13])})\n thermoplot(folder,\"Debye temperature (K)\",list(thermo[:,0]),list(thermo[:,13]),yzero=0.0)\n thermoplot(folder,\"Debye temperature (K)\",list(thermo[:,0]),list(thermo[:,13]),yzero=0.0, xlim=70)\n zthermo.update({\"bulk modulus (GPa)\":list(thermo[:,15])})\n thermoplot(folder,\"bulk modulus (GPa)\",list(thermo[:,0]),list(thermo[:,15]),yzero=0.0)\n\n threcord.update({\"zthermodynamic properies\":zthermo})\n threcord.update({\"Atomic volume at 298.15 K ($\\AA^3$)\":round(V298,6)})\n\n with open(vdos_e.replace('thermo/vdos_e','tplate/POSCAR'), 'r') as f:\n vvv = f.readlines()\n natom = sum([int(vv) for vv in vvv[6].split(' ') if vv!=\"\"])\n structure.update({\"number of atoms in POSCAR\":natom})\n\n #natom = threcord.get(\"number of atoms in the primitive unit cell\")\n\n with open(vdos_e.replace('thermo/vdos_e','thermo/data.in'), 'r') as f:\n vvv = f.readlines()\n Vfiles = []\n 
Pfiles = []\n    volumes = []\n    energies = []\n    for vv in vvv[1:]:\n        v = vv.split(' ')\n        Pfiles.append(\"phonon/\"+v[2].split('/')[-1].replace('\\n', '').replace('\"', ''))\n        Vfiles.append(v[2].split('/')[-1].replace('\\n', '').replace('\"', ''))\n        volumes.append(round(float(v[0])/natom,6))\n        energies.append(round(float(v[1])/natom,6))\n    structure.update({\"Static vasp settings\":Vfiles})\n    structure.update({\"phonon vasp settings and force constants\":Pfiles})\n    threcord.update({\"volumes\":volumes})\n    threcord.update({\"energies\":energies})\n\n    with open(dir0+'/E-V.dat','w') as f:\n        for i,v in enumerate(volumes):\n            f.write('{} {}\\n'.format(v,energies[i]))\n    #cmd = \"YWfit -BMvol <\"+dir0+'/E-V.dat | grep \"f_expr(x) = \"'\n    ffun = \"-Morse\"\n    cmd = \"YWfit \"+ffun+\" <\"+dir0+'/E-V.dat | grep \"f_expr(x) = \"'\n    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n        universal_newlines=True)\n    print(output)\n    cwd = os.getcwd()\n    os.chdir( os.path.join(dir0, \"figures\"))\n    fitF = output.stdout\n    with open('E-V.plt','w') as f:\n        f.write('set terminal postscript landscape enhanced color \"Times_Roman\" 20\\n')\n        f.write('set encoding iso_8859_1\\n')\n        f.write('set pointsize 1.2\\n')\n        f.write('set size 0.95,0.95\\n')\n        f.write('set output \"E-V.eps\"\\n')\n        f.write('{}\\n'.format(fitF))\n        f.write('set key right bottom\\n')\n        f.write('set xlabel \"atomic volume ($\\\\AA^3$)\"\\n')\n        f.write('set ylabel \"static energy (eV/atom)\"\\n')\n        f.write('plot \"../E-V.dat\" title \"calculated\" w p pt 7, \\\\\\n')\n        f.write('     f_expr(x) title \"'+ffun+'\" w l lt -1\\n')\n    #cmd = \"gnuplot E-V.plt; convert -fuzz 100% -transparent white -rotate 90 -density 120x120 E-V.eps E-V.png\"\n    cmd = \"gnuplot E-V.plt; convert -background white -alpha remove -rotate 90 -density 120x120 E-V.eps E-V.png\"\n    plot(cmd)\n\n    figures.update({\"static E-V curve\": \"figures/E-V.png\"})\n    threcord.update({\"figures\":figures})\n    os.chdir(cwd)\n    return Vfiles,Pfiles,g\n\ndef BMfitP(x,z):\n    p = 0.0\n    N = len(z)\n    for n in range(N):\n        p = p + float(N-n-1)*z[n]*x**(N-n+0.5)*(-2.0/3.0)\n    return (-p)\n\ndef BMfit(v,p,g, T):\n    global tPmax\n    v = np.array(list(map(float,v)))\n    p = np.array(list(map(float,p)))\n    g = np.array(list(map(float,g)))\n    f = g - p*v\n    x = v**(-2.0/3.0)\n    z = np.polyfit(x,f,BMvol)\n    gf = np.poly1d(z)\n    if (Debug==1):\n        for i, vv in enumerate(v):\n            print(vv, p[i], BMfitP(x[i],z), f[i], gf(x[i]))\n\n    xx = x[0]\n    xd = (x[len(x)-1] - x[0])*0.02\n    pp = []\n    gg = []\n    vv = []\n    for i in range(999):\n        ppxx = BMfitP(xx,z)\n        if (ppxx > tPmax*1.1):\n            break\n        pp.append(ppxx)\n        vv.append(xx**(-1.5))\n        gg.append(gf(xx)+ppxx*xx**(-1.5))\n        xx = xx + xd\n    try:\n        s = interpolate.splrep(pp, gg)\n        sv = interpolate.splrep(pp, vv)\n    except ValueError:\n        print(\"*******fatal ERROR: BMvol of order: \", BMvol, \" fatal fitting error at T= \", T)\n        print(pp)\n        print(gg)\n        sys.exit()\n    gx =interpolate.splev(txx, s)\n    vx =interpolate.splev(txx, sv)\n    if (Debug==1):\n        for i, pp in enumerate(txx):\n            print(pp*eVtoGPa, gx[i])\n        for i, pp in enumerate(p):\n            print(pp*eVtoGPa, g[i])\n        sys.exit()\n    class result:\n        G = gx\n        V = vx\n    return(result)\n\ndef mkDict(line):\n    rec = {}\n    skiprec = False\n    ss = str(line)[0:].replace(\"'\",\"\").split()\n    ss = [k for k in ss if k != '']\n    for nc,el in enumerate(ss):\n        if isfloat(el):\n            break\n    if len(within)!=0:\n        for el in ss[0:nc]:\n            if el not in within:\n                return True, None, None, None\n\n    _sideal = 0\n    _PN = \"\"\n    i = nc*2+2\n    while i < len(ss):\n        #print (\"ncx=\", 
nc, ss[i])\n if ss[i] == \"PQ\":\n try:\n _PQ = float(ss[i+1])\n #threcord.update({\"amount of imaginary phonon mode\":float('{:.6f}'.format(_PQ))})\n Uncertainty.update({\"amount of imaginary phonon mode\":round(_PQ,6)})\n skiprec = _PQ >= PQ\n i += 1\n if skiprec:\n if not paper: print (ss[nc*2+1],\"skipped, PQ=\", ss[i])\n break\n except:\n skiprec = True\n print (\"********Wrong record\", ss)\n break\n elif ss[i] == \"EQ\":\n try:\n _EQ = float(ss[i+1])\n #threcord.update({\"0 K energy uncertainty (eV/atom)\":float('{:.6f}'.format(_EQ))})\n Uncertainty.update({\"0 K energy uncertainty (eV/atom)\":round(_EQ,6)})\n skiprec = _EQ >= EQ\n i += 1\n if skiprec:\n print (ss[nc*2+1],\"skipped, EQ=\", ss[i])\n break\n except:\n skiprec = True\n print (\"********Wrong record\", ss)\n break\n elif ss[i] == \"PN\":\n _PN = ss[i+1].strip(\"/\")\n threcord.update({\"Phase name\":_PN})\n i += 1\n mpid = \"\"\n try:\n mp = _PN.index(\"mp-\")\n mpid = _PN[mp:].split('_')[0]\n except:\n pass\n structure.update({\"mpid\":mpid})\n elif ss[i] == \"E0\":\n #threcord.update({\"static energy (eV/atom)\":float('{:.6f}'.format(float(ss[i+1].strip(\"/\"))))})\n threcord.update({\"Static energy (eV/atom)\":round(float(ss[i+1]),6)})\n i += 1\n elif ss[i] == \"TT\":\n Tup = float(ss[i+1])\n threcord.update({\"Tmax\":Tup})\n i += 1\n if Tup < Tupmax:\n print (ss[nc*2+1],\"skipped, Tmax=\", ss[i])\n skiprec = True\n break\n elif isfloat(ss[i]):\n _sideal = float(ss[i])\n i += 1\n\n if skiprec:\n return True, None, None, None\n\n if nc!=0:\n threcord.update({\"Uncertainty\":Uncertainty})\n space = ss[nc*2].strip(\"/\").split(\"|\")\n structure.update({\"space group\":int(space[0])})\n structure.update({\"point group symmetry\":space[1]})\n structure.update({\"space group symmetry\":space[2]})\n structure.update({\"primitive unit cell formula\":space[3]})\n elist, clist = formula2composition(space[3])\n pnatom = sum(clist)\n structure.update({\"number of atoms in the primitive unit cell\":int(pnatom)})\n\n tComponents = ss[0:nc]\n tnComponents = np.array(list(map(int,ss[nc:nc+nc])))\n natom = sum(tnComponents)\n tnComponents = tnComponents/natom\n\n Components = sorted(set(tComponents))\n nComponents = np.zeros(len(Components))\n for i0,el in enumerate(tComponents):\n ix = Components.index(el)\n nComponents[ix] = nComponents[ix] + tnComponents[i0]\n\n compositions = []\n for i in range(len(Components)):\n compositions.append(int(0.1+natom*nComponents[i]))\n threcord.update({\"Elements\":Components})\n threcord.update({\"Occupancies\":list(compositions)})\n\n\n i = nc*2+2\n while i < len(ss):\n if ss[i] == \"disordered\":\n if i+1>=len(ss):\n _sideal = -sum(nComponents*np.log(nComponents))\n elif isfloat(ss[i+1]):\n i += 1\n if float(ss[i])<0.0:\n _sideal = -sum(nComponents*np.log(nComponents))\n else:\n _sideal = float(ss[i])\n else:\n _sideal = -sum(nComponents*np.log(nComponents))\n i += 1\n threcord.update({\"Ideal mixing entropy (kB/atom)\":_sideal})\n\n keys = threcord.keys()\n if nc==0: nc=-1\n vdos_e = str(ss[nc+nc+1]).replace('//','/')\n threcord.update({\"Calculation date\":str(datetime.datetime.fromtimestamp(os.path.getmtime(vdos_e)))})\n #threcord.update({\"Calculation date\":str(date.fromtimestamp(os.path.getatime(vdos_e)))})\n if _PN==\"\":\n try:\n _PN = [s for s in vdos_e.split('/') if s!=\"\"][-3]\n except:\n _PN = \"unknown\"\n\n dir0 = _PN\n idx = 1\n while True:\n if not os.path.exists(dir0): break\n recordfile = os.path.join(dir0,\"record.json\")\n newdir = False\n try:\n if os.path.exists(recordfile):\n with open(recordfile) as jsonfile:\n orec = json.load (jsonfile)\n okeys = orec.keys()\n for k in keys:\n v = threcord.get(k)\n for ok in okeys:\n if ok != k: continue\n newdir = orec.get(ok) != v\n if newdir: break\n if k == \"Static energy (eV/atom)\":\n if k in okeys:\n if abs(float(v) - float(orec.get(k))) < THRE0: newdir = False\n except:\n pass\n\n if not newdir: break\n idx += 1\n dir0 = _PN+\"#\"+str(idx)\n\n oldPN = dir0\n newPN = PhaseName.get(_PN)\n if newPN != None: dir0 = newPN\n #print (_PN,dir0,PhaseName)\n\n threcord.update({\"Phase name\":dir0})\n\n \"\"\"\n if paper:\n pname = threcord.get(\"primitive unit cell formula\")\n n0 = 1\n for px in papers:\n if pname == px.split('#')[0]:\n n0 += 1\n if n0!=1:\n pname = pname +'#'+str(n0)\n papers.append(pname)\n dir0 = pname\n global start\n print (\"thermo files extracting cost\", time.time()-start)\n start = time.time()\n \"\"\"\n return False, vdos_e, dir0, oldPN\n\n\ndef VASPResults(dir0,vdos_e,Vfiles, Pfiles, phdft=\"phonon\"):\n head, tail = os.path.split(vdos_e)\n hdir, tail = os.path.split(head)\n natom = structure.get(\"number of atoms in POSCAR\")\n pdir = os.path.join(hdir,phdft)\n phdir = os.path.join(dir0,'phonon')\n if not os.path.exists(phdir):\n os.mkdir(phdir)\n for ff in Vfiles:\n vdir = os.path.join(dir0,ff)\n if not os.path.exists(vdir):\n os.mkdir(vdir)\n pvdir = os.path.join(phdir,ff)\n if not os.path.exists(pvdir):\n os.mkdir(pvdir)\n\n vdos = os.path.join(hdir,ff,\"vdos.out\")\n copyfile(vdos,os.path.join(vdir,'vdos.out'))\n print(vdos)\n\n poscar = os.path.join(hdir,ff,\"CONTCAR\")\n if not os.path.exists(poscar):\n poscar = os.path.join(hdir,ff,\"Static.CON\")\n if not os.path.exists(poscar):\n poscar = os.path.join(hdir,ff,\"POSCAR\")\n copyfile(poscar,os.path.join(vdir,'POSCAR'))\n\n outcar = os.path.join(hdir,ff,\"OUTCAR\")\n if os.path.exists(outcar):\n output = subprocess.run(\"grep POTCAR \"+outcar+\" | sort -u\", shell=True, stdout=subprocess.PIPE,\n universal_newlines=True)\n POTCAR = str(output.stdout)\n else:\n outcar = os.path.join(pdir,ff,\"OUTCAR.gz\")\n if not os.path.exists(outcar):\n outcar = os.path.join(pdir,ff,\"Static.OUT.gz\")\n output = subprocess.run(\"zgrep POTCAR \"+outcar+\" | sort -u\", shell=True, stdout=subprocess.PIPE,\n universal_newlines=True)\n POTCAR = str(output.stdout)\n\n #print (outcar)\n with open(os.path.join(vdir,'POTCAR'), \"w\") as text_file:\n text_file.write(POTCAR)\n\n doscar = os.path.join(hdir,ff,\"DOSCAR\")\n if not os.path.exists(doscar):\n doscar = os.path.join(hdir,ff,\"Static.DOS.gz\")\n head,tail = os.path.split(doscar)\n ddoscar = tail.replace(\"Static.DOS\", \"DOSCAR\")\n copyfile(doscar, os.path.join(vdir,ddoscar))\n\n oszicar = os.path.join(hdir,ff,\"OSZICAR\")\n if not os.path.exists(oszicar):\n oszicar = os.path.join(hdir,ff,\"Static.OSZ\")\n output = subprocess.run(\"grep E0= \"+oszicar+\" | tail -1 | awk '{print $5}'\", shell=True, stdout=subprocess.PIPE,\n universal_newlines=True)\n E0 = float(output.stdout)\n E0 /= natom\n with open(os.path.join(vdir,'energy'), \"w\") as text_file:\n text_file.write(str(E0))\n\n incars = fnmatch.filter(os.listdir(os.path.join(hdir,ff)), 'INCAR_*')\n if len(incars)==0:\n INCAR = os.path.join(hdir,\"tplate\",\"INCAR.Static\")\n else:\n INCAR = os.path.join(hdir,ff,incars[0])\n d0 = os.path.getmtime(INCAR)\n for ii in range(1,len(incars)):\n d1 = os.path.getmtime(os.path.join(hdir,ff,incars[ii]))\n if d1 > d0:\n INCAR = os.path.join(hdir,ff,incars[ii])\n d0 = d1\n copyfile(INCAR, os.path.join(vdir,'INCAR'))\n 
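# the KPOINTS template under tplate/ is shared by every volume folder, so it is copied verbatim next to the INCAR\n 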
copyfile(os.path.join(hdir,\"tplate\",\"KPOINTS\"), os.path.join(vdir,'KPOINTS'))\n\n sposcar = os.path.join(pdir,ff,\"POSCAR\")\n copyfile(sposcar, os.path.join(pvdir,'POSCAR'))\n soutcar = os.path.join(pdir,ff,\"OUTCAR\")\n if os.path.exists(soutcar):\n output = subprocess.run(\"grep POTCAR \"+soutcar+\" | sort -u\", shell=True, stdout=subprocess.PIPE,\n universal_newlines=True)\n POTCAR = str(output.stdout)\n else:\n soutcar = os.path.join(pdir,ff,\"OUTCAR.gz\")\n output = subprocess.run(\"zgrep POTCAR \"+soutcar+\" | sort -u\", shell=True, stdout=subprocess.PIPE,\n universal_newlines=True)\n POTCAR = str(output.stdout)\n\n with open(os.path.join(pvdir,'POTCAR'), \"w\") as text_file:\n text_file.write(POTCAR)\n\n incars = fnmatch.filter(os.listdir(os.path.join(pdir,ff)), 'INCAR_*')\n if len(incars)==0:\n INCAR = os.path.join(pdir,\"tplate\",\"INCAR\")\n else:\n INCAR = os.path.join(pdir,ff,incars[0])\n d0 = os.path.getmtime(INCAR)\n for ii in range(1,len(incars)):\n d1 = os.path.getmtime(os.path.join(pdir,ff,incars[ii]))\n if d1 > d0:\n INCAR = os.path.join(pdir,ff,incars[ii])\n d0 = d1\n copyfile(INCAR, os.path.join(pvdir,'INCAR'))\n copyfile(os.path.join(pdir,\"tplate\",\"KPOINTS\"), os.path.join(pvdir,'KPOINTS'))\n\n sposcar = os.path.join(pdir,ff,\"POSCAR\")\n sxml = os.path.join(pdir,ff,\"vasprun.xml\")\n if not os.path.exists(sxml):\n sxml = os.path.join(pdir,ff,\"vasprun.xml.gz\")\n\n cwd = os.getcwd()\n os.chdir( pvdir )\n cmd = 'vasp_fij -outc '+soutcar+\" -xml \"+sxml+\" -conc \"+sposcar + \" >& /dev/null\"\n os.system(cmd)\n os.chdir( cwd )\n\n global start\n print ( round(time.time()-start,3), \"Secs. costed in VASP files extracting\")\n start = time.time()\n\ndef extractGph():\n phononmode = {}\n with open(\"symmetry.out\", \"r\") as f:\n lines = f.readlines()\n i = 0\n while i < len(lines):\n ss = [s for s in lines[i].strip().split(' ') if s!='']\n if len(ss) >= 5:\n if ss[2] == \"Modes\" and ss[4] in [\"silent_mode\", \"raman_active\", \"ir_active\"]:\n mode = []\n for ii in range(int(ss[0])):\n i += 1\n mm = [s for s in lines[i].strip().replace('(',' ').replace(')',' ').split(' ') if s!='']\n mode.append(float(mm[3]))\n phononmode.update({ss[1]+\" ( \"+ss[4]+\" )\": sorted(mode)})\n i += 1\n threcord.update({\"gamma point phonons (cm-1) \":phononmode})\n\n\nclass BornMix:\n def __init__(self, dir0, V0, V1, ff1, phdir298):\n F0 = os.path.join(dir0,V0,'dielecfij.out')\n if not os.path.exists(F0): return\n F1 = os.path.join(dir0,V1,'dielecfij.out')\n if not os.path.exists(F1): return\n with open (F0, 'r') as fp: data0 = fp.readlines()\n with open (F1, 'r') as fp: data1 = fp.readlines()\n out = os.path.join(phdir298,'dielecfij.out')\n with open (out, 'w') as fp:\n for i, line in enumerate(data0):\n self.mix(line, data1[i], ff1, fp)\n\n def simplemix(self, ss0, ss1, ff1, fp):\n for i,s0 in enumerate(ss0):\n if s0 == ss1[i]: fp.write(' {}'.format(s0))\n else: fp.write(' {}'.format( float(s0)+(1.-float(ff1))*(float(ss1[i])-float(s0)) ))\n fp.write('\\n')\n\n def sitemix(self, ss0, ss1, ff1, fp):\n for i,s0 in enumerate(ss0):\n if s0 == ss1[i]: fp.write(' {}'.format(s0))\n else:\n change = float(ss1[i])-float(s0)\n if change>=0.5: change -= 1.\n if change<=-0.5: change += 1.\n fp.write(' {}'.format( float(s0)+(1.-float(ff1))*change ))\n fp.write('\\n')\n\n def mix(self, line0, line1, ff1, fp):\n ss0 = [f.strip() for f in line0.split(' ') if f!='']\n ss1 = [f.strip() for f in line1.split(' ') if f!='']\n if len(ss0) >= 4:\n if ss0[3] in periodictable and ss1[3] in periodictable: self.sitemix(ss0, ss1, ff1, 
fp)\n else: self.simplemix(ss0, ss1, ff1, fp)\n else:\n self.simplemix(ss0, ss1, ff1, fp)\n\n\ndef Phonon298(dir0, pvdos=False):\n V298 = threcord.get(\"Atomic volume at 298.15 K ($\\AA^3$)\")\n phdir298 = os.path.join(dir0, 'phonon298.15K')\n if not os.path.exists(phdir298):\n os.mkdir(phdir298)\n volumes = threcord.get(\"volumes\")\n i1 = 0\n for ii,vv in enumerate(volumes):\n if float(vv) < V298:\n i1 += 1\n i1 -= 1\n i1 = max(i1, 0)\n i1 = min(i1, len(volumes)-2)\n dV = float(volumes[i1+1]) - float(volumes[i1])\n ff1 = (float(volumes[i1+1]) - V298)/dV\n cmd = \"Ymix -mlat -f \"+str(ff1)+ \" \" \\\n + os.path.join(dir0,Pfiles[i1],\"superfij.out\") + \" \" \\\n + os.path.join(dir0,Pfiles[i1+1],\"superfij.out\") + \" >\" \\\n + os.path.join(phdir298,\"superfij.out\")\n output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n universal_newlines=True)\n #print(output)\n mix = BornMix(dir0, Pfiles[i1], Pfiles[i1+1], ff1, phdir298)\n\n cwd = os.getcwd()\n os.chdir( phdir298 )\n\n cmd = \"Yphon -tranI 2 -eps -nqwave \"+ str(nqwave)+ \" =1 and ngroup<=2:\n dfile = \"dfile.tri\"\n structure.update({\"crystal system\": \"Triclinic\"})\n elif ngroup>=3 and ngroup<=15:\n dfile = \"dfile.mon\"\n structure.update({\"crystal system\": \"Monoclinic\"})\n elif ngroup>=16 and ngroup<=74:\n dfile = \"dfile.oth\"\n structure.update({\"crystal system\": \"Orthorhombic\"})\n elif ngroup>=75 and ngroup<=142:\n dfile = \"dfile.tet\"\n structure.update({\"crystal system\": \"Tetragonal\"})\n elif ngroup>=143 and ngroup<=167:\n dfile = \"dfile.rho\"\n structure.update({\"crystal system\": \"Trigonal\"})\n elif ngroup>=168 and ngroup<=194:\n dfile = \"dfile.hcp\"\n structure.update({\"crystal system\": \"Hexagonal\"})\n elif ngroup>=195 and ngroup<=220:\n dfile = \"dfile.scc\"\n structure.update({\"crystal system\": \"Cubic\"})\n elif ngroup>=221 and ngroup<=224:\n dfile = \"dfile.bcc\"\n structure.update({\"crystal system\": \"Cubic({bcc})\"})\n elif ngroup>=225 and ngroup<=230:\n dfile = \"dfile.fcc\"\n structure.update({\"crystal system\": \"Cubic({fcc})\"})\n\n if dfile != \"\":\n PATH_TO_STORE_CONFIG = dfttkconfig.default_path()\n plotdatabase = os.path.join(dfttkconfig.get_abspath(PATH_TO_STORE_CONFIG),'analysis','database')\n copyfile(os.path.join(plotdatabase,dfile),os.path.join(phdir298,dfile))\n cwd = os.getcwd()\n os.chdir( phdir298 )\n import platform\n if platform.system()==\"Linux\":\n cmd = 'timeout 6 pos2s Symmetry.pos -THR 3.e-4 >&symmetry.out'\n output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n universal_newlines=True)\n print(output)\n\n Gph = os.path.exists(\"symmetry.mode\")\n if Gph:\n cmd = \"Yphon -Gfile symmetry.mode -tranI 2 -eps -pdis \"+dfile0+ \" symmetry.out\"\n else:\n cmd = \"Yphon -tranI 2 -eps -pdis \"+dfile0+ \" symmetry.out\"\n if os.path.exists('dielecfij.out') : cmd = cmd + ' -Born dielecfij.out'\n\n output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n universal_newlines=True)\n #print(output)\n if Gph:\n extractGph()\n\n cmd = \"gnuplot vdis.plt; convert -background white -alpha remove -rotate 90 -density 120x120 vdis.eps vdis.png\"\n plot(cmd)\n figures.update({\"phonon dispersion at 298.15 K\": \"phonon298.15K/vdis.png\"})\n os.chdir( cwd )\n\n threcord.update({\"figures\":figures})\n\n cwd = os.getcwd()\n os.chdir( phdir298 )\n cfile = [\"findsym.log\",\"run.log\",\"exactQ.out\",\"run\",\"symmetry.out\"]\n for f in cfile:\n if os.path.exists(f):\n 
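# delete pos2s/Yphon scratch files listed in cfile once the dispersion results are saved\n 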
os.remove(f)\n os.chdir( cwd )\n\n global start\n print (round(time.time()-start,3), \"Secs. costed in calculations of phonon properties at 298.15K\")\n start = time.time()\n\n#outs = [\"space group\", \"point group symmetry\", \"space group symmetry\"]\nouts = [\"space group\",\"space group symmetry\"]\ndef addpapers(g,formula,pname):\n g[0] += float(threcord.get(\"H298.15 (J/mol-atom)\"))\n if threcord.get(\"Ideal mixing entropy (kB/atom)\")!=None:\n g[1] -= R*float(threcord.get(\"Ideal mixing entropy (kB/atom)\"))\n\n sys.stdout.write(\"{},{}\".format(pname,formula))\n for ss in outs:\n sys.stdout.write(\",{}\".format(structure.get(ss)))\n sys.stdout.write(\",{:.6g}\".format(g[0]))\n sys.stdout.write(\",{:.6g}\".format(g[1]))\n sys.stdout.write(\",{:.5g}\".format(g[2]))\n sys.stdout.write(\",{:.3e}\".format(g[3]))\n sys.stdout.write(\",{:.3e}\".format(g[4]))\n sys.stdout.write(\",{:.3e}\".format(g[5]))\n\n try:\n pq = 100.*threcord.get(\"Uncertainty\").get(\"amount of imaginary phonon mode\")\n eq = eVtoJ*threcord.get(\"Uncertainty\").get(\"0 K energy uncertainty (eV/atom)\")\n except:\n pq = 0.0\n eq = 0.0\n warning = SGTErec.get(\"G-H298.15 (J/mol-atom)\")[1].get(\"error\")\n #print(pq,eq,warning)\n #sys.stdout.write(\",{:.1f},{:.0f},{:.0f},\".format(pq,eq,warning))\n sys.stdout.write(\",{:.1f},\".format(pq))\n\n if warning > 99:\n sys.stdout.write(\" **********WARNING, fittit error is too large! {}\".format(warning))\n if abs(g[2]) > 32:\n sys.stdout.write(\" **********WARNING, Cp at room condition is abnormal! {}\".format(abs(g[2])))\n sys.stdout.write(\"\\n\")\n\n\nmarkers=['o', 'v', 'd', '^', '<', '>', 's', '*', 'x', '+', '1', '2']\n\nk_B = 8.6173303e-5\nR = 8.3144598\n\neVtoGPa = 160.21766208\neVtoJ = 96486.9\nTHRE0 = 1.e-5\nnqwave = 2.e6\nnqwave = 1.e6\nnqwave = 4.e6\n\nT0 = 298.15\nupdate = True\n\ninput_within = False #\"\"\" key to cotrol the within input\"\"\"\nformula_within = \"\" #\"\"\"chemical formula\"\"\"\nwithin = []\n\nPQ = 0.075\nEQ = 0.015\nPQ = 0.01\nEQ = 0.01\nCpMax = 50.\nTupmax = 2000.0\nstart = time.time()\ngamma_phonons = {}\nthrecord = {}\nfigures = {}\nzthermo = {}\nSGTErec = {}\nstructure = {}\nexpt = None\nUncertainty = {}\nnphases = 0\ndebug = False\nfitCp = False\npaper = True\nphdft = \"phonon\"\nexpt = None\nxlim = None\npvdos = False\n\nphases = []\npapers = []\nPhaseName = {}\n\ndef getdoslim(e, dos, xlim):\n xx, yy = [], []\n for i,energy in enumerate(e):\n if energy >xlim[0] and energy =xlim and x!=thermo[-1,0]:\n thermo = thermo[0:i+1,:]\n xlim = None\n break\n\n if expt!=None:\n meltingT = get_melting_temperature(expt, formula)\n if meltingT!=None:\n for i,x in enumerate(thermo[:,0]):\n if x>=meltingT and x!=thermo[-1,0]:\n thermo = thermo[0:i+1,:]\n break\n\n for i,cp in enumerate(thermo[:,6]):\n if cp > CpMax:\n thermo = thermo[0:i,:]\n break\n\n \"\"\"\n f2=interpolate.splrep(thermo[:,0], thermo[:,1])\n V298 = float(interpolate.splev(T0, Vstack))\n Hstack=interpolate.splrep(thermo[:,0], thermo[:,4])\n H298 = float(interpolate.splev(T0, Hstack))\n Sstack=interpolate.splrep(thermo[:,0], thermo[:,3])\n S298 = float(interpolate.splev(T0, Sstack))\n \"\"\"\n if T0 <= thermo[-1,0] :\n f2=interp1d(thermo[:,0], thermo[:,1])\n V298 = f2(T0)\n f2=interp1d(thermo[:,0], thermo[:,4])\n H298 = f2(T0)\n f2=interp1d(thermo[:,0], thermo[:,3])\n S298 = f2(T0)\n else:\n print (\"\\nWarning! 
T0=\", T0, \"is higher than the T up limit:\", thermo[-1,0], \\\n \" no SGTE fitting will be performed\\n\")\n\n if volumes is not None:\n if T0 <= thermo[-1,0] :\n T = thermo[:,0]\n V = thermo[:,1]\n A = thermo[:,5]\n B = thermo[:,9]\n C = thermo[:,7]\n G = 3*A[T>T0]*B[T>T0]*physical_constants['Avogadro constant'][0]*1e-21*V[T>T0]/C[T>T0]\n v = V[T>T0]\n g = V298*G/v\n g = sum(g)/len(g)\n readme['Gruneisen parameter']= round(g,3)\n Gmax = 1.2*max(G)\n gl = [i for i,v in enumerate(C) if v > 0]\n g = 3*A[gl]*B[gl]*physical_constants['Avogadro constant'][0]*1e-21*V[gl]/C[gl]\n t = T[gl]\n Gmin = min(g)\n if Gmax>0: Gmin = max(Gmin,-Gmax)\n if Gmin>0: Gmin = 0\n ix = 0\n for i in range (len(g)-2,0,-1):\n if g[i]>Gmax or g[i] 300: break\n tmp = max(tmp, thermo[i,6]-thermo[i,8])\n if tmp>1.e-2:\n thermoplot(folder,\"Heat capacities (J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), elonly=300, expt=expt, CoT=True, label=plotlabel,plottitle=plottitle)\n thermoplot(folder,\"Debye temperature (K)\",list(thermo[:,0]),list(thermo[:,10]),yzero=0.0, xlim=xlim, label=plotlabel,plottitle=plottitle)\n thermoplot(folder,\"Debye temperature (K)\",list(thermo[:,0]),list(thermo[:,10]),yzero=0.0, xlim=70, label=plotlabel,plottitle=plottitle)\n if volumes is not None:\n bs = np.ones((len(thermo[:,9])), dtype=float)\n bs[1:] = thermo[1:,6]/thermo[1:,7]*thermo[1:,9]\n thermoplot(folder,\"Bulk modulus (GPa)\",list(thermo[:,0]),list(thermo[:,9]), reflin=list(bs) , expt=expt, yzero=0.0,xlim=xlim, label=plotlabel,plottitle=plottitle)\n thermoplot(folder,\"Seebeck coefficients (μV/K)\",list(thermo[:,0]),list(thermo[:,16]),xlim=xlim, label=plotlabel,plottitle=plottitle)\n thermoplot(folder,\"Lorenz number ($WΩK^{−2}$)\",list(thermo[:,0]),list(thermo[:,17]),xlim=xlim, label=plotlabel,plottitle=plottitle)\n thermoplot(folder,\"Absolute thermal electric force (V)\",list(thermo[:,0]),list(thermo[:,15]), xlim=xlim, label=plotlabel,plottitle=plottitle)\n thermoplot(folder,\"Effective charge carrier concentration ($e/cm^{3}$)\",list(thermo[:,0]),\n list(thermo[:,18]/thermo[:,1]*1e24), label=plotlabel,plottitle=plottitle)\n thermoplot(folder,\"Effective charge carrier concentration ($e/cm^{3}$)\",list(thermo[:,0]),\n list(thermo[:,18]/thermo[:,1]*1e24), xlim=100, label=plotlabel,plottitle=plottitle)\n if len(gamma_phonons)!=0: readme['gamma phonons (cm^{-1})']= gamma_phonons\n if doscar!=None:\n from dfttk.pythelec import pregetdos, getdos\n with open (doscar, \"r\") as fp:\n edn, eup, vde, dos_energies, vaspEdos = pregetdos(fp) # Line 186\n NELECTRONS, E0, dF, e, dos, Eg =\\\n getdos(-15, 15, 0.0, 10001, 1000., edn, eup, vde, dos_energies, vaspEdos)\n if Eg <0.0: Eg=0.\n xlim = [-0.1, Eg+0.1]\n xx, yy = getdoslim(dos_energies, vaspEdos, xlim)\n thermoplot(folder,\"Electron DOS (States/Atom/eV)\",list(xx),list(np.array(yy)/natoms), xlim=xlim,\n xlabel=\"Band energy (eV)\", label=plotlabel,plottitle=plottitle)\n xlim = [-0.2, Eg+0.2]\n xx, yy = getdoslim(dos_energies, vaspEdos, xlim)\n thermoplot(folder,\"Electron DOS (States/Atom/eV)\",list(xx),list(np.array(yy)/natoms), xlim=xlim,\n xlabel=\"Band energy (eV)\", label=plotlabel,plottitle=plottitle)\n xlim = [-0.5, Eg+0.5]\n xx, yy = getdoslim(dos_energies, vaspEdos, xlim)\n thermoplot(folder,\"Electron DOS (States/Atom/eV)\",list(xx),list(np.array(yy)/natoms), xlim=xlim,\n xlabel=\"Band energy (eV)\", label=plotlabel,plottitle=plottitle)\n xlim = [-1.0, Eg+1.0]\n xx, yy = getdoslim(dos_energies, vaspEdos, xlim)\n thermoplot(folder,\"Electron DOS 
(States/Atom/eV)\",list(xx),list(np.array(yy)/natoms), xlim=xlim,\n xlabel=\"Band energy (eV)\", label=plotlabel,plottitle=plottitle)\n xlim = [-2.0, Eg+2.0]\n xx, yy = getdoslim(dos_energies, vaspEdos, xlim)\n thermoplot(folder,\"Electron DOS (States/Atom/eV)\",list(xx),list(np.array(yy)/natoms), xlim=xlim,\n xlabel=\"Band energy (eV)\", label=plotlabel,plottitle=plottitle)\n xlim = [-5.0, Eg+5.0]\n xx, yy = getdoslim(dos_energies, vaspEdos, xlim)\n thermoplot(folder,\"Electron DOS (States/Atom/eV)\",list(xx),list(np.array(yy)/natoms), xlim=xlim,\n xlabel=\"Band energy (eV)\", label=plotlabel,plottitle=plottitle)\n xlim = [-10., Eg+10.]\n xx, yy = getdoslim(dos_energies, vaspEdos, xlim)\n thermoplot(folder,\"Electron DOS (States/Atom/eV)\",list(xx),list(np.array(yy)/natoms), xlim=xlim,\n xlabel=\"Band energy (eV)\", label=plotlabel,plottitle=plottitle)\n return True\n\n\ndef plotCMD(thermofile, volumes=None, energies=None, expt=None, xlim=None, _fitCp=True,\n poscar=None, vdos=None, doscar=None, natoms=1, plotlabel=None):\n global fitCp\n fitCp = _fitCp\n #print(expt)\n phasedir,tail = os.path.split(thermofile)\n if phasedir==\"\": phasedir=\".\"\n folder = os.path.join(phasedir,\"figures\")\n print(\"All figures have been outputed into: \", folder, \" with T uplimt:\", xlim, \"\\n\\nEnjoy!\\n\")\n if not os.path.exists(folder):\n os.mkdir(folder)\n if volumes is not None: thermoplot(folder,\"0 K total energies (eV/atom)\",volumes, energies)\n\n thermo = np.loadtxt(thermofile, comments=\"#\", dtype=np.float)\n thermo[np.isnan(thermo)] = 0.0\n if len (thermo) < 1:\n print(\"\\nCorrupted thermofile for\", thermofile, \"Please check it!\")\n return False\n\n if xlim is not None:\n for i,x in enumerate(thermo[:,0]):\n if x>=xlim:\n thermo = thermo[0:i,:]\n xlim = None\n break\n\n for i,cp in enumerate(thermo[:,6]):\n if cp > CpMax:\n thermo = thermo[0:i,:]\n break\n\n \"\"\"\n f2=interpolate.splrep(thermo[:,0], thermo[:,1])\n V298 = float(interpolate.splev(T0, Vstack))\n Hstack=interpolate.splrep(thermo[:,0], thermo[:,4])\n H298 = float(interpolate.splev(T0, Hstack))\n Sstack=interpolate.splrep(thermo[:,0], thermo[:,3])\n S298 = float(interpolate.splev(T0, Sstack))\n \"\"\"\n f2=interp1d(thermo[:,0], thermo[:,1])\n V298 = f2(T0)\n f2=interp1d(thermo[:,0], thermo[:,4])\n H298 = f2(T0)\n f2=interp1d(thermo[:,0], thermo[:,3])\n S298 = f2(T0)\n\n #print(H298,V298,S298)\n\n if volumes is not None: Plot298(folder, V298, volumes)\n\n threcord.update({\"H298.15 (J/mol-atom)\":H298})\n threcord.update({\"S298.15 (J/mol-atom/K)\":S298})\n\n zthermo.update({\"temperature (K)\":list(thermo[:,0])})\n zthermo.update({\"atomic volume ($\\AA^3$)\":list(thermo[:,1])})\n zthermo.update({\"Gibbs energy (eV/atom)\":list(thermo[:,2])})\n zthermo.update({\"enthalpy (J/mol-atom)\":list(thermo[:,4])})\n zthermo.update({\"entropy (J/mol-atom/K)\":list(thermo[:,3])})\n zthermo.update({\"Cp (J/mol-atom/K)\":list(thermo[:,6])})\n if fitCp:\n proStoichiometricCp()\n else:\n proStoichiometricG()\n with open(os.path.join(folder,\"..\",\"record.json\"), 'w') as fp:\n myjsonout(SGTErec, fp, indent=\"\", comma=\"\")\n myjsonout(SGTErec, sys.stdout, indent=\"\", comma=\"\")\n\n thermoplot(folder,\"Atomic volume ($\\AA^3$)\",list(thermo[:,0]),list(thermo[:,1]), xlim=xlim, label=plotlabel)\n thermoplot(folder,\"Gibbs energy-H298 (J/mol-atom)\",list(thermo[:,0]),list(thermo[:,2]*eVtoJ-H298), xlim=xlim, label=plotlabel)\n #print(thermo[:,4]-H298)\n thermoplot(folder,\"Enthalpy-H298 
(J/mol-atom)\",list(thermo[:,0]),list(thermo[:,4]-H298),\n expt=expt, xlim=xlim, label=plotlabel)\n thermoplot(folder,\"Entropy (J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,3]),yzero=0.0, xlim=xlim, label=plotlabel)\n\n thermoplot(folder,\"LTC (1/K)\",list(thermo[:,0]),list(1.e06*thermo[:,5]),yzero=0.0, xlim=xlim, label=plotlabel)\n ncols = [6,8]\n #print('eeeeeeee', plotlabel, expt)\n thermoplot(folder,\"Heat capacities (J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), expt=expt, xlim=xlim, label=plotlabel)\n thermoplot(folder,\"Heat capacities (J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xlim=300,expt=expt, label=plotlabel)\n thermoplot(folder,\"Heat capacities (J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), xlim=100,expt=expt, CoT=True, label=plotlabel)\n tmp = 0.0\n for i,v in enumerate(thermo[:,0]):\n if v >300: break\n tmp = max(tmp, thermo[i,6]-thermo[i,8])\n if tmp>1.e-2:\n thermoplot(folder,\"Heat capacities (J/mol-atom/K)\",list(thermo[:,0]),list(thermo[:,ncols]), elonly=300, expt=expt, CoT=True, label=plotlabel)\n thermoplot(folder,\"Debye temperature (K)\",list(thermo[:,0]),list(thermo[:,13]),yzero=0.0, xlim=xlim, label=plotlabel)\n thermoplot(folder,\"Debye temperature (K)\",list(thermo[:,0]),list(thermo[:,13]),yzero=0.0, xlim=70, label=plotlabel)\n #thermoplot(folder,\"Bulk modulus (GPa)\",list(thermo[:,0]),list(thermo[:,15]),yzero=0.0,xlim=xlim, label=plotlabel)\n bs = copy.deepcopy(thermo[:,9])\n for i,Cv in enumerate(thermo[1:,7]):\n if Cv>0.0: bs[i] = thermo[i,6]/Cv*thermo[i,9]\n thermoplot(folder,\"Bulk modulus (GPa)\",list(thermo[:,0]),list(thermo[:,9]), reflin=list(bs) , expt=expt, yzero=0.0,xlim=xlim, label=plotlabel)\n T = copy.deepcopy(thermo[:,0])\n t22 = copy.deepcopy(thermo[:,22])\n for i,tval in enumerate(t22):\n if T[i] <=0.0 : T[i]=1.e-8\n if t22[i] <=0.0 : t22[i]=1.e-8\n Lfactor = physical_constants['Boltzmann constant'][0]/physical_constants['atomic unit of charge'][0]**2/physical_constants['Avogadro constant'][0]\n thermoplot(folder,\"Seebeck coefficients (μV/K)\",list(thermo[:,0]),list(thermo[:,21]/t22/T),xlim=xlim, label=plotlabel)\n thermoplot(folder,\"Lorenz number ($WΩK^{−2}$)\",list(thermo[:,0]),list((thermo[:,6]-thermo[:,8])/t22*Lfactor),xlim=xlim, label=plotlabel)\n thermoplot(folder,\"Absolute thermal electric force (V)\",list(thermo[:,0]),list(thermo[:,19]), xlim=xlim, label=plotlabel)\n thermoplot(folder,\"Effective charge carrier concentration ($e/cm^{3}$)\",list(thermo[:,0]),\n list(thermo[:,22]/thermo[:,1]*1e24), label=plotlabel)\n thermoplot(folder,\"Effective charge carrier concentration ($e/cm^{3}$)\",list(thermo[:,0]),\n list(thermo[:,22]/thermo[:,1]*1e24), xlim=100, label=plotlabel)\n\n\ndef addvdos(x,y,f,w,h):\n for i,v in enumerate(x):\n dx = v - f\n y[i] += math.exp(-(dx/w)**2)*h\n\n\ndef plotRaman(folder, fp, vdos, plottitle=None):\n lines=fp.readlines()\n for i,line in enumerate(lines):\n if line.startswith(\"Setting workspace & pre-optimizing : Section time \"):\n lines = lines[i+2:]\n break\n for i,line in enumerate(lines):\n ff = [f for f in line.strip().split(\" \") if f!=\"\"]\n if len(ff) < 3: continue\n if ff[2]==\"Modes\":\n lines = lines[i:]\n break\n\n I = []\n F_lo = []\n M = []\n F = []\n A = []\n G = {}\n global gamma_phonons\n gamma_phonons = {}\n for i,line in enumerate(lines):\n #if line.startswith(\"Handling symmetry : Section time \"): break\n #if line.startswith(\"Handling symmetry : Section time \"): break\n ff = [f for f in line.strip().split(\" \") if f!=\"\"]\n if len(ff) < 3: 
continue\n if ff[2]==\"Modes\":\n active = ff[4]\n continue\n\n if line.startswith(\" No irrep THz\"): continue\n try:\n int(ff[0])\n except:\n break\n\n M.append(ff[1])\n I.append(ff[0])\n F.append(float(ff[2])/0.0299792458)\n A.append(active)\n kk = '{} {:05}'.format(ff[1],int(ff[0]))\n #THz = round(float(ff[2]),3)\n #THz = str(round(float(ff[2]),3))+\" THz\",\n #cm = str(round(float(ff[2])/0.0299792458,1))+\" cm-1\"\n cm = round(float(ff[2])/0.0299792458,1)\n try:\n F_lo.append(float(ff[3])/0.0299792458)\n if cm!=0 and active==\"ir_active\":\n cm = str(round(float(ff[2])/0.0299792458,1))+\"(TO)+\"+str(round(float(ff[3])/0.0299792458,1))+\"(LO)\"\n except:\n pass\n if cm!=0: gamma_phonons[kk] = [cm, active]\n #print(I,M,F,A)\n x = vdos[:,0]*1.e-12/0.0299792458\n y = vdos[:,1]*1.e+12*0.0299792458\n yy = np.zeros((len(y)), dtype=float)\n w = max(x)*0.001\n h = 0.1*max(y)\n x0 = []\n y0 = []\n s0 = []\n for i,f in enumerate(F):\n if M[i].lower().startswith(\"e\"): hh = h*2\n elif M[i].lower().startswith(\"t\"): hh = h*3\n else: hh = h\n if float(f)<1.e-3: continue\n if len(F_lo)!=0:\n if A[i]==\"ir_active\":\n if M[i].lower().startswith(\"e\"): hh = h\n elif M[i].lower().startswith(\"t\"): hh = h*2\n else: hh = h/2\n addvdos(x,yy,float(f),w,hh)\n x0.append(float(f)-18*w)\n y0.append(hh)\n s0.append(M[i]+\"(TO)\")\n if hh==h/2: h = hh\n addvdos(x,yy,float(F_lo[i]),w,h)\n x0.append(float(F_lo[i])-18*w)\n y0.append(h)\n s0.append(M[i]+\"(LO)\")\n else:\n addvdos(x,yy,float(f),w,hh)\n x0.append(float(f)-18*w)\n y0.append(hh)\n s0.append(M[i])\n else:\n addvdos(x,yy,float(f),w,hh)\n x0.append(float(f)-18*w)\n y0.append(hh)\n s0.append(M[i])\n ix = sorted(range(len(x0)), key=lambda k: x0[k])\n _M = []\n _x0 = []\n _y0 = []\n _s0 = []\n for i in range(len(ix)):\n _M.append(s0[ix[i]])\n _x0.append(x0[ix[i]])\n _y0.append(y0[ix[i]])\n ss = s0[ix[i]]\n if len(ss)==1:\n ss = '$'+ss+'$'\n elif len(ss)>0:\n if ss[1].isdigit() or ss[1].isalpha():\n aa = ss[1:len(ss)].split('(')\n if len(aa)>1:\n ss = '$'+ss[0]+'_{'+aa[0]+'}^{('+aa[1]+'}$'\n else:\n ss = '$'+ss[0]+'_{'+aa[0]+'}$'\n else: ss = '$'+ss+'$'\n _s0.append(ss)\n M = _M\n x0 = _x0\n y0 = _y0\n s0 = _s0\n\n #print(x0)\n #print(M)\n nx0 = []\n ny0 = []\n ns0 = []\n nn0 = []\n #adjust y0 for overlapping text\n for i,v in enumerate(x0):\n ff = False\n for j in range(len(nx0)):\n if abs(x0[i] - nx0[j]) < 30*w:\n ns0[j] = ns0[j]+'+'+s0[i]\n nx0[j]= (nx0[j]*nn0[j]+x0[i])/(nn0[j]+1)\n ny0[j]= max(ny0[j], y0[i])\n nn0[j] += 1\n ff = True\n break\n if ff: continue\n nx0.append(x0[i])\n ny0.append(y0[i])\n ns0.append(s0[i])\n nn0.append(1)\n\n if len(s0)>0:\n for i,s in enumerate(ns0):\n ss = [f for f in s.split('+') if f!=\"\"]\n if len(ss)>7:\n ns0[i] = '+'.join(ss[0:7])+'+...'\n thermoplot(\"./\",\"Gamma point phonons\",list(x),list(yy),\n reflin=list(y), xlabel=\"Phonon frequency($cm^{-1}$)\", ytext=[nx0,ny0,ns0], ylabel=\"Phonon DOS ($states.cm$)\",plottitle=plottitle)\n #reflin=list(y), xlabel=\"Phonon frequency(THz)\", ytext=[nx0,ny0,ns0], ylabel=\"Phonon DOS ($THz^{-1}$)\")\n fn = \"Gamma_point_phonons.png\"\n move(fn, os.path.join(folder,fn))\n\ndef Plot298(folder, V298, volumes, debug=False, plottitle=None, local=None):\n import dfttk.scripts.config_dfttk as dfttkconfig\n PATH_TO_STORE_CONFIG = dfttkconfig.default_path()\n plotdatabase = os.path.join(dfttkconfig.get_abspath(PATH_TO_STORE_CONFIG),'analysis','database')\n #print (plotdatabase, folder)\n if local is None:\n ydir = os.path.join(folder,'..','Yphon')\n else:\n ydir = 
os.path.join(folder,'..')\n\n vdict = {}\n for root, dirs, files in os.walk(ydir):\n for dir in dirs:\n poscar = os.path.join(ydir,dir,'POSCAR')\n if os.path.exists(poscar):\n structure = Structure.from_file(poscar)\n vol = 'V{:010.6f}'.format(structure.volume)\n vdict[vol]=dir\n try:\n natom = len(structure.sites)\n sa = SpacegroupAnalyzer(structure)\n ngroup = sa.get_space_group_number()\n except:\n return\n\n #print(natom,ngroup)\n i1 = 0\n for ii,vv in enumerate(volumes):\n if float(vv) < V298:\n i1 += 1\n i1 -= 1\n i1 = max(i1, 0)\n i1 = min(i1, len(volumes)-2)\n dV = float(volumes[i1+1]) - float(volumes[i1])\n ff1 = (float(volumes[i1+1]) - V298)/dV\n\n vol = 'V{:010.6f}'.format(float(natom*volumes[i1]))\n dir1 = vdict[vol]\n file1 = os.path.join(ydir,dir1,'superfij.out')\n if not os.path.exists(file1):\n print (\"\\nWARNING! I cannot find file :\", file1, \" so that I will not do phonon298.15 for you!\\n\")\n return\n vol = 'V{:010.6f}'.format(float(natom*volumes[i1+1]))\n dir2 = vdict[vol]\n file2 = os.path.join(ydir,dir2,'superfij.out')\n if not os.path.exists(file2):\n print (\"\\nWARNING! I cannot find file :\", file2, \" so that I will not do phonon298.15 for you!\\n\")\n return\n\n phdir298 = os.path.join(ydir,'Phonon298.15')\n if not os.path.exists(phdir298):\n os.mkdir(phdir298)\n cmd = \"Ymix -mlat -f \"+str(ff1)+ \" \"+file1+ \" \"+file2 +\" >\"+os.path.join(phdir298,\"superfij.out\")\n output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n universal_newlines=True)\n #print(output)\n mix = BornMix(ydir, dir1, dir2, ff1, phdir298)\n\n cwd = os.getcwd()\n os.chdir( phdir298 )\n\n _nqwave = \"\"\n if debug:\n _nqwave = \"-nqwave \"+ str(1.e4)\n cmd = \"Yphon -tranI 2 -eps \"+ _nqwave+ \" Raman.mode\"\n if os.path.exists('dielecfij.out') : cmd = cmd + ' -Born dielecfij.out'\n move(\"vdos.out\", 'vdos.sav')\n output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n universal_newlines=True)\n #print(output)\n move(\"vdos.sav\", 'vdos.out')\n vdos = np.loadtxt(\"vdos.out\", comments=\"#\", dtype=np.float)\n if os.path.exists(\"Raman.mode\") :\n with open (\"Raman.mode\", \"r\") as fp:\n plotRaman(os.path.join(cwd,folder), fp, vdos, plottitle=plottitle)\n\n dfile = \"\"\n if ngroup>=1 and ngroup<=2:\n dfile = \"dfile.tri\"\n elif ngroup>=3 and ngroup<=15:\n dfile = \"dfile.mon\"\n elif ngroup>=16 and ngroup<=74:\n dfile = \"dfile.oth\"\n elif ngroup>=75 and ngroup<=142:\n dfile = \"dfile.tet\"\n elif ngroup>=143 and ngroup<=167:\n dfile = \"dfile.rho\"\n elif ngroup>=168 and ngroup<=194:\n dfile = \"dfile.hcp\"\n elif ngroup>=195 and ngroup<=220:\n dfile = \"dfile.scc\"\n elif ngroup>=221 and ngroup<=224:\n dfile = \"dfile.bcc\"\n elif ngroup>=225 and ngroup<=230:\n dfile = \"dfile.fcc\"\n dfile = os.path.join(plotdatabase, dfile)\n if dfile != \"\":\n head,dfile0 = os.path.split(dfile)\n copyfile(dfile,dfile0)\n cmd = \"Yphon -tranI 2 -eps -pdis \"+dfile0+ \" Raman.mode\"\n if os.path.exists('dielecfij.out') : cmd = cmd + ' -Born dielecfij.out'\n output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n universal_newlines=True)\n #print(output)\n move(\"vdos.sav\", 'vdos.out')\n vdos = np.loadtxt(\"vdos.out\", comments=\"#\", dtype=np.float)\n if os.path.exists(\"Raman.mode\") :\n with open (\"Raman.mode\", \"r\") as fp:\n plotRaman(cwd,folder, fp, vdos)\n\n dfile = \"\"\n if ngroup>=1 and ngroup<=2:\n dfile = \"dfile.tri\"\n elif ngroup>=3 and ngroup<=15:\n dfile = 
\"dfile.mon\"\n elif ngroup>=16 and ngroup<=74:\n dfile = \"dfile.oth\"\n elif ngroup>=75 and ngroup<=142:\n dfile = \"dfile.tet\"\n elif ngroup>=143 and ngroup<=167:\n dfile = \"dfile.rho\"\n elif ngroup>=168 and ngroup<=194:\n dfile = \"dfile.hcp\"\n elif ngroup>=195 and ngroup<=220:\n dfile = \"dfile.scc\"\n elif ngroup>=221 and ngroup<=224:\n dfile = \"dfile.bcc\"\n elif ngroup>=225 and ngroup<=230:\n dfile = \"dfile.fcc\"\n dfile = os.path.join(plotdatabase,dfile)\n if dfile != \"\":\n head,dfile0 = os.path.split(dfile)\n copyfile(dfile,dfile0)\n cmd = \"Yphon -tranI 2 -eps -pdis \"+dfile0+ \" len(sys.argv)):\n break\n continue\n if (sys.argv[count] == \"-pvdos\"):\n pvdos = True\n if (sys.argv[count] == \"-within\"):\n input_within = True\n elif (sys.argv[count] == \"-formula\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n formula = sys.argv[count]\n elif (sys.argv[count] == \"-expt\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n expt = sys.argv[count]\n elif (sys.argv[count] == \"-phdft\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n phdft = sys.argv[count]\n elif (sys.argv[count] == \"-xlim\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n xlim = float(sys.argv[count])\n elif (sys.argv[count] == \"-T0\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n T0 = float(sys.argv[count])\n elif (sys.argv[count] == \"-phasename\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n phasename = str(sys.argv[count])\n elif (sys.argv[count] == \"-cpmax\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n CpMax = float(sys.argv[count])\n elif (sys.argv[count] == \"-THRE0\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n THRE0 = float(sys.argv[count])\n elif (sys.argv[count] == \"-PQ\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n PQ = float(sys.argv[count])\n elif (sys.argv[count] == \"-EQ\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n EQ = float(sys.argv[count])\n elif (sys.argv[count] == \"-plot\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n plotlabel = sys.argv[count]\n elif (sys.argv[count] == \"-Tupmax\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n Tupmax = float(sys.argv[count])\n elif (sys.argv[count] == \"-nqwave\"):\n count = count + 1\n if (count > len(sys.argv)):\n break\n nqwave = float(sys.argv[count])\n elif (sys.argv[count] == \"-fitG\"):\n fitCp = False\n elif (sys.argv[count] == \"-fitCp\"):\n fitCp = True\n elif (sys.argv[count] == \"-debug\"):\n debug = True\n elif (os.path.exists(sys.argv[count])):\n justplot=sys.argv[count]\n else:\n print (\"*******Unknown option\", sys.argv[count])\n count = count + 1\n\n if formula_within!=\"\":\n within = formula2elist(formula_within)\n print (\"data to be extracted within \",within)\n\n #print (phasename)\n #if True:\n try:\n with open (phasename,'r') as f:\n lines = f.readlines()\n for ll in lines:\n line = ll.strip('\\n').replace(',', ' ').replace(':', ' ')\n ss = [s.strip() for s in line.split(' ') if s.strip()!='']\n if len(ss)>1: PhaseName.update({ss[0]:ss[1]})\n except:\n pass\n \"\"\"\n \"\"\"\n\n if expt!=None: expt=get_expt(expt, formula)\n\n if justplot==None: lines = sys.stdin.readlines()\n else:\n plotCMD(justplot, volumes=None, energies=None, expt=expt, xlim=xlim, _fitCp = fitCp, plotlabel=plotlabel)\n sys.exit()\n\n sys.stdout.write(\"G(T)=a+b*T+c*T*Ln(T)+d*T*T+e*T*T*T+f/T (J/mol-atom)\\n\")\n sys.stdout.write(\"Phase,comp\")\n for ss in outs:\n 
sys.stdout.write(\",{}\".format(ss))\n #sys.stdout.write(\",a,b,c,d,e,f,PQ(%),EQ(J),GQ(J),\\n\".format(ss))\n sys.stdout.write(\",a,b,c,d,e,f,PQ(%),\\n\".format(ss))\n\n for line in lines:\n if line.strip()==\"\": continue\n skip,vdos_e,dir0,oldPN = mkDict(line)\n if skip: continue\n #phases.append([threcord.get(\"phase name\"),threcord.get(\"mpid\")])\n phases.append(threcord.get(\"Phase name\"))\n nphases += 1\n #print (line.strip())\n\n if not os.path.exists(dir0):\n os.mkdir(dir0)\n else:\n if not update: continue\n\n Vfiles,Pfiles,g = Genergy(vdos_e,dir0)\n addpapers(g,dir0,oldPN)\n\n if not debug:\n VASPResults(dir0,vdos_e,Vfiles, Pfiles, phdft=phdft)\n try:\n Phonon298(dir0, pvdos=pvdos)\n except:\n pass\n threcord.update({\"structure\":structure})\n #threcord.delete({\"number of atoms in the primitive unit cell\")\n\n with open(dir0 + '/record.json', 'w') as fp:\n myjsonout(threcord, fp, indent=\"\", comma=\"\")\n\n print (\"\\n\", phases, \"\\n\")\n print (\"\\n\", nphases, \"phases extracted\\n\")\n","repo_name":"PhasesResearchLab/dfttk","sub_path":"dfttk/analysis/ywplot.py","file_name":"ywplot.py","file_ext":"py","file_size_in_byte":100198,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"14069326118","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\n\nfrom keras.models import load_model\n\nimport matplotlib.pyplot as plt\n\n\"\"\"\nModule for additional training\n\"\"\"\n\n# load dataset\ndf = pd.read_csv('dorcus_DL_study.csv')\n\n# number of training\nnumTraining = 10000\n\n# number of class\nCls = list(df['class'].unique()) # [1, 2, 3, 4]\nnumCls = len(Cls)\n\n# set data\nY = df['class']\nX = df[['days_elapse', 'weight_gram']]\n\n# convert data into vector\ndef __trY(y):\n for i, t in enumerate(y):\n yield np.eye(1, numCls, t-1)\n\n# set data for training\ntrY = np.array(list(__trY(Y))).reshape(len(Y), numCls)\ntrX = np.array(X)\n\n# load model\nmodel = load_model('model.h5')\n\n# compile\nmodel.compile(loss='categorical_crossentropy', optimizer='adagrad', metrics=['accuracy'])\n\n# training\nhis = model.fit(trX, trY, nb_epoch=numTraining, verbose=2)\n\n# plot result\nfig = plt.figure()\nax1 = fig.add_subplot(1,1,1)\nax1.plot(his.history['loss'])\n\n# save figure\nplt.savefig('loss.png')\n\n# save model\njson_string = model.to_json()\nopen('model.json', 'w').write(json_string)\n\n# save parameters\nmodel.save_weights('param.h5')\n","repo_name":"MitsuruFujiwara/Multi-Class","sub_path":"Additional_Tranining.py","file_name":"Additional_Tranining.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34591551315","text":"# -*- coding: utf-8 -*-\n\n# This script describes the calculation of aggregate income based on the concept\n# of the fiscal multiplier as a geometric series. 
It is described in the\n# accompanying iPython Notebook and at:\n#\n# http://misunderheard.org/monetary_economics/2016/11/20/circular-flow-of-government-money/\n#\n\n# import libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nG = 100 # government spending\ntheta = 0.2 # tax rate\n\nn_rounds = 30 # number of rounds we'll consider\n\n# create an array of numbers from 0-30, one for each spending round\nr = np.arange(0,n_rounds)\n\n# solve equation 1 for each individual round\ny = G*(1-theta)**r\n\n# solve equation 2 for each individual round\nsum_y = G*(1-(1-theta)**(r+1))/(1-(1-theta))\n\n# plot\nplt.bar(r,sum_y, color='r',label='cumulative income')\nplt.bar(r,y, color='b', label='spending round income')\nplt.grid()\nplt.legend(loc='center right')\nplt.xlabel('Spending round, n')\nplt.ylabel('Income')\n\nplt.tight_layout()\nplt.show()\n","repo_name":"spatchcock/monetary_economics_python","sub_path":"scripts/4_circular_flow_of_government_money.py","file_name":"4_circular_flow_of_government_money.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"28260122579","text":"from threading import Thread, Lock\nfrom math import log\n\nimport cv2\nimport numpy as np\nfrom time import sleep, time\n\nclass Chassis:\n \"\"\"\n The lightest class to implement an interface to control the car.\n \n The methods to reimplement are :\n - compute_speed\n - compute_direction\n \n In these methods, firstly, the current value must follow the variation of the targeted value.\n On the other hand, the pwm must be computed and returned with the current value.\n \n \"set_speed\" and \"set_direction\" methods shouldn't be rewritten\n to preserve the abstraction of the interface.\n \n The speed/direction dictionary could have new key/value.\n \"\"\"\n def __init__(self):\n \"\"\"\n Attribute initialization\n \"\"\"\n self.speed = {\n \"range_pwm\": (375, 409, 413), # Be careful, wrong values could destroy the car.\n \"current\": 0,\n \"target\": 0,\n \"pin\": 5,\n \n }\n \n self.direction = {\n \"range_pwm\": (315, 410, 530),\n \"current\": 0,\n \"target\": 0,\n \"pin\": 15,\n \n }\n \n self.speed_lock = Lock()\n self.dir_lock = Lock()\n \n # Connection initialization with servos \n try:\n from Adafruit_PCA9685 import PCA9685\n\n self.pwm = PCA9685()\n self.pwm.set_pwm_freq(60)\n except Exception as e:\n print(\"Error :\", e)\n\n\n def start(self):\n \"\"\"\n Start moving loop.\n \n Return the object to chain the declaration and the start\n car = Chassis().start()\n \n @return: itself after init\n \"\"\"\n self.high_speed_trace = 0\n \n self.move_thread = Thread(target=self._moving_loop, args=())\n self.move_thread.start()\n \n return self\n \n \n def _moving_loop(self):\n \"\"\"\n Compute then apply.\n We insert a little delay after to prevent fits and starts during course correction\n \"\"\"\n \n while True:\n pwm = self.compute_speed()\n self.pwm.set_pwm(self.speed[\"pin\"], 0, int(pwm))\n \n pwm = self.compute_direction()\n self.pwm.set_pwm(self.direction[\"pin\"], 0, int(pwm))\n \n sleep(0.01)\n \n def compute_speed(self):\n \"\"\"\n Set the current value with the target.\n Compute PWM from current value\n \n @return: pwm value\n \"\"\"\n \n self.speed_lock.acquire()\n self.speed[\"current\"] = self.speed[\"target\"] \n self.speed_lock.release()\n \n stop_pwm, start_pwm, max_pwm = self.speed[\"range_pwm\"]\n value = self.speed[\"current\"]\n \n # If the target speed is negative, we consider that it is 0\n if 
value > 0:\n pwm_val = (max_pwm - start_pwm) * value + start_pwm\n else:\n pwm_val = start_pwm\n return pwm_val\n \n \n def compute_direction(self):\n \"\"\"\n Set the current value with the target.\n Compute PWM from current value\n \n @return: pwm value\n \"\"\"\n self.dir_lock.acquire()\n self.direction[\"current\"] = self.direction[\"target\"] \n self.dir_lock.release()\n \n max_left_pwm, straight_pwm, max_right_pwm = self.direction[\"range_pwm\"]\n value = self.direction[\"current\"]\n \n # The PWM computation is not the same if the car turns to the right or to the left\n if value > 0:\n pwm_val = (max_right_pwm - straight_pwm)* value + straight_pwm\n else:\n pwm_val = (straight_pwm - max_left_pwm)* (1+value) + max_left_pwm\n return pwm_val\n \n def set_speed(self, value):\n \"\"\"\n Clip the value, then set the desired speed\n \n @param value: normalized speed with a float\n @return: clipped value\n \"\"\"\n if value > 1:\n value = 1\n elif value < -1:\n value = -1\n \n self.speed_lock.acquire()\n self.speed[\"target\"] = value\n self.speed_lock.release()\n \n return value\n \n def set_direction(self, value):\n \"\"\"\n Clip the value, then set the desired direction\n \n @param value: normalized direction with a float\n @return: clipped value\n \"\"\"\n if value > 1:\n value = 1\n elif value < -1:\n value = -1\n \n self.dir_lock.acquire()\n self.direction[\"target\"] = value\n self.dir_lock.release()\n \n return value\n \n def configure(self):\n \"\"\"\n A quick function to visualize the effect of a precise PWM value\n \"\"\"\n print(\"Pwm direction\")\n pwm = self.direction[\"range_pwm\"][1]\n while True:\n c = input()\n if \"q\" in c:\n pwm -= 5\n elif \"d\" in c:\n pwm += 5\n elif \"s\" in c:\n break\n print(pwm)\n self.pwm.set_pwm(self.direction[\"pin\"], 0, pwm)\n \n \n print(\"Pwm speed\")\n pwm = self.speed[\"range_pwm\"][1]\n while True:\n c = input()\n if \"q\" in c:\n pwm -= 5\n elif \"d\" in c:\n pwm += 5\n elif \"s\" in c:\n break\n print(pwm)\n self.pwm.set_pwm(self.speed[\"pin\"], 0, pwm)\n\n\nclass Car(Chassis):\n \"\"\"\n Add an inertia term to prevent effects of outliers with the prediction.\n \n The car shouldn't make sudden changes of speed and direction.\n Sudden changes create chaos and the car could not be controlled anymore.\n Moreover, we try to protect the car and the components\n \n If the new value is close to the last one, we can change it.\n But if they are completely different, the value applied is a mix between the 2.\n \"\"\"\n def __init__(self):\n super().__init__()\n \n self.speed = {\n \"range_pwm\": (390, 407, 414),\n \"current\": 0,\n \"target\": 0,\n \"pin\": 5,\n \"inertia\": 0.8,\n \n }\n \n self.direction = {\n \"range_pwm\": (315, 410, 530),\n \"current\": 0,\n \"target\": 0,\n \"pin\": 10,\n \"inertia\": 0.7,\n \n }\n \n def compute_speed(self):\n \"\"\"\n Set the current value with the target.\n Then compute PWM from current value\n \n @return: pwm value\n \"\"\"\n self.speed_lock.acquire()\n self.speed[\"current\"] = self._compute_offset(\n self.speed[\"target\"],\n self.speed[\"current\"],\n self.speed[\"inertia\"]\n )\n self.speed_lock.release()\n \n stop_pwm, start_pwm, max_pwm = self.speed[\"range_pwm\"]\n value = self.speed[\"current\"]\n if value > 0:\n pwm_val = (max_pwm - start_pwm)* value + start_pwm\n else:\n pwm_val = start_pwm\n return pwm_val\n \n def compute_direction(self):\n \"\"\"\n Set the current value with the target.\n Then compute PWM from current value\n \n @return: pwm value\n \"\"\"\n 
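# damp the steering change through _compute_offset before mapping it to a PWM value\n 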
self.dir_lock.acquire()\n self.direction[\"current\"] = self._compute_offset(\n self.direction[\"target\"],\n self.direction[\"current\"],\n self.direction[\"inertia\"]\n )\n self.dir_lock.release()\n \n value = self.direction[\"current\"]\n max_left_pwm, straight_pwm, max_right_pwm = self.direction[\"range_pwm\"]\n if value > 0:\n pwm_val = (max_right_pwm - straight_pwm)* value + straight_pwm\n else:\n pwm_val = (straight_pwm - max_left_pwm)* (1+value) + max_left_pwm\n return pwm_val\n \n def _compute_offset(self, target, current, inertia):\n \"\"\"\n The distance between the desired value and the current value is passed in the log function.\n For long distances between target and current, the change in value will be smoothed.\n \n @param target: the desired value to apply \n @param current: the actual value\n @param inertia: float between 0 and 1, the larger it is, the smaller the change will be\n @return: the modified value\n \"\"\"\n # Stop when the target is already achieved\n if current == target:\n return current\n \n # Magic calculation\n offset = log(1.1+abs(target - current)) * (1 - inertia)/2\n \n # Avoid exceeding limit values\n if current > target:\n offset *= -1\n if current + offset < target:\n new = target\n else:\n new = current + offset\n elif current < target:\n if current + offset > target:\n new = target\n else:\n new = current + offset\n \n return new\n\n\nclass F1(Car):\n \"\"\"\n Just disable the clipping for speed value. The problem is:\n * The car could brake too hard and stop at low speed\n * The car could slow down too slowly at high speed because of car inertia\n \n To prevent this behaviour, we add high speed trace.\n If the high speed trace is 0, the clipping for lower speed value is enabled\n In any other case, no clipping.\n \n Many magic numbers in compute_speed method, they were obtained empirically.\n \"\"\"\n def __init__(self):\n super().__init__()\n \n self.speed = {\n \"range_pwm\": (375, 409, 413), # if battery is low 375 410 417\n \"current\": 0,\n \"target\": 0,\n \"pin\": 5,\n \n }\n \n self.direction = {\n \"range_pwm\": (315, 410, 530),\n \"current\": 0,\n \"target\": 0,\n \"pin\": 15,\n \"inertia\": 0.7,\n \n }\n \n def compute_speed(self):\n \"\"\"\n Set the current value with the target.\n Then compute PWM from current value\n \n @return: pwm value\n \"\"\"\n \n self.speed_lock.acquire()\n speed = self.speed\n \n # Clipped case: with no high speed trace, a negative target is zeroed to avoid a hard stop\n if speed[\"target\"] < 0 and self.high_speed_trace == 0:\n speed[\"current\"] = 0\n # Otherwise the raw target is applied without clipping\n else:\n # For logs\n if speed[\"target\"] < 0:\n print(\"BRAKE !\")\n speed[\"current\"] = speed[\"target\"]\n self.speed = speed\n self.speed_lock.release()\n \n # When the car accelerates quickly is the only case\n # where the trace is filling \n if speed[\"current\"] > 0.45:\n self.high_speed_trace += 0.4\n elif speed[\"current\"] < 0: \n self.high_speed_trace -= 0.1\n else:\n self.high_speed_trace -= 0.02\n \n # Clip the high_speed_trace to prevent extreme value\n # Simulating when the car reaches maximum or minimum speed\n if self.high_speed_trace < 0:\n self.high_speed_trace = 0\n elif self.high_speed_trace > 1.2:\n self.high_speed_trace = 1.2\n \n stop_pwm, start_pwm, max_pwm = speed[\"range_pwm\"]\n if speed[\"current\"] >= 0:\n pwm_val = (max_pwm - start_pwm)* speed[\"current\"] + start_pwm\n else:\n pwm_val = stop_pwm\n return pwm_val\n \nif __name__ == \"__main__\":\n car = Car().start()\n while True:\n car.set_direction(-1)\n 
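# demo: swing the steering from full left to full right, holding each side for five seconds\n 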
sleep(5)\n car.set_direction(1)\n sleep(5)\n","repo_name":"Koyaani/titaniumcar","sub_path":"titaniumcar/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":11828,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"4638796798","text":"# You are a hiker preparing for an upcoming hike. You are given heights, a 2D array of size rows x columns, where heights[row][col] represents the height of cell (row, col). You are situated in the top-left cell, (0, 0), and you hope to travel to the bottom-right cell, (rows-1, columns-1) (i.e., 0-indexed). You can move up, down, left, or right, and you wish to find a route that requires the minimum effort.\n\n# A route's effort is the maximum absolute difference in heights between two consecutive cells of the route.\n\n# Return the minimum effort required to travel from the top-left cell to the bottom-right cell.\n\n# DFS\nclass Solution:\n def minimumEffortPath(self, heights: List[List[int]]) -> int:\n def dfs(x, y, grid, visited, height):\n m = len(grid)\n n = len(grid[0])\n\n if x == m - 1 and y == n - 1:\n return True\n visited.add((x, y))\n df = [(-1,0), (1, 0), (0,1), (0,-1)]\n for dx, dy in df:\n new_x = x + dx\n new_y = y + dy\n if new_x < 0 or new_y < 0 or new_x >= m or new_y >= n or (new_x, new_y) in visited or abs(grid[new_x][new_y] - grid[x][y]) > height:\n continue\n if dfs(new_x, new_y, grid, visited, height):\n return True\n return False\n l = 0\n r = 10 ** 7\n while l < r:\n m = (l + r) //2\n if dfs(0, 0, heights, set(), m):\n r = m\n else:\n l = m + 1\n return l\n \n# BFS\nclass Solution:\n def minimumEffortPath(self, heights: List[List[int]]) -> int:\n m = len(heights)\n \n if not m :\n return 0\n n = len(heights[0])\n \n heap = [(0,0,0)]\n visit = set()\n res = 0\n while heap:\n distance,x,y = heappop(heap)\n visit.add((x,y))\n res = max(res, distance)\n if x == m - 1 and y == n - 1:\n return res\n for dx, dy in [(-1,0),( 1,0), (0,1), (0,-1) ]:\n nx = x + dx\n ny = y + dy\n if nx >= 0 and nx < m and ny >= 0 and ny < n and (nx,ny) not in visit:\n nd = abs(heights[nx][ny] - heights[x][y])\n heappush(heap, (nd, nx,ny)) \n","repo_name":"littlefattiger/My_LC_solution","sub_path":"python/by_tag/binary_search/1631. Path With Minimum Effort.py","file_name":"1631. 
Path With Minimum Effort.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21064579702","text":"import sys\r\nsys.path.append('../')\r\nsys.path.append('../ImgProcessing/')\r\n\r\nimport gradio as gr\r\nimport pandas as pd\r\nimport torch\r\nfrom torchvision import transforms, models\r\nimport torch.nn as nn\r\nimport cv2\r\nfrom imProcessingPipeline import improcessing as process\r\n\r\ndevice = torch.device('cpu')\r\n\r\ndf_colorMapping = pd.read_csv('../Labelling/colorLabelEncodingMapping.csv') \r\nnum_color_class = len(df_colorMapping.index) - 1\r\nlabels_color = df_colorMapping['Class'].tolist()\r\n\r\ndf_textureMapping = pd.read_csv('../Labelling/texture2LabelEncodingMapping.csv') \r\nnum_texture_class = len(df_textureMapping.index) - 1\r\nlabels_texture = df_textureMapping['Class'].tolist()\r\n\r\nvgg11_color = models.alexnet()\r\nvgg11_color.classifier[6] = nn.Linear(vgg11_color.classifier[6].in_features , num_color_class)\r\nvgg11_color.load_state_dict(torch.load(\"color_weights.pth\", map_location=device))\r\nvgg11_color.eval()\r\n\r\nvgg11_texture = models.alexnet()\r\nvgg11_texture.classifier[6] = nn.Linear(vgg11_texture.classifier[6].in_features , num_texture_class)\r\nvgg11_texture.load_state_dict(torch.load(\"texture_weights.pth\", map_location=device))\r\nvgg11_texture.eval()\r\n\r\ndef get_confidences(img, model, labels, num_class):\r\n with torch.no_grad():\r\n output = model(img)\r\n preds = nn.functional.softmax(output[0], dim = 0)\r\n confidence = {labels[i]: float(preds[i]) for i in range(num_class)} \r\n pred = max(confidence, key = confidence.get)\r\n return confidence, pred\r\n\r\ndef predict(img, model_name):\r\n print(model_name)\r\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\r\n processed_img = process(img)[0]\r\n processed_img = cv2.cvtColor(processed_img, cv2.COLOR_BGR2RGB)\r\n \r\n transform = transforms.ToTensor()\r\n input = transform(processed_img).unsqueeze(0)\r\n # confidence_color, pred_color = None, None\r\n confidence_color, pred_color = get_confidences(input, vgg11_color, labels_color, num_color_class)\r\n confidence_texture, pred_texture = get_confidences(input, vgg11_texture, labels_texture, num_texture_class)\r\n \r\n pred_msg = f'This is a {pred_color} {pred_texture} sherd!'\r\n return processed_img, confidence_color, confidence_texture, pred_msg\r\n \r\n# with gr.Blocks() as demo:\r\n# with gr.Column(scale=0.5, min_width=600):\r\n# name = gr.Image(type=\"pil\")\r\n# greet_btn = gr.Button(\"Greet\")\r\n# with gr.Column(scale=0.5, min_width=600):\r\n# output = gr.Textbox(label=\"Output Box\")\r\n# greet_btn.click(fn=greet, inputs=name, outputs=output)\r\n\r\ndescription = '''Digitization of archaeology is in great demand. Since 2009, a team of researchers and students led by Dr. Cobb has been investigating the area around Vedi, Armenia, aiming at understanding human life and mobility in the ancient landscapes of the Near East. A large volume of sherds was excavated and documented with photography. Inspired by the recent advancement in computer vision and deep learning, this project attempts to explore various deep learning models to classify and compare sherds unearthed. 
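Each uploaded photo is first run through the shared imProcessingPipeline step, after which color and texture predictions are reported side by side. 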
\\n More info can be found in http://openarchaeology.org/armenia/index\r\n'''\r\n\r\ndemo = gr.Interface(fn=predict, \r\n # inputs=gr.Image(type=\"pil\"),\r\n # outputs=[gr.Label(num_top_classes=3), \"text\"],\r\n inputs=[gr.Image(label='Please Upload Sherd Image', type=\"numpy\"),\r\n gr.Radio(choices = [\"AlexNet\", \"VGG\", \"ResNet\", \"SimNet\"], value='AlexNet',label=\"Please select a model\")],\r\n examples=[[\"exampleImgs/raw_sherd2.jpg\",None],\r\n [\"exampleImgs/raw_sherd4.jpg\",None],\r\n [\"exampleImgs/raw_sherd8.jpg\",None]],\r\n outputs=[gr.Image(label='Processed Image'),\r\n gr.Label(label='Color Prediction',num_top_classes=3), \r\n gr.Label(label='Texture Prediction', num_top_classes=3),\r\n gr.Textbox(label=\"Prediction\")],\r\n title = \"⛏️⛏️⛏️Machine Learning in Archaeology⛏️⛏️⛏️\", \r\n description= description,\r\n allow_flagging=\"never\")\r\n\r\n\r\n\r\ndemo.launch(server_port=8080) \r\n\r\n","repo_name":"RikidWai/MLinArchaeology","sub_path":"UI/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"44266386534","text":"import time\nfrom typing import Dict, List, Literal\n\nfrom ai.priority_algorithm.interfaces import Priority\nfrom ai.priority_algorithm.mutations.local_max import mutate_local_max\nfrom ai.priority_algorithm.mutations.random_swap import mutate_random_swap\nfrom ai.priority_algorithm.mutations.robinhood import mutate_robinhood\nfrom ai.priority_algorithm.priority import TokenizationPriority\nfrom ai.priority_algorithm.priority_teamset import PriorityTeamSet, PriorityTeam\nfrom benchmarking.simulation.algorithm_translator import AlgorithmTranslator\nfrom models.enums import DiversifyType, TokenizationConstraintDirection\nfrom models.student import Student\nfrom models.team import Team\nfrom old.team_formation.app.team_generator.algorithm.algorithms import WeightAlgorithm\nfrom old.team_formation.app.team_generator.student import Student as AlgorithmStudent\nfrom old.team_formation.app.team_generator.team import Team as AlgorithmTeam\nfrom old.team_formation.app.team_generator.team_generator import TeamGenerationOption\n\n\nclass PriorityAlgorithm(WeightAlgorithm):\n \"\"\"Class used to select teams using a priority algorithm.\"\"\"\n\n MAX_KEEP: int = 3 # nodes\n MAX_SPREAD: int = 3 # nodes\n MAX_ITERATE: int = 1500 # times\n MAX_TIME: int = 1 # seconds\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.student_dict: Dict[int, Student] = {}\n self.priorities: List[Priority] = []\n self.students: List[Student] = []\n\n def set_default_weights(self):\n self.options.diversity_weight = 1\n self.options.preference_weight = 1\n self.options.requirement_weight = 1\n self.options.social_weight = 1\n\n def create_student_dict(self, students: List[Student]) -> Dict[int, Student]:\n student_dict = {}\n for student in students:\n student_dict[student.id] = student\n return student_dict\n\n def create_priority_objects(self) -> List[Priority]:\n priorities = []\n\n # todo: depends on the input dictionary object structure,\n # would be better if the input dict just had the right types\n def get_strategy(constraint: Literal[\"diversify\", \"concentrate\"]):\n if constraint == \"diversify\":\n return DiversifyType.DIVERSIFY\n if constraint == \"concentrate\":\n return DiversifyType.CONCENTRATE\n raise TypeError\n\n def get_direction(limit_option: Literal[\"min_of\", \"max_of\"]):\n if limit_option == 
\"min_of\":\n return TokenizationConstraintDirection.MIN_OF\n if limit_option == \"max_of\":\n return TokenizationConstraintDirection.MAX_OF\n raise TypeError\n\n for priority in self.options.priorities:\n priorities.append(\n # todo: currently, only tokenization priorities are supported\n TokenizationPriority(\n attribute_id=priority[\"skill_id\"],\n strategy=get_strategy(priority[\"constraint\"]),\n direction=get_direction(priority[\"limit_option\"]),\n threshold=priority[\"limit\"],\n value=priority[\"value\"],\n )\n )\n return priorities\n\n def generate_initial_teams(\n self,\n students: List[AlgorithmStudent],\n teams: List[AlgorithmTeam],\n team_generation_option: TeamGenerationOption,\n ) -> PriorityTeamSet:\n self.set_default_weights()\n initial_teams = super().generate(students, teams, team_generation_option)\n initial_team_set = AlgorithmTranslator.algorithm_teams_to_team_set(\n initial_teams\n )\n priority_teams: List[PriorityTeam] = []\n for team in initial_team_set.teams:\n priority_team = PriorityTeam(\n team=team, student_ids=[student.id for student in team.students]\n )\n priority_teams.append(priority_team)\n\n return PriorityTeamSet(priority_teams=priority_teams)\n\n def generate(\n self,\n students: List[AlgorithmStudent],\n teams: List[AlgorithmTeam],\n team_generation_option: TeamGenerationOption,\n ) -> List[AlgorithmTeam]:\n self.students = AlgorithmTranslator.algorithm_students_to_students(students)\n self.student_dict = self.create_student_dict(self.students)\n self.priorities = self.create_priority_objects()\n start_time = time.time()\n iteration = 0\n team_sets = [\n self.generate_initial_teams(students, teams, team_generation_option)\n ]\n\n while (\n time.time() - start_time\n ) < self.MAX_TIME and iteration < self.MAX_ITERATE:\n new_team_sets: List[PriorityTeamSet] = []\n for team_set in team_sets:\n new_team_sets += self.mutate(team_set)\n team_sets = new_team_sets + team_sets\n team_sets = sorted(\n team_sets,\n key=lambda ts: ts.calculate_score(self.priorities, self.student_dict),\n reverse=True,\n )\n team_sets = team_sets[: self.MAX_KEEP]\n iteration += 1\n return AlgorithmTranslator.teams_to_algorithm_teams(\n self.save_team_compositions_to_teams(team_sets[0])\n )\n\n def save_team_compositions_to_teams(\n self, priority_team_set: PriorityTeamSet\n ) -> List[Team]:\n teams: List[Team] = []\n\n # empty underlying teams\n for priority_team in priority_team_set.priority_teams:\n priority_team.team.empty()\n\n for priority_team in priority_team_set.priority_teams:\n students = [\n self.student_dict[student_id]\n for student_id in priority_team.student_ids\n ]\n self.save_students_to_team(priority_team.team, students)\n teams.append(priority_team.team)\n return teams\n\n def mutate(self, team_set: PriorityTeamSet) -> List[PriorityTeamSet]:\n \"\"\"\n Mutate a single teamset into child teamsets\n \"\"\"\n algorithm = 1\n cloned_team_sets = [\n team_set.clone() for _ in range(PriorityAlgorithm.MAX_SPREAD)\n ]\n if algorithm == 1:\n return [\n mutate_random_swap(cloned_team_set)\n for cloned_team_set in cloned_team_sets\n ]\n elif algorithm == 2:\n return [\n mutate_robinhood(cloned_team_set, self.priorities, self.student_dict)\n for cloned_team_set in cloned_team_sets\n ]\n elif algorithm == 3:\n return [\n mutate_local_max(\n cloned_team_sets[0], self.priorities, self.student_dict\n ),\n *[\n mutate_random_swap(cloned_team_set)\n for cloned_team_set in cloned_team_sets[1:]\n ],\n 
]\n","repo_name":"Teamable-Analytics/algorithms","sub_path":"ai/priority_algorithm/priority_algorithm.py","file_name":"priority_algorithm.py","file_ext":"py","file_size_in_byte":6953,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"73221653688","text":"###################################################################################\n# Graph of lemon similarity to 25 others fruits.\n###################################################################################\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import linalg as LA\nimport networkx as nx\n\n\n\ndef PCA(data, dims_rescaled_data=2):\n \"\"\"\n returns: data transformed in 2 dims/columns + regenerated original data\n pass in: data as 2D NumPy array\n \"\"\"\n m, n = data.shape\n # mean center the data\n data -= data.mean(axis=0)\n # calculate the covariance matrix\n R = np.cov(data, rowvar=False)\n # calculate eigenvectors & eigenvalues of the covariance matrix\n # use 'eigh' rather than 'eig' since R is symmetric,\n # the performance gain is substantial\n evals, evecs = LA.eigh(R)\n # sort eigenvalue in decreasing order\n idx = np.argsort(evals)[::-1]\n evecs = evecs[:,idx]\n # sort eigenvectors according to same index\n evals = evals[idx]\n # select the first n eigenvectors (n is desired dimension\n # of rescaled data array, or dims_rescaled_data)\n evecs = evecs[:, :dims_rescaled_data]\n # carry out the transformation on the data using eigenvectors\n # and return the re-scaled data, eigenvalues, and eigenvectors\n return np.dot(evecs.T, data.T).T, evals, evecs\n\n\n\n\nv = np.load(\"compdb_fruit.npy\")\nitems = [\"Coconut\", \"Grapefruit\", \"Banana\", \"Grape\", \"Blackberry\", \"Guava\", \"Peach\", \"Sweet cherry\", \"Date\", \"Passion fruit\", \"Kiwi\", \"Pineapple\", \"Custard apple\",\n \"Star fruit\", \"Papaya\", \"Lime\", \"Lemon\", \"Pummelo\", \"Mandarin\", \"Orange\", \"Quince\", \"Black crowberry\", \"Apple\", \"Strawberry\", \"Lichee\", \"Mango\"]\n\nlemon_w = [ 67, 76, 61, 85, 45, 84, 76, 49, 68, 31, 65, 69, 25, 50, 89, 104, 293, 42, 158, 181, 31, 1, 0, 60, 29, 71]\n\ndef plot_weighted_graph(pos):\n \"Plot a weighted graph\"\n\n # 2. Add nodes\n G = nx.Graph() # Create a graph object called G\n node_list = [\"Coconut\", \"Grapefruit\", \"Banana\", \"Grape\", \"Blackberry\", \"Guava\", \"Peach\", \"Sweet cherry\", \"Date\", \"Passion fruit\", \"Kiwi\", \"Pineapple\", \"Custard apple\"\n , \"Star fruit\", \"Papaya\", \"Lime\", \"Lemon\", \"Pummelo\", \"Mandarin\", \"Orange\", \"Quince\", \"Black crowberry\", \"Apple\", \"Strawberry\", \"Lichee\", \"Mango\"]\n for node in node_list:\n G.add_node(node)\n\n \n nx.draw_networkx_nodes(G, pos, node_color='m', node_size=500)\n\n # 3. add labels to the nodes\n labels = {}\n for node_name in node_list:\n labels[str(node_name)] = str(node_name)\n nx.draw_networkx_labels(G, pos, labels, font_size=8)\n\n # 4. Add the edges \n for i, w in enumerate(lemon_w):\n if i == 16:\n continue\n G.add_edge(node_list[16], node_list[i], weight=w)\n\n all_weights = []\n # 4 a. Iterate through the graph nodes to gather all the weights\n for (node1, node2, data) in G.edges(data=True):\n all_weights.append(data['weight']) # we'll use this when determining edge thickness\n\n # 4 b. Get unique weights\n unique_weights = list(set(all_weights))\n\n # 4 c. Plot the edges - one by one!\n\n for weight in unique_weights:\n # 4 d. 
Form a filtered list with just the weight you want to draw\n weighted_edges = [(node1, node2) for (node1, node2, edge_attr) in G.edges(data=True) if\n edge_attr['weight'] == weight]\n # 4 e. I think multiplying by [num_nodes/sum(all_weights)] makes the graphs edges look cleaner\n width = weight * len(node_list) * 2.0 / sum(all_weights)\n nx.draw_networkx_edges(G, pos, edgelist=weighted_edges, width=width, edge_color='gray', label=weight)\n\n # Plot the graph\n plt.axis('off')\n plt.legend(loc=3, fontsize='xx-small')\n plt.title('Lemon molecular structure similarity')\n plt.savefig(\"chess_legends.png\")\n\n plt.show()\n\n\n\nidx = np.argwhere(np.all(v[..., :] == 0, axis=0))\nv2 = np.delete(v, idx, axis=1)\n\nv3, val, vec = PCA(v2)\nposi = {}\nfor j, item in enumerate(items):\n posi[item] = v3[j]\n\nplot_weighted_graph(posi)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"orCosta/Data_Project","sub_path":"graphs/lemon1.py","file_name":"lemon1.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74800609529","text":"# -*- ecoding: utf-8 -*-\n# @ModuleName: attention\n# @Author: wk\n# @Email: 306178200@qq.com\n# @Time: 2022/6/10 7:40 PM\n\nimport torch\nfrom torch import nn\nimport numpy as np\n\n\nclass ScaledDotProductAttention(nn.Module):\n \"\"\" Scaled Dot-Product Attention \"\"\"\n\n def __init__(self, dropout_rate=0.):\n super(ScaledDotProductAttention, self).__init__()\n self.dropout = None\n if dropout_rate > 0:\n self.dropout = nn.Dropout(dropout_rate)\n self.softmax = nn.Softmax(dim=2)\n\n def forward(self, W_q, W_k, W_v, scale=None, mask=None):\n attention = torch.bmm(W_q, W_k.transpose(1, 2))\n if scale:\n attention = attention / scale\n if mask:\n attention = attention.masked_fill_(mask, -np.inf)\n attention = self.softmax(attention)\n if self.dropout is not None:\n attention = self.dropout(attention)\n output = torch.bmm(attention, W_v)\n return output, attention\n\n\nclass MultiHeadAttention(nn.Module):\n \"\"\" Multi-head attention module \"\"\"\n\n def __init__(self, input_dim, attention_dim=None, num_heads=1, dropout_rate=0.,\n use_residual=True, use_scale=False, layer_norm=False, align_to=\"input\"):\n super(MultiHeadAttention, self).__init__()\n if attention_dim is None:\n attention_dim = input_dim // num_heads\n self.attention_dim = attention_dim\n self.output_dim = num_heads * attention_dim\n self.num_heads = num_heads\n self.use_residual = use_residual\n self.align_to = align_to\n self.scale = attention_dim ** 0.5 if use_scale else None\n self.W_q = nn.Linear(input_dim, self.output_dim, bias=False)\n self.W_k = nn.Linear(input_dim, self.output_dim, bias=False)\n self.W_v = nn.Linear(input_dim, self.output_dim, bias=False)\n if input_dim != self.output_dim:\n if align_to == \"output\":\n self.W_res = nn.Linear(input_dim, self.output_dim, bias=False)\n elif align_to == \"input\":\n self.W_res = nn.Linear(self.output_dim, input_dim, bias=False)\n else:\n self.W_res = None\n self.dot_product_attention = ScaledDotProductAttention(dropout_rate)\n self.layer_norm = nn.LayerNorm(self.output_dim) if layer_norm else None\n self.dropout = nn.Dropout(dropout_rate) if dropout_rate > 0 else None\n\n def forward(self, query, key, value, mask=None):\n residual = query\n\n # linear projection\n query = self.W_q(query)\n key = self.W_k(key)\n value = self.W_v(value)\n\n # split by heads\n batch_size = query.size(0)\n query = query.view(batch_size * self.num_heads, -1, 
self.attention_dim)\n        key = key.view(batch_size * self.num_heads, -1, self.attention_dim)\n        value = value.view(batch_size * self.num_heads, -1, self.attention_dim)\n        if mask is not None:\n            mask = mask.repeat(self.num_heads, 1, 1)\n        # scaled dot product attention\n        output, attention = self.dot_product_attention(query, key, value, self.scale, mask)\n        # concat heads\n        output = output.view(batch_size, -1, self.output_dim)\n        # final linear projection\n        if self.W_res is not None:\n            if self.align_to == \"output\": # AutoInt style\n                residual = self.W_res(residual)\n            elif self.align_to == \"input\": # Transformer style\n                output = self.W_res(output)\n        if self.dropout is not None:\n            output = self.dropout(output)\n        if self.use_residual:\n            output = output + residual\n        if self.layer_norm is not None:\n            output = self.layer_norm(output)\n        output = output.relu()\n        return output, attention\n\n\nclass MultiHeadSelfAttention(MultiHeadAttention):\n    def forward(self, X):\n        output, _ = super(MultiHeadSelfAttention, self).forward(X, X, X)\n        return output\n\n\nclass SqueezeExcitationLayer(nn.Module):\n    def __init__(self, num_fields, reduction_ratio=3):\n        super(SqueezeExcitationLayer, self).__init__()\n        reduced_size = max(1, int(num_fields / reduction_ratio))\n        self.excitation = nn.Sequential(nn.Linear(num_fields, reduced_size, bias=False),\n                                        nn.ReLU(),\n                                        nn.Linear(reduced_size, num_fields, bias=False),\n                                        nn.ReLU())\n\n    def forward(self, feature_emb):\n        Z = torch.mean(feature_emb, dim=-1, out=None)\n        A = self.excitation(Z)\n        V = feature_emb * A.unsqueeze(-1)\n        return V\n","repo_name":"HaSai666/rec_pangu","sub_path":"rec_pangu/models/layers/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"77"} +{"seq_id":"15613625526","text":"from flask import Flask, render_template, request, redirect, url_for \nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager, current_user, login_user, logout_user\n\nfrom flask_moment import Moment\nfrom datetime import datetime\nfrom pytz import timezone\n\napp = Flask(__name__)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///twitter.db\"\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\napp.secret_key = \"TWITTER_FLASK_SECRET_KEY\"\n\nlocal_timezone = timezone(\"America/Sao_Paulo\")\n\n# Initialization and configuration of SQL-Alchemy\ndb = SQLAlchemy(app)\n\n# Initialization of Moment (date formatting package).\nmoment = Moment(app)\n\n# Initialization and configuration of Flask-Login\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = \"login\"\n\n# Import of the modules responsible for data/login management.\nfrom auth.forms import LoginForm, RegisterForm, EditForm\nfrom models.User import User\nfrom models.Tweet import Tweet\n\n# Request error routes\n@app.errorhandler(404)\ndef page_not_found(e):\n    return render_template('errorhandlers/404.html'), 404\n\n@app.route(\"/\")\ndef index():\n    if current_user.is_authenticated:\n        return redirect(url_for(\"dashboard\"))\n\n    form = RegisterForm()\n\n    return render_template(\n        \"pages/index.html\", \n        form = form,\n        page_title = \"Twitter. É o que está acontecendo\"\n    )\n\n@app.route(\"/login\", defaults={ \"success\": False }, methods=[\"GET\", \"POST\"])\n@app.route(\"/login/<success>\", methods=[\"GET\", \"POST\"])\ndef login(success):\n    if current_user.is_authenticated:\n        return redirect(url_for(\"dashboard\"))\n\n    form = LoginForm()\n    register_form = RegisterForm()\n\n    if form.validate_on_submit():\n        user = User.query.filter_by(email = form.email.data).first()\n\n        if user is None:\n            return render_template(\n                \"pages/login.html\", \n                invalid_credential = True, \n                form = form, \n                register_form = register_form,\n                page_title = \"Entrar no Twitter\"\n            )\n\n        login_user(user)\n\n        return redirect(url_for(\"dashboard\"))\n\n    return render_template(\n        \"pages/login.html\", \n        form = form, \n        register_form = register_form,\n        page_title = \"Entrar no Twitter\",\n        success = success\n    )\n\n@app.route(\"/logout\", methods=[\"GET\", \"POST\"])\ndef logout():\n    logout_user()\n\n    return redirect(url_for(\"index\"))\n\n@app.route(\"/register\", methods=[\"POST\"])\ndef register():\n    if current_user.is_authenticated:\n        return redirect(url_for(\"dashboard\"))\n\n    form = RegisterForm()\n    errors = {}\n\n    if form.validate_on_submit():\n        if not form.validate_email(form.email):\n            errors[\"email\"] = True\n\n        if not form.validate_username(form.username):\n            errors[\"username\"] = True\n\n        if errors:\n            return render_template(\n                \"pages/register.html\",\n                error_email = errors.get(\"email\"),\n                error_username = errors.get(\"username\"),\n                form = form,\n                page_title = \"Inscrever-se no Twitter\"\n            )\n\n        user = User(\n            form.name.data,\n            form.username.data,\n            form.email.data,\n            form.password.data,\n            form.birth_date.data,\n            local_timezone.localize(datetime.now()),\n        )\n\n        db.session.add(user)\n        db.session.commit()\n\n        return redirect(url_for(\"login\", success = True))\n    else:\n        return render_template(\n            \"pages/register.html\", \n            form = form,\n            page_title = \"Inscrever-se no Twitter\"\n        )\n\n@app.route(\"/home\")\ndef dashboard():\n    tweets = Tweet.query.join(User, Tweet.id_user == User.id).add_columns(\n        Tweet.id,\n        Tweet.tweet,\n        Tweet.tweeted_at,\n        User.id,\n        User.name,\n        User.username,\n        User.verified,\n    ).order_by(Tweet.tweeted_at.desc())\n\n    current_date = local_timezone.localize(datetime.now())\n\n    return render_template(\n        \"pages/home.html\",\n        active_page = \"home\",\n        tweets = tweets,\n        now = current_date,\n        page_title = \"Página Inicial\"\n    )\n\n@app.route(\"/profile\", defaults={ \"username\": -1 })\n@app.route(\"/profile/<username>\")\ndef profile(username):\n    def search_tweets(id):\n        return Tweet.query.join(User, Tweet.id_user == User.id).filter(Tweet.id_user == id).add_columns(\n            Tweet.id,\n            Tweet.tweet,\n            Tweet.tweeted_at,\n            User.id,\n            User.name,\n            User.username,\n            User.verified,\n        ).order_by(Tweet.tweeted_at.desc())\n\n    user = {}\n    tweets = {}\n    active_page = \"profile\"\n\n    if username == current_user.username or username == -1:\n        tweets = search_tweets(current_user.id)\n        user = current_user\n    else:\n        active_page = None\n\n        user = User.query.filter_by(username = username).first()\n        if user is not None:\n            tweets = search_tweets(user.id)\n    \n    current_date = local_timezone.localize(datetime.now())\n    form = EditForm()\n\n    if user is None:\n        return render_template(\n            \"pages/profile_empty.html\",\n            page_title = \"Perfil\",\n            username = username\n        )\n    else:\n        return render_template(\n            \"pages/profile.html\",\n            user = user,\n            tweets = tweets,\n            now = current_date,\n            active_page = active_page,\n            page_title = user.name,\n            form = form\n        )\n\n\n@app.route(\"/send_tweet\", 
methods=[\"POST\"])\ndef send_tweet():\n tweet_form = request.form[\"inputTweet\"]\n\n tweet = Tweet(\n current_user.id,\n tweet_form,\n local_timezone.localize(datetime.now())\n )\n\n db.session.add(tweet)\n db.session.commit()\n\n return redirect(url_for(\"dashboard\"))\n\n@app.route(\"/save_edit\", methods=[\"POST\"])\ndef save_edit():\n form = EditForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(id = current_user.id).first()\n user.name = form.name.data\n user.bio = form.bio.data\n\n db.session.commit()\n login_user(user)\n\n return redirect(url_for(\"profile\"))\n else:\n return redirect(url_for(\"profile\"), error_while_editing = True)\n\n\nwith app.app_context():\n db.create_all()\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"rdgrb/flask-twitter","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72871856569","text":"\n\nimport pandas as pd\nimport pygal.maps.world\nfrom pygal.style import NeonStyle\nimport pickle\n\nfrom urllib.request import urlopen as ureq\nfrom bs4 import BeautifulSoup as soup\nimport json\n\n\n# pie chart of districts\nurl='https://api.covid19india.org/v2/state_district_wise.json'\nuclient=ureq(url)\npage_html1=uclient.read()\nuclient.close()\n\n\npage_soup=soup(page_html1,\"html.parser\")\npage_json=json.loads(page_soup.text)\n\npie1=pygal.Pie( inner_radius=0.4,style=NeonStyle,font_family='googlefont:Raleway')\npie1.title='Pie chart Of Indian states'\n\ntest1=pd.DataFrame(page_json)\n\nfor i in range(len(test1)):\n ans=0\n test=pd.DataFrame(test1.iloc[i,0])\n pie=pygal.Pie( inner_radius=0.4,style=NeonStyle,font_family='googlefont:Raleway')\n pie.title='Pie chart Of '+test1.iloc[i,1]\n\n for j in range(len(test)):\n ans+=int(test.iloc[j,1])\n pie.add(test.iloc[j,4],test.iloc[j,1])\n pie.render_to_file('./plots/'+test1.iloc[i,2]+'_Pie.svg')\n \n pie1.add(test1.iloc[i,1],ans)\n\npie1.render_to_file('./plots/Country_pie.svg')\n\n\nempty_states={'DD','DN','LD','NL','SK'}\n\nfor state in empty_states:\n\tpie1=pygal.Pie( inner_radius=0.4,style=NeonStyle,font_family='googlefont:Raleway')\n\tpie1.render_to_file('./plots/'+state+'_Pie.svg')\n","repo_name":"Ethical-coder/stat19corona","sub_path":"pie_graphs.py","file_name":"pie_graphs.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"25428104022","text":"#!/usr/bin/env python3\n\nimport conf\nimport ingester\nimport logging\nimport json\nimport os\nimport requester\n\nif __name__ == '__main__':\n\n stats = ingester.Stats()\n stats.script(conf.SCRIPT_NAME, conf.SCRIPT_VERSION, {\n 'api_path': conf.API_PATH,\n 'auth_user': conf.AUTH_USER,\n 'site_name': conf.SITE_NAME,\n 'ingester': conf.INGESTER_PATH,\n 'limit': conf.LIMIT,\n 'request': {\n 'retries': conf.REQUEST_RETRIES,\n 'timeout': conf.REQUEST_TIMEOUT,\n },\n 'cache': {\n 'enabled': conf.CACHE_ENABLED,\n 'path': conf.CACHE_PATH,\n 'seconds': conf.CACHE_SECONDS,\n },\n 'log': {\n 'enabled': conf.LOG_TO_FILE,\n 'file': conf.LOG_FILE,\n 'level': conf.LOG_LEVEL,\n },\n })\n\n users = requester.get_item(path='user/search?username=%')\n projects = requester.get_item(path='project')\n issues = requester.get_item(path='search?maxResults=1000')\n\n # Output data\n data = {\n 'users': users,\n 'projects': projects,\n 'issues': issues,\n }\n\n stats.finish()\n stats.count(data)\n \n if 
conf.INGESTER_PATH:\n uplink = ingester.ChunkedIngesterLink(conf.INGESTER_PATH)\n uplink.chunk_dict(data)\n uplink.stats(stats)\n if conf.VERBOSE:\n data['stats'] = stats\n print(json.dumps(data, indent=4))\n","repo_name":"cryptosteer/requester_jira","sub_path":"jira.py","file_name":"jira.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14336300481","text":"import torchvision as tv\nfrom torch.utils.data import DataLoader\nimport torch\nfrom WarmSch import GradualWarmupScheduler\nimport torch.cuda.amp as amp\nimport torch.optim as optim\nfrom DataSet import AssembleDataSet\nfrom Backbone import ResNet\nfrom YoloV1 import YoloDetection\nfrom YoloV1 import YoloLoss\nfrom collections import OrderedDict\n\nif __name__ == \"__main__\":\n ### config\n device = \"cuda:0\"\n boundingBoxesNum = 3\n labelsNum = 1\n SGrid = 8\n imageSize = 1024\n coordLambda = 5\n noObjLambda = 0.008\n LR = 1e-5\n multiplier = 100\n reg_lambda = 1e-5\n ### In current version, the batch size only can be 1 !!!\n batchSize = 1\n warmEpoch = 5\n epoch = 25\n displayTimes = 10\n if_loadPre_TrainWeight = True\n preTrainWeightLoadPath = \"resnext101_32x8d-8ba56ff5.pth\"\n trainCheckPointSavePath = \"./trainCheckPoint/\"\n\n ### Model\n backbone = ResNet()\n backboneLastChannels = backbone.last_channel\n yoloModel = YoloDetection(backBoneOutChannels=backboneLastChannels, backbone=backbone,\n BoundingBoxes=boundingBoxesNum, num_classes=labelsNum, SGrid=SGrid, imageSize=imageSize).to(device)\n yoloModel = yoloModel.train(True)\n\n ### Data set\n transforms = tv.transforms.Compose(\n [tv.transforms.ToTensor(),\n tv.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n dataSet = AssembleDataSet('./AssembleDataSet/',transforms , imageSize=imageSize)\n dataLoader = DataLoader(dataSet,batch_size=batchSize, shuffle=True, num_workers=2, pin_memory=True,)\n\n ### Optimizer\n optimizer = optim.SGD(yoloModel.parameters(), lr=LR, momentum=0.9, weight_decay=reg_lambda, nesterov=True)\n cosine_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, epoch, eta_min=0, last_epoch=-1)\n scheduler = GradualWarmupScheduler(optimizer, multiplier=multiplier, total_epoch=warmEpoch,\n after_scheduler=cosine_scheduler)\n scaler = amp.GradScaler()\n\n ### Loss\n lossCri = YoloLoss(coOrd = coordLambda, noObj= noObjLambda, BoundingBoxes=boundingBoxesNum,\n num_classes=labelsNum, SGrid=SGrid, imageSize=imageSize, device = device).to(device)\n if if_loadPre_TrainWeight:\n preTrainDic = torch.load(preTrainWeightLoadPath)\n newDic = OrderedDict()\n for key, value in preTrainDic.items():\n if \"fc\" not in key:\n newDic[key] = value\n yoloModel.backbone.load_state_dict(newDic)\n\n currentTrainingTimes = 0\n for e in range(1, epoch + 1):\n for times , (NImages, NBoxes, NLabels) in enumerate(dataLoader):\n NImages = NImages.to(device)\n #print(NImages.shape)\n NBoxes = NBoxes.to(device)\n #print(NBoxes.shape)\n NLabels = NLabels.to(device)\n #print(NLabels.shape)\n optimizer.zero_grad()\n with amp.autocast():\n ## confidence [N, S, S, B], boxes [N, S, S, B * 4], condClasses [N, S, S, NUM_CLASSES]\n preConfidence, preBoxes, preCondClasses = yoloModel(NImages)\n ordinateLoss, objLoss, noObjLoss, classesLoss = \\\n lossCri(preConfidence=preConfidence,\n preBoxes=preBoxes,\n preCondClasses=preCondClasses,\n groundTruth=NBoxes,\n groundLabels=NLabels)\n totalLoss = ordinateLoss + objLoss + noObjLoss + classesLoss\n 
#print(torch.isnan(totalLoss).tolist())\n            if torch.isnan(totalLoss).tolist() is False:\n                #print(\"Update\")\n                scaler.scale(totalLoss).backward()\n                scaler.step(optimizer)\n                scaler.update()\n            currentTrainingTimes += 1\n            if currentTrainingTimes % displayTimes == 0 and torch.isnan(totalLoss).tolist() is False:\n                with torch.no_grad():\n                    print(\"######################\")\n                    print(\"Epoch : %d , Training time : %d\" % (e, currentTrainingTimes))\n                    print(\"Total Loss is %.3f \" % (totalLoss.item()))\n                    print(\"Coordinate loss is {}\".format(ordinateLoss.item()))\n                    print(\"Object confident loss {}\".format(objLoss.item()))\n                    print(\"No object confident loss {}\".format(noObjLoss.item()))\n                    print(\"Classes Loss {}\".format(classesLoss))\n                    print(\"Learning rate is \", optimizer.state_dict()['param_groups'][0][\"lr\"])\n                    torch.save(yoloModel.state_dict(),\n                               trainCheckPointSavePath + str(currentTrainingTimes) + \"Times.pth\")\n        scheduler.step()\n\n","repo_name":"zoubohao/YOLO-V1-Pytorch","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70589829370","text":"def drivers(speed):\n    #what is input and output?\n    global reverseDrivers\n    global parkedDrivers\n    global slowDrivers\n    global safeDrivers\n    global speeders\n    if speed<0:\n        reverseDrivers=reverseDrivers+1\n    elif speed<1:\n        parkedDrivers=parkedDrivers+1\n    elif speed<40:\n        slowDrivers=slowDrivers+1\n    elif speed<=65:\n        safeDrivers=safeDrivers+1\n    else:\n        speeders=speeders+1","repo_name":"BDillman19/Python","sub_path":"mediasources-4ed/mediasources-4ed/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29362732059","text":"# Zodiac\nday = int(input(\"Día de nacimiento >>> \"))\nmonth = int(input(\"Mes de nacimiento >>> \"))\nif month == 3 and 22 <= day <= 31 or month == 4 and 1 <= day <= 20:\n    sign = \"Aries\"\nelif month == 4 and 21 <= day <= 30 or month == 5 and 1 <= day <= 21:\n    sign = \"Tauro\"\nelif month == 5 and 22 <= day <= 31 or month == 6 and 1 <= day <= 21:\n    sign = \"Géminis\"\nelif month == 6 and 22 <= day <= 30 or month == 7 and 1 <= day <= 23:\n    sign = \"Cáncer\"\nelif month == 7 and 24 <= day <= 31 or month == 8 and 1 <= day <= 23:\n    sign = \"Leo\"\nelif month == 8 and 24 <= day <= 31 or month == 9 and 1 <= day <= 23:\n    sign = \"Virgo\"\nelif month == 9 and 24 <= day <= 30 or month == 10 and 1 <= day <= 23:\n    sign = \"Libra\"\nelif month == 10 and 24 <= day <= 31 or month == 11 and 1 <= day <= 22:\n    sign = \"Escorpio\"\nelif month == 11 and 23 <= day <= 30 or month == 12 and 1 <= day <= 22:\n    sign = \"Sagitario\"\nelif month == 12 and 23 <= day <= 31 or month == 1 and 1 <= day <= 20:\n    sign = \"Capricornio\"\nelif month == 1 and 21 <= day <= 31 or month == 2 and 1 <= day <= 19:\n    sign = \"Acuario\"\nelif month == 2 and 20 <= day <= 28 or month == 3 and 1 <= day <= 21:\n    sign = \"Piscis\"\nelse:\n    sign = \"fecha inválida\"\nprint(sign)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej7/hito1_ej7_c910055801f58691213a4edcefcbe9ae.py","file_name":"hito1_ej7_c910055801f58691213a4edcefcbe9ae.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29455631209","text":"import random\n\ndef ocultar_letras(palabra,cantidad):\n    palabra_lista=list(palabra)\n\n    i=0\n    while i < cantidad:\n        n_random=random.randint(0,len(palabra)-1)\n        if palabra_lista[n_random] != \"_\":\n            palabra_lista[n_random]=\"_\"\n            i+=1\n    palabra_final=\"\".join(palabra_lista)\n    \n    return palabra_final\n\ndef revisar_letra(palabra_secreta,palabra,letra):\n    \n    palabra_secreta_lista=list(palabra_secreta)\n    palabra_lista=list(palabra)\n    posicion=0\n    contador=0\n    for i in palabra_secreta_lista:\n        if i == letra:\n            palabra_lista[posicion]=letra\n            contador += 1\n        \n        posicion += 1\n    \n    \n    palabra=\"\".join(palabra_lista)\n    return palabra\n\n\n\n\n\"\"\"\nif __name__ == \"__main__\":\n    \n\n    \n    lista=[\"lepidoptero1\",\"lepidoptero2\",\"lepidoptero3\"]\n    palabra_secreta=lista[random.randint(0,len(lista)-1)]\n\n    \n    intentos =0\n    while intentos <=7:\n        strg=str(input(\"ingresaletra\"))\n        print(revisar_letra(\"lepidoptero\", revisar_letra(palabra_secreta,palabra,letra), strg)[0])\n        intentos +=1\n\n    \n    \"\"\"    \n\n\n\n\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema4_ej1/tema4_ej1_a92d7371145c5afee168ad19d7884abd.py","file_name":"tema4_ej1_a92d7371145c5afee168ad19d7884abd.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43184231704","text":"# Write a program that creates a list of words and prints the longest word and the shortest word in the list\n\nlista = ['jogo', 'banana', 'paralelepipedo', 'água', 'pedra', 'ola']\npalavraLonga = lista[0]\npalavraCurta = lista[0]\n\nfor palavra in lista:\n    if len(palavra) > len(palavraLonga):\n        palavraLonga = palavra\n    elif len(palavra) < len(palavraCurta):\n        palavraCurta = palavra\n\nprint(f'Palavra Longa {palavraLonga}')\nprint(f'Palavra Curta {palavraCurta}')\n\n","repo_name":"BrenoOrtiz/Raciocinio_Algoritmico","sub_path":"PBL07/ex05.py","file_name":"ex05.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31956217545","text":"from conans import ConanFile, CMake\nfrom conan.tools.build import cross_building\nfrom conan.tools import files\nfrom io import StringIO\nimport os\n\n\n# legacy validation with Conan 1.x\nclass TestPackageV1Conan(ConanFile):\n    settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n    generators = \"cmake\", \"cmake_find_package_multi\"\n\n    def requirements(self):\n        self.requires(self.tested_reference_str)\n        if not cross_building(self):\n            self.requires(\"libpcap/1.10.1\")\n\n    def configure(self):\n        if not cross_building(self):\n            self.options[\"libpcap\"].shared = True\n\n    def build(self):\n        cmake = CMake(self)\n        cmake.configure()\n        cmake.build()\n\n    def test(self):\n        if not cross_building(self):\n            # Use libpcap DLL as a replacement for npcap DLL\n            # It will not provide all the functions\n            # but it will cover enough to check that what we compiled is correct\n            files.rm(self, \"wpcap.dll\", \"bin\")\n            files.copy(self, \"pcap.dll\", src=self.deps_cpp_info['libpcap'].bin_paths[0], dst=\"bin\")\n            files.rename(self, os.path.join(\"bin\", \"pcap.dll\"), os.path.join(\"bin\", \"wpcap.dll\"))\n\n            bin_path = os.path.join(\"bin\", \"test_package\")\n            output = StringIO()\n            self.run(bin_path, run_environment=True, output=output)\n            test_output = output.getvalue()\n            print(test_output)\n            assert \"libpcap version 1.10.1\" in 
test_output\n","repo_name":"conan-io/conan-center-index","sub_path":"recipes/npcap/all/test_v1_package/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":835,"dataset":"github-code","pt":"77"} +{"seq_id":"30307470701","text":"import os\nimport sys\nfrom dataclasses import dataclass\nimport numpy as np\nimport pandas as pd\nfrom sklearn.compose import ColumnTransformer, make_column_transformer, make_column_selector\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import Pipeline, make_pipeline\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\n\nfrom src.exception import CustomException\nfrom src.logger import logging\nfrom src.utils import save_object\n\n\n@dataclass\nclass DataTransformationConfig:\n preprocessor_obj_file_path:str = os.path.join(\"artifacts\", \"preprocessing.pkl\")\n\n\nclass DataTransformation:\n\n def __init__(self):\n self.data_transformation_config = DataTransformationConfig()\n\n def get_data_transformer_obj(self):\n \"\"\"\n this function is used to perform data transformation based on different types of features\n \"\"\"\n try:\n # preprocessing pipeline for numerical features\n num_pipeline = Pipeline(\n steps=[\n (\"impute\", SimpleImputer(strategy=\"median\")),\n (\"normalize\", StandardScaler()),\n ]\n )\n\n # preprocessing pipeline for categorical features\n # cat_pipeline = Pipeline(\n # steps=[\n # ('impute', SimpleImputer(strategy=\"most_frequent\")),\n # (\"normalize\", OneHotEncoder()),\n # (\"normalize\", StandardScaler())\n # ]\n # )\n\n logging.info(\"Numerical and/or categorical features are imputed, (encoded) and scaled\")\n\n # preprocessing = ColumnTransformer(\n # (\"num_pipeline\", num_pipeline, make_column_selector(dtype_include=np.number)),\n # (\"cat_pipeline\", cat_pipeline, make_column_selector(dtype_include=object)),\n # )\n\n # the data set has only numerical features\n preprocessing = make_column_transformer(\n (num_pipeline, make_column_selector(dtype_include=np.number))\n )\n\n return preprocessing\n\n except Exception as e:\n raise CustomException(e, sys)\n\n\n def initiate_data_transformation(self, train_path, test_path):\n try:\n train_df = pd.read_csv(train_path)\n test_df = pd.read_csv(test_path)\n\n logging.info(\"Read train and test datasets completed\")\n\n logging.info(\"Obtaining preprocessing object file\")\n\n preprocessing_obj = self.get_data_transformer_obj()\n\n target_columns = [\"Y1\", \"Y2\"]\n input_feature_train_df = train_df.drop(columns=target_columns, axis=1)\n target_feature_train_df = train_df[target_columns]\n\n input_feature_test_df = test_df.drop(columns=target_columns, axis=1)\n target_feature_test_df = test_df[target_columns]\n\n logging.info(\n \"Applying preprocessing object on training dataframe and testing datasets\"\n )\n\n input_feature_train_arr = preprocessing_obj.fit_transform(input_feature_train_df)\n input_feature_train_df_preprocessed = pd.DataFrame(input_feature_train_arr, columns=input_feature_train_df.columns,\n index=input_feature_train_df.index)\n\n input_feature_test_arr=preprocessing_obj.transform(input_feature_test_df) # no fit() on test data!\n input_feature_test_df_preprocessed = pd.DataFrame(input_feature_test_arr, columns=input_feature_test_df.columns,\n index=input_feature_test_df.index)\n\n # two sets of train/test for heating and cooling loads, respectively\n train_df1 = pd.concat([input_feature_train_df_preprocessed, 
target_feature_train_df[target_columns[0]]], axis=1)\n test_df1 = pd.concat([input_feature_test_df_preprocessed, target_feature_test_df[target_columns[0]]], axis=1)\n\n train_df2 = pd.concat([input_feature_train_df_preprocessed, target_feature_train_df[target_columns[1]]], axis=1)\n test_df2 = pd.concat([input_feature_test_df_preprocessed, target_feature_test_df[target_columns[1]]], axis=1)\n\n logging.info(f\"Saved preprocessing object\")\n\n save_object(\n file_path=self.data_transformation_config.preprocessor_obj_file_path,\n obj=preprocessing_obj,\n )\n\n return (\n train_df1,\n test_df1,\n train_df2,\n test_df2,\n self.data_transformation_config.preprocessor_obj_file_path,\n )\n\n except Exception as e:\n raise CustomException(e, sys)","repo_name":"ShuoLi0208/Building-Load-Prediction","sub_path":"src/components/data_transformation.py","file_name":"data_transformation.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3825009878","text":"# coding: u8\n\nimport jsbeautifier\n\n\nopts = jsbeautifier.default_options()\nopts.indent_size = 2\nopts.break_chained_methods = True\nopts.comma_first = True\n\nmin_js_file_path = '../wx7c8d593b2c3a7703_3.wxapkg.unpack/game.js'\nres = jsbeautifier.beautify_file(min_js_file_path)\n\nopen('./game.beautified2.js', 'w').write(res)\n","repo_name":"Shu-Ji/wechat_micro_jump_game_hero","sub_path":"js_beautifier.py","file_name":"js_beautifier.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"77"} +{"seq_id":"17027901102","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom datetime import datetime\nfrom decimal import Decimal as D\nimport json\n\nfrom mock import patch\n\nfrom gratipay.billing.payday import Payday\nfrom gratipay.models.participant import Participant\nfrom gratipay.testing import Harness\nfrom gratipay.testing.billing import BillingHarness\nfrom gratipay.utils.history import get_end_of_year_balance, iter_payday_events\n\n\ndef make_history(harness):\n alice = harness.make_participant('alice', claimed_time=datetime(2001, 1, 1, 0, 0, 0))\n harness.alice = alice\n harness.make_exchange('braintree-cc', 50, 0, alice)\n harness.make_exchange('braintree-cc', 12, 0, alice, status='failed')\n harness.make_exchange('paypal', -40, 0, alice)\n harness.make_exchange('paypal', -5, 0, alice, status='failed')\n harness.db.run(\"\"\"\n UPDATE exchanges\n SET timestamp = \"timestamp\" - interval '1 year'\n \"\"\")\n harness.past_year = int(harness.db.one(\"\"\"\n SELECT extract(year from timestamp)\n FROM exchanges\n ORDER BY timestamp ASC\n LIMIT 1\n \"\"\"))\n harness.make_exchange('braintree-cc', 35, 0, alice)\n harness.make_exchange('braintree-cc', 49, 0, alice, status='failed')\n harness.make_exchange('paypal', -15, 0, alice)\n harness.make_exchange('paypal', -7, 0, alice, status='failed')\n\n\nclass TestHistory(BillingHarness):\n\n def test_iter_payday_events(self):\n now = datetime.now()\n Payday().start().run()\n\n Enterprise = self.make_team(is_approved=True)\n self.obama.set_payment_instruction(Enterprise, '10.00') # >= MINIMUM_CHARGE!\n for i in range(2):\n with patch.object(Payday, 'fetch_card_holds') as fch:\n fch.return_value = {}\n Payday.start().run()\n self.db.run(\"\"\"\n UPDATE paydays\n SET ts_start = ts_start - interval '1 week'\n , ts_end = ts_end - interval '1 week';\n UPDATE payments\n SET timestamp 
= \"timestamp\" - interval '1 week';\n UPDATE transfers\n SET timestamp = \"timestamp\" - interval '1 week';\n \"\"\")\n\n\n obama = Participant.from_username('obama')\n picard = Participant.from_username('picard')\n\n assert obama.balance == D('0.00')\n assert picard.balance == D('20.00')\n\n Payday().start() # to demonstrate that we ignore any open payday?\n\n # Make all events in the same year.\n delta = '%s days' % (364 - (now - datetime(now.year, 1, 1)).days)\n self.db.run(\"\"\"\n UPDATE paydays\n SET ts_start = ts_start + interval %(delta)s\n , ts_end = ts_end + interval %(delta)s;\n UPDATE payments\n SET timestamp = \"timestamp\" + interval %(delta)s;\n UPDATE transfers\n SET timestamp = \"timestamp\" + interval %(delta)s;\n \"\"\", dict(delta=delta))\n\n events = list(iter_payday_events(self.db, picard, now.year))\n assert len(events) == 7\n assert events[0]['kind'] == 'totals'\n assert events[0]['given'] == 0\n assert events[0]['received'] == 20\n assert events[1]['kind'] == 'day-open'\n assert events[1]['payday_number'] == 2\n assert events[2]['balance'] == 20\n assert events[-1]['kind'] == 'day-close'\n assert events[-1]['balance'] == 0\n\n events = list(iter_payday_events(self.db, obama))\n assert events[0]['given'] == 20\n assert len(events) == 11\n\n def test_iter_payday_events_with_failed_exchanges(self):\n alice = self.make_participant('alice', claimed_time='now')\n self.make_exchange('braintree-cc', 50, 0, alice)\n self.make_exchange('braintree-cc', 12, 0, alice, status='failed')\n self.make_exchange('paypal', -40, 0, alice, status='failed')\n events = list(iter_payday_events(self.db, alice))\n assert len(events) == 5\n assert events[0]['kind'] == 'day-open'\n assert events[0]['balance'] == 50\n assert events[1]['kind'] == 'credit'\n assert events[1]['balance'] == 50\n assert events[2]['kind'] == 'charge'\n assert events[2]['balance'] == 50\n assert events[3]['kind'] == 'charge'\n assert events[3]['balance'] == 50\n assert events[4]['kind'] == 'day-close'\n assert events[4]['balance'] == 0\n\n def test_get_end_of_year_balance(self):\n make_history(self)\n balance = get_end_of_year_balance(self.db, self.alice, self.past_year, datetime.now().year)\n assert balance == 10\n\n\nclass TestHistoryPage(Harness):\n\n def setUp(self):\n Harness.setUp(self)\n make_history(self)\n\n def test_participant_can_view_history(self):\n assert self.client.GET('/~alice/history/', auth_as='alice').code == 200\n\n def test_admin_can_view_closed_participant_history(self):\n self.make_exchange('braintree-cc', -30, 0, self.alice)\n self.alice.close()\n\n self.make_participant('bob', claimed_time='now', is_admin=True)\n response = self.client.GET('/~alice/history/?year=%s' % self.past_year, auth_as='bob')\n assert \"automatic charge\" in response.body\n\nclass TestExport(Harness):\n\n def setUp(self):\n Harness.setUp(self)\n make_history(self)\n\n def test_export_json(self):\n r = self.client.GET('/~alice/history/export.json', auth_as='alice')\n assert json.loads(r.body)\n\n def test_export_json_aggregate(self):\n r = self.client.GET('/~alice/history/export.json?mode=aggregate', auth_as='alice')\n assert json.loads(r.body)\n\n def test_export_json_past_year(self):\n r = self.client.GET('/~alice/history/export.json?year=%s' % self.past_year, auth_as='alice')\n assert len(json.loads(r.body)['exchanges']) == 4\n\n def test_export_csv(self):\n r = self.client.GET('/~alice/history/export.csv?key=exchanges', auth_as='alice')\n assert r.body.count('\\n') == 
5\n","repo_name":"amir17688/google_data_p2","sub_path":"67173_test_history.py_C__Users_user_Desktop_data_2_data_google_data_gratipay_gratipay.com_tests_py.py","file_name":"67173_test_history.py_C__Users_user_Desktop_data_2_data_google_data_gratipay_gratipay.com_tests_py.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3569825654","text":"import math\nimport random\n\nclass DirectEncoding:\n    \"\"\"Direct Encoding, \n    d:domain, including the values [0, 1, ..., d-1]\n    epsilon: privacy budget, >= 0 \n    \"\"\"\n    def __init__(self, d, epsilon):\n        self.__d = d # input size\n        self.__epsilon = epsilon # privacy budget\n        self.__p = math.exp(epsilon) / (math.exp(epsilon)+d-1) # probability of perturbation into itself\n        self.__q = 1.0 / (math.exp(epsilon)+d-1) # probability of perturbation into xxx\n        self.__counterPert = d*[0] # count perturbed number\n        self.__counterReal = d*[0] # count real number [0,0,...,0]\n        self.__n = 0 # the number of users\n    \n    # Encode(v) = x\n    def __encoding(self, v):\n        self.__n += 1\n        self.__counterReal[v] += 1 # counter\n        x = v\n        return x\n\n    def __perturbing(self, x):\n        y = self.__random_pick(x)\n        self.__supports(y)\n        return y\n\n    def PE(self, x):\n        e = self.__encoding(x)\n        pe = self.__perturbing(e)\n        return pe\n\n    # aggregation: pure Protocol\n    def aggregation(self):\n        self.__counterEsti = self.__d*[0]\n        n = self.__n\n        pStar = self.__p\n        qStar = self.__q\n        for i in range(self.__d):\n            self.__counterEsti[i] = (self.__counterPert[i]-n*qStar)/(pStar-qStar)\n    \n    # pure protocol, numerical/analytical value of variance, \n    # another way to calc is n*(d-2+e)/(e-1)/(e-1)\n    def var_analytical(self):\n        e = math.exp(self.__epsilon)\n        assert e>1, 'var analytical error : e<=1'\n        n = self.__n\n        pStar = self.__p\n        qStar = self.__q\n        return n*qStar*(1-qStar)/(pStar-qStar)/(pStar-qStar)\n\n    # empirical value of variance\n    # f: list of probability; n : the number \n    def var_empirical(self, f, n):\n        d = self.__d\n        sum = 0.0\n        for i in range(d):\n            sum += (self.__counterEsti[i] - 1.0*f[i]*n) ** 2\n        return sum / d\n\n    # set n, just for analysis\n    def set_n(self, n):\n        self.__n = n\n\n    def get_n(self):\n        return self.__n\n\n    def get_class_name(self):\n        return str(self.__class__.__name__)\n\n    # a set of input values that y \"supports\"\n    # counter\n    def __supports(self, y):\n        self.__counterPert[y] += 1\n\n    # input number v\n    def __random_pick(self, v):\n        x = random.uniform(0, 1)\n        if x <= self.__p: # Pr[0, p] perturbed into itself\n            return v\n        # p+(d-1)q = 1\n        index = int((x-self.__p)/self.__q) # [0, 1, ..., d-2]\n        if index >= self.__d-2: # Prevent accidents\n            index = self.__d-2\n        if index >= v: # skip v, the true input value\n            index += 1\n        return index\n        '''\n        # Below method is too slow\n        # except x\n        cumulative_probability = self.__p\n        item = 0 #[0, 1, ..., d-1]\n        if item == v:\n            item += 1\n        item_probabitity = self.__q\n\n        while 1:\n            #print(self.__p, self.__q, item, cumulative_probability)\n            cumulative_probability += item_probabitity\n            if x <= cumulative_probability:\n                return item\n            item += 1\n            if item == v:\n                item += 1\n            if item >= self.__d: # bound\n                return self.__d-1\n        '''","repo_name":"VFVrPQ/LDP","sub_path":"protocols/DirectEncoding.py","file_name":"DirectEncoding.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"12420660133","text":"import sys\ninput = sys.stdin.readline\nn = int(input())\nres = 1\nfor i 
in range(n):\n lst = list(map(int,input().split()))\n if(lst[0]==res): #res에 첫번쨰 컵의 번호를 계속 저장해 나간다.\n res = lst[1]\n elif(lst[1]==res):\n res = lst[0]\n\nprint(res)","repo_name":"stndk178/b-python","sub_path":"단계별/기타/브론즈/브론즈3/1547.py","file_name":"1547.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2027764326","text":"from django.db import models\nfrom accounts.models import User\nfrom . course_category_model import CourseCategory, CourseType, Language\n\n\nclass Course(models.Model):\n \"\"\"Course models\"\"\"\n title = models.CharField(max_length=255, blank=False, null=False)\n description = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n start_date = models.DateField()\n end_date = models.DateField()\n start_time = models.TimeField()\n end_time = models.TimeField()\n category = models.ForeignKey(CourseCategory, on_delete=models.SET_NULL, null=True, blank=True)\n course_type = models.ForeignKey(CourseType, on_delete=models.SET_NULL, null=True, blank=True)\n language = models.ForeignKey(Language, on_delete=models.SET_NULL, null=True, blank=True)\n files = models.CharField(max_length=255, null=True, blank=True)\n enrolleds = models.ManyToManyField(User, through=\"Enrollment\")\n created_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, related_name=\"created_by\")\n\n class Meta:\n constraints = [\n models.UniqueConstraint(fields=['title', 'created_by'], name='unique_course_title_for_a_user')\n ]\n\n def __str__(self) -> str:\n return f'{self.title} - {self.created_at}'\n\n\nclass Enrollment(models.Model):\n \"\"\"enrollment models\"\"\"\n student = models.ForeignKey(User, on_delete=models.CASCADE)\n course = models.ForeignKey(Course, on_delete=models.CASCADE)\n joined_date = models.DateField(auto_now_add=True)\n\n def __str__(self) -> str:\n return f'{self.course} - {self.student} - {self.joined_date}'\n\n","repo_name":"codingRah/e-learning-backend","sub_path":"course/api/models/course_model.py","file_name":"course_model.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2122791372","text":"\nleft = 5\nright = 2589\narr = [digit for digit in range (left,right+1)]\n\ndef get_digits(num):\n \n digits=[]\n while num > 0:\n num, rem = divmod(num, 10)\n digits.append(rem)\n \n return digits\n\ndef is_divisible(num):\n\n for digit in get_digits(num):\n if digit == 0 or num % digit > 0:\n return False\n\n return True\n\n","repo_name":"serdarselcuk/pythonStudy","sub_path":"leetcode/self_dividing.py","file_name":"self_dividing.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26631892431","text":"import re\n\nimport jee\nimport logger\nimport modeling\n\nhandlers = {}\n\n\nclass Dependency(object):\n \"\"\"If one CI depends on another CI, we say there's a dependency between them.\"\"\"\n\n def __init__(self, scpType, scpContext, applicationTopology):\n self.scpType = scpType\n self.scpContext = scpContext\n clazz = handlers.get(scpType, HttpDependencyHandler)\n logger.debug('Got handler: ', clazz)\n self.handler = clazz(scpType, scpContext, applicationTopology)\n\n def resolve(self):\n return self.handler.handle()\n\n\ndef handler(type):\n def decorate(func):\n handlers[type] = func\n 
return func\n\n return decorate\n\n\nclass DependencyHandler(object):\n def __init__(self, scpType, scpContext, applicationTopology):\n self.scpType = scpType\n self.scpContext = scpContext\n self.applicationTopology = applicationTopology\n\n def handle(self):\n self._parseScpContext()\n\n result = []\n for t in self.applicationTopology:\n container, applications = t\n matchedApplications = [app for app in applications if app is not None and self._match(app)]\n if matchedApplications:\n logger.debug('Found matched application: ', matchedApplications)\n result.append((container, matchedApplications))\n\n return result\n\n def _parseScpContext(self):\n raise NotImplementedError('DependencyHandler is supposed to be an interface or abstract class')\n\n def _match(self, applications):\n raise NotImplementedError('DependencyHandler is supposed to be an interface or abstract class')\n\n def _hasContextRoot(self, app, contextRoot):\n contextList = self._toContextList(contextRoot)\n moduleContexts = [m.contextRoot for m in app.getModules() if isinstance(m, jee.WebModule)]\n for context in contextList:\n for moduleContext in moduleContexts:\n logger.debug('Try to match module \"%s\" with context \"%s\" ' % (moduleContext, context))\n if moduleContext is not None and (moduleContext == context \\\n or filter(None, moduleContext.split('/')) == filter(None, context.split('/'))):\n return True\n logger.debug('No module matched with contexts ', contextList)\n return False\n\n def _toContextList(self, contextRoot):\n # '/a/b/c' -> 'a/b/c'\n # '/a/b/login.do' -> '/a/b'\n result = [contextRoot]\n a = [c for c in contextRoot.split('/')]\n trimLastContext = '/'.join(a[0:-1])\n if trimLastContext:\n result.append(trimLastContext)\n logger.debug('Resolve context list: ', result)\n return result\n\n\n@handler(type='websphere_ws')\n@handler(type='weblogic_ws')\nclass WebServiceDependencyHandler(DependencyHandler):\n WS_PATTERNS = {'websphere_ws': re.compile(r'/?(\\w+)/services/(\\w+)/?'),\n 'weblogic_ws': re.compile(r'/?(\\w+)/(\\w+)/?')}\n\n def __init__(self, scpType, scpContext, applicationTopology):\n super(WebServiceDependencyHandler, self).__init__(scpType, scpContext, applicationTopology)\n self.contextRoot = None\n self.serviceName = None\n\n def _parseScpContext(self):\n pattern = WebServiceDependencyHandler.WS_PATTERNS[self.scpType]\n m = pattern.match(self.scpContext)\n if m:\n self.contextRoot = m.group(1)\n self.serviceName = m.group(2)\n else:\n raise ValueError('%s: %s cannot be parsed.' 
% (self.scpType, self.scpContext))\n\n def _match(self, app):\n return self._hasContextRoot(app, self.contextRoot) and self._hasWebService(app)\n\n def _hasWebService(self, app):\n for m in app.getModules():\n for w in m.getWebServices():\n if w.getName() == self.serviceName:\n return True\n return False\n\n def __repr__(self):\n pass\n\n\n@handler(type='ejb')\nclass EjbDependencyHandler(DependencyHandler):\n def __init__(self, scpType, scpContext, applicationTopology):\n super(EjbDependencyHandler, self).__init__(scpType, scpContext, applicationTopology)\n self.jndiName = None\n\n def _parseScpContext(self):\n self.jndiName = self.scpContext\n\n def _match(self, app):\n return self._hasJndiName(app)\n\n def _hasJndiName(self, app):\n for m in app.getModules():\n for e in m.getEntries():\n if e.getJndiName() == self.jndiName:\n return True\n return False\n\n\n def __repr__(self):\n pass\n\n\n@handler(type='http')\nclass HttpDependencyHandler(DependencyHandler):\n def __init__(self, scpType, scpContext, applicationTopology):\n super(HttpDependencyHandler, self).__init__(scpType, scpContext, applicationTopology)\n self.contextRoot = None\n\n def _parseScpContext(self):\n self.contextRoot = self.scpContext\n\n def _match(self, app):\n return self._hasContextRoot(app, self.contextRoot)\n\n def __repr__(self):\n pass\n\n\ndef resolveApplicationDependency(scpType, scpContext, applicationTopology):\n logger.debug('Start to resolve application dependency for SCP: type = \"%s\", context = \"%s\"' % (scpType, scpContext))\n d = Dependency(scpType, scpContext, applicationTopology)\n return d.resolve()\n\n\ndef resolveJmsDependency(scpType, scpContext, jmsTopology):\n logger.debug('Start to resolve jms dependency for SCP: type = \"%s\", context = \"%s\"' % (scpType, scpContext))\n result = []\n if scpType == 'jms':\n for t in jmsTopology:\n containerOsh, jmsDestinations = t\n matchedJmsDestination = [jms for jms in jmsDestinations if jms.getName() == scpContext]\n result.append((containerOsh, matchedJmsDestination))\n return result\n\n\ndef resolveDependency(scpType, scpContext, reporterCreator, applicationResult, OSHVResult):\n if scpType == 'jms':\n jmsTopology = reporterCreator.getJmsDsReporter().jmsTopology\n logger.debug('Found jms topology: %s' % jmsTopology)\n\n matchedJmsTopology = resolveJmsDependency(scpType, scpContext, jmsTopology)\n for t in matchedJmsTopology:\n containerOsh, jmsDestinations = t\n OSHVResult.add(containerOsh)\n # J2EE domain needs a membership link to J2EE server\n if containerOsh.getObjectClass() == 'j2eedomain':\n OSHVResult.add(modeling.createLinkOSH('member', containerOsh, applicationResult.application.getOsh()))\n\n for j in jmsDestinations:\n OSHVResult.add(j.getOsh())\n else:\n applicationTopology = reporterCreator.getApplicationReporter().applicationTopology\n logger.debug('Found application topology: %s' % applicationTopology)\n\n matchedAppTopology = resolveApplicationDependency(scpType, scpContext, applicationTopology)\n for t in matchedAppTopology:\n container, applications = t\n OSHVResult.add(container.getOsh())\n # J2EE domain needs a membership link to J2EE server\n if isinstance(container, jee.Domain):\n OSHVResult.add(modeling.createLinkOSH('member', container.getOsh(),\n applicationResult.application.getOsh()))\n\n for a in applications:\n osh = a.getOsh()\n OSHVResult.add(osh)\n 
applicationResult.applicationresources.append(osh)\n","repo_name":"chundcm/cda-record","sub_path":"reference/ucmdb/discovery/asm_dependency_resolver.py","file_name":"asm_dependency_resolver.py","file_ext":"py","file_size_in_byte":7451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"69904107449","text":"from sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy import select, update, delete\n\nfrom models.models import user\n\n\nasync def get_user_by_tg_id(\n    session: AsyncSession,\n    telegram_id: int,\n):\n    query = select(user).where(user.telegram_id == telegram_id)\n\n    result = await session.execute(query)\n\n    return result.scalars().first()\n","repo_name":"waterstark/telegram_bot","sub_path":"service/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"41504590391","text":"from airflow import DAG #importing the DAG in here\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.latest_only_operator import LatestOnlyOperator\nfrom datetime import datetime,timedelta\n\ndefault_args={\n    \"start_date\":datetime(2022,11,24),\n    \"owner\":\"Airflow\",\n    \"depends_on_start\":False,\n    \"retries\":2,\n    \"retry_delay\":timedelta(seconds=5)\n}\n\nwith DAG(dag_id=\"latest_backfilling\",default_args=default_args,\n         schedule_interval=timedelta(hours=6),\n         catchup=True,\n         ) as dag:\n    task1=LatestOnlyOperator(task_id=\"task_latest\")\n    task2=DummyOperator(task_id=\"task2\")\n    task3=DummyOperator(task_id=\"task3\")\n    task4=DummyOperator(task_id=\"task4\")\n    \n    task1 >> [task2,task4]\n    \n    ","repo_name":"psarangi550/Apache_Aiflow_Detailed","sub_path":"dags/latest_backfilling.py","file_name":"latest_backfilling.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"28749441649","text":"import numpy as np\n\n\ndef center_crop(l, x, y, ts, p, bboxes, old_shape, new_shape):\n    \"\"\"\n    Crops events and annotations to a centered region of the specified shape.\n    Events and bounding boxes are then shifted so that the top-left event margins\n    always start at (0,0)\n    \"\"\"\n\n    new_h, new_w = new_shape\n    old_h, old_w = old_shape\n\n    x_min, x_max = x.min(), x.max()\n    y_min, y_max = y.min(), y.max()\n\n    # the left edge comes from the x/width extent and the top edge from the y/height extent\n    new_left = x_min + (x_max - x_min - new_w) // 2\n    new_top = y_min + (y_max - y_min - new_h) // 2\n\n    events_inside = np.logical_and.reduce([x >= new_left, x < new_left + new_w,\n                                           y >= new_top, y < new_top + new_h])\n    new_x, new_y, new_ts, new_p = x[events_inside], y[events_inside], \\\n                                  ts[events_inside], p[events_inside]\n    x_offset, y_offset = new_x.min(), new_y.min()\n    new_x -= x_offset\n    new_y -= y_offset\n    new_l = new_x.shape[0]\n\n    # scale the normalized boxes to pixels once, shift by the crop offsets, then re-normalize\n    new_bboxes = bboxes.copy()\n    new_bboxes[:, [0, 2]] = np.clip(bboxes[:, [0, 2]] * old_w - x_offset, 0, new_w) / new_w\n    new_bboxes[:, [1, 3]] = np.clip(bboxes[:, [1, 3]] * old_h - y_offset, 0, new_h) / new_h\n\n    return new_l, new_x, new_y, new_ts, new_p, new_bboxes\n\n\ndef apply_nms(batch_bboxes, batch_scores, batch_valid=None, iou_threshold=0.5):\n    \"\"\"\n    Applies Non-Maximum-Suppression on the provided boxes.\n    Implementation taken from: http://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/\n\n    :param batch_bboxes: a [batch_size, num_boxes, 4] array providing the parameters of the bounding boxes\n        (x_center, y_center, w_box, 
h_box).\n :param batch_scores: a [batch_size, num_boxes] array providing the scores associated with each bounding box\n :param batch_valid: a [batch_size, num_boxes] boolean mask used to specify which values must be considered valid\n across the batch. Optional, if not provided, all the boxes will be considered in the computation\n :param iou_threshold: scalar, the threshold on the IOU. Optional, default 0.5.\n :return: a list of 2 numpy arrays representing the indices in batch_bboxes of the selected boxes\n \"\"\"\n\n batch_valid = batch_valid if batch_valid is not None else [None] * batch_bboxes.shape[0]\n\n picked_idx = []\n # Loops over the batch dimension\n for bboxes, scores, valid in zip(batch_bboxes, batch_scores, batch_valid):\n\n if valid is not None:\n bboxes = bboxes[valid]\n scores = scores[valid]\n # compute mapping from valid indices to original\n valid_idx_to_original = np.where(valid)[0]\n\n # if there are no boxes\n if len(bboxes) == 0:\n picked_idx.append([])\n else:\n # initialize the list of picked indexes\n pick = []\n\n # grab the coordinates of the bounding boxes\n x = bboxes[:, 0]\n y = bboxes[:, 1]\n w = bboxes[:, 2]\n h = bboxes[:, 3]\n\n # compute the area of the bounding boxes and sort the bounding\n # boxes by their score\n area = w * h\n idxs = np.argsort(scores)\n\n # keep looping while some indexes still remain in the indexes\n # list\n while len(idxs) > 0:\n # grab the last index in the indexes list and add the\n # index value to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n # compute the top-left and bottom-right coordinates of the intersections\n # between the current box and all the remaining ones\n xx1 = np.maximum(x[i] - w[i] / 2, x[idxs[:last]] - w[idxs[:last]] / 2)\n yy1 = np.maximum(y[i] - h[i] / 2, y[idxs[:last]] - h[idxs[:last]] / 2)\n xx2 = np.minimum(x[i] + w[i] / 2, x[idxs[:last]] + w[idxs[:last]] / 2)\n yy2 = np.minimum(y[i] + h[i] / 2, y[idxs[:last]] + h[idxs[:last]] / 2)\n\n # compute the width and height of the intersection's boxes\n ww = np.maximum(0, xx2 - xx1)\n hh = np.maximum(0, yy2 - yy1)\n\n # compute IOUs\n iou = (ww * hh) / (area[idxs[:last]] + area[i] - (ww * hh))\n\n # delete from the list of remaining indexes, the current one (last) and those\n # of the bounding boxes with an IOU above the threshold with the current box\n idxs = np.delete(idxs, np.concatenate(([last], np.where(iou >= iou_threshold)[0])))\n\n # if a 'batch_valid' array has been provided, 'pick' will contain the indices of the filtered\n # boxes, we need to map them back to original array's indices\n pick = pick if valid is None else list(valid_idx_to_original[pick])\n picked_idx.append(pick)\n\n # HACK: it uses sum(list of lists, []) to flatten the list =D\n idx_axis_0 = np.array(sum([[batch] * len(idx) for batch, idx in enumerate(picked_idx)], []))\n idx_axis_1 = np.array(sum(picked_idx, []))\n\n return [idx_axis_0, idx_axis_1]\n\n","repo_name":"marcocannici/async-ev-cnn","sub_path":"src/libs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"} +{"seq_id":"40332089257","text":"'''\nThis program expects a kilogram input value and converts\nthat value to grams, pounds, and ounces when the user\npushes the Convert button.\n\n@author Rodriguez Acosta Ernesto Antonio\n'''\n\nfrom tkinter import *\n\nwindow = Tk()\nwindow.title(\"Kg-Converter\")\nwindow.geometry(\"\") # Auto resize\n\ndef kg_converter():\n 
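# echo the raw Entry text to the console for quick debugging before converting
    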
print(kg_entry_value.get())\n\n    txt_grams.delete(1.0, END)\n    txt_pounds.delete(1.0, END)\n    txt_ounces.delete(1.0, END)\n\n    kg_to_grams = float(kg_entry_value.get()) * 1000\n    kg_to_pounds = float(kg_entry_value.get()) * 2.20462\n    kg_to_ounces = float(kg_entry_value.get()) * 35.274\n\n    txt_grams.insert(END,kg_to_grams)\n    txt_pounds.insert(END,kg_to_pounds)\n    txt_ounces.insert(END,kg_to_ounces)\n\nkg_label = Label(window, text=\"Kg\")\nkg_label.grid(row=0, column=0)\n\nkg_entry_value = StringVar()\n\nkg_entry = Entry(window, textvariable=kg_entry_value)\nkg_entry.grid(row=0, column=1)\n\nbtn_convert = Button(window, text=\"Convert\", justify=LEFT, command=kg_converter)\nbtn_convert.grid(row=0, column=2)\n\ntxt_grams = Text(window, height=1, width=20)\ntxt_grams.grid(row=1, column=0)\n\ntxt_pounds = Text(window, height=1, width=20)\ntxt_pounds.grid(row=1, column=1)\n\ntxt_ounces = Text(window, height=1, width=20)\ntxt_ounces.grid(row=1, column=2)\n\nwindow.mainloop()","repo_name":"ernestoacosta75/python-megacourse","sub_path":"tkinter_section/converter_script.py","file_name":"converter_script.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"23069377525","text":"import torch, cv2, os\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nimport numpy as np\nfrom PIL import Image\n\nclass ContentLoss(nn.Module):\n\tdef __init__(self, Content):\n\t\tsuper(ContentLoss, self).__init__()\n\t\tself.Content = Content.detach()\n\n\tdef forward(self, Generated):\n\t\tif(calc_loss):\n\t\t\tself.loss = F.mse_loss(self.Content, Generated)\n\t\treturn Generated\n\nclass StyleLoss(nn.Module):\n\tdef __init__(self, Style):\n\t\tsuper(StyleLoss, self).__init__()\n\t\tself.Style = Style.detach()\n\t\tself.gram_S = gram_calc(self.Style).detach()\n\n\tdef forward(self, Generated):\n\t\tif(calc_loss):\n\t\t\tgram_G = gram_calc(Generated)\n\t\t\tself.loss = F.mse_loss(self.gram_S, gram_G)\n\t\treturn Generated\n\nclass Normalize(nn.Module):\n\tdef __init__(self, mean, variance):\n\t\tsuper(Normalize, self).__init__()\n\t\tself.mean = mean.view(-1, 1, 1)\n\t\tself.variance = variance.view(-1, 1, 1)\n\n\tdef forward(self, x):\n\t\t# normalize with the per-channel statistics stored on the module\n\t\treturn (x - self.mean) / self.variance\n\ndef freeze_model(model):\n\tfor f in model.parameters():\n\t\tf.requires_grad = False\n\treturn model\n\ndef gram_calc(t):\n\tbs, nc, h, w = t.shape\n\tt_ = t.view(bs, nc, h*w)\n\tgram = torch.bmm(t_, t_.permute(0, 2, 1)) / (bs*nc*h*w)\n\treturn gram\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\ns_name, c_name = 'images/style.jpg', 'images/content.jpg'\nsave_dir = 'save'\n\ntradeoff = 0.5\nalpha, beta = 10000000 * (tradeoff), 1 * (1-tradeoff)\ns_size, c_size = 512, 512\n\nepoch = 1000\n\nto_tensor_s = transforms.Compose([transforms.Resize(s_size), transforms.ToTensor()])\nto_tensor_c = transforms.Compose([transforms.Resize(c_size), transforms.ToTensor()])\n\ns_img = to_tensor_s(Image.open(s_name)).unsqueeze(0).to(device)\nc_img = to_tensor_c(Image.open(c_name)).unsqueeze(0).to(device)\n\nvgg = freeze_model(models.vgg19_bn(pretrained=True).to(device).features)\n\nstyle_layers = [0, 7, 14, 27, 40]\nstyle_weight = [1.0, 1.0, 1.0, 1.0, 1.0]\n\ncontent_layers = [30]\ncontent_weight = [1.0]\n\nstyle_losses = []\ncontent_losses = []\ncalc_loss = False\n\nmodel = nn.Sequential(Normalize(torch.tensor([0.485, 0.456, 
0.406]).to(device), torch.tensor([0.229, 0.224, 0.225]).to(device)))\nfor i, layer in enumerate(vgg.children()):\n\tif(isinstance(layer, nn.ReLU)):\n\t\tmodel.add_module(str(i), nn.ReLU(inplace = False))\n\telse:\n\t\tmodel.add_module(str(i), layer)\n\tif(i in style_layers):\n\t\tstyle_loss_layer = StyleLoss(model(s_img).detach())\n\t\tmodel.add_module('Style-'+str(i), style_loss_layer)\n\t\tstyle_layers.remove(i)\n\t\tstyle_losses.append(style_loss_layer)\n\tif(i in content_layers):\n\t\tcontent_loss_layer = ContentLoss(model(c_img).detach())\n\t\tmodel.add_module('Content-'+str(i), content_loss_layer)\n\t\tcontent_layers.remove(i)\n\t\tcontent_losses.append(content_loss_layer)\n\tif(len(style_layers) == 0 and len(content_layers) == 0):\n\t\tbreak\n\ninitial_image = torch.from_numpy(np.random.uniform(0, 1, size=c_img.data.shape).astype(np.float32)).to(device)\noptimizer = optim.Adam([initial_image.requires_grad_()], lr = 0.01)\ncalc_loss = True\n\nfor cur_epoch in range(epoch):\n\tdef closure():\n\t\tinitial_image.data.clamp_(0, 1)\n\t\toptimizer.zero_grad()\n\t\tstyle_loss = 0.0\n\t\tcontent_loss = 0.0\n\t\tmodel(initial_image)\n\t\tfor i, sl_layer in enumerate(style_losses):\n\t\t\tstyle_loss += sl_layer.loss * style_weight[i]\n\t\tfor i, cl_layer in enumerate(content_losses):\n\t\t\tcontent_loss += cl_layer.loss * content_weight[i]\n\t\tstyle_loss *= alpha\n\t\tcontent_loss *= beta\n\n\t\tloss = style_loss + content_loss\n\t\tloss.backward()\n\t\tinitial_image.data.clamp_(0, 1)\n\n\t\tprint(float(style_loss), float(content_loss), float(loss))\n\t\tif(cur_epoch % 70 == 0):\n\t\t\twrite_image = cv2.cvtColor(((initial_image.detach().cpu().numpy().squeeze(0).transpose(1, 2, 0))*255.0).astype(np.uint8), cv2.COLOR_RGB2BGR)\n\t\t\tcv2.imwrite(os.path.join(save_dir, str(cur_epoch)+'.jpg'), write_image)\n\n\toptimizer.step(closure)\n\t\n\n\n\n","repo_name":"ericlearning/style-transfer","sub_path":"StyleTransfer/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"9855648295","text":"import json\nfrom spotipy import Spotify, util\nfrom spotipy.oauth2 import SpotifyClientCredentials\nfrom tqdm import tqdm\n\n\nclass Worker(object):\n \"\"\" The Worker does work on the Musical Graph\n\n This design allows for the separation of the graph itself and the\n code that processes it.\n \"\"\"\n def __init__(self, graph):\n self.graph = graph\n\n def add_artists(self, artists):\n assert type(artists) == list\n \"\"\"Add artists to graph\"\"\"\n for artist in tqdm(artists):\n self._add_artist(artist)\n self.graph.save()\n\n def _add_artist(self, artist):\n # We'll only add an artist if they return a record in the\n # Spotify API\n raw = self._search_for_artist(artist)\n try:\n processed = self._get_artist_meta(raw)\n self.graph.G.add_node(processed['name'], type='artist')\n for genre in processed['genres']:\n self.graph.G.add_node(genre, type=\"genre\")\n self.graph.G.add_edge(artist, genre)\n except TypeError:\n pass\n self.graph.save()\n\n def _search_for_artist(self, artist):\n \"\"\"Searches for the artist and returns the data from the first result\n \"\"\"\n token = self._generate_token()\n if token:\n sp = Spotify(client_credentials_manager=token)\n search_results = sp.search(q=artist, type='artist')\n try:\n first_result = search_results['artists']['items'][0]\n return first_result\n except IndexError:\n pass\n\n def _get_artist_meta(self, response):\n \"\"\"Processes 
the json response for the desired fields\"\"\"\n        fields = ['genres', 'id', 'name']\n        if response is not None:\n            return {field: response[field] for field in fields}\n\n    def _generate_token(self):\n        with open('.secrets/spotify.json') as f:\n            credential = json.loads(f.read())\n        client_manager = SpotifyClientCredentials(**credential['spotify'])\n        return client_manager\n","repo_name":"mrklees/musical-graphs","sub_path":"MusicalGraph/Worker.py","file_name":"Worker.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"17241852935","text":"import spacy\nimport numpy as np\nimport pandas as pd\nimport cupy\n\nfrom keras.models import Sequential, model_from_json\nfrom keras.layers import LSTM, Dense, Embedding, Bidirectional\nfrom keras.layers import TimeDistributed\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\n\ndef compile_lstm(embeddings, shape, settings):\n    model = Sequential()\n    model.add(\n        Embedding(\n            embeddings.shape[0],\n            embeddings.shape[1],\n            input_length=shape[\"max_length\"],\n            trainable=False,\n            weights=[embeddings],\n            mask_zero=True,\n        )\n    )\n    model.add(TimeDistributed(Dense(shape[\"nr_hidden\"], use_bias=False)))\n    model.add(\n        Bidirectional(\n            LSTM(\n                shape[\"nr_hidden\"],\n                recurrent_dropout=settings[\"dropout\"],\n                dropout=settings[\"dropout\"],\n            )\n        )\n    )\n    model.add(Dense(shape[\"nr_class\"], activation=\"sigmoid\"))\n    model.compile(\n        optimizer=Adam(lr=settings[\"lr\"]),\n        loss=\"categorical_crossentropy\",\n        metrics=[\"accuracy\"],\n    )\n    return model\n\ndef get_embeddings(vocab, using_gpu=True):\n    if using_gpu:\n        return cupy.asnumpy(vocab.vectors.data)\n    else:\n        return vocab.vectors.data\n\ndef encode_labels(labels, one_hot=True):\n    labels = labels.reset_index(drop=True)\n    categories = list(labels.unique())\n    Y = np.zeros((len(labels), 1))\n    \n    for i, cat in enumerate(categories):\n        Y[labels.index[labels == cat]] = i\n    \n    if one_hot:\n        return to_categorical(Y, num_classes=len(categories))\n    else:\n        return Y\n    \ndef get_features_from_sentences(sentences, max_length=300):\n    X = np.zeros((len(sentences), max_length), dtype=\"int32\")\n    i = 0\n    for sent in sentences:\n        for j, token in enumerate(sent):\n            if j >= max_length:\n                break\n            vector_id = token.vocab.vectors.find(key=token.orth)\n            if vector_id >= 0:\n                X[i, j] = vector_id\n            else:\n                X[i, j] = 0\n        i += 1\n    return X\n    \ndef get_labelled_sentences_from_doc(doc, doc_label, min_length=15):\n    labels = []\n    sentences = []\n    for sent in doc.sents:\n        if len(sent) >= min_length:\n            sentences.append(sent)\n            labels.append(doc_label)\n    \n    return sentences, np.asarray(labels, dtype=\"int32\")\n\ndef encode_text_and_labels(nlp, text, labels, batch_size=64):\n    Xs = []\n    Ys = []\n    encoded_labels = encode_labels(labels)\n    for i, doc in enumerate(nlp.pipe(text, batch_size=batch_size, disable=[\"parser\", \"tagger\", \"ner\"])):\n        sentences, y = get_labelled_sentences_from_doc(doc, encoded_labels[i])\n        \n        # TODO: record sentence or token statistics here\n        \n        Xs.append(get_features_from_sentences(sentences))\n        Ys.append(y)\n    return np.vstack(Xs), np.vstack(Ys)\n    \ndef group_sentences(document, num_sentences=4):\n    sentence_count = 0\n    for token in document:\n        if sentence_count % num_sentences == 0 and token.is_sent_start:\n            token.is_sent_start = True\n            sentence_count += 1\n        elif token.is_sent_start:\n            token.is_sent_start = False\n            sentence_count += 1\n    return 
document\n","repo_name":"ktang012/myanimelist_data_collection","sub_path":"keras_utils.py","file_name":"keras_utils.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24867930242","text":"\"\"\"empty message\n\nRevision ID: c2e818d0a8c5\nRevises: 3861cb255be9\nCreate Date: 2020-08-21 15:08:30.843106\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c2e818d0a8c5'\ndown_revision = '3861cb255be9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('article',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=20), nullable=False),\n sa.Column('title', sa.String(length=20), nullable=False),\n sa.Column('content', sa.Text(), nullable=False),\n sa.Column('date', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_unique_constraint(None, 'users', ['username'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'users', type_='unique')\n op.drop_table('article')\n # ### end Alembic commands ###\n","repo_name":"Startooth/blog","sub_path":"migrations/versions/c2e818d0a8c5_.py","file_name":"c2e818d0a8c5_.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29484677829","text":"class Twitter:\n def __init__(self):\n self.trending_topics=[]\n def tweet(self,mensaje):\n if len(mensaje)>140:\n return False\n mensaje_lista = list(mensaje)\n a = mensaje_lista.index(\"#\")\n for i in range(a,len(mensaje)-1) :\n b = mensaje_lista.index(\" \")\n hashtg =list(mensaje_lista[a:b])\n self.trending_topics.append(hashtg)\n \nif __name__ == \"__main__\":\n twitter=Twitter()\n twitter.tweet(\"gano #laroja\")\n twitter.tweet(\"grande #chile\")\n twitter.tweet(\"#laroja con dos goles, le gano a brasil, grande #laroja\")\n print(twitter.trending_topics)\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"tema9_ej1/tema9_ej1_pcherrera.py","file_name":"tema9_ej1_pcherrera.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18534943943","text":"#!/usr/bin/env python3\n\nimport argparse\nimport pycparser\n\nclass EnumVisitor(pycparser.c_ast.NodeVisitor):\n def __init__(self, file):\n self.file = file\n def visit_Enum(self, enum):\n if enum.coord.file != self.file:\n return\n next_value = 0\n for enum_value in enum.values.enumerators:\n if enum_value.value is not None:\n # Unsure how to handle assignment of non-numerical value - maybe leave as is?\n if isinstance(enum_value.value, pycparser.c_ast.Constant):\n next_value_str = enum_value.value.value\n next_value = int(next_value_str, 16) if \"x\" in next_value_str else int(next_value_str)\n print(f\".set {enum_value.name}, {next_value}\")\n next_value += 1\n else:\n print(f\".set {enum_value.name}, {next_value}\")\n next_value += 1\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Prints out enum values for assembler.\", prefix_chars='+')\n parser.add_argument(\"file\", help=\"input include file\")\n parser.add_argument(\"cpp\", help=\"c pre-processor\")\n parser.add_argument(\"cppflags\", help=\"c pre-processor flags\", nargs=\"*\")\n\n 
args = parser.parse_args()\n    ast = pycparser.parse_file(args.file, True, args.cpp, args.cppflags) \n\n    # print guard\n    guard_prefix = args.file.replace(\"include/\", \"\")\n    guard_prefix = guard_prefix.replace(\".\", \"_\")\n    guard_name = guard_prefix.upper() + \"_ASM_INC\"\n    print(f\".ifndef {guard_name}\")\n    print(f\".set {guard_name}, 1\\n\")\n\n    visitor = EnumVisitor(args.file)\n    visitor.visit(ast)\n\n    print(f\"\\n.endif @ {guard_name}\")\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"zeldaret/tmc","sub_path":"tools/extract_include_enum.py","file_name":"extract_include_enum.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":464,"dataset":"github-code","pt":"77"}
{"seq_id":"32545714792","text":"\"\"\"\nCircular Queue Factory : Optimise memory usage, circulate the head & tail pointers in a queue\nFirst In First Out (FIFO) \nEnqueue: Add an element to the end of the queue\nDequeue: Remove an element from the front of the queue\nIsEmpty: Check if the queue is empty\nIsFull: Check if the queue is full\nPeek: Get the value of the front of the queue without removing it\n\nComplexity\n--------------------\nThe complexity of the enqueue and dequeue operations of a circular queue is O(1) (for array implementations).\n\nApplications\n--------------------\nCPU scheduling\nMemory management\nTraffic Management\n\"\"\"\n\nfrom typing import TypeVar\n\nMAX_SIZE = 'maxSize'\nSTACK = TypeVar('STACK', list, set)\n\nclass CircularQueueFactory():\n    def __init__(self, **kwargs):\n        self.__maxStackSize = kwargs[MAX_SIZE] if MAX_SIZE in kwargs else 100\n        self.__queue: STACK = list('#') * self.__maxStackSize\n        self.__head = -1\n        self.__tail = -1\n    \n    def enqueue(self, item):\n        # the queue is full when advancing the tail would land on the head, e.g. (4 + 1) % 5 == 0 while head is at 0\n        if (self.__tail + 1) % self.__maxStackSize == self.__head:\n            raise OverflowError('Queue is full')\n        elif self.__head == -1:\n            self.__head, self.__tail = 0, 0\n            self.__queue[self.__tail] = item\n        else:\n            self.__tail = (self.__tail + 1) % self.__maxStackSize\n            self.__queue[self.__tail] = item\n    \n    def dequeue(self):\n        if self.__head == -1:\n            raise BufferError('Queue is empty')\n        # only one element left: head meets tail, so pop it and reset both pointers\n        elif self.__head == self.__tail:\n            popItem = self.__queue[self.__head]\n            self.__tail, self.__head = -1, -1\n            return popItem\n        else:\n            popItem = self.__queue[self.__head]\n            self.__head = (self.__head + 1) % self.__maxStackSize\n            return popItem\n    \n    def printQueue(self):\n        if self.__head == -1:\n            print('Empty Queue')\n        elif self.__tail >= self.__head:\n            for index in range(self.__head, self.__tail + 1):\n                print(self.__queue[index], end=' ')\n            print('')\n        else:\n            for index in range(self.__head, self.__maxStackSize):\n                print(self.__queue[index], end=' ')\n            for index in range(0, self.__tail + 1):\n                print(self.__queue[index], end=' ')\n    \n# Testing\ncircularQueue = CircularQueueFactory(maxSize = 5)\ncircularQueue.printQueue()\ntry:\n    circularQueue.enqueue('Rob')\n    circularQueue.enqueue('Geo')\n    circularQueue.enqueue('Sara')\n    circularQueue.enqueue('Reena')\n    circularQueue.enqueue('Amaya')\n    circularQueue.enqueue('Ivan')\nexcept:\n    pass\nprint('Queue Items:', 
flush=True)\ncircularQueue.printQueue()","repo_name":"Himesh-Codes/DSA-Python","sub_path":"Queue/circularQueue.py","file_name":"circularQueue.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"70105442168","text":"mylist = []\nn = int(input())\nfor i in range(n):\n k = int(input())\n mylist.append(k)\ndest = 0\nswap_done = 0\nanswer = 0\nfor i in range(n-1):\n for j in range(n-1):\n if mylist[j]>mylist[j+1]:\n dest = j+1\n swap_done = 1\n if swap_done:\n break\nif dest:\n for i in range(n-1):\n answer = answer + abs(abs(mylist[i])-abs(mylist[dest]))\nprint(answer)","repo_name":"gaushwravetu/Coding-Problems","sub_path":"code chef/incr.py","file_name":"incr.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29369317126","text":"import datetime\n\nimport altair as alt\nimport pandas as pd\nimport streamlit as st\n\nfrom netspeedlogger.netspeedlogger import selectall_with_date_range, timeseries_chart\n\nst.set_page_config(layout=\"wide\")\nst.title(\"Internet Speed Test Results\")\n\nCHART_WIDTH = 1600\nCHART_HEIGHT = 400\n\nmin_date = datetime.date.today() - datetime.timedelta(days=7)\nmax_date = datetime.date.today() + datetime.timedelta(days=1)\n\na_date = st.date_input(\"Pick a date\", (min_date, max_date))\n\n\ndat = selectall_with_date_range(min_date=str(a_date[0]), max_date=str(a_date[1]))\n\nif not isinstance(dat, pd.DataFrame):\n st.markdown(\"No data - run `netspeedlogger speedtest` first!\")\nelse:\n dat[\"download_speed\"] = dat[\"download_speed\"] / (1024 * 1024)\n dat[\"upload_speed\"] = dat[\"upload_speed\"] / (1024 * 1024)\n dat[\"hour\"] = [int(i[11:13]) for i in dat[\"timestamp\"]]\n\n st.subheader(\"Download Speed (Mb/s)\")\n st.altair_chart(timeseries_chart(dat, \"download_speed\", CHART_HEIGHT, CHART_WIDTH))\n\n col1, col2, col3 = st.columns(3)\n\n col1.altair_chart(\n alt.Chart(dat, title=\"Download Speed by Hour(Mb/s)\")\n .mark_boxplot(extent=\"min-max\")\n .encode(x=\"hour:O\", y=\"download_speed:Q\")\n .properties(height=CHART_HEIGHT, width=CHART_WIDTH / 3 - 50)\n )\n\n col2.altair_chart(\n alt.Chart(dat, title=\"Upload Speed by Hour (Mb/s)\")\n .mark_boxplot(extent=\"min-max\")\n .encode(x=\"hour:O\", y=\"upload_speed:Q\")\n .properties(height=CHART_HEIGHT, width=CHART_WIDTH / 3 - 50)\n )\n\n col3.altair_chart(\n alt.Chart(dat, title=\"Ping by Hour (ms)\")\n .mark_boxplot(extent=\"min-max\")\n .encode(x=\"hour:O\", y=\"ping:Q\")\n .properties(height=CHART_HEIGHT, width=CHART_WIDTH / 3 - 50)\n )\n\n st.subheader(\"Ping (ms)\")\n st.altair_chart(timeseries_chart(dat, \"ping\", CHART_HEIGHT, CHART_WIDTH))\n\n st.subheader(\"Upload Speed (Mb/s)\")\n st.altair_chart(timeseries_chart(dat, \"upload_speed\", CHART_HEIGHT, CHART_WIDTH))\n","repo_name":"radinplaid/netspeedlogger","sub_path":"netspeedlogger/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36989734782","text":"from shemaFuncs import *\nimport re\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set_style(\"darkgrid\")\n# sns.set_context(\"paper\")\nsns.set(rc={'figure.figsize':(20, 16)})\nsns.set_context(\"talk\")\npd.set_option(\"display.max_columns\", None)\npd.set_option(\"display.max_rows\", 20)\n\nglobalColumnNames = ['indexID', 'text', 'version', 'book', 
'chapter', 'verse', 'isHeb', 'isEng']\n\nhebDfName = './data/hebDf.pkl'\nengDfName = './data/engDf.pkl'\n\n###################################################\n####\n# load in hebrew torah texts and metadata; create a DF\nheb_text, heb_version, heb_book, heb_chapter, heb_verse = loadHebTorah()\nheb_version = ['Hebrew Text'] * len(heb_version)\nheb_text = np.array(heb_text)\nheb_version = np.array(heb_version)\nheb_book = np.array(heb_book)\nheb_chapter = np.array(heb_chapter)\nheb_verse = np.array(heb_verse)\nhebIndex = [ b+'.'+str(c)+'.'+str(v) for b, c, v in zip(heb_book, heb_chapter, heb_verse)]\nhebIndex = np.array(hebIndex)\nheb_isHeb = [1] * len(hebIndex)\nheb_isHeb = np.array(heb_isHeb)\nheb_isEng = [0] * len(hebIndex)\nheb_isEng = np.array(heb_isEng)\n\nhebDf = pd.DataFrame(np.vstack([hebIndex, \n heb_text, \n heb_version, \n heb_book, \n heb_chapter, \n heb_verse, \n heb_isHeb, \n heb_isEng]).T,\n columns = globalColumnNames)\nprint(\"\\n***here is the hebrew dataframe:\\n\")\nprint(hebDf.head())\n\n####\n## load in english torah texts and metadata; create a DF\n\n# first text \n_, eng_text = loadTest()\n# remove HTML tags\nremoveTags = lambda x: re.sub(r'<.*?>', '', x)\nvecRemoveTags = np.vectorize(removeTags)\neng_text = vecRemoveTags(eng_text)\n# remove non-alpha characters\ncleanEnglish = lambda x: re.sub(r'[^A-Za-z ]+', '', x)\nvecCleanEnglish = np.vectorize(cleanEnglish)\neng_text = vecCleanEnglish(eng_text)\n# make all lower case\nallLower = lambda x: x.lower()\nvecAllLower = np.vectorize(allLower)\neng_text = vecAllLower(eng_text)\n\n# load in english torah metadata\n_, eng_metaData, = loadBookVersions()\n\neng_version, eng_version_book, eng_version_chapter, eng_version_verse = eng_metaData\neng_version = np.array(eng_version)\neng_version_book = np.array(eng_version_book)\neng_version_chapter = np.array(eng_version_chapter)\neng_version_verse = np.array(eng_version_verse)\neng_isHeb = [0] * len(eng_text)\neng_isHeb = np.array(eng_isHeb)\neng_isEng = [1] * len(eng_text)\neng_isEng = np.array(eng_isEng)\nengIndex = [ b+'.'+str(c)+'.'+str(v) for b, c, v in zip(eng_version_book, eng_version_chapter, eng_version_verse)]\nengIndex = np.array(engIndex)\n\nengDf = pd.DataFrame(np.vstack([engIndex, \n eng_text, \n eng_version, \n eng_version_book, \n eng_version_chapter, \n eng_version_verse, \n eng_isHeb, \n eng_isEng]).T,\n columns = globalColumnNames)\nprint(\"\\n***here is the english dataframe:\\n\")\nprint(engDf.head())\n\n# create a pivoted engDf, to match the rows of the hebrewDf.\n# save it as copy, and make another copy, to add stuff to it. 
\n# make a copy of the hebrew too.\n\nengDfPivoted = engDf.pivot(index='indexID', columns='version', values='text').copy()\nengDfPivotedCopy = engDfPivoted.copy()\nhebDfCopy = hebDf.copy()\nprint(\"\\n***here is the pivoted english dataframe:\\n\")\nprint(engDfPivoted.head())\n\nhebDf.to_pickle(hebDfName)\nengDfPivoted.to_pickle(engDfName)\n","repo_name":"jonathancosme/shema","sub_path":"textCleaning.py","file_name":"textCleaning.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"74213017528","text":"'''\r\nUnittests for GroupAnagrams.py\r\nFebruary 2021 Jakub Kazimierski\r\n'''\r\n\r\nimport unittest\r\nfrom GroupAnagrams import groupAnagrams, compare_lists\r\n\r\nclass test_GroupAnagrams(unittest.TestCase): \r\n    '''\r\n    Class with unittests for GroupAnagrams.py\r\n    '''\r\n\r\n    def setUp(self):\r\n        '''\r\n        Sets up input.\r\n        '''\r\n        self.input = [\"yo\", \"act\", \"flop\", \"tac\", \"foo\", \"cat\", \"oy\", \"olfp\"]\r\n        self.output = [[\"yo\", \"oy\"], [\"flop\", \"olfp\"], [\"act\", \"tac\", \"cat\"], [\"foo\"]]\r\n        return self.input, self.output\r\n\r\n    # region Unittests\r\n    def test_ExpectedOutput(self):\r\n        '''\r\n        Checks if returned output is as expected.\r\n        '''\r\n        input_arr, output_arr = self.setUp()\r\n        output = groupAnagrams(input_arr)\r\n        \r\n        self.assertEqual(compare_lists(output, output_arr), True)\r\n    # endregion\r\n\r\nif __name__ == \"__main__\":\r\n    '''\r\n    Main method for test cases.\r\n    '''\r\n    unittest.main()","repo_name":"JakubKazimierski/PythonPortfolio","sub_path":"AlgoExpert_algorithms/Medium/GroupAnagrams/test_GroupAnagrams.py","file_name":"test_GroupAnagrams.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"}
{"seq_id":"71220309049","text":"class Filme:\n    def __init__(self, nome, ano, duracao):\n        self.nome = nome\n        self.ano = ano\n        self.duracao = duracao\n\n\nclass Serie:\n    def __init__(self, nome, ano, temp):\n        self.nome = nome\n        self.ano = ano\n        self.temp = temp\n\n\nvingadores = Filme('vingadores - guerra infinita', 2018, 160)\nprint(vingadores.nome)\n\nChaves = Serie('atlanta', 2018, 2)\nprint(f'Nome: {Chaves.nome} - Ano: {Chaves.ano} - Temporadas: {Chaves.temp}')\n","repo_name":"gusttavocaruso/trybeExercises","sub_path":"MODULO.04_computerScience/BLOCO.34_POO/34.1 - INTRO POO/live-lecture/filmes.py","file_name":"filmes.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
{"seq_id":"44623137026","text":"from PIL import Image\nimport numpy as np\n\ndef load_image(img_path):\n    img = Image.open(img_path)\n    if img.mode != 'RGB':\n        img = img.convert('RGB')\n    img = img.resize((128, 128), Image.BILINEAR)\n    img = np.array(img).astype('float32')\n    img = img.transpose((2, 0, 1)) # HWC to CHW\n    img = img / 255 \n    return img","repo_name":"EITD/PlantDisease","sub_path":"common/load_image.py","file_name":"load_image.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"42120920499","text":"# daydayup: improve 5 days a week and slip on the other 2 (each off day loses 1% of the previous day's value); find the minimum daily improvement factor needed to reach 37.78\n# 20-09-22\n# fyj\n\ndef Dayup(df):\n    day = 1.0\n\n    for i in range(365):\n        if i % 7 in [6,0]:\n            day = (1 - 0.01) * day\n        else:\n            day = (1 + df) * day\n\n    return day\n\nupdayfactor = 0.01\nwhile (Dayup(updayfactor)<37.78):\n    updayfactor += 
0.001\n\nprint(\"With this scheme, the daily improvement factor must be at least {:.3f} to match the result of improving every single day\".format(updayfactor))","repo_name":"EdwinVan/Python","sub_path":"09-22/daydayup-3-5.py","file_name":"daydayup-3-5.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"22516768017","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nfrom functools import lru_cache as cache\n\nclass Solution:\n    def numDupDigitsAtMostN(self, n: int) -> int:\n        digits = [int(ch) for ch in str(n)]  # split n into its individual digits\n\n        @cache\n        def dfs(pos, used, tight):\n            if pos == len(digits):  # filling every position yields one valid (repeat-free) number, so count 1\n                return 1\n\n            # tight determines the upper bound for the digit at this position\n            upper_limit = digits[pos] if tight else 9\n\n            ans = 0\n            for num in range(upper_limit + 1):\n                # num has already been used\n                if (1 << num) & used > 0: continue\n\n                # tight for the next dfs level (we must never exceed the limit)\n                new_tight = tight and num == digits[pos]\n\n                # used for the next level (leading zeros do not count as used)\n                if num == 0 and used == 0:\n                    new_used = 0\n                else:\n                    new_used = used | (1 << num)\n\n                ans += dfs(pos + 1, new_used, new_tight)\n\n            return ans\n\n        # dfs counts the numbers WITHOUT repeated digits, which is not what the problem asks for\n        return n - dfs(0, 0, True) + 1","repo_name":"ftakanashi/JobProjects","sub_path":"LeetCode/1012.至少有一位重复的数字/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
{"seq_id":"13602449650","text":"import pygame\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLACK = (0, 0, 0)\n\nclass HealthBar():\n    \"\"\"Class for the healthbar objects\"\"\"\n    def __init__(self, x, y, health, max_health) -> None:\n        self.x = x\n        self.y = y\n        self.health = health\n        self.max_health = max_health\n\n    def draw(self, health, screen):\n        \"\"\"Updates health bar to reflect current health\"\"\"\n        self.health = health\n        ratio = self.health / self.max_health\n        pygame.draw.rect(screen, BLACK, (self.x - 2, self.y - 2, 154, 24))\n        pygame.draw.rect(screen, RED, (self.x, self.y, 150, 20))\n        pygame.draw.rect(screen, GREEN, (self.x, self.y, 150 * ratio, 20))","repo_name":"sb3-spec/scrolling_shooter","sub_path":"healthbar.py","file_name":"healthbar.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"4275493356","text":"import time\nimport socket\nimport os\nimport tkinter as tk\nfrom tkinter import Entry, filedialog\nimport tqdm\nfrom tkinter import *\nwindow = tk.Tk()\nwindow.title(\"Data Transfer Software\")\nwindow.geometry(\"800x400\")\ntitle = tk.Label(\n    window,\n    text = \"Data Transfer Software\",\n    bg = \"white\",\n    fg = \"black\",\n    font = (\"Arial Bold\", 18)\n)\ntitle.pack()\nLabel(window, text=\"Host IP: \").pack()\nhost_tf = Entry(window, width=100)\nhost_1 = host_tf.pack()\ndef send_file():\n    try:\n        host = host_tf.get() \n        def send_file_function(filename, host, port):\n            # delimiter between the filename and filesize in the header (must match the receiver)\n            SEPARATOR = \"<SEPARATOR>\"\n            BUFFER_SIZE = 4096\n            filesize = os.path.getsize(filename)\n            s = socket.socket()\n            print(f\"Connecting to {host}:{port}\")\n            connecting_1 = tk.Label(\n                window,\n                text = f\"Connecting to {host}:{port}\"\n            )\n            connecting_1.pack()\n            s.connect((host, int(port)))\n            tk.Label(window, text = f\"Connection to {host} was completed successfully\").pack()\n            s.send(f\"{filename}{SEPARATOR}{filesize}\".encode())\n            with open(filename, \"rb\") as file:\n                ok = True\n                while ok:\n                    bytes_read = file.read(BUFFER_SIZE)\n                    if not bytes_read:\n                        break\n                    s.sendall(bytes_read)\n            s.close()\n            import time\n            time.sleep(1.2)\n            print(\"\")\n            print(\"\")\n            
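# report the outcome on both the console and the Tk window
            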
print(f\"Your file has been successfully sent to {host} through port {port}\")\n tk.Label(window, text = f\"Your file has been successfully sent to {host} through port {port}\").pack()\n print(\"\")\n import time\n port = 5001\n root = tk.Tk()\n root.title(\"File\")\n root.withdraw()\n file_path = filedialog.askopenfilename()\n file = str(file_path)\n send_file_function(file, str(host), int(port))\n except Exception as e:\n print(e)\n Label(window, text = str(e)).pack()\n\n\ndef receive_file():\n try:\n hostname = socket.gethostname()\n SERVER_HOST = socket.gethostbyname(hostname)\n SERVER_PORT = 5001\n\n BUFFER_SIZE = 1024 * 4\n\n SEPARATOR = \"\"\n\n s = socket.socket()\n s.bind((SERVER_HOST, SERVER_PORT))\n s.listen()\n Label(window, text = f\"Listening as {SERVER_HOST}:{SERVER_PORT}\").pack()\n client_socket, address = s.accept()\n Label(window, text = f\"{address} is connected.\").pack()\n received = client_socket.recv(BUFFER_SIZE).decode()\n filename, filesize = received.split(SEPARATOR)\n filename = os.path.basename(filename)\n filesize = int(filesize)\n with open(filename, \"wb\") as f:\n while True:\n bytes_read = client_socket.recv(BUFFER_SIZE)\n if not bytes_read:\n break\n f.write(bytes_read)\n\n client_socket.close()\n current_folder = os.getcwd()\n file_location = str(current_folder) + \"/\" + str(filename)\n s.close()\n time.sleep(1.2)\n print(\" \")\n print(\" \")\n print(f\"Your file has been received from {address} to {SERVER_HOST} through port {SERVER_PORT}.\\nFile location: \" + str(file_location) + \"\\n\")\n Label(window, text= f\"Your file has been received from {address} to {SERVER_HOST} through port {SERVER_PORT}.\\nFile location: \" + str(file_location) + \"\\n\").pack()\n except Exception as e:\n print(e)\n Label(window, text = str(e)).pack()\nsend_file = tk.Button(\n window,\n text = \"Send a File\",\n command = send_file\n)\nsend_file.pack()\nreceive_file = tk.Button(\n window,\n text = \"Receive a File\",\n command = receive_file \n)\nreceive_file.pack()\n\nwindow.mainloop()\n","repo_name":"Alexandru6041/Data_Transfer","sub_path":"Data_Transfer.py","file_name":"Data_Transfer.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36013501509","text":"import unittest\nfrom app import app\nimport json\n\nclass FlaskTest(unittest.TestCase):\n\n def test_index(self):\n tester=app.test_client(self)\n urls=['/','/doctor_list']\n for url in urls:\n response=tester.get(url)\n statuscode=response.status_code\n self.assertEqual(statuscode, 200)\n ids=['1','2','3']\n for id in ids:\n response = tester.get('/doctorpage/'+id)\n statuscode = response.status_code\n self.assertEqual(statuscode, 200)\n\n def test_date_format(self):\n params={'Content-Type': 'application/json'}\n with app.test_client() as c:\n rv = c.post('/get_data', json=params)\n json_data = rv.get_data()\n json_data=json.loads(json_data.decode())\n for i in range(len(json_data['availableHours'])):\n splitted=str(json_data['availableHours'][i][0]).replace('-',':').split(':')\n self.assertEqual('-' in str(json_data['availableHours'][i][0]),True)\n self.assertEqual(len(str(json_data['availableHours'][i][0])) in [9,10,11],True)\n self.assertEqual(splitted[0].isdigit(),True)\n self.assertEqual(splitted[2].isdigit(),True)\n self.assertEqual(splitted[1] in ['00','30'],True)\n self.assertEqual(splitted[3] in ['00','30'],True)\n self.assertEqual(int(splitted[0])>=8 and int(splitted[0])<=16 , True)\n 
self.assertEqual(int(splitted[2])>=8 and int(splitted[2])<=16 , True)\n\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"KubickiKacper/BookMed","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13879355676","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# Import the necessary Google Cloud IoT and authentication libraries\nfrom google.cloud import iot_v1\nfrom google.oauth2 import service_account\n\n# Authenticate to Google Cloud IoT using service account credentials\n# Replace 'service_account.json' with the path to your service account key file\ncredentials = service_account.Credentials.from_service_account_file(\n 'service_account.json',\n scopes=['https://www.googleapis.com/auth/cloud-platform']\n)\nclient = iot_v1.DeviceManagerClient(credentials=credentials)\n\n# Define IoT Core registry and device details\n# Replace these values with your specific project, location, registry, and device IDs\nproject_id = 'clodhackhackathon' # Your Google Cloud project ID\nlocation = 'us-central1' # The Google Cloud region\nregistry_id = 'your-registry-id' # The name of your IoT Core registry\ndevice_id = 'device-id' # The name of your IoT device\n\n# Create the device\n# 'parent' is a formatted path specifying the project, location, and registry\nparent = client.registry_path(project_id, location, registry_id)\n\n# Define the device template with the device ID\ndevice_template = {\n \"id\": device_id\n}\n\n# Create the device by sending a request with the parent and device template\ndevice = client.create_device(request={\"parent\": parent, \"device\": device_template})\n\n\n# to store the collected data from iot device in google cloud storage\n\n# In[ ]:\n\n\nfrom google.cloud import storage\n\ndef write_to_gcs(data, bucket_name, file_name):\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n blob = bucket.blob(file_name)\n blob.upload_from_string(data)\n\n# Usage:\n# write_to_gcs(preprocessed_data, 'your-bucket-name', 'preprocessed_eeg_data.json')\n\n","repo_name":"AbhignaRagala/Hackcloud","sub_path":"googlecloudIoT.py","file_name":"googlecloudIoT.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14547091685","text":"from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('add/', views.add, name='add'),\n    path('edit_/', views.edit, name='edit'),\n    path('delete_/', views.delete, name='delete'),\n    path('complete_/', views.complete, name='complete'),\n    path('signup/', views.sign_up, name='signup'),\n    path('login/', auth_views.LoginView.as_view(template_name='registration/login.html'), name='login'),\n]","repo_name":"swsandra/TODOList","sub_path":"ToDoListMiniApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"21185005513","text":"import json\nfrom typing import Type\n\nfrom flask import Blueprint, render_template, request, redirect, url_for, g, session, flash\nfrom sqlalchemy.sql.functions import current_user\n\nfrom todo.auth import login_required\nfrom .models import Partida, User\n\nfrom todo import db\nfrom flask import session\n\nbp = Blueprint('todo', __name__, url_prefix='/todo')\n\n\n@bp.route('/home')\n@login_required\ndef home():\n    return render_template('todo/home.html')\n\n\n@bp.route('/crear-partida', methods=['GET', 'POST'])\n@login_required\ndef crear_partida(): # create a game; receives a list of celebrities\n    if request.method == 'POST':\n        import json\n        data = {\n            \"retados\": request.form.get(\"retado\"),\n            \"modelos\": [\n                (cel, desc) for cel, desc in zip(request.form.getlist(\"celebrity\"), request.form.getlist(\"desc\")) if cel\n            ]\n        }\n        partida = Partida(retados=data[\"retados\"], famosos=json.dumps(data[\"modelos\"]),\n                          created_by=g.user.id, votos='[]')\n        db.session.add(partida)\n        db.session.commit()\n        flash('Partida Creada exitosamente')\n        return render_template('todo/home.html')\n    else:\n        users_list = db.session.query(User).all()\n        user = User.query.all()\n        return render_template('todo/crear-partida.html', users_list=users_list, users=user)\n\n\n@bp.route('/partida/<id>', methods=['GET', 'POST'])\n@login_required\ndef partida(id):\n    partida = db.session.query(Partida).filter_by(id=id).first()\n    votos = json.loads(partida.votos)\n    famosos = json.loads(partida.famosos)\n    if request.method == 'POST':\n        votos.append(request.form.get('chosen'))\n        partida.votos = json.dumps(votos)\n        db.session.commit()\n    if len(votos) > 0 and len(votos) >= len(famosos)-1:\n        index_ganador = int(votos[-1]) # the last vote identifies the last celebrity chosen\n        ganador = famosos[index_ganador][0] # and therefore the winner\n        return render_template('todo/partida.html', partida=partida, ganador= ganador)\n    if len(votos) == 0: # with no votes yet, start presenting celebrities from the beginning\n        index_left = 0\n    else:\n        index_left = int(votos[-1])\n    index_right = len(votos) + 1\n    famoso_left = famosos[index_left][0] # walk the list; famoso_left is the name\n    famoso_right = famosos[index_right][0]\n    return render_template('todo/partida.html', partida=partida, index_right = index_right, index_left = index_left, ganador = None, famoso_left=famoso_left,famoso_right=famoso_right )\n\n\n@bp.route('/mis-partidas/<user_id>', methods=['GET', 'POST'])\n@login_required\ndef mis_partidas(user_id):\n    partidas_recibidas = Partida.query.filter(Partida.retados.contains(str(user_id))).all()\n    partidas_enviadas = Partida.query.filter(Partida.created_by.contains(str(user_id))).all()\n\n    return render_template('todo/mis-partidas.html', partidas_recibidas= partidas_recibidas,partidas_enviadas=partidas_enviadas, User=User, json=json, 
len=len)\n\n\n@bp.route(\"/delete/<id>\")\n@login_required\ndef delete(id):\n    todo = db.session.query(Partida).filter_by(id=id)\n    todo.delete()\n    db.session.commit()\n    partidas = db.session.query(Partida).all()  # returns a list of games\n    return render_template('todo/mis-partidas.html', partidas=partidas)\n","repo_name":"Tatiana-vasquez/My_proyecto_final","sub_path":"todo/todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"31921914552","text":"from __future__ import annotations\n\nfrom wpimath.kinematics import SwerveDrive4Kinematics, SwerveDrive4Odometry, SwerveModulePosition, SwerveModuleState, ChassisSpeeds\nfrom wpimath.geometry import Translation2d, Rotation2d, Pose2d\nfrom wpimath.controller import PIDController\nfrom wpilib import AnalogGyro, Field2d, SmartDashboard\n\nimport wpimath.kinematics._kinematics\nimport typing\nimport wpimath.geometry._geometry\n\nimport math\n\nfrom SwerveModule import SwerveModule\n\nclass SwerveDrivetrain:\n    # I'd suggest not trying to use the units functionality, and just stick to floats.\n    # Whether we use degrees or radians for angle is debatable:\n    # - radians is going to be slightly more efficient, to not be converting back and forth.\n    # - degrees is more familiar, and the performance hit may not be that great.\n    MAX_SPEED : meters_per_second = 3.0\n    MAX_ANGULAR_SPEED : radians_per_second = math.pi # 1/2 rotation per second\n    \n    def __init__(self):\n\n        # Magic number copied from Java example\n        # Change these to depend on our wheelbase and track width, like the stuff near the end of __init__().\n        self.frontLeftLocation = Translation2d(0.381, 0.381)\n        self.frontRightLocation = Translation2d(0.381, -0.381)\n        self.backLeftLocation = Translation2d(-0.381, 0.381)\n        self.backRightLocation = Translation2d(-0.381, -0.381)\n\n        \n        #UPDATE 2/3/2023: See lines 19-21 in SwerveModule.py for the full method parameters.\n        #EG- driveTalon(1), turnTalon(2), driveEncA(0), driveEncB(1), turnEncA(1), turnEncB(3)\n        # It looks like the motor controllers share the same CANbus ID as the Encoder Channel A, \n        # but Encoder Channel B for each has their own ID on the CANbus)\n        # Since the ctre CANcoders will only take one parameter, we'll most likely be changing the code below to hold four arguments\n        \n        \n        self.frontLeft = SwerveModule(1, 2, 0, 1, 2, 3) \n        self.frontRight = SwerveModule(3, 4, 4, 5, 6, 7)\n        self.backLeft = SwerveModule(5, 6, 8, 9, 10, 11)\n        self.backRight = SwerveModule(7, 8, 12, 13, 14, 15)\n        self.swerve_modules = [ self.frontLeft, self.frontRight, self.backLeft, self.backRight ]\n\n        # Instead of an analog gyro, let's use the ADXRS450_Gyro class, like MAKO does in the mecanum folder. \n        # See https://robotpy.readthedocs.io/projects/wpilib/en/stable/wpilib/ADXRS450_Gyro.html#wpilib.ADXRS450_Gyro\n        # and https://github.com/WHEARobotics/MAKO/blob/master/code/mecanum/robot.py\n        # You need to update here and wherever the gyro is used. Note that the ADXRS450 outputs negative degrees\n        # for CCW, when we need positive. 
It also doesn't have a getRotation2d() method, so you'll need to \n # make one with Rotation2d.fromDegrees().\n self.gyro = AnalogGyro(0) \n\n # The proper Kinematics and Odometry class to used is determined by the number of modules on the robot.\n # For example, this 4 module robot uses SwerveDrive4Kinematics and SwerveDrive4Odometry.\n self.kinematics = SwerveDrive4Kinematics(\n self.frontLeftLocation, self.frontRightLocation, \n self.backLeftLocation, self.backRightLocation)\n\n self.odometry = SwerveDrive4Odometry(\n self.kinematics, Rotation2d(),\n (\n self.frontLeft.getPosition(),\n self.frontRight.getPosition(),\n self.backLeft.getPosition(),\n self.backRight.getPosition()\n )\n )\n\n # Where are the swerve modules located on the robot?\n # ? These values of 0.5 are taken from https://github.com/4201VitruvianBots/2021SwerveSim/blob/main/WPILib_SwerveControllerCommand/src/main/java/frc/robot/Constants.java\n # But they seem odd. What units are they?\n # They are probably meters, but the thing I don't understand is why they are different than the self.frontLeftLocation, etc. above.\n # I suggest changing them to be the above --Rod\n wheel_base = 0.5\n track_width = 0.5\n half_wheel_base = wheel_base / 2\n half_track_width = track_width / 2\n self.module_positions = [\n # Front left\n Translation2d(half_wheel_base, half_track_width),\n # Front right\n Translation2d(half_wheel_base, -half_track_width),\n # Back left\n Translation2d(-half_wheel_base, half_track_width),\n # Back right\n Translation2d(-half_wheel_base, -half_track_width)\n ]\n\n # The current pose for each swerve module\n # These values are updated in `periodic()`\n self.module_poses = [\n Pose2d(),\n Pose2d(),\n Pose2d(),\n Pose2d()\n ]\n\n # Simulation support\n self.fieldSim = Field2d()\n SmartDashboard.putData('Field', self.fieldSim)\n\n self.gyro.reset()\n\n def periodic(self):\n self._updateOdometry()\n\n ## Update for simulation\n\n # Update module poses\n for i in range(len(self.module_positions)):\n rotate_by = self.module_positions[i].rotateBy(self.get_heading())\n robot_translation = self.get_pose().translation()\n module_position = rotate_by + robot_translation\n # Module's heading is its angle relative to the chassis heading\n module_angle = self.swerve_modules[i].getState().angle + self.get_pose().rotation() \n self.module_poses[i] = Pose2d(module_position, module_angle)\n\n # Update field sim with information\n self.fieldSim.setRobotPose(self.get_pose())\n self.fieldSim.getObject(\"Swerve Modules\").setPoses(self.module_poses)\n\n\n def drive(self, xSpeed : meters_per_second, ySpeed : meters_per_second, rot : radians_per_second, fieldRelative : bool) -> None:\n chassis_speeds = ChassisSpeeds(xSpeed, ySpeed, rot) if not fieldRelative \\\n else ChassisSpeeds.fromFieldRelativeSpeeds(xSpeed, ySpeed, rot, self.gyro.getRotation2d())\n swerveModuleStates = self.kinematics.toSwerveModuleStates(chassis_speeds)\n\n swerveModuleStates = SwerveDrive4Kinematics.desaturateWheelSpeeds(swerveModuleStates, self.MAX_SPEED)\n\n self.frontLeft.setDesiredState(swerveModuleStates[0])\n self.frontRight.setDesiredState(swerveModuleStates[1])\n self.backLeft.setDesiredState(swerveModuleStates[2])\n self.backRight.setDesiredState(swerveModuleStates[3])\n\n\n def _updateOdometry(self):\n self.odometry.update(\n self.gyro.getRotation2d(),\n self.frontLeft.getPosition(),\n self.frontRight.getPosition(),\n self.backLeft.getPosition(),\n self.backRight.getPosition()\n )\n\n def get_heading(self) -> Rotation2d:\n return 
self.gyro.getRotation2d()\n\n def get_pose(self) -> Pose2d :\n return self.odometry.getPose()\n\n @classmethod\n def getMaxSpeed(cls) -> meters_per_second:\n return cls.MAX_SPEED\n\n\n","repo_name":"WHEARobotics/FRC2023","sub_path":"src/EZSwerve/SwerveDrivetrain.py","file_name":"SwerveDrivetrain.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"28673935922","text":"import datetime\nimport importlib\nimport re\n\nfrom typing import Optional, List\nfrom telegram import Message, Chat, Update, Bot, User\nfrom telegram import ParseMode, InlineKeyboardMarkup, InlineKeyboardButton\nfrom telegram.error import Unauthorized, BadRequest, TimedOut, NetworkError, ChatMigrated, TelegramError\nfrom telegram.ext import CommandHandler, Filters, MessageHandler, CallbackQueryHandler\nfrom telegram.ext.dispatcher import run_async, DispatcherHandlerStop, Dispatcher\nfrom telegram.utils.helpers import escape_markdown\nfrom nandi import dispatcher, updater, TOKEN, WEBHOOK, OWNER_ID, CERT_PATH, PORT, URL, DB_URI, \\\n ALLOW_EXCL\nfrom chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\n\n\nfrom nandi.GroupSet import ALL_SETUP\nfrom nandi.GroupSet.functions.chat_status import is_user_admin\nfrom nandi.GroupSet.functions.misc import paginate_modules\n\n# Create a new chat bot named nandi, with logic adapter bestmatch. and connecting database.\nchatbot = ChatBot('Nandi',\n storage_adapter='chatterbot.storage.SQLStorageAdapter',\n logic_adapters=[\n {\n 'import_path': 'chatterbot.logic.BestMatch'\n }\n ],\n database_uri=DB_URI)\n\n# Create a new trainer for the chatbot\ntrainer1 = ChatterBotCorpusTrainer(chatbot)\n\n# Train the chatbot based on the english corpus\ntrainer1.train(\n 'chatterbot.corpus.english.ai',\n 'chatterbot.corpus.english.botprofile',\n 'chatterbot.corpus.english.computers',\n 'chatterbot.corpus.english.conversations',\n 'chatterbot.corpus.english.emotion',\n 'chatterbot.corpus.english.food',\n 'chatterbot.corpus.english.gossip',\n 'chatterbot.corpus.english.greetings',\n 'chatterbot.corpus.english.health',\n 'chatterbot.corpus.english.history'\n)\n\ntrainer2 = ListTrainer(chatbot)\n\ntrainer2.train([\n \"Hello\",\n \"Hi\",\n \"Hi there!\",\n \"How are you doing?\",\n \"I'm doing great. How are you?\",\n \"I'm good.\"\n \"How are you?\",\n \"I'm fine\",\n \"Are you alright?\",\n \"Yes, i'm alright\",\n \"I need help\",\n \"Can i help you?\",\n \"Ohh\",\n \"Ya\",\n \"ok\",\n \"ok then\",\n \"That is good to hear\",\n \"Where do you live?\",\n \"I'm a bot, i can't live.\",\n \"Where you from?\",\n \"I'm a bot, built with python\",\n \"What is you name?\",\n \"I'm nandi\",\n \"Thank you.\",\n \"You are welcome.\",\n \"Tata\",\n \"Are you going, bye then.\",\n \"Bye\",\n \"Bye Bye\",\n \"What's your name?\",\n \"I'm nandi.\",\n \"Who are you?\",\n \"I'm nandi, an Ai chatbot.\",\n \"your name please?\",\n \"I'm Nandi\",\n \"Who made you?\",\n \"Sreerag is my creator\",\n \"Who created you?\",\n \"Sreerag is my creator\"\n])\n\n\n#start text \nSTART_TEXT = \"\"\" Welcome {}, Im Nandi\"\"\"\n#help text\nHELP_STRINGS = \"\"\" \nHey there! My name is *{}*.\nI'm a modular group management bot with a few fun extras! 
Have a look at the following for an idea of some of \\\nthe things I can help you with.\n*Main* commands available:\n - /start: start the bot\n - /help: PMs you this message.\n {}\n Group Setting Options:\n \"\"\".format(dispatcher.bot.first_name, \"\" if not ALLOW_EXCL else \"\\nAll commands can either be used with / or !.\\n\")\n\n#startup log printed on the terminal \nprint(\"Welcome Sreerag, I'm Nandi\")\n#variables to store loaded module data\nIMPORTED = {}\nHELPABLE = {}\n\n#to load all the group setting options from the folder GroupSet\nfor setup_name in ALL_SETUP:\n imported_setup = importlib.import_module(\"nandi.GroupSet.\" + setup_name)\n if not hasattr(imported_setup, \"__mod_name__\"):\n imported_setup.__mod_name__ = imported_setup.__name__\n #loading module name to print on the help in an inline keyboard on tg.\n if not imported_setup.__mod_name__.lower() in IMPORTED:\n IMPORTED[imported_setup.__mod_name__.lower()] = imported_setup\n else:\n raise Exception(\"Can't have two modules with the same name! Please change one\")\n #help command.\n if hasattr(imported_setup, \"__help__\") and imported_setup.__help__:\n HELPABLE[imported_setup.__mod_name__.lower()] = imported_setup\n\n\n# send help to user to access the command on private message.\ndef send_help(chat_id, text, keyboard=None):\n if not keyboard:\n keyboard = InlineKeyboardMarkup(paginate_modules(0, HELPABLE, \"help\"))\n dispatcher.bot.send_message(chat_id=chat_id,\n text=text,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=keyboard)\n# /start command output for the users, which will load help if the chat type is private.\n@run_async\ndef start(bot: Bot, update: Update, args: List[str]):\n #only when the chat is a private telegram chat. \n if update.effective_chat.type == \"private\":\n if len(args) >= 1:\n if args[0].lower() == \"help\":\n send_help(update.effective_chat.id, HELP_STRINGS)\n else:\n #if /start command has no args then..\n first_name = update.effective_user.first_name\n update.effective_message.reply_text(START_TEXT.format(escape_markdown(first_name)), parse_mode=ParseMode.MARKDOWN) \n \n else:\n #output message for /start in TG group.\n update.effective_message.reply_text(\"Hi, I'm nandi!\")\n\n\n#help button pagination and navigation listener to show the preferred output.\n@run_async\ndef help_button(bot: Bot, update: Update):\n query = update.callback_query\n mod_match = re.match(r\"help_module\\((.+?)\\)\", query.data)\n prev_match = re.match(r\"help_prev\\((.+?)\\)\", query.data)\n next_match = re.match(r\"help_next\\((.+?)\\)\", query.data)\n back_match = re.match(r\"help_back\", query.data)\n try:\n if mod_match:\n module = mod_match.group(1)\n text = \"Here is the help for the *{}* module:\\n\".format(HELPABLE[module].__mod_name__) \\\n + HELPABLE[module].__help__\n query.message.reply_text(text=text,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n [[InlineKeyboardButton(text=\"Back\", callback_data=\"help_back\")]]))\n\n elif prev_match:\n curr_page = int(prev_match.group(1))\n query.message.reply_text(HELP_STRINGS,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n paginate_modules(curr_page - 1, HELPABLE, \"help\")))\n\n elif next_match:\n next_page = int(next_match.group(1))\n query.message.reply_text(HELP_STRINGS,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(\n paginate_modules(next_page + 1, HELPABLE, \"help\")))\n\n elif back_match:\n query.message.reply_text(text=HELP_STRINGS,\n parse_mode=ParseMode.MARKDOWN,\n reply_markup=InlineKeyboardMarkup(paginate_modules(0, HELPABLE, \"help\"))))\n\n # ensure no spinny white circle\n bot.answer_callback_query(query.id)\n query.message.delete()\n except BadRequest as excp:\n if excp.message == \"Message is not modified\":\n pass\n elif excp.message == \"Query_id_invalid\":\n pass\n elif excp.message == \"Message can't be deleted\":\n pass\n else:\n print(\"Help button error\")\n\n\n@run_async\ndef get_help(bot: Bot, update: Update):\n chat = update.effective_chat # type: Optional[Chat]\n args = update.effective_message.text.split(None, 1)\n\n # ONLY send help in PM\n if chat.type != chat.PRIVATE:\n\n update.effective_message.reply_text(\"Contact me in PM to get the list of possible commands.\",\n reply_markup=InlineKeyboardMarkup(\n [[InlineKeyboardButton(text=\"Help\",\n url=\"t.me/{}?start=help\".format(\n bot.username))]]))\n return\n\n elif len(args) >= 2 and any(args[1].lower() == x for x in HELPABLE):\n module = args[1].lower()\n text = \"Here is the available help for the *{}* module:\\n\".format(HELPABLE[module].__mod_name__) \\\n + HELPABLE[module].__help__\n send_help(chat.id, text, InlineKeyboardMarkup([[InlineKeyboardButton(text=\"Back\", callback_data=\"help_back\")]]))\n\n else:\n send_help(chat.id, HELP_STRINGS)\n\n\n# text message handler \ndef text_messages(bot: Bot, update: Update):\n try: \n if update.effective_chat.type == \"private\":\n # Get current date and time\n currenttime = datetime.datetime.now()\n \n # Format datetime string\n ctime = currenttime.strftime(\"%H:%M:%S\")\n # Get the text the user sent\n gettext = str(update.message.text)\n # this variable stores the result of arithmetic \n arthans = 0\n #to flag whether the user used arithmetic operations.\n isarth = False\n #converting the string to an array of tokens.\n arrayinput = gettext.split(' ')\n #finding whether the array length is at least 2\n if len(arrayinput) >= 2:\n leninput = len(arrayinput) #storing len to variable\n i = 0\n while (i + 2) < leninput:\n if arrayinput[i].isnumeric(): #check the first value is numeric, then.\n if arrayinput[i + 1] == '+' or arrayinput[i + 1] == '-' or arrayinput[i + 1] == '*' or arrayinput[i + 1] == '/':\n arth = arrayinput[i + 1]\n if arrayinput[i + 2].isnumeric():#check the second operand is numeric.\n if arth == '+':\n isarth = True\n arthans = float(arrayinput[i]) + float(arrayinput [i + 2])\n bot_msg = \"{} + {} = {}\".format(arrayinput[i], arrayinput [i + 2], arthans)\n elif arth == '-':\n isarth = True\n arthans = float(arrayinput[i]) - float(arrayinput [i + 2])\n bot_msg = \"{} - {} = {}\".format(arrayinput[i], arrayinput [i + 2], arthans)\n elif arth == '*':\n isarth = True\n arthans = float(arrayinput[i]) * float(arrayinput [i + 2])\n bot_msg = \"{} * {} = {}\".format(arrayinput[i], arrayinput [i + 2], arthans)\n elif arth == '/':\n isarth = True\n if int(arrayinput[i + 2]) > 0: # checking whether the divisor is 0.\n arthans = float(arrayinput[i]) / float(arrayinput [i + 2])\n bot_msg = \"{} / {} = {}\".format(arrayinput[i], arrayinput [i + 2], arthans)\n else:\n bot_msg = \"oops, dividing by zero is not possible for me\"\n else:\n i = i + 1\n i = i + 1 # incrementing\n if isarth == True:\n isarth = False\n else:\n #fall back to the chatbot when no arithmetic was parsed, so bot_msg is always set.\n bot_msg = str(chatbot.get_response(gettext))\n #Greetings reply according to the time from the server.\n elif \"good morning\" in gettext.lower() or \"good evening\" in gettext.lower() or \"good afternoon\" in gettext.lower():\n if datetime.datetime.now().time() > datetime.time(0, 0, 0, 0) and datetime.datetime.now().time() < datetime.time(12, 30, 0, 0):\n if \"good morning\" not in gettext.lower():\n #checking whether the user gave the right greeting for the bot's current time.\n bot_msg = \"My time is {}. it's Good morning\".format(ctime)\n else:\n #reply greetings\n bot_msg = str(\"Good morning\") \n elif datetime.datetime.now().time() > datetime.time(12, 30, 0, 0) and datetime.datetime.now().time() < datetime.time(17, 0, 0, 0):\n if \"good afternoon\" not in gettext.lower():\n #checking whether the user gave the right greeting for the bot's current time.\n bot_msg = \"My time is {}. it's Good Afternoon\".format(ctime)\n else:\n #reply greetings\n bot_msg = str(\"Good Afternoon\")\n else:\n if \"good evening\" not in gettext.lower():\n #checking whether the user gave the right greeting for the bot's current time.\n bot_msg = \"My time is {}. it's Good Evening\".format(ctime)\n else:\n #reply greetings\n bot_msg = str(\"Good Evening\")\n #if the greeting was good night then..\n elif \"good night\" in gettext.lower():\n if datetime.datetime.now().time() > datetime.time(20, 0, 0, 0) and datetime.datetime.now().time() < datetime.time(23, 58, 0, 0):\n bot_msg = str(\"Good Night\")\n else:\n bot_msg = str(\"Are you going to sleep at this time?\")\n else:\n #if none of the above conditions match, the normal chatbot conversation continues.\n bot_msg = str(chatbot.get_response(gettext))\n #reply to the last received message.\n bot.sendMessage(chat_id=update.message.chat_id, \n text=bot_msg)\n # chat logs on the terminal\n print(\"YOU :\", gettext)\n print(\"NANDI: \", bot_msg)\n #exception handling.\n except UnicodeEncodeError:\n bot.sendMessage(chat_id=update.message.chat_id, \n text=\"Sorry, I can't get you.\")\n except (KeyboardInterrupt, EOFError, SystemExit):\n bot.sendMessage(chat_id=update.message.chat_id, \n text=\"Thank you. I'm going to sleep now\")\n\ndef main():\n\n #initializing all the handlers with their functions\n start_handler = CommandHandler(\"start\", start, pass_args=True)\n text_handler = MessageHandler(Filters.text, text_messages)\n help_handler = CommandHandler(\"help\", get_help)\n help_callback_handler = CallbackQueryHandler(help_button, pattern=r\"help_\")\n\n #registering handlers on the bot \n #/start handler in telegram\n dispatcher.add_handler(start_handler)\n #/help handler in telegram bot\n dispatcher.add_handler(help_handler)\n #handler to listen to all messages except commands\n dispatcher.add_handler(text_handler)\n #help module selection handler \n dispatcher.add_handler(help_callback_handler)\n\n if WEBHOOK:\n #receive updates via webhook if it's enabled..\n updater.start_webhook(listen=\"127.0.0.1\",\n port=PORT,\n url_path=TOKEN)\n\n if CERT_PATH:\n updater.bot.set_webhook(url=URL + TOKEN,\n certificate=open(CERT_PATH, 'rb'))\n else:\n updater.bot.set_webhook(url=URL + TOKEN)\n #link to set default webhook..\n\n else:\n #long-polling timeouts for fetching updates\n updater.start_polling(timeout=15, read_latency=4)\n #keep the bot alive, listening for updates. 
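idle() below blocks the main thread until a stop signal (e.g. Ctrl-C) arrives.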
\n updater.idle()\n\n#main method calling.\nif __name__ == '__main__':\n main()","repo_name":"sreeragvv/nandi","sub_path":"nandi/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":16118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14803997399","text":"board = [\n [9,0,0,0,0,0,0,0,0],\n [3,0,0,0,6,0,0,2,0],\n [0,0,5,0,0,0,7,0,3],\n [0,3,1,0,8,4,0,0,0],\n [8,2,0,0,1,0,5,4,9],\n [0,4,0,0,0,0,8,0,0],\n [7,5,0,1,0,6,0,8,0],\n [4,0,0,8,0,0,1,0,0],\n [0,0,0,7,0,0,0,0,0]\n]\n\n\ndef display_board(bo):\n\n \"\"\"\n displays the board\n :param bo: 2d List of ints\n :return: None\n \"\"\"\n\n for i in range(len(bo)):\n if i % 3 == 0 and i != 0:\n print(\"- - - - - - - - - - - -\")\n\n for j in range(len(bo[0])):\n if j % 3 == 0 and j!= 0:\n print (\" | \", end=\"\")\n\n if j == 8:\n print(bo[i][j])\n \n else:\n print(str(bo[i][j]) + \" \", end=\"\")\n\n\n\ndef find_available(bo):\n\n \"\"\"\n finds an empty space (empty spaces set as 0) in the board\n :param bo: incomplete board\n :return: (int, int) row col\n \"\"\"\n \n for i in range(len(bo)):\n for j in range(len(bo[0])):\n if bo[i][j] == 0:\n return (i,j) \n \n return None\n\n\ndef valid(bo, num, pos):\n\n \"\"\"\n Returns if inputted number attempt is valid\n :param bo: 2d list of ints\n :param pos: (row, col)\n :param num: int\n :return: bool\n \"\"\"\n\n #checking rows\n for i in range(len(bo[0])):\n if bo[pos[0]][i] == num and pos[1] != i:\n return False\n \n #checking columns\n for i in range(len(bo)):\n if bo[i][pos[1]] == num and pos[0] != i:\n return False\n \n #check 3x3 box\n box_x = pos[1] // 3\n box_y = pos[0] // 3\n\n for i in range(3*box_y, 3*box_y +3):\n for j in range(3*box_x, 3*box_x +3):\n if bo[i][j] == num and (i,j) != pos:\n return False\n\n return True\n\n\ndef solver(bo):\n\n \"\"\"\n Solves the sudoku board \n :param bo: 2d list of ints\n :return: solution\n \"\"\"\n\n finder = find_available(bo)\n\n if not finder:\n return True\n else:\n row, column = finder\n \n for i in range(1,10):\n if valid(bo, i, (row, column)):\n bo[row][column] = i\n\n if solver(bo):\n return True\n\n bo[row][column] = 0\n\nsolver(board)\ndisplay_board(board)\n ","repo_name":"a-majeed/Sudoku-Solver","sub_path":"sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23783674742","text":"import sys\n\ninput = sys.stdin.readline\n\nn = int(input())\na, b = map(int, input().split())\nc = int(input())\nd = []\n\nfor _ in range(n):\n d.append(int(input()))\n\nd.sort(reverse=True)\n\nkcal = c\ncost = a\nresult = c // a\n\nfor i in d:\n kcal += i\n cost += b\n \n kcal_per_won = kcal // cost\n if result > kcal_per_won:\n break\n else:\n result = kcal_per_won\n \nprint(result)","repo_name":"pangpang-study/algorithm-coding-test-study","sub_path":"jhy/211116/최고의 피자.py","file_name":"최고의 피자.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"74989379447","text":"import openmdao.api as om\nimport aerosandbox as asb\nfrom stackelberg import Player, NaGame\nimport numpy as np\nfrom pymoo.algorithms.soo.nonconvex.ga import GA\n\nfrom tqdm import tqdm\nfrom mpire import WorkerPool\n\nimport concurrent.futures\n\nimport time\n\n\n# def udp_airfoil_target(baseline_lower_weights, baseline_upper_weights, iaoa, il, iu):\n# design_lower = 
baseline_lower_weights + il\n# design_upper = baseline_upper_weights + iu\n# design_airfoil = asb.KulfanAirfoil(\n# name=\"design\", lower_weights=design_lower, upper_weights=design_upper\n# )\n# aero = design_airfoil.get_aero_from_neuralfoil(\n# alpha=iaoa, Re=1e6, mach=0.3, model_size=\"xxxlarge\"\n# )\n# return aero[\"CL\"], aero[\"CD\"], aero[\"CM\"], design_airfoil.max_thickness()\n\n\nclass udp_airfoil(om.ExplicitComponent):\n def initialize(self):\n self.options.declare(\"pop_size\", default=1, desc=\"The number of population\")\n self.options.declare(\"baseairfoil\", default=\"naca0012\", desc=\"The base airfoil\")\n self.options.declare(\"mpi\", default=0, desc=\"The number of processes\")\n\n def setup(self):\n pop_size = self.options[\"pop_size\"]\n self.add_input(\"aoa\", val=0.0, shape=(pop_size, 1), desc=\"attack angle\")\n self.add_input(\n \"kulfan_dl\",\n val=0.0,\n shape=(pop_size, 8),\n desc=r\"CST parameters' change of lower surface\",\n )\n self.add_input(\n \"kulfan_du\",\n val=0.0,\n shape=(pop_size, 8),\n desc=r\"CST parameters' change of upper surface\",\n )\n self.add_output(\"CL\", val=1.0, shape=(pop_size, 1))\n self.add_output(\"CD\", val=1.0, shape=(pop_size, 1))\n self.add_output(\"CM\", val=1.0, shape=(pop_size, 1))\n self.add_output(\"MAXTC\", val=1.0, shape=(pop_size, 1))\n\n baseairfoil = self.options[\"baseairfoil\"]\n self.baseline = asb.KulfanAirfoil(name=baseairfoil)\n # self.baseline.upper_weights\n\n def setup_partials(self):\n self.declare_partials(\n of=[\n \"*\",\n ],\n wrt=[\n \"*\",\n ],\n method=\"fd\",\n )\n\n @staticmethod\n def _target(baseline_lower_weights, baseline_upper_weights, iaoa, il, iu):\n design_lower = baseline_lower_weights + il\n design_upper = baseline_upper_weights + iu\n design_airfoil = asb.KulfanAirfoil(\n name=\"design\", lower_weights=design_lower, upper_weights=design_upper\n )\n aero = design_airfoil.get_aero_from_neuralfoil(\n alpha=iaoa, Re=1e6, mach=0.3, model_size=\"xxxlarge\"\n )\n return aero[\"CL\"], aero[\"CD\"], aero[\"CM\"], design_airfoil.max_thickness()\n\n def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):\n mpi = self.options[\"mpi\"]\n if not mpi:\n aoa = inputs[\"aoa\"]\n dl = inputs[\"kulfan_dl\"]\n du = inputs[\"kulfan_du\"]\n pop_size = dl.shape[0]\n\n pbar = tqdm(total=pop_size, desc=f\"iter: {self.iter_count+1}\", ncols=150)\n cl = []\n cd = []\n cm = []\n maxtc = []\n\n for i in range(pop_size):\n aero = udp_airfoil._target(\n self.baseline.lower_weights,\n self.baseline.upper_weights,\n aoa[i],\n dl[i],\n du[i],\n )\n cl.append(aero[0])\n cd.append(aero[1])\n cm.append(aero[2])\n maxtc.append(aero[3])\n pbar.update(1)\n pbar.close()\n\n outputs[\"CL\"] = np.array(cl).reshape(-1, 1)\n outputs[\"CD\"] = np.array(cd).reshape(-1, 1)\n outputs[\"CM\"] = np.array(cm).reshape(-1, 1)\n outputs[\"MAXTC\"] = np.array(maxtc).reshape(-1, 1)\n\n else:\n aoa = inputs[\"aoa\"]\n dl = inputs[\"kulfan_dl\"]\n du = inputs[\"kulfan_du\"]\n pop_size = dl.shape[0]\n\n with WorkerPool(n_jobs=mpi) as pool:\n mpirs = pool.map(\n udp_airfoil._target,\n zip(\n np.tile(self.baseline.lower_weights, (pop_size, 1)),\n np.tile(self.baseline.upper_weights, (pop_size, 1)),\n aoa,\n dl,\n du,\n ),\n iterable_len=pop_size,\n progress_bar=True,\n progress_bar_options={\n \"desc\": f\"iter: {self.iter_count}\",\n \"ncols\": 150,\n },\n )\n cl = []\n cd = []\n cm = []\n maxtc = []\n for i in mpirs:\n cl.append(i[0])\n cd.append(i[1])\n cm.append(i[2])\n maxtc.append(i[3])\n\n outputs[\"CL\"] = 
np.array(cl).reshape(-1, 1)\n outputs[\"CD\"] = np.array(cd).reshape(-1, 1)\n outputs[\"CM\"] = np.array(cm).reshape(-1, 1)\n outputs[\"MAXTC\"] = np.array(maxtc).reshape(-1, 1)\n\n def compute_partials(self, inputs, partials, discrete_inputs=None):\n ...\n\n def plot(self):\n import matplotlib.pyplot as plt\n import scienceplots\n\n plt.style.use([\"science\", \"notebook\", \"no-latex\"])\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n\n fig = plt.figure(0, layout=\"constrained\")\n ax = fig.add_subplot(111)\n\n ax.plot(\n self.baseline.coordinates[:, 0],\n self.baseline.coordinates[:, 1],\n \"r-\",\n label=\"baseline\",\n )\n ax.legend(prop={\"size\": 20})\n ax.tick_params(which=\"both\", top=False, right=False)\n plt.show()\n\n\ndef test_aero():\n pop = 100\n airPlayer = Player(tag=\"airfoil\", optimizer_type=\"External\")\n airPlayer.optimizer = GA(pop_size=pop)\n model = airPlayer.model\n air = model.add_subsystem(\n name=\"air\",\n subsys=udp_airfoil(baseairfoil=\"rae2822\", pop_size=pop, mpi=0),\n promotes_inputs=[\"*\"],\n promotes_outputs=[\"*\"],\n )\n\n model.add_design_var(\"aoa\", lower=-5.0, upper=5.0)\n model.add_design_var(\"kulfan_dl\", lower=-0.2, upper=0.0)\n model.add_design_var(\"kulfan_du\", lower=0.0, upper=0.2)\n model.add_objective(\"CL\", scaler=-1.0)\n model.add_constraint(\"MAXTC\", lower=0.12)\n airPlayer.setup()\n\n airPlayer.run_External_driver(termination=(\"n_gen\", 100),savetopkl=True,pklfile='./p0_history.pkl')\n\n print(airPlayer.opt)\n # airPlayer.run_model()\n\n # print(airPlayer[\"CL\"], airPlayer[\"CD\"], airPlayer[\"CM\"], airPlayer[\"MAXTC\"])\n\n\ndef test_nash():\n p1_pop = 100\n p2_pop = 100\n p3_pop = 100\n\n p1 = Player(tag=\"p1\", optimizer_type=\"External\")\n p1.optimizer = GA(pop_size=p1_pop)\n model = p1.model\n air = model.add_subsystem(\n \"air\",\n subsys=udp_airfoil(baseairfoil=\"rae2822\", pop_size=p1_pop, mpi=0),\n promotes_inputs=[\"*\"],\n promotes_outputs=[\"*\"],\n )\n model.add_design_var(\"aoa\", lower=-5.0, upper=5.0)\n model.add_objective(\"CL\", scaler=-1.0)\n model.add_constraint(\"MAXTC\", lower=0.12)\n p1.setup()\n\n p2 = Player(tag=\"p2\", optimizer_type=\"External\")\n p2.optimizer = GA(pop_size=p2_pop)\n model = p2.model\n air = model.add_subsystem(\n name=\"air\",\n subsys=udp_airfoil(baseairfoil=\"rae2822\", pop_size=p2_pop, mpi=0),\n promotes_inputs=[\"*\"],\n promotes_outputs=[\"*\"],\n )\n model.add_design_var(\"kulfan_dl\", lower=-0.2, upper=0.0)\n model.add_objective(\"CL\", scaler=-1.0)\n model.add_constraint(\"MAXTC\", lower=0.12)\n p2.setup()\n\n p3 = Player(tag=\"p3\", optimizer_type=\"External\")\n p3.optimizer = GA(pop_size=p3_pop)\n model = p3.model\n air = model.add_subsystem(\n \"air\",\n subsys=udp_airfoil(baseairfoil=\"rae2822\", pop_size=p3_pop, mpi=0),\n promotes_inputs=[\"*\"],\n promotes_outputs=[\"*\"],\n )\n model.add_design_var(\"kulfan_du\", lower=0.0, upper=0.2)\n model.add_objective(\"CL\", scaler=-1.0)\n model.add_constraint(\"MAXTC\", lower=0.12)\n p3.setup()\n\n class Nash(NaGame):\n def __init__(self, players: list[Player] = ...) 
-> None:\n self.players = [p1, p2, p3]\n\n def run_External_driver(self):\n self.comm_aoa = np.zeros(1)\n self.comm_kulfandl = np.zeros(8)\n self.comm_kulfandu = np.zeros(8)\n\n Niters = 10\n for i in range(Niters):\n p1.set_srcval(\"kulfan_dl\", self.comm_kulfandl)\n p1.set_srcval(\"kulfan_du\", self.comm_kulfandu)\n\n p2.set_srcval(\"aoa\", self.comm_aoa)\n p2.set_srcval(\"kulfan_du\", self.comm_kulfandu)\n\n p3.set_srcval(\"aoa\", self.comm_aoa)\n p3.set_srcval(\"kulfan_dl\", self.comm_kulfandl)\n\n t1=time.time()\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:\n futures1=executor.submit(p1.run_External_driver,termination=('n_gen',10),restart=False,savetopkl=True)\n futures2=executor.submit(p2.run_External_driver,termination=('n_gen',10),restart=False,savetopkl=True)\n futures3=executor.submit(p3.run_External_driver,termination=('n_gen',10),restart=False,savetopkl=True)\n futures=[futures1,futures2,futures3]\n concurrent.futures.wait(futures)\n \n # p1.run_External_driver(termination=('n_gen',10),restart=False,savetopkl=True)\n # p2.run_External_driver(termination=('n_gen',10),restart=False,savetopkl=True)\n # p3.run_External_driver(termination=('n_gen',10),restart=False,savetopkl=True)\n\n t2=time.time()\n print(f\"execute in {t2-t1} seconds\")\n\n opt_aoa=p1.opt['X']\n opt_dl=p2.opt['X']\n opt_du=p3.opt['X']\n\n self.comm_aoa[:]=opt_aoa\n self.comm_kulfandl[:]=opt_dl\n self.comm_kulfandu[:]=opt_du\n nash=Nash()\n nash.setup()\n nash.run_External_driver()\n\n\ndef test_SgE():\n ...\n\nif __name__ == \"__main__\":\n test_aero()\n","repo_name":"Zcaic/vsg","sub_path":"test_copy.py","file_name":"test_copy.py","file_ext":"py","file_size_in_byte":10307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34100419190","text":"import tensorflow as tf\n\nimport likelihood_utils\n\nDATA_FILE = \"data/birth_life_2010.txt\"\n\n# Step 1: read in data from the .txt file\n# data is a numpy array of shape (190, 2), each row is a datapoint\ndata, n_samples = likelihood_utils.read_birth_life_data(DATA_FILE)\n\n# Step 2: create placeholders for X (birth rate) and Y (life expectancy)\nX = tf.placeholder(tf.float32, name='X')\nY = tf.placeholder(tf.float32, name='Y')\n\n# Step 3: create weight and bias, initialized to 0\nw = tf.get_variable('weights', initializer=tf.constant(0.0))\nb = tf.get_variable('bias', initializer=tf.constant(0.0))\n\n# Step 4: construct model to predict Y (life expectancy from birth rate)\nY_predicted = w * X + b \n\n# Step 5: use the square error as the loss function\nloss = tf.square(Y - Y_predicted, name='loss')\n# loss = utils.huber_loss(Y, Y_predicted)\n\n# Step 6: using gradient descent with learning rate of 0.01 to minimize loss\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)\n \nwith tf.Session() as sess:\n # Step 7: initialize the necessary variables, in this case, w and b\n sess.run(tf.global_variables_initializer())\n\n # Step 8: train the model\n for i in range(100): # run 100 epochs\n for x, y in data:\n # Session runs train_op to minimize loss\n sess.run(optimizer, feed_dict={X: x, Y: y})\n\n # Step 9: output the values of w and b\n w_out, b_out = sess.run([w, b])\n\n\n# defining huber loss. 
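Huber is quadratic for small residuals and linear beyond delta, so outliers hurt far less than with squared error. 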
Conditionals in TF are different, unless eager execution is on\ndef huber_loss(labels, predictions, delta=14.0):\n residual = tf.abs(labels - predictions)\n\n def f1(): return 0.5 * tf.square(residual)\n\n def f2(): return delta * residual - 0.5 * tf.square(delta)\n return tf.cond(residual < delta, f1, f2)\n\n\n# With tf.data, instead of storing our input data in a non-TensorFlow object, we store it in a tf.data.Dataset object.\n# tf.data.Dataset.from_tensor_slices((features, labels))\n# They can also be numpy arrays\n# dataset = tf.data.Dataset.from_tensor_slices((data[:,0], data[:,1]))\n\n# Then you can iterate through the dataset using ... . This way the bottleneck is no longer the python data_feed loop.\n# iterator = dataset.make_initializable_iterator()\n# X, Y = iterator.get_next()\n\n# ways to manipulate the datasets\n# dataset = dataset.shuffle(1000)\n# dataset = dataset.repeat(100)\n# dataset = dataset.batch(128)\n# dataset = dataset.map(lambda x: tf.one_hot(x, 10))\n# # convert each element of dataset to one_hot vector\n","repo_name":"Makan-Ar/uncertain-hawkes-process","sub_path":"practice/linreg_dataset.py","file_name":"linreg_dataset.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39192898472","text":"from django.contrib.auth.decorators import login_required\nfrom rest_framework.decorators import api_view\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse\n\nfrom uks_app.models import User\n\n@login_required\n@api_view(['POST', ])\ndef follow(request):\n user = request.user # the logged-in user\n if request.method == 'POST':\n selected_user = get_object_or_404(User, username=request.data['username']) # the user I want to follow\n user.profile.following.add(selected_user.profile) \n selected_user.profile.followers.add(user.profile)\n return HttpResponse('Followed', status=200)\n\n@login_required\n@api_view(['POST', ])\ndef unfollow(request):\n user = request.user # the logged-in user\n if request.method == 'POST':\n selected_user = get_object_or_404(User, username=request.data['username']) # the user I want to unfollow\n user.profile.following.remove(selected_user.profile) \n selected_user.profile.followers.remove(user.profile)\n return HttpResponse('Unfollowed', status=200)","repo_name":"matkovskim/uks-project","sub_path":"uks_app/views/followers_views.py","file_name":"followers_views.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"11155555783","text":"from simpletransformers.classification import ClassificationModel, ClassificationArgs\nfrom rq import get_current_job\n\n\ndef create_model_and_train(train_df, username, reset_train):\n class_weights = (1 / train_df['labels'].value_counts(normalize=True)).to_list()\n model_args = ClassificationArgs(num_train_epochs=1, output_dir=\"output/\" + username, overwrite_output_dir=True)\n\n if reset_train:\n model = ClassificationModel(\"distilbert\", \"distilbert-base-uncased-finetuned-sst-2-english\", args=model_args,\n weight=class_weights, use_cuda=False)\n else:\n model = ClassificationModel(\n \"distilbert\", \"output/\" + username, use_cuda=False, args=model_args,\n weight=class_weights)\n # train the model\n model.train_model(train_df)\n return \"model trained\"\n\n\ndef test_model(test_df, username):\n job = get_current_job()\n\n n = max(1, len(test_df) // 20) # chunk row size (at least 1 so the range step is never 0)\n list_df 
= [test_df[i:i + n] for i in range(0, test_df.shape[0], n)]\n predictions = []\n\n model = ClassificationModel(\n \"distilbert\", \"output/\" + username, use_cuda=False\n )\n for i in range(len(list_df)):\n job.meta['progress'] = i / len(list_df) * 100\n job.save_meta()\n\n list_df_chunk = list_df[i]['text'].to_list()\n prediction_val, _ = model.predict(list_df_chunk)\n predictions.append(prediction_val)\n\n predictions_flat = [item for sublist in predictions for item in sublist]\n\n test_df[\"prediction\"] = predictions_flat\n test_df.to_csv('predict_' + username)\n return 0\n","repo_name":"bhuvaneshwaribasquarane/High_recall_information_retrieval","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35401868087","text":"import client\nimport tensorflow as tf\nimport sys, os\nimport random\nimport numpy as np\nimport glob\n\nclient_ip = os.environ[\"client\"]\nserver_ip = os.environ[\"server\"]\n# client_ip = sys.argv[1]\n# server_ip = sys.argv[2]\n\nprint('CLIENT IP',client_ip)\nprint('SERVER IP',server_ip)\n\n# Load model and data (MobileNetV2, CIFAR-10)\nmodel = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None)\nmodel.compile(\"adam\", \"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\n\ndef sampling_data():\n # num_of_each_dataset = 500 #500\n num_of_each_dataset = int(x_train.shape[0]/10)\n split_data_index = []\n while len(split_data_index) < num_of_each_dataset:\n item = random.choice(range(x_train.shape[0]))\n if item not in split_data_index:\n split_data_index.append(item)\n new_x_train = np.asarray([x_train[k] for k in split_data_index])\n new_y_train = np.asarray([y_train[k] for k in split_data_index])\n return new_x_train, new_y_train\n\n# Define Flower client\nclass CifarClient(client.NumPyClient):\n\n def __init__(self, rnd):\n self.round = rnd\n\n def get_parameters(self):\n return model.get_weights()\n\n def fit(self, parameters, config):\n self.round = self.round + 1\n model.set_weights(parameters)\n # model.save_weights('local_models/client_'+client_ip+'_round'+str(self.round)+'_before.h5')\n x_train, y_train = sampling_data()\n model.fit(x_train, y_train, epochs=5, batch_size=16, verbose=2)\n # model.save_weights('local_models/client_'+client_ip+'_round'+str(self.round)+'_after.h5')\n model.save_weights('local_models/client_'+client_ip+'_round'+str(self.round)+'.h5')\n return model.get_weights(), len(x_train), {}\n\n def evaluate(self, parameters, config):\n model.set_weights(parameters)\n loss, accuracy = model.evaluate(x_test, y_test, verbose=2)\n return loss, len(x_test), {\"accuracy\": accuracy}\n\n# Start Flower client\nclient.start_numpy_client(server_ip+\":19191\", client=CifarClient(len(glob.glob('local_models/*.h5'))))","repo_name":"Kundjanasith/docker_swarmlearning","sub_path":"src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"7118506047","text":"from class_state_vector import state_vector\nfrom class_da_system import da_system\nimport numpy as np\nfrom sys import argv\nimport matplotlib.pyplot as plt\n\ndef main():\n nature_file ='x_nature.pkl'\n nature = state_vector()\n nature = nature.load(nature_file)\n freerun_file = 'x_freerun.pkl'\n freerun = state_vector()\n 
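# reload the stored free-run trajectory for comparison with the nature run\n    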
freerun = freerun.load(freerun_file)\n method = argv[1]\n analysis_file = 'x_analysis_{method}.pkl'.format(method=method)\n das = da_system()\n das = das.load(analysis_file)\n analysis = das.getStateVector()\n plot_rmse_all(nature, freerun, analysis, method, np.s_[:, :],\n \"img/%s/rmse_all.pdf\" % method)\n plot_rmse_all(nature, freerun, analysis, method, np.s_[:, 0:10],\n \"img/%s/rmse_atmos_psi.pdf\" % method)\n plot_rmse_all(nature, freerun, analysis, method, np.s_[:, 10:20],\n \"img/%s/rmse_atmos_temp.pdf\" % method)\n plot_rmse_all(nature, freerun, analysis, method, np.s_[:, 20:28],\n \"img/%s/rmse_ocean_psi.pdf\" % method)\n plot_rmse_all(nature, freerun, analysis, method, np.s_[:, 28:36],\n \"img/%s/rmse_ocean_temp.pdf\" % method)\n\ndef plot_rmse_all(nature, freerun, analysis, method, slice, img_name):\n plt.plot(nature.getTimes(),\n np.linalg.norm(freerun.getTrajectory()[slice] - nature.getTrajectory()[slice],\n axis=1), label='Free run')\n plt.plot(nature.getTimes(),\n np.linalg.norm(analysis.getTrajectory()[slice] - nature.getTrajectory()[slice],\n axis=1), label='Analysis ({method})'.format(method=method))\n if analysis.getEnsembleTrajectory() is not None:\n ptb = analysis.getEnsembleTrajectory()[:, :, :] - analysis.getTrajectory()[:, :, np.newaxis]\n edim = ptb.shape[2]\n sprd = (np.sum(ptb ** 2, axis=2) / (edim - 1.0)) ** 0.5\n plt.plot(nature.getTimes(), np.linalg.norm(sprd[slice], axis=1),\n label='Analysis spread ({method})'.format(method=method))\n plt.legend()\n plt.yscale(\"log\")\n plt.xlabel('Time')\n plt.ylabel('Error', rotation='horizontal', labelpad=20)\n plt.title(img_name)\n plt.tight_layout()\n plt.savefig(img_name)\n plt.close()\n\nmain()\n","repo_name":"UMD-AOSC/DA_Tutorial","sub_path":"MAOOAM/plot_error.py","file_name":"plot_error.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"77"} +{"seq_id":"38677575282","text":"from django.db import models\r\n\r\nfrom utilities.models import SingletonModel\r\n\r\n\r\nclass AdvertisingBanner(SingletonModel):\r\n \"\"\"\r\n model for handling adv images\r\n \"\"\"\r\n\r\n first_row_first_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف اول عکس اول\"\r\n )\r\n first_row_first_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n first_row_second_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف اول عکس دوم\"\r\n )\r\n first_row_second_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n first_row_third_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف اول عکس سوم \"\r\n )\r\n first_row_third_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n first_row_fourth_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف اول عکس چهارم\"\r\n )\r\n first_row_fourth_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n second_row_first_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف دوم عکس اول\"\r\n )\r\n second_row_first_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n second_row_second_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف دوم عکس دوم\"\r\n )\r\n second_row_second_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n second_row_third_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف دوم عکس سوم\"\r\n )\r\n 
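# each banner slot pairs an ImageField with the URL it links to; the same pattern repeats for every slot below\r\n    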
second_row_third_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n second_row_fourth_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف دوم عکس چهارم\"\r\n )\r\n second_row_fourth_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n third_row_first_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف سوم عکس اول\"\r\n )\r\n third_row_first_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n third_row_second_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف سوم عکس دوم \"\r\n )\r\n third_row_second_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n third_row_third_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف سوم عکس سوم\"\r\n )\r\n third_row_third_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n third_row_fourth_image = models.ImageField(\r\n upload_to=\"images/suggestion/\", verbose_name=\"ردیف سوم عکس چهارم\"\r\n )\r\n third_row_fourth_url = models.URLField(\r\n verbose_name=\"آدرس\",\r\n )\r\n\r\n class Meta:\r\n verbose_name = \"بنر تبلیغاتی\"\r\n verbose_name_plural = \"بنرهای تبلیغاتی\"\r\n\r\n def __str__(self):\r\n return \"بنر تبلیغاتی\"\r\n\r\n\r\nclass NoticeBanner(SingletonModel):\r\n \"\"\"\r\n model for showing the notification banner in the plastapp platform\r\n \"\"\"\r\n\r\n header_banner = models.ImageField(\r\n upload_to=\"banner/\",\r\n verbose_name=\"بنر هدر\",\r\n )\r\n header_banner_mobile = models.ImageField(\r\n upload_to=\"banner/\",\r\n verbose_name=\"بنر هدر سایز موبایل\",\r\n )\r\n header_banner_url = models.URLField(\r\n verbose_name=\"لینک بنر هدر\",\r\n )\r\n\r\n first_row_first = models.ImageField(\r\n upload_to=\"banner/\",\r\n verbose_name=\"ردیف اول عکس اول\",\r\n )\r\n first_row_first_url = models.URLField(\r\n verbose_name=\"ردیف اول لینک اول\",\r\n )\r\n first_row_second = models.ImageField(\r\n upload_to=\"banner/\",\r\n verbose_name=\"ردیف اول عکس دوم\",\r\n )\r\n first_row_second_url = models.URLField(\r\n verbose_name=\"ردیف اول لینک دوم\",\r\n )\r\n\r\n second_row_first = models.ImageField(\r\n upload_to=\"banner/\",\r\n verbose_name=\"ردیف دوم عکس اول\",\r\n )\r\n second_row_first_url = models.URLField(\r\n verbose_name=\"ردیف دوم لینک اول\",\r\n )\r\n second_row_second = models.ImageField(\r\n upload_to=\"banner/\",\r\n verbose_name=\"ردیف دوم عکس دوم\",\r\n )\r\n second_row_second_url = models.URLField(\r\n verbose_name=\"ردیف دوم لینک دوم\",\r\n )\r\n\r\n class Meta:\r\n verbose_name = \"بنر اطلاع رسانی\"\r\n verbose_name_plural = \"بنرهای اطلاع رسانی\"\r\n\r\n def __str__(self):\r\n return \"بنر اطلاع رسانی\"\r\n\r\n\r\nclass PartialData(SingletonModel):\r\n special_suggestion_text = models.CharField(\r\n max_length=255, verbose_name=\"متن محصولات ویژه\"\r\n )\r\n special_suggestion_image = models.ImageField(\r\n upload_to=\"partial/\", verbose_name=\" عکس محصولات ویژه\"\r\n )\r\n best_seller_text = models.CharField(\r\n max_length=255, verbose_name=\"متن محصولات پرفروش\"\r\n )\r\n best_seller_image = models.ImageField(\r\n upload_to=\"partial/\", verbose_name=\"عکس محصولات پرفروش\"\r\n )\r\n product_text = models.CharField(max_length=255, verbose_name=\"متن محصولات\")\r\n product_image = models.ImageField(\r\n upload_to=\"partial/\", verbose_name=\" عکس محصولات\"\r\n )\r\n mid_banner_text = models.CharField(max_length=255, verbose_name=\"متن شگفتانه\")\r\n mid_banner_image = models.ImageField(\r\n upload_to=\"partial/\", verbose_name=\"عکس شگفتانه\"\r\n 
)\r\n\r\n def __str__(self):\r\n return \"ایتم های صفحه اول\"\r\n\r\n class Meta:\r\n verbose_name = \"ایتم صفحه اول\"\r\n verbose_name_plural = \"ایتم های صفحه اول\"\r\n\r\n\r\nclass CustomerClub(models.Model):\r\n title = models.CharField(\r\n verbose_name=\"عنوان\", max_length=255, blank=True, null=True\r\n )\r\n description = models.TextField(verbose_name=\"توضیحات\")\r\n minimum_card_amount = models.IntegerField(\"حداقل سبد خرید\", default=0)\r\n discount_rate = models.IntegerField(\"تخفیف\", default=0)\r\n expired_after_day = models.IntegerField(\"انقضا پس از ... روز\", default=0)\r\n needed_point = models.IntegerField(\"امتیاز مورد نیاز\")\r\n\r\n def __str__(self):\r\n return self.title\r\n\r\n class Meta:\r\n verbose_name = \"کارت تخفیف\"\r\n verbose_name_plural = \"کارت های تخفیف\"\r\n\r\n\r\nclass PointAfterBuy(models.Model):\r\n from_amount = models.IntegerField(\"از قیمت\", default=0)\r\n to_amount = models.IntegerField(\"تا قیمت\", default=0)\r\n prize_point = models.IntegerField(\"امتیاز جایزه\")\r\n\r\n class Meta:\r\n verbose_name = \"امتیاز پس از خرید\"\r\n verbose_name_plural = \"امتیاز پس از خرید\"\r\n","repo_name":"elyas-hedayat/online-shop","sub_path":"advertise/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24362296324","text":"#!/usr/bin/python3\n\n\"\"\"\n\n __main__.py\n\n COSC364 RIP Assignment\n\n Date: 02/05/2019\n\n Written by:\n - Will Cowper (81163265)\n - Jesse Sheehan (53366509)\n \n\"\"\"\n\nimport sys\nimport os.path\n\nimport server\nimport config\n\n\ndef print_usage():\n \"\"\"\n Prints the usage of the program.\n \"\"\"\n print(\"usage: {0} \".format(sys.argv[0]))\n\n\ndef print_filename_error(filename):\n \"\"\"\n Prints a filename error.\n \"\"\"\n print(\"Error: {0} doesn't exist.\".format(filename))\n\n\ndef print_config_error():\n \"\"\"\n Prints a configuration file error.\n \"\"\"\n print(\"Error: Couldn't read the configuration file.\")\n\n\ndef main():\n \"\"\"\n The main entry point to the program.\n \"\"\"\n\n if len(sys.argv) != 2:\n print_usage()\n return -1\n\n filename = sys.argv[1]\n file = None\n conf = None\n\n # accepts config from stdin\n if filename == '--':\n file = sys.stdin\n \n # or from a file\n else:\n if not os.path.exists(filename):\n print_filename_error(filename)\n return -1\n else:\n file = open(filename, \"r\")\n\n try:\n print(\"Reading configuration file... 
\", end='')\n conf = config.Config()\n conf.parse_file(file)\n print(\"done!\")\n \n except:\n print_config_error()\n return -1\n\n try:\n print(\"Starting RIP router #\" + str(conf.router_id))\n s = server.Server(conf)\n s.start()\n\n # Ignore KeyboardInterrupts\n except KeyboardInterrupt:\n pass\n\n # Re-raise other exceptions\n except Exception as err:\n raise err\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jpsheehan/cosc364-rip2","sub_path":"src/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73579480569","text":"from odoo import api, fields, models, tools,_\nimport time\nimport logging\nimport threading\nfrom odoo.exceptions import UserError, AccessError,ValidationError\n\n_logger = logging.getLogger(__name__)\n\nclass BudgetConsolidate(models.TransientModel):\n\t_name = 'budget.consolidate'\n\t_description = 'Generate Consolidated Budget'\n\n\tdate_from = fields.Date(string='From', required=True, default=lambda *a: time.strftime('%Y-%m-01'))\n\tdepts = fields.Many2many('hr.department',string='Department(s)')\n\n\tbudget_type = fields.Selection([\n\t\t('draft', 'To Submit'),\n\t\t('propose', 'Budget Proposal'),\n\t\t('review1', 'Budget Review'),\n\t\t('consolidate', 'Consolidation'),\n\t\t('review', 'Management Review'),\n\t\t('validate', 'Approved Budget'),\n\t\t('reject', 'Rejected Budget'),\n\t\t('reset', 'Reset To Draft')\n\t\t], string='Request Status', required=True, default='validate')\n\n\t@api.multi\n\tdef print_consolidated_budget(self):\n\t\tself.ensure_one()\n\t\t[data] = self.read()\n\t\tif not data.get('depts'):\n\t\t\traise UserError(_('You have to select at least one department. 
Then try again.'))\n\t\tdepartments = self.env['hr.department'].browse(data['depts'])\n\t\tdatas = {\n\t\t\t'ids': [],\n\t\t\t'model': 'hr.department',\n\t\t\t'form': data\n\t\t}\t\t\n\t\treturn self.env.ref('budget_management.action_consolidated_budget_report').with_context(from_transition_model=True).report_action(departments, datas)","repo_name":"madrara256/e-procurement","sub_path":"budget_management/wizard/budget_consolidate.py","file_name":"budget_consolidate.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30598399647","text":"import json\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom helpers import SqlQueries,ColumnLists,ServiceSelector\r\n\r\nfrom airflow.models import BaseOperator\r\nfrom airflow.utils.decorators import apply_defaults\r\nfrom airflow.hooks.http_hook import HttpHook\r\nfrom airflow.hooks.postgres_hook import PostgresHook\r\n\r\n\r\nclass BidServiceReqsToPostgresOperator(BaseOperator):\r\n ui_color = '#74A57F'\r\n\r\n @apply_defaults\r\n def __init__(\r\n self,\r\n db_conn_id,\r\n http_conn_id,\r\n db_autocommit = True,\r\n *args, **kwargs):\r\n\r\n super(BidServiceReqsToPostgresOperator, self).__init__(*args, **kwargs)\r\n self.db_conn_id = db_conn_id\r\n self.http_conn_id = http_conn_id\r\n if db_autocommit is not None:\r\n self.db_autocommit = db_autocommit\r\n\r\n def execute(self, context):\r\n self.log.info(f\"DAG: {context['run_id']} - BidServiceReqsToPostgresOperator executing\")\r\n\r\n # Create the AWS Redshift connection via the PostgresHook\r\n aws_rds_hook = PostgresHook(self.db_conn_id)\r\n\r\n # SELECT high-level applications that have been flagged\r\n fetched_reqs = aws_rds_hook.get_records(SqlQueries.usac_erate_470forms_uncounted_select)\r\n self.log.info(f\"DAG: {context['run_id']} - Processing {len(fetched_reqs)} records\")\r\n\r\n # Create the HTTP client to request data\r\n http_client = HttpHook(http_conn_id = self.http_conn_id, method = 'GET')\r\n\r\n req_count = 0\r\n for req in fetched_reqs:\r\n # Run HTTP GET request and convert returned data into a Pandas DataFrame\r\n url = \"/resource/39tn-hjzv.json?application_number={}&service_category=Category 2\"\r\n services = json.loads((http_client.run(endpoint = url.format(req[0]))).text)\r\n\r\n df_apps = pd.DataFrame(services) \\\r\n .sort_values(by = [\"application_number\",\"form_version\"], ascending = [True,False]) \\\r\n .reindex(columns = ColumnLists.form470_requests_all_flds) \\\r\n .replace({np.nan: None}) \\\r\n .drop_duplicates(ColumnLists.form470_requests_key_flds, keep = 'last')\r\n req_count += len(df_apps)\r\n\r\n # Identify whether an 'other' value was supplied and substitute it for the placeholder\r\n df_apps['manufacturer'] = df_apps[['manufacturer','other_manufacturer']].apply(ServiceSelector.replace_with_other_value, axis = 1)\r\n df_apps['function'] = df_apps[['function','other_function']].apply(ServiceSelector.replace_with_other_value, axis = 1)\r\n\r\n # INSERT the high-level application records\r\n for ix, row in df_apps[ColumnLists.form470_requests_flds].iterrows():\r\n aws_rds_hook.run(SqlQueries.usac_erate_470forms_reqs_upsert, self.db_autocommit, parameters = list(row))\r\n\r\n self.log.info(f\"DAG: {context['run_id']} - INSERT {req_count} records into erate_470form_requests 
table\")\r\n","repo_name":"leehunte/udacity-dend-capstone","sub_path":"plugins/operators/bid_servicereq_to_database.py","file_name":"bid_servicereq_to_database.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74213023928","text":"'''\r\nUnittests for MinHeightBST.py\r\nJanuary 2021 Jakub Kazimierski\r\n'''\r\n\r\nimport unittest\r\nfrom MinHeightBST import BST, minHeightBst, preOrderTraverse\r\n\r\nclass test_MinHeightBST(unittest.TestCase): \r\n '''\r\n Class with unittests for MinHeightBST.py\r\n '''\r\n\r\n # region Unittests\r\n def test_preorderOutput(self):\r\n '''\r\n Checks if preorder output is correct for builded tree.\r\n '''\r\n\r\n input_arr = [1, 2, 5, 7, 10, 13, 14, 15, 22]\r\n root = minHeightBst(input_arr)\r\n self.assertEqual(preOrderTraverse(root, []), [10, 2, 1, 5, 7, 14, 13, 15, 22])\r\n\r\n # endregion\r\n\r\nif __name__ == \"__main__\":\r\n '''\r\n Main method for test cases.\r\n '''\r\n unittest.main()","repo_name":"JakubKazimierski/PythonPortfolio","sub_path":"AlgoExpert_algorithms/Medium/MinHeightBST/test_MinHeightBST.py","file_name":"test_MinHeightBST.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"33353325491","text":"import pickle\nimport logging.config\nimport pandas as pd\nfrom src import config\nfrom src.utils import *\nfrom src.utils.set_global_variables import scenario, n_steps, lead_time, target\n\nlogging.config.dictConfig(config.LOGGING_CONFIG)\nlogger = logging.getLogger('loggers')\n\nDATA_2010_PATH = r'XQHh_Nhatle_2010.xlsx'\nDATA_2012_PATH = r'XQHh_Nhatle_2012.xlsx'\nDATA_2016_PATH = r'XQHh_Nhatle_2016.xlsx'\nDATA_2020_PATH = r'XQHh_Nhatle_2020.xlsx'\n\nif __name__ == '__main__':\n logger.info('Loading data...')\n MN_data_2010, MN_data_2012, LM_data_2012, MN_data_2016, MN_data_2020, LM_data_2020 = load_data(DATA_2010_PATH,\n DATA_2012_PATH,\n DATA_2016_PATH,\n DATA_2020_PATH)\n\n logger.info('Cleaning data...')\n df_2010, df_2012, df_2016, df_2020, date = clean_data(MN_data_2010, MN_data_2012, LM_data_2012,\n MN_data_2016, MN_data_2020, LM_data_2020,\n scenario)\n\n logger.info('Creating data scenario...')\n if args.scenario == 1:\n dataset = create_scenario(df_2010, df_2012, df_2016, df_2020,\n scenario=scenario,\n feat_col=['H_KienGiang', 'H_DongHoi', 'H_LeThuy'])\n elif args.scenario == 2:\n dataset = create_scenario(df_2012, df_2020,\n scenario=scenario,\n feat_col=['H_KienGiang', 'H_DongHoi', 'H_LeThuy',\n 'LM_KienGiang', 'LM_LeThuy', 'LM_DongHoi'])\n elif args.scenario == 3:\n # Create artificial forecast rainfall values for 2012 and 2020\n df_2012, df_2020 = feature_engineering(df_2012, df_2020)\n dataset = create_scenario(df_2012, df_2020,\n scenario=scenario,\n feat_col=['H_KienGiang', 'H_DongHoi', 'H_LeThuy',\n 'LM_KienGiang', 'LM_LeThuy', 'LM_DongHoi',\n 'LM_LeThuy_lead1', 'LM_KienGiang_lead1', 'LM_DongHoi_lead1'])\n logger.info(f'DATASET SHAPE: {dataset.shape}')\n\n # Visualization\n plot_pacf_tar(dataset, target) # Plot Partial Autocorrelation Function chart\n plot_avg_water_level(dataset, date, target) # Plot Average Water Level of Target\n\n # Generating inputs and output for modelling\n trainX, testX, trainY, testY = train_test_split(dataset, train_ratio=.8, target_col=target)\n\n # Rescale data by min-max\n trainX, testX, trainY, testY = min_max_scale(trainX, trainY, testX, testY)\n\n # Splitting sequence for training set 
by year\n year_idx = 0\n train_year_len = [year_idx]\n for year in pd.DatetimeIndex(date).year.unique():\n year_idx = date.loc[pd.DatetimeIndex(date).year == year].shape[0] + year_idx\n if year_idx <= trainX.shape[0]:\n train_year_len.append(year_idx)\n else:\n train_year_len.append(trainX.shape[0])\n trainX_rescale, trainY_rescale = split_data_by_year(trainX, trainY, n_steps, lead_time, scenario, train_year_len)\n\n # Splitting sequence for testing set\n year_idx = 0\n test_year_len = [year_idx]\n for year in pd.DatetimeIndex(date).year.unique():\n year_idx = date.loc[pd.DatetimeIndex(date).year == year].shape[0] + year_idx\n if year_idx - trainX.shape[0] > 0:\n test_year_len.append(year_idx - trainX.shape[0])\n testX_rescale, testY_rescale = split_data_by_year(testX, testY, n_steps, lead_time, scenario, test_year_len)\n\n trainX_rescale = trainX_rescale.reshape((trainX_rescale.shape[0], trainX_rescale.shape[2], trainX_rescale.shape[1]))\n testX_rescale = testX_rescale.reshape((testX_rescale.shape[0], testX_rescale.shape[2], testX_rescale.shape[1]))\n\n logger.info('Saving training data...')\n pickle.dump(trainX_rescale,\n open(f'../postprocess/x_train_rescale_s{scenario}_{target}_{n_steps}_lag_{lead_time}_lead.pkl',\n 'wb'))\n pickle.dump(trainY_rescale,\n open(f'../postprocess/y_train_rescale_s{scenario}_{target}_{n_steps}_lag_{lead_time}_lead.pkl',\n 'wb'))\n pickle.dump(testX_rescale,\n open(f'../postprocess/x_test_rescale_s{scenario}_{target}_{n_steps}_lag_{lead_time}_lead.pkl',\n 'wb'))\n pickle.dump(testY_rescale,\n open(f'../postprocess/y_test_rescale_s{scenario}_{target}_{n_steps}_lag_{lead_time}_lead.pkl',\n 'wb'))\n pickle.dump(testY,\n open(f'../postprocess/y_test_s{scenario}_{n_steps}_{target}_{n_steps}_lag_{lead_time}_lead.pkl',\n 'wb'))\n","repo_name":"htrieu93/Water-level-prediction","sub_path":"src/modelling/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"4931022140","text":"from errorCodes import ErrorCodes as ec, exitMessage\n\n\nclass Nil:\n def __init__(self):\n pass\n\n def __name__(self):\n return 'nil'\n\n\nclass Frame:\n def __init__(self):\n self.frame = {}\n\n try:\n def get_frame(self):\n try:\n return self.frame\n except:\n exitMessage(ec.INVALID_FRAME, \"Frame doesn't exist\")\n\n def def_var(self, var):\n if var not in self.frame.keys():\n self.frame[var] = (None, type(None))\n else:\n exitMessage(ec.SEMANTIC_ERROR, \"Variable already exists\")\n\n def set_var(self, var, value):\n if var in self.frame.keys():\n if type(value) == Nil() or value == 'nil':\n self.frame[var] = (value, Nil())\n elif type(value) == type(bool()) or value == 'true' or value == 'false':\n self.frame[var] = (value, type(bool()))\n elif type(value) == type(float()):\n exitMessage(ec.INVALID_XML, \"Invalid operand type\")\n else:\n self.frame[var] = (value, type(value))\n else:\n exitMessage(ec.INVALID_VARIABLE, \"Variable doesn't exist\")\n\n def set_var_type(self, var, value, typ):\n if var in self.frame.keys():\n if typ == 'int':\n self.frame[var] = (int(value), type(int()))\n elif typ == 'bool':\n self.frame[var] = (value.lower(), type(bool()))\n elif typ == 'string':\n self.frame[var] = (value, type(str()))\n elif typ == 'nil':\n self.frame[var] = (value, Nil())\n else:\n self.frame[var] = (value, typ)\n else:\n exitMessage(ec.INVALID_VARIABLE, \"Variable doesn't exist\")\n\n def get_var(self, var):\n if var in self.frame.keys():\n 
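# hand back the stored (value, type) tuple as-is\n            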
return self.frame[var]\n else:\n exitMessage(ec.INVALID_VARIABLE, \"Variable doesn't exist\")\n\n def get_var_value(self, var):\n if var in self.frame.keys():\n if self.frame[var][0] == None:\n exitMessage(ec.MISSING_VALUE, \"Variable has no value\")\n return self.frame[var][0]\n else:\n exitMessage(ec.INVALID_VARIABLE, \"Variable doesn't exist\")\n\n def get_var_type(self, var):\n if var in self.frame.keys():\n return self.frame[var][1]\n else:\n # print(\"~~~~~~~\")\n # print(self.frame.keys(),var)\n exitMessage(ec.INVALID_VARIABLE, \"Variable doesn't exist\")\n\n def get_var_type_str(self, var):\n if var in self.frame.keys():\n if self.frame[var][1].__name__ == 'str':\n return 'string'\n elif type(self.frame[var][1]) == type(Nil()):\n return 'nil'\n elif type(self.frame[var][1]) == type(bool()):\n return 'bool'\n elif type(self.frame[var][1]) == type(int()):\n return 'int'\n elif self.frame[var][1] == type(None):\n return type(None)\n else:\n return self.frame[var][1].__name__\n else:\n # print(\"~~~~~~~\")\n # print(self.frame.keys(),var)\n exitMessage(ec.INVALID_VARIABLE, \"Variable doesn't exist\")\n\n def write_frame(self):\n print(self.frame)\n\n def write_var(self, var):\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~\",self.get_var_type_str(var),type(Nil()))\n if self.get_var_value(var) == type(None):\n pass\n elif self.get_var_type_str(var) == 'nil':\n pass\n elif self.get_var_type(var) != type(None):\n print(self.get_var_value(var), end='')\n else:\n exitMessage(ec.MISSING_VALUE, \"Variable has no value\")\n\n except AttributeError:\n exitMessage(ec.INTERNAL_ERROR, \"Invalid frame\")\n","repo_name":"TominoFTW/IPP","sub_path":"interpret/frames.py","file_name":"frames.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13133068866","text":"# Create a sequence of instructions that takes a string from the user and prints every third word of that string.\n# Do not use loops\nwhile True:\n s = input(\"\")\n l = s.split()\n print(l[2::3] , type(l))\n\n# Create a list comprehension\n# Input list [1, 2.1, \"f\", \"2\", 3, \"1\", 18, \"df\"]\n# Output list [1, 2.1, -1, '6', 9, '3', 18, -1]\n\nmy_list = [1, 2.1, \"f\", \"2\", 3, \"1\", 18, \"df\"]\nmy_list = [ i if type(i) == float\n else i if (type(i) == int and i % 2 == 0)\n else i ** 2 if (type(i) == int and i % 2 != 0)\n else (str(int(i) * 3)) if (i.isdigit() and type(i) == str)\n else -1 for i in my_list ]\nprint(my_list)","repo_name":"Aleksawa/Aleksawa","sub_path":"list_and_tuples.py","file_name":"list_and_tuples.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"uk","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"35709975239","text":"import sqlite3\nfrom datetime import datetime\nimport json\n\n\n#edit here\n\nclass nPOS:\n def __init__(self,):\n print(\"rPOS has started.\")\n self.c, self.conn = self.connectDb()\n \n \n def createDb(self,):\n conn = sqlite3.connect('db/db.sqlite')\n c = conn.cursor()\n try:\n c.execute('''CREATE TABLE SessionDataLog\n (\"userID\" TEXT NOT NULL , \"tableNumber\" TEXT NOT NULL, \"OrderData\" VARCHAR, \"Notes\" TEXT, \"OpenDatetime\" DATETIME PRIMARY KEY NOT NULL UNIQUE , \"ClosedDatetime\" TEXT , \"BillTotal\" REAL NOT NULL DEFAULT 0.00 , \"PaidTotal\" REAL NOT NULL DEFAULT 0.00)''')\n\n c.execute('''CREATE TABLE SessionData\n (\"userID\" TEXT NOT NULL , \"tableNumber\" TEXT NOT NULL, \"OrderData\" VARCHAR, \"Notes\" TEXT, 
\"OpenDatetime\" DATETIME PRIMARY KEY NOT NULL UNIQUE , \"ClosedDatetime\" TEXT , \"BillTotal\" REAL NOT NULL DEFAULT 0.00 , \"PaidTotal\" REAL NOT NULL DEFAULT 0.00)''')\n\n print('[db] No sqlite DBs found. Creating them now.')\n print('[db] {}, {} tables created. This is where all records for rPOS will be stored.'.format('SessionDataLog','SessionData'))\n conn.commit()\n #unless database already exists\n except sqlite3.OperationalError:\n print(\"[db] [Welcome Back] Existing sqlite database found. I'll use that one.\")\n pass\n return c, conn\n \n def connectDb(self,):\n c, conn = self.createDb()\n print('[db] Connected to database.')\n return c, conn\n \n def closeDb(self,):\n print('[db] Closing database.')\n self.conn.close()\n\n def saveDb(self,):\n self.conn.commit()\n print(\"[db] Database saved.\")\n\n def inputOrder(self, orderInfo):\n \n openDatetime = datetime.now()\n closedDatetime = None\n\n #parse orderInfo\n userId = orderInfo['userId']\n tableNumber = orderInfo['tableNumber']\n orderData = orderInfo['orderData']\n notes = orderInfo['notes']\n\n billTotal = sum([item['price'] for item in orderData['order']])\n paidTotal = 0.0\n \n inputItem = (userId, tableNumber, json.dumps(orderData), notes, openDatetime, closedDatetime, billTotal, paidTotal)\n \n self.c.execute(\"\"\"INSERT INTO SessionData VALUES\n (?,?,?,?,?,?,?,?)\"\"\", inputItem)\n print(\"[rPOS] Table #:{} - Order saved by: {}.\".format(tableNumber, userId))\n self.saveDb()\n\n def closeOrder(self,tableNumber, userId):\n now = datetime.now()\n self.c.execute(\"\"\"UPDATE SessionData\n SET ClosedDatetime='{d}'\n WHERE tableNumber=='{t}'\n \"\"\".format(d=now, t=tableNumber),)\n print(\"[rPOS] Table #:{} - This table has been closed by: {}.\".format(tableNumber, userId))\n self.saveDb()\n\n def initSession(self, userId):\n print(\"[rPOS] [initSession] Initiated by: {}\".format(userId))\n\n def checkoutSession(self, userId):\n print(\"[rPOS] [checkoutSession] Initiated by: {}\".format(userId))\n\n #do something\n print(\"[rPOS] [checkoutSession] [Success] All important stuff confirmed. 
Session complete for: {}\".format(userId))\n\n #close db connection\n self.closeDb()\n\n #exit\n print('Done.')\n \n \nif __name__ == \"__main__\":\n \n #TEST INFO\n userId = 'test server station #1'\n tableNumber = \"table21\"\n\n #TEST ORDER\n ORDER = {\n 'userId' : userId,\n 'tableNumber' : tableNumber,\n 'notes' : 'Test order, ready for the cooks!',\n \n 'orderData' : {'order':[\n {'name':'burger',\n 'price':12.50,\n 'mods':['no pickles', 'extra cheese']},\n {'name':'fries',\n 'price':3.50,\n 'mods':[]},\n {'name':'soda',\n 'price':2,\n 'mods':[]},\n {'name':'water',\n 'price':0,\n 'mods':['add lemon']}\n ]\n }}\n\n \n #start the client\n pos = nPOS()\n\n #init a daily session to do business\n pos.initSession(userId)\n\n #input some orders\n pos.inputOrder(ORDER)\n\n #close some orders\n pos.closeOrder(tableNumber, userId)\n\n #do the daily 'checkout' for each terminal\n pos.checkoutSession(userId)\n\n #done\n \n \n","repo_name":"criewaldt/rPOS","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43783176451","text":"import pygame\nimport sys\nimport random\nimport os\nfrom random import randint, choice\n\n# Paths\ngame_dir = os.path.abspath(os.getcwd())\nbackgrounds_path = os.path.join(game_dir, '../graphics/backgrounds')\npipe_path = os.path.join(game_dir, '../graphics/flappyBird/pipes')\nbird_path = os.path.join(game_dir, '../graphics/flappyBird/flying')\nfont_path = os.path.join(game_dir, '../font')\n\n# Dimension, fps, name, background information\nFPS = 60\nWINWIDTH = 800\nWINHEIGHT = 400\nGROUND = 330\nGAMENAME = 'Flappy Bird'\nBACKGROUND_COLOR = (94, 129, 162)\n\n\nclass Obstacle:\n def __init__(self, pipe):\n pipe_up = pygame.image.load(os.path.join(pipe_path, 'pipe_up.png')).convert_alpha()\n pipe_down = pygame.image.load(os.path.join(pipe_path, 'pipe_down.png')).convert_alpha()\n self.rect = []\n self.image = []\n if pipe == 'up':\n pipe_up = pygame.transform.scale(pipe_up, (80, randint(150, 200)))\n self.image.append(pipe_up)\n self.rect.append(self.image[0].get_rect(midbottom=(randint(900, 1100), GROUND)))\n elif pipe == 'down':\n pipe_down = pygame.transform.scale(pipe_down, (80, randint(150, 200)))\n self.image.append(pipe_down)\n self.rect.append(self.image[0].get_rect(midtop=(randint(900, 1100), 0)))\n else:\n p_height = randint(20, 200)\n pipe_up = pygame.transform.scale(pipe_up, (80, p_height))\n pipe_down = pygame.transform.scale(pipe_down, (80, 200-p_height))\n self.image = [pipe_up, pipe_down]\n x_pos = random.randint(900, 1100)\n self.rect.append(pipe_up.get_rect(midbottom=(x_pos, GROUND)))\n self.rect.append(pipe_down.get_rect(midtop=(x_pos, 0)))\n\n def transform(self):\n pass\n\n def draw(self):\n for (s, r) in zip(self.image, self.rect):\n DISPLAYSURF.blit(s, r)\n\n def update(self):\n for r in self.rect:\n r.x -= 5\n\n def get_rect(self):\n return self.rect\n\n def get_x_pos(self):\n return self.rect[0].x\n\n\nclass Bird:\n def __init__(self):\n bird_1 = pygame.image.load(os.path.join(bird_path, 'frame-1.png')).convert_alpha()\n bird_2 = pygame.image.load(os.path.join(bird_path, 'frame-2.png')).convert_alpha()\n bird_3 = pygame.image.load(os.path.join(bird_path, 'frame-3.png')).convert_alpha()\n bird_4 = pygame.image.load(os.path.join(bird_path, 'frame-4.png')).convert_alpha()\n bird_5 = pygame.image.load(os.path.join(bird_path, 'frame-5.png')).convert_alpha()\n bird_6 = pygame.image.load(os.path.join(bird_path, 
'frame-6.png')).convert_alpha()\n bird_7 = pygame.image.load(os.path.join(bird_path, 'frame-7.png')).convert_alpha()\n bird_8 = pygame.image.load(os.path.join(bird_path, 'frame-8.png')).convert_alpha()\n self.birdLst = [bird_1, bird_2, bird_3, bird_4, bird_5, bird_6, bird_7, bird_8]\n for i, b in enumerate(self.birdLst):\n self.birdLst[i] = pygame.transform.rotozoom(b, 0, 0.05)\n self.birdIdx = 0\n\n self.image = self.birdLst[self.birdIdx]\n self.rect = self.image.get_rect(midbottom=(300, 150))\n self.gravity = 0\n\n def player_input(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE]:\n self.gravity = -3\n else:\n self.gravity = 3\n\n def apply_gravity(self):\n self.rect.y += self.gravity\n if self.rect.y < 0:\n self.rect.y = 0\n\n def bird_animation(self):\n self.birdIdx += 0.2\n if self.birdIdx >= len(self.birdLst):\n self.birdIdx = 0\n self.image = self.birdLst[int(self.birdIdx)]\n\n def update(self):\n self.player_input()\n self.apply_gravity()\n self.bird_animation()\n\n def draw(self):\n DISPLAYSURF.blit(self.image, self.rect)\n\n def get_rect(self):\n return self.rect\n\n def get_y_pos(self):\n return self.rect.y\n\n def set(self):\n self.rect.y = 150\n\n\ndef main():\n global TEXTFONT, DISPLAYSURF, STARTTIME, SCORE\n\n # Pygame initialization and basic set up of the global variables.\n pygame.init()\n DISPLAYSURF = pygame.display.set_mode((WINWIDTH, WINHEIGHT))\n TEXTFONT = pygame.font.Font(os.path.join(font_path, 'Pixeltype.ttf'), 50)\n STARTTIME = 0\n SCORE = 0\n\n pygame.display.set_caption(GAMENAME)\n clock = pygame.time.Clock()\n game_active = False\n\n # Backgrounds\n sky_surface, ground_surface = create_background()\n\n # Intro screen\n game_name = TEXTFONT.render(GAMENAME, False, (111, 196, 169))\n game_name_rect = game_name.get_rect(center=(400, 80))\n\n game_message = TEXTFONT.render('Press Enter to play', False, (111, 196, 169))\n game_message_rect = game_message.get_rect(center=(400, 340))\n\n bird_stand = pygame.image.load(os.path.join(bird_path, 'frame-8.png')).convert_alpha()\n bird_stand = pygame.transform.rotozoom(bird_stand, 0, 0.2)\n bird_stand_rect = bird_stand.get_rect(center=(400, 200))\n\n # Timer\n obstacle_timer = pygame.USEREVENT + 1\n pygame.time.set_timer(obstacle_timer, 1000)\n\n # Bird and Pipes\n bird = Bird()\n obstacle_list = []\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n if game_active:\n if event.type == obstacle_timer:\n obstacle_list.append((Obstacle(choice(['up', 'down', 'both', 'both', 'both']))))\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n # Press esc to quit game,\n # should clear obstacle_list\n game_active = False\n obstacle_list.clear()\n else:\n if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:\n obstacle_list.clear()\n game_active = True\n bird.set()\n STARTTIME = int(pygame.time.get_ticks() / 1000)\n if game_active:\n # Draw backgrounds\n DISPLAYSURF.blit(sky_surface, (0, 0))\n DISPLAYSURF.blit(ground_surface, (0, GROUND))\n\n # Display score\n SCORE = display_score()\n\n # Draw bird\n bird.draw()\n bird.update()\n if bird.get_y_pos() > GROUND:\n game_active = False\n\n # Draw pipe\n for obstacle in obstacle_list:\n obstacle.draw()\n obstacle.update()\n for r in obstacle.get_rect():\n if bird.get_rect().colliderect(r):\n game_active = False\n obstacle_list = [obstacle for obstacle in obstacle_list if obstacle.get_x_pos() >= -100]\n # print(bird.get_y_pos())\n else:\n DISPLAYSURF.fill(BACKGROUND_COLOR)\n 
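The `Bird` constructor above loads and scales eight frames one variable at a time; a loop over the numbered filenames does the same work. A sketch (paths follow the record's naming; pygame needs a display initialized before `convert_alpha()` succeeds):

```python
import os
import pygame

def load_frames(folder, count, scale=0.05):
    # Load frame-1.png .. frame-<count>.png and shrink each one.
    frames = []
    for i in range(1, count + 1):
        img = pygame.image.load(os.path.join(folder, f'frame-{i}.png')).convert_alpha()
        frames.append(pygame.transform.rotozoom(img, 0, scale))
    return frames

# Usage inside Bird.__init__ (bird_path as defined in the record):
# self.birdLst = load_frames(bird_path, 8)
```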
DISPLAYSURF.blit(bird_stand, bird_stand_rect)\n score_message = TEXTFONT.render(f'Your score: {SCORE}', False, (111, 196, 169))\n score_message_rect = score_message.get_rect(center=(400, 330))\n DISPLAYSURF.blit(game_name, game_name_rect)\n\n if SCORE == 0:\n DISPLAYSURF.blit(game_message, game_message_rect)\n else:\n DISPLAYSURF.blit(score_message, score_message_rect)\n pygame.display.update()\n clock.tick(FPS)\n\n\ndef display_score():\n current_time = int(pygame.time.get_ticks() / 1000) - STARTTIME\n score_surf = TEXTFONT.render(f'Score: {current_time}', False, (64, 64, 64))\n score_rect = score_surf.get_rect(center=(400, 50))\n DISPLAYSURF.blit(score_surf, score_rect)\n return current_time\n\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\n\ndef create_background():\n sky_surface = pygame.image.load(os.path.join(backgrounds_path, 'Sky.png')).convert_alpha()\n sky_surface = pygame.transform.scale(sky_surface, (WINWIDTH, GROUND))\n ground_surface = pygame.image.load(os.path.join(backgrounds_path, 'ground.png')).convert_alpha()\n return sky_surface, ground_surface\n\n\nif __name__ == '__main__':\n main()","repo_name":"quanmai/pygame","sub_path":"games/flappy.py","file_name":"flappy.py","file_ext":"py","file_size_in_byte":8116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14185173655","text":"import time\nimport atexit\nimport struct\nfrom wasabi import msg\nfrom serial import Serial\nfrom WiPHY.utils import Frame, dec2bin, bit2dec, crc_remainder, __CONSTANTS__\n\nclass ASK_Tx():\n \"\"\"\n An Arduino and Python based transmitter for \n 315-433 MHZ ASK modules.\n The module tranmits the frame using arduino\n via serial commands from python.\n\n Inputs\n ------\n * comm_port (str): Arduino communication port.\n * baud_rate (int): Arduino serial communication baud rate.\n * timeout (float): Arduino serial communication timeout. Default: 1s.\n * num_retransmit_retries (int): Number of re-transmission for a frame. Default: 5.\n * barker_seq (int): Barker seq to use in frame.\n * crc_polynomial (int): CRC Polynomial to use for frame checksum.\n \n Attributes\n ----------\n * comm_port (int): Returns Arduino-Python serial communication port.\n * baud_rate (int): Returns Arduino-Python serial communication baud rate.\n * timeout (float): Returns Arduino-Python serial communication timeout.\n * num_retransmit_tries (int): Returns number of frame retry count.\n * arduino_comm_handler (Serial Object): Returns Arduino-Python serial communication object.\n * barker_seq (int): Returns barker seq in use by frame detection. Default: 29.\n * crc_polynomial (int): Returns CRC polynomial in to generate checksum. Default: 13.\n \"\"\"\n\n def __init__(self, \n comm_port,\n baud_rate,\n timeout=1,\n num_retransmit_retries=5,\n barker_seq=29,\n crc_polynomial=13):\n \n try:\n self.__arduino_comm_handler = Serial(comm_port,\n baud_rate,\n timeout=timeout)\n except Exception as e:\n raise RuntimeError(\"Failed to open serial connection with device at %s. 
Root cause: %s\"%(comm_port, \n e))\n self.__comm_port = str(comm_port)\n self.__baud_rate = int(baud_rate)\n self.__timeout = float(timeout)\n self.__num_retransmit_retries = int(num_retransmit_retries)\n self.__barker_seq = int(barker_seq)\n self.__crc_polynomial = int(crc_polynomial)\n\n # meta data\n self.__crc_polynomial_bit_string = dec2bin(self.crc_polynomial, __CONSTANTS__['FRAME_CHECKSUM_BITS'] + 1)\n\n # Register the clean up function.\n # Will be called if the object is destroyed for any reason.\n atexit.register(self.cleanup)\n\n # Write garbage to wake up the serial connection\n # Hack which is needed to make this work.\n num_bytes_written = self.__arduino_comm_handler.write('ACC'.encode('ascii'))\n assert num_bytes_written == 3\n read_data = self.__arduino_comm_handler.read(3)\n \n msg.good(\"Opened the Serial connection between Python and Arduino using comm port %s, Baud rate: %d.\"%(comm_port, baud_rate)) \n\n @property\n def comm_port(self):\n \"\"\"Returns Arduino-Python serial communication port.\n \"\"\"\n return self.__comm_port\n \n @property\n def baud_rate(self):\n \"\"\"Returns Arduino-Python serial communication baud rate.\n \"\"\"\n return self.__baud_rate\n \n @property\n def timeout(self):\n \"\"\"Returns Arduino-Python serial communication timeout.\n \"\"\"\n return self.__timeout\n \n @property\n def num_retransmit_tries(self):\n \"\"\"Returns number of frame retry count.\n \"\"\"\n return self.__num_retransmit_retries\n \n @property\n def arduino_comm_handler(self):\n \"\"\"Returns Arduino-Python serial communication object.\n \"\"\"\n return self.__arduino_comm_handler\n \n @property\n def crc_polynomial(self):\n \"\"\"Returns CRC polynomial in use to\n calculate checksum.\n \"\"\"\n return self.__crc_polynomial\n \n @property\n def barker_seq(self):\n \"\"\"Returns the barker sequence\n in use by Frame Detector.\n \"\"\"\n return self.__barker_seq\n \n def send(self, payload, seq_id):\n \"\"\"Sends the requested data and returns\n whether it was successfully transmitted or not.\n\n Inputs\n ------\n * payload (int): Must be 6-bit integer.\n * seq_id (int): 2 bit frame sequence.\n\n Returns\n -------\n * (bool, int): Returns whether the frame is transmitted\n successfully and the number of retries.\n \"\"\"\n\n if payload != int(payload):\n raise TypeError(\"Expected payload to be of type int. Got: %s\"%(type(payload)))\n \n if seq_id != int(seq_id):\n raise TypeError(\"Expected seq id to be of type int. Got: %s\"%(type(seq_id)))\n \n payload = int(payload)\n seq_id = int(seq_id)\n\n if payload < 0 or payload > (2**6)-1:\n raise ValueError(\"Payload must be between [0-63]. 
Got: %d\"%(payload))\n \n frame_bits = dec2bin(seq_id, __CONSTANTS__['FRAME_SEQ_ID_BITS']) + dec2bin(payload, __CONSTANTS__['FRAME_PAYLOAD_BITS'])\n \n checksum = crc_remainder(frame_bits, self.__crc_polynomial_bit_string, '0')\n checksum = bit2dec(checksum)\n\n frame = Frame(preamble=self.barker_seq,\n seq_id=seq_id,\n payload=payload,\n checksum=checksum,\n crc_polynomial=self.crc_polynomial)\n assert frame.is_checksum_valid == True\n return self.__transmit(frame)\n \n def cleanup(self):\n self.arduino_comm_handler.close()\n msg.info(\"Closed the serial connection between Arduino and Python.\")\n \n def __transmit(self, frame):\n \"\"\"Transmits the content of the utils.Frame \n using ASK modulation scheme.\n\n Inputs\n ------\n * frame (utils.Frame Object): Frame to transmit.\n \n Returns\n -------\n * (bool, int): Returns whether the frame is transmitted\n successfully and the number of count it took\n to transmit the frame successfully.\n \"\"\"\n success = False\n data = frame.get_frame_bytes()\n \n self.__arduino_comm_handler.flushInput()\n self.__arduino_comm_handler.flushOutput()\n\n for count in range(0, self.num_retransmit_tries):\n if success != True: \n bytes_read = self.__arduino_comm_handler.write(struct.pack('>BBB', 82, data[0], data[1]))\n assert bytes_read == 3\n time.sleep(0.0005)\n ack = self.__arduino_comm_handler.read(size=3)\n #print(ack)\n recv_frame = ack[1:].hex('-', 1).split('-')\n recv_frame = [ chr(int(b, 16)) for b in recv_frame]\n if chr(ack[0]) == 'A':\n # Check whether data recieved by the Arduino \n # matches the transmitted data.\n frame_str = recv_frame[0] + recv_frame[1]\n if frame_str != frame.get_frame_byte_string():\n success = False, count\n else:\n success = True\n return success, count\n else:\n success = False, count\n \n return success, count","repo_name":"mynkpl1998/WiPHY","sub_path":"WiPHY/tx.py","file_name":"tx.py","file_ext":"py","file_size_in_byte":7991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"26457494344","text":"from workstates.Template.crHealth_Template_State import crHealth_Template_State\nfrom buildingblocks.Decoraters import callCount\nimport functools\nimport re\nimport time\nimport os\n\nclass crHealth_PERF_001_State(crHealth_Template_State):\n def __init__(self, *args, **kwargs):\n super(crHealth_Template_State, self).__init__(*args, **kwargs)\n self._latencyDictionary = {}\n self._testCption = None\n\n @property\n def TestCapton(self):\n return self._testCption\n\n @TestCapton.setter\n def TestCaption(self, val):\n self._testCption = val\n\n @callCount\n def _invoke(self, command):\n self._logger.info('------ Performance test invoke counter = {} -------'.format(self._invoke.count))\n columns = ['DCPMMIdleSequentialLatency',\n 'DCPMMIdleRandomLatency',\n 'AverageOfMaxDCPMMBandwidth']\n datarow = ''\n results = ''\n header = ''\n isBandwidth = False\n bandwith = 0\n count = 0\n startSequencialTable = False\n startRandomTable = False\n try:\n outputFolder = self._config['ResultsOutputFolder']\n if not os.path.isdir(outputFolder):\n os.mkdir(outputFolder)\n '''\n # for Unit test\n ret = open(r'C:\\PythonSV\\crAutoFarm\\archive\\PerfOutput.log').read()\n '''\n start_time = time.time()\n cmd = command.replace('./', '{}/{}/'.format(self._config['sftp']['targetPath'], self._config['sftp']['OS']))\n #ret = self._client.ExecuteCommand(cmd)\n ret = '---Hello--'\n print (\"*** Perf test execution time: %s seconds ***\" %(time.time()-start_time))\n\n logfilename = 
os.path.realpath(r'{}/{}.log'.format(outputFolder, self._testCption))\n with open(logfilename, 'w') as logfile:\n logfile.write(self._testCption)\n logfile.write(ret)\n\n return None\n\n\n self._latencyDictionary['sequential'] = []\n self._latencyDictionary['random'] = []\n if ret is not None and len(ret) > 0:\n for c in columns:\n header = header + c + ' '\n results = header + '\\n'\n for x in ret.split('\\n'):\n if re.match('\\d+\\s+core\\s+DCPMM\\s+sequential', x) is not None:\n startRandomTable = False\n startSequencialTable = True\n if startSequencialTable:\n self._latencyDictionary['sequential'].append(x)\n if re.match('\\d+\\s+core\\s+DCPMM\\s+random', x) is not None:\n startSequencialTable = False\n startRandomTable = True\n if startRandomTable:\n self._latencyDictionary['random'].append(x)\n if re.match('\\s+?=====', x) is not None:\n startSequencialTable = False\n startRandomTable = False\n if re.match('DCPMM idle sequential', x) is not None:\n split = re.split('\\(\\t', x)\n datarow += split[1].replace('\\t', ' ').rstrip(')') + ' '\n if re.match('DCPMM idle random', x) is not None:\n split = re.split('\\(\\t', x)\n datarow += split[1].replace('\\t', ' ').rstrip(')') + ' '\n if re.match('max DCPMM bandwidth', x) is not None:\n isBandwidth = True\n if isBandwidth:\n split = re.split('\\s+', x)\n bandwith += eval(split[len(split) - 1])\n count += 1\n if count == 10:\n results += '{0} {1}\\n'.format(datarow, bandwith/10)\n isBandwidth = False\n header = functools.reduce(lambda a, b : '{} {}'.format(a, b), columns) + ' \\n'\n if results == header and count == 0:\n self._parentWorkThread.NotificationMessage(ret)\n return results\n except Exception as e:\n raise e\n return None\n\n def _comprehensionData(self, data):\n return [x.lstrip().strip() for x in re.split(self._delimiterPattern(), data) \\\n if x.strip() != '' and not x in ['ns']]\n\n def _insertAdditionalTable(self, reportData):\n for k,v in self._latencyDictionary.iteritems():\n toggle = True\n self._insertSubRow([v[0], '', ''], reportData, self._resource['COLOR']['GRAY'])\n self._insertSubRow(['Delay','ns', 'MBPS'], reportData, self._resource['COLOR']['SILVER'])\n\n for i in range(2, len(v) - 4):\n rowData = []\n l = v[i]\n for c in l.split('\\t'):\n rowData.append(c)\n if toggle:\n bkcolor = self._resource['COLOR']['WHITE']\n else:\n bkcolor = self._resource['COLOR']['LIGHYELLOW']\n toggle = not toggle\n self._insertSubRow(rowData, reportData, bkcolor)\n","repo_name":"lyh3/automation","sub_path":"CrAutoFarm/workstates/crHealth/crHealth_PERF_001_State.py","file_name":"crHealth_PERF_001_State.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26832682034","text":"import sys\nimport frida\n\nrdev = frida.get_remote_device()\nsession = rdev.attach(\"抖音短视频\")\n\nscr = \"\"\"\nJava.perform(function(){\n var cls = Java.use(\"com.ss.android.deviceregister.d.a$1\");\n \n cls.a.implementation = function(arg4){\n var res = this.a(arg4);\n console.log(\"res=>\",res);\n console.log(Java.use(\"android.util.Log\").getStackTraceString(Java.use(\"java.lang.Throwable\").$new()));\n return res;\n }\n});\n\"\"\"\n\nscript = session.create_script(scr)\n\n\ndef on_message(message, data):\n print(message, data)\n\n\nscript.on(\"message\", 
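The `_invoke` parser in the crHealth record accumulates ten `max DCPMM bandwidth` readings before emitting their mean; the grouping step can be isolated into a small helper. A sketch over stand-in numbers, since only fragments of the real log format are visible:

```python
from statistics import mean

def average_in_groups(values, group_size=10):
    # Average consecutive, complete groups of `group_size` readings.
    return [mean(values[i:i + group_size])
            for i in range(0, len(values) - group_size + 1, group_size)]

readings = [100.0 + i for i in range(20)]  # stand-in for parsed bandwidth numbers
print(average_in_groups(readings))         # [104.5, 114.5]
```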
on_message)\nscript.load()\nsys.stdin.read()\n\n","repo_name":"wanghuiyt/LuffyCode","sub_path":"四期直播/19.爬虫&APP逆向/19.抖音评论/1.hook_cdid.py","file_name":"1.hook_cdid.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20869247934","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom modelzoo.common.pytorch.run_utils import half_dtype_instance\n\n\nclass Dice:\n def __init__(\n self,\n num_classes: int,\n to_onehot_y: bool = True,\n to_onehot_x: bool = False,\n use_softmax: bool = True,\n use_argmax: bool = False,\n include_background: bool = False,\n input_shape=None,\n use_native_onehot: bool = True,\n ):\n self.num_classes = num_classes\n self.include_background = include_background\n self.to_onehot_y = to_onehot_y\n self.to_onehot_x = to_onehot_x\n self.use_softmax = use_softmax\n self.use_argmax = use_argmax\n self.use_native_onehot = use_native_onehot\n self.smooth_nr = 0.0\n self.smooth_dr = 1e-6\n self.include_background = include_background\n self.input_shape = None\n self.bg_mask = None\n if not self.include_background:\n if input_shape:\n self.input_shape = input_shape\n else:\n raise ValueError(\n \"must supply input shape when include_background=False\"\n )\n\n def _create_background_mask(self, device, dtype, ish, chanx):\n from modelzoo.common.pytorch import cb_model as cm\n\n z_shape = ish[0:chanx] + [1] + ish[chanx + 1 :] # [N,1,D,H,W]\n o_shape = (\n ish[0:chanx] + [ish[chanx] - 1] + ish[chanx + 1 :]\n ) # [N,C-1,D,H,W]\n zeros = torch.zeros(z_shape, device=device, dtype=dtype)\n ones = torch.ones(o_shape, device=device, dtype=dtype)\n weights = torch.cat(\n (zeros, ones), chanx\n ) # [N,C,D,H,W] w/ first ch 0'ed\n if cm.use_cs():\n bg_mask = cm.make_constant(weights)\n else:\n bg_mask = weights.to(device)\n return bg_mask\n\n def __call__(self, prediction, target):\n target = torch.unsqueeze(target, 1)\n channel_axis = 1\n reduce_axis = list(range(2, len(prediction.shape)))\n num_pred_ch = prediction.shape[channel_axis]\n\n if self.use_softmax:\n prediction = torch.softmax(prediction, dim=channel_axis)\n elif self.use_argmax:\n prediction = torch.argmax(prediction, dim=channel_axis)\n\n if self.to_onehot_y:\n target = to_one_hot(\n target, channel_axis, self.num_classes, self.use_native_onehot\n )\n if self.to_onehot_x:\n prediction = to_one_hot(\n prediction,\n channel_axis,\n self.num_classes,\n self.use_native_onehot,\n )\n\n if not self.include_background:\n if self.bg_mask is None:\n self.bg_mask = self._create_background_mask(\n target.device,\n prediction.dtype,\n self.input_shape,\n channel_axis,\n )\n assert (\n num_pred_ch > 1\n ), f\"To exclude background the prediction needs more than one channel. Got {num_pred_ch}.\"\n target = target * self.bg_mask\n prediction = prediction * self.bg_mask\n\n assert (\n target.shape == prediction.shape\n ), f\"Target and prediction shape do not match. 
Target: ({target.shape}), prediction: ({prediction.shape}).\"\n\n intersection = torch.sum(target * prediction, dim=reduce_axis)\n target_sum = torch.sum(target, dim=reduce_axis)\n prediction_sum = torch.sum(prediction, dim=reduce_axis)\n\n res = (2.0 * intersection + self.smooth_nr) / (\n target_sum + prediction_sum + self.smooth_dr\n )\n return res\n\n\ndef to_one_hot(array, channel_axis, num_classes, use_native_onehot):\n if len(array.shape) >= 5:\n array = torch.squeeze(array, dim=channel_axis)\n if use_native_onehot:\n array = F.one_hot(array.long(), num_classes).float()\n else:\n init = torch.zeros(\n array.shape + (num_classes,),\n device=array.device,\n dtype=half_dtype_instance.half_dtype,\n )\n array = init.scatter_(-1, array.long().unsqueeze(-1), 1.0).float()\n array = array.permute(0, 4, 1, 2, 3)\n return array\n\n\nclass DiceCELoss(nn.Module):\n def __init__(\n self, num_classes, input_shape, include_background, wc=0.5, wd=0.5,\n ):\n super(DiceCELoss, self).__init__()\n self.dice = Dice(\n num_classes=num_classes,\n include_background=include_background,\n input_shape=input_shape,\n )\n self.cross_entropy = nn.CrossEntropyLoss()\n self.wc = wc\n self.wd = wd\n if not include_background:\n self.mean_correction = torch.tensor(\n num_classes / (num_classes - 1), dtype=torch.float32,\n )\n else:\n self.mean_correction = torch.tensor(1.0, dtype=torch.float32,)\n self.one_const = torch.tensor(1.0, dtype=torch.float32,)\n\n def forward(self, outputs, labels):\n ce = self.cross_entropy(outputs, labels)\n dc = self.mean_correction * torch.mean(self.dice(outputs, labels))\n loss = self.wc * ce + self.wd * (self.one_const - dc)\n return loss\n\n\nclass DiceScore:\n def __init__(\n self,\n to_onehot_y: bool = True,\n to_onehot_x: bool = True,\n use_argmax: bool = False, # argmax already done in model\n use_softmax: bool = False,\n include_background: bool = False,\n ):\n self.dice = Dice(\n to_onehot_y=to_onehot_y,\n to_onehot_x=to_onehot_x,\n use_softmax=use_softmax,\n use_argmax=use_argmax,\n include_background=include_background,\n )\n\n def __call__(self, labels=None, predictions=None, weights=None):\n return torch.mean(self.dice(predictions, labels), dim=0)\n","repo_name":"Cerebras/modelzoo","sub_path":"modelzoo/vision/pytorch/losses/dice_loss.py","file_name":"dice_loss.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","stars":747,"dataset":"github-code","pt":"77"} +{"seq_id":"41403833750","text":"import pandas as pd\nimport pycountry_convert as pc\npd.options.mode.chained_assignment = None\npd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)\n\ndef average_age(data_frame):\n\n print(\"\\nAverage age of developers when they wrote their first line of code is :\\n\")\n print('%.2f'%(data_frame[\"Age1stCode\"].apply(pd.to_numeric, errors = 'coerce').dropna().mean())) #1st\n\ndef python_developers(data_frame):\n\n print(\"\\nPercentage of developers who know python in each country is :\")\n print(round(data_frame[data_frame['LanguageWorkedWith'].str.contains('Python',na = False)].groupby('Country').LanguageWorkedWith.count().div(data_frame.groupby('Country').count()['Respondent']).dropna()*100,2)) #2\n\n\n\ndef average_salary(data_frame2):\n\n print(\"\\nAverage salary of developer based on continent in USD is :\")\n print(round(data_frame2.groupby('Continent')['ConvertedComp'].mean(),2)) #3\n \n\ndef desired_language(data_frame):\n\n print(\"\\nMost desired programming language for the year 2020 is :\\n\")\n 
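To see what the Dice formula in the record above computes, here is the same quantity on a toy pair of binary masks, stripped of the class plumbing:

```python
import torch

pred = torch.tensor([[1., 1., 0., 0.]])
target = torch.tensor([[1., 0., 0., 0.]])

# 2 * |A ∩ B| / (|A| + |B|), with the record's smoothing terms.
intersection = (pred * target).sum(dim=1)
dice = (2.0 * intersection + 0.0) / (pred.sum(dim=1) + target.sum(dim=1) + 1e-6)
print(dice)  # ~0.667: 2*1 / (2 + 1)
```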
print(data_frame['LanguageDesireNextYear'].str.split(';', expand = True).stack().value_counts().idxmax()) #--4\n \ndef hobby_coders(data_frame,data_frame2):\n\n print(\"\\nDistribution of people who code as a hobby based on gender and continent in % is :\")\n \n\n data_frame2 = data_frame2[data_frame2.Hobbyist != 'No']\n \n data_frame2.loc[(data_frame2['Gender'] != 'Man')& (data_frame2['Gender'] != 'Woman'),'Gender']='Others'\n\n\n print(round(data_frame2.groupby(['Continent','Gender'])['Hobbyist'].count().div(data_frame2.groupby(['Continent'])['Gender'].count())*100,2)) #5\n\ndef satisfaction(data_frame,data_frame2):\n \n \n data_frame2.loc[(data_frame2['Gender'] != 'Man')& (data_frame2['Gender'] != 'Woman'),'Gender']='Others'\n \n print(\"\\nCareer Satisfaction based on Continent and Gender in % :\\n\")\n print(round(data_frame2.groupby(['Continent','Gender','CareerSat'])['CareerSat'].count().div(data_frame2.groupby(['Continent'])['CareerSat'].count())*100,2))\n \n print(\"\\nJob Satisfaction based on Continent and Gender in % :\\n\")\n print(round(data_frame2.groupby(['Continent','Gender','JobSat'])['JobSat'].count().div(data_frame2.groupby(['Continent'])['JobSat'].count())*100,2))\n\n#if __name__ == '__main__':\ntry: \n data_frame = pd.read_csv('survey_results_public.csv')\nexcept:\n print(\"Error reading data\")\ndata_frame2=data_frame[['Country','ConvertedComp']].copy()\ndata_frame2['Hobbyist'] = data_frame[['Hobbyist']].copy()\ndata_frame2['Gender'] = data_frame[['Gender']].copy()\ndata_frame2['CareerSat'] = data_frame[['CareerSat']].copy()\ndata_frame2['JobSat'] = data_frame[['JobSat']].copy()\ndata_frame2['Continent']=None\ncount=0\n\nfor row in data_frame2['Country']:\n try:\n country_code = pc.country_name_to_country_alpha2(row, cn_name_format=\"default\")\n continent_name = pc.country_alpha2_to_continent_code(country_code)\n country_continent_name = pc.convert_continent_code_to_continent_name(continent_name)\n data_frame2['Continent'][count]=country_continent_name\n count+=1\n except:\n pass\n\n#Below code can be used for manually checking results of each function\n'''while True:\n print(\"\\n\\n\\nSelect options from below to view report:\\n\"\\\n \"\\n1. Average age of developers when they wrote their first line of code\\n\"\\\n \"2. Percentage of developers who know python in each country\\n\"\\\n \"3. Average salary of developer based on continent\\n\"\\\n \"4. Most desired programming language for the year 2020\\n\"\\\n \"5. Distribution of people who code as a hobby based on gender and continent\\n\"\\\n \"6. Report for job and career satisfaction of developer based on their gender and continent\\n\"\\\n \"7. 
Exit\\n\")\n option = int(input('Option:'))\n if option == 1:\n average_age(data_frame)\n \n elif option == 2:\n python_developers(data_frame)\n elif option == 3:\n average_salary(data_frame2)\n elif option == 4:\n desired_language(data_frame)\n elif option == 5:\n hobby_coders(data_frame,data_frame2)\n elif option == 6:\n satisfaction(data_frame,data_frame2)\n elif option == 7:\n exit()\n else: \n print(\"Invalid option selected\")'''\n \n\n","repo_name":"Preethuraj04/Stackoverflow_survey_2019_analysis","sub_path":"task1_main.py","file_name":"task1_main.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2642111900","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for scraper project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. You can find more settings consulting the documentation:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n\nimport os, sys\n\nPROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"mysite.settings\") #Changed in DDS v.0.3\n\nBOT_NAME = 'blog'\n\n#Setting LOG_STDOUT to True will prevent Celery scheduling to work, 2017-06-06\n#If you know the cause or a fix please report on GitHub\nLOG_STDOUT = False\nLOG_LEVEL = 'INFO'\n\nSPIDER_MODULES = ['dynamic_scraper.spiders', 'blog.scraper',]\nUSER_AGENT = '%s/%s' % (BOT_NAME, '1.0')\n\n# SPLASH_URL = 'http://192.168.59.103:8050'\nSPLASH_URL = 'http://localhost:8050'\nDUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'\nDSCRAPER_SPLASH_ARGS = { 'wait' : 1}\n\n#Scrapy 0.20+\nITEM_PIPELINES = {\n 'dynamic_scraper.pipelines.DjangoImagesPipeline': 200,\n 'dynamic_scraper.pipelines.ValidationPipeline': 400,\n 'blog.scraper.pipelines.DjangoWriterPipeline': 800,\n}\n\nIMAGES_STORE = os.path.join(PROJECT_ROOT, '../thumbnails')\n\nIMAGES_THUMBS = {\n 'small': (170, 170),\n}\n\nDSCRAPER_IMAGES_STORE_FORMAT = 'ALL'\n\nDSCRAPER_LOG_ENABLED = True\nDSCRAPER_LOG_LEVEL = 'ERROR'\nDSCRAPER_LOG_LIMIT = 5\n\n#Scrapy up to 0.18\n# ITEM_PIPELINES = [\n# 'dynamic_scraper.pipelines.ValidationPipeline',\n# 'blog.scraper.pipelines.DjangoWriterPipeline',\n# ]\n\n# SPIDER_MODULES = ['scraper.spiders']\nNEWSPIDER_MODULE = 'scraper.spiders'\n\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'scraper (+http://www.yourdomain.com)'\n\n# Obey robots.txt rules\nROBOTSTXT_OBEY = True\n\n# Configure maximum concurrent requests performed by Scrapy (default: 16)\n#CONCURRENT_REQUESTS = 32\n\n# Configure a delay for requests for the same website (default: 0)\n# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay\n# See also autothrottle settings and docs\n#DOWNLOAD_DELAY = 3\n# The download delay setting will honor only one of:\n#CONCURRENT_REQUESTS_PER_DOMAIN = 16\n#CONCURRENT_REQUESTS_PER_IP = 16\n\n# Disable cookies (enabled by default)\n#COOKIES_ENABLED = False\n\n# Disable Telnet Console (enabled by default)\n#TELNETCONSOLE_ENABLED = False\n\n# Override the default request headers:\n#DEFAULT_REQUEST_HEADERS = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n# 'Accept-Language': 'en',\n#}\n\n# Enable or disable spider middlewares\n# See 
http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n#SPIDER_MIDDLEWARES = {\n# 'scraper.middlewares.MyCustomSpiderMiddleware': 543,\n#}\nSPIDER_MIDDLEWARES = {\n 'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,\n}\n\n# Enable or disable downloader middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n#DOWNLOADER_MIDDLEWARES = {\n# 'scraper.middlewares.MyCustomDownloaderMiddleware': 543,\n#}\nDOWNLOADER_MIDDLEWARES = {\n 'scrapy_splash.SplashCookiesMiddleware': 723,\n 'scrapy_splash.SplashMiddleware': 725,\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,\n}\n\n# Enable or disable extensions\n# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html\n#EXTENSIONS = {\n# 'scrapy.extensions.telnet.TelnetConsole': None,\n#}\n\n# Configure item pipelines\n# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html\n#ITEM_PIPELINES = {\n# 'scraper.pipelines.SomePipeline': 300,\n#}\n\n# Enable and configure the AutoThrottle extension (disabled by default)\n# See http://doc.scrapy.org/en/latest/topics/autothrottle.html\n#AUTOTHROTTLE_ENABLED = True\n# The initial download delay\n#AUTOTHROTTLE_START_DELAY = 5\n# The maximum download delay to be set in case of high latencies\n#AUTOTHROTTLE_MAX_DELAY = 60\n# The average number of requests Scrapy should be sending in parallel to\n# each remote server\n#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n# Enable showing throttling stats for every response received:\n#AUTOTHROTTLE_DEBUG = False\n\n# Enable and configure HTTP caching (disabled by default)\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings\n#HTTPCACHE_ENABLED = True\n#HTTPCACHE_EXPIRATION_SECS = 0\n#HTTPCACHE_DIR = 'httpcache'\n#HTTPCACHE_IGNORE_HTTP_CODES = []\n#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\nHTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'","repo_name":"HyeonjuPark/django-tutorial","sub_path":"blog/scraper/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8647953968","text":"import numpy as np\nfrom sklearn.cross_validation import train_test_split\n# ou from sklearn.model_selection import train_test_split\nimport random # pour pouvoir utiliser un g ́en ́erateur de nombres al ́eatoires\nfrom sklearn import neighbors\n\ndef damier(dimension, grid_size, nb_examples, noise = 0):\n data = np.random.rand(nb_examples,dimension)\n labels = np.ones(nb_examples)\n for i in range(nb_examples):\n x = data[i,:];\n for j in range(dimension):\n if int(np.floor(x[j]*grid_size)) % 2 != 0:\n labels[i]=labels[i]*(-1)\n if np.random.rand() \", clf.score(data_test,labels_test))\n \t\n \n","repo_name":"SturgisRaphael/TPIAA","sub_path":"TP1/Damier.py","file_name":"Damier.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71703294330","text":"\nimport sys\nimport os\n\noutput_dict = {}\nref_dict = {}\nfor i,line in enumerate(open(sys.argv[1],'rU')):\n if i%5 == 0:\n id = line.strip()\n elif i%5 == 1:\n ref = line.strip().lower().split()\n ref_dict[id] = ref\n elif i%5 == 2:\n rst = line.strip().replace('','',10).split()\n output_dict[id] = rst\n\ndataset_type = None\nif sys.argv[1].startswith('test'):\n dataset_type = 'test'\nelif 
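The `damier` record above looks truncated: the noise branch breaks off at `if np.random.rand()` and jumps straight into a `clf.score(...)` call whose harness is missing. A hedged reconstruction of what such a harness plausibly contained, given the imports the record declares; the label flip, split ratio, and k are guesses (the modern `sklearn.model_selection` import is used in place of the deprecated `cross_validation`):

```python
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import neighbors

def damier(dimension, grid_size, nb_examples, noise=0):
    data = np.random.rand(nb_examples, dimension)
    labels = np.ones(nb_examples)
    for i in range(nb_examples):
        for j in range(dimension):
            if int(np.floor(data[i, j] * grid_size)) % 2 != 0:
                labels[i] *= -1
        if np.random.rand() < noise:  # presumed noise branch: flip the label
            labels[i] *= -1
    return data, labels

data, labels = damier(2, 4, 500, noise=0.05)
data_train, data_test, labels_train, labels_test = train_test_split(
    data, labels, test_size=0.3)
clf = neighbors.KNeighborsClassifier(n_neighbors=5)  # k is a guess
clf.fit(data_train, labels_train)
print("score =>", clf.score(data_test, labels_test))
```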
sys.argv[1].startswith('dev'):\n dataset_type = 'dev'\n\nfout = open(sys.argv[1]+'.1best','w')\nfref = open(sys.argv[1]+'.ref','w')\nfor id in output_dict.keys():\n print >>fout, ' '.join(output_dict[id]).lower()\n print >>fref, ' '.join(ref_dict[id]).lower()\nfout.close()\nfref.close()\n\nos.system('/home/lsong10/ws/exp.graph_to_seq/mosesdecoder/scripts/generic/multi-bleu.perl %s.ref < %s.1best' %(sys.argv[1],sys.argv[1]))\n\n","repo_name":"freesunshine0316/neural-graph-to-seq-mp","sub_path":"logs_g2s/extract_and_eval.py","file_name":"extract_and_eval.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"77"} +{"seq_id":"42121407389","text":"# 判断一个整数是否是素数\n# 2020/10/21\n# fyj\n\nimport math\n\ndef IsPrime(num):\n times = 0\n if num <= 0:\n print(\"请输入一个正整数\")\n return False\n elif num == 1:\n return True\n elif num > 1:\n for i in range(num+1):\n if i > 1 and num % i == 0:\n times += 1\n if times <= 1:\n return True\n else:\n return False\n\ndef main():\n\n num = eval(input(\"请输入一个正整数: \"))\n if IsPrime(num) == True:\n print(\"{}是素数\\n\".format(num))\n else:\n print(\"{}不是素数\\n\".format(num))\n\nmain()","repo_name":"EdwinVan/Python","sub_path":"Python Homework/20-10-16-week06/5-5.py","file_name":"5-5.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"228173154","text":"import uuid\n\nfrom fastapi import APIRouter, HTTPException, Depends\nfrom starlette import status\n\nfrom .dependencies import get_menu_service\nfrom . import schemas\nfrom .service import MenuService\n\nmenu_router = APIRouter()\n\n\n@menu_router.post(\n \"\", response_model=schemas.Menu, status_code=status.HTTP_201_CREATED\n)\ndef create_menu(\n menu: schemas.MenuCreate,\n menu_service: MenuService = Depends(get_menu_service),\n):\n new_menu = menu_service.create_menu(menu)\n if new_menu is None:\n raise HTTPException(status_code=400, detail=\"Menu already exists.\")\n return new_menu\n\n\n@menu_router.get(\"\")\n# @menu_router.get(\"\", response_model=list[schemas.Menu])\ndef read_menus(menu_service: MenuService = Depends(get_menu_service)):\n return menu_service.get_menus()\n\n\n@menu_router.get(\"/{menu_id}\", response_model=schemas.Menu)\ndef read_menu(\n menu_id: uuid.UUID, menu_service: MenuService = Depends(get_menu_service)\n):\n db_menu = menu_service.get_menu(menu_id)\n if db_menu is None:\n raise HTTPException(status_code=404, detail=\"Menu is not found.\")\n return db_menu\n\n\n@menu_router.delete(\"/{menu_id}\")\ndef delete_menu(\n menu_id: uuid.UUID, menu_service: MenuService = Depends(get_menu_service)\n):\n db_menu_status = menu_service.delete_menu(menu_id)\n if db_menu_status is None:\n raise HTTPException(status_code=404, detail=\"Menu not found.\")\n if db_menu_status == 1:\n return {\"detail\": \"Menu is deleted successfully.\"}\n raise HTTPException(status_code=204, detail=\"Menu not deleted.\")\n\n\n@menu_router.patch(\"/{menu_id}\", response_model=schemas.Menu)\ndef update_menu(\n menu_id: uuid.UUID,\n menu: dict,\n menu_service: MenuService = Depends(get_menu_service),\n):\n db_menu = menu_service.update_menu(menu_id, menu)\n if db_menu == -1:\n raise HTTPException(status_code=404, detail=\"Menu not found\")\n if db_menu == 0:\n raise HTTPException(status_code=200, detail=\"Title is already exist.\")\n return 
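The `IsPrime` record above tries every candidate divisor up to `num` and also reports 1 as prime, which differs from the usual convention. A trial-division sketch that stops at the integer square root:

```python
import math

def is_prime(num):
    if num < 2:  # 0, 1 and negatives are not prime by convention
        return False
    for d in range(2, math.isqrt(num) + 1):
        if num % d == 0:
            return False
    return True

print([n for n in range(1, 20) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19]
```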
db_menu\n","repo_name":"bimka/menu_catalog","sub_path":"app/src/menus/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17232284659","text":"import os\nimport sys\n\nfrom pvops.text import visualize, preprocess, nlp_utils\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport matplotlib\nimport nltk\n\ndef test_text_remove_nondate_nums():\n example = r\"This is a test example https://www.google.com 10% #10 101 1-1-1 a-e4 13-1010 10.1 123456789 123/12 executed on 2/4/2020\"\n answer = r\" this is test example executed on 2/4/2020 \"\n assert preprocess.text_remove_nondate_nums(example) == answer\n\n\ndef test_text_remove_numbers_stopwords():\n example = r\"This is a test example 10% #10 101 1-1-1 13-1010 10.1 123456789 123/12 executed on 2/4/2020\"\n answer = r\"This test example executed\"\n\n stopwords_answer = [\n \"a\",\n \"about\",\n \"above\",\n \"after\",\n \"again\",\n \"against\",\n \"ain\",\n \"all\",\n \"am\",\n \"an\",\n \"and\",\n \"any\",\n \"are\",\n \"aren\",\n \"aren't\",\n \"as\",\n \"at\",\n \"be\",\n \"because\",\n \"been\",\n \"before\",\n \"being\",\n \"below\",\n \"between\",\n \"both\",\n \"but\",\n \"by\",\n \"can\",\n \"couldn\",\n \"couldn't\",\n \"d\",\n \"did\",\n \"didn\",\n \"didn't\",\n \"do\",\n \"does\",\n \"doesn\",\n \"doesn't\",\n \"doing\",\n \"don\",\n \"don't\",\n \"down\",\n \"during\",\n \"each\",\n \"few\",\n \"for\",\n \"from\",\n \"further\",\n \"had\",\n \"hadn\",\n \"hadn't\",\n \"has\",\n \"hasn\",\n \"hasn't\",\n \"have\",\n \"haven\",\n \"haven't\",\n \"having\",\n \"he\",\n \"her\",\n \"here\",\n \"hers\",\n \"herself\",\n \"him\",\n \"himself\",\n \"his\",\n \"how\",\n \"i\",\n \"if\",\n \"in\",\n \"into\",\n \"is\",\n \"isn\",\n \"isn't\",\n \"it\",\n \"it's\",\n \"its\",\n \"itself\",\n \"just\",\n \"ll\",\n \"m\",\n \"ma\",\n \"me\",\n \"mightn\",\n \"mightn't\",\n \"more\",\n \"most\",\n \"mustn\",\n \"mustn't\",\n \"my\",\n \"myself\",\n \"needn\",\n \"needn't\",\n \"no\",\n \"nor\",\n \"not\",\n \"now\",\n \"o\",\n \"of\",\n \"off\",\n \"on\",\n \"once\",\n \"only\",\n \"or\",\n \"other\",\n \"our\",\n \"ours\",\n \"ourselves\",\n \"out\",\n \"over\",\n \"own\",\n \"re\",\n \"s\",\n \"same\",\n \"shan\",\n \"shan't\",\n \"she\",\n \"she's\",\n \"should\",\n \"should've\",\n \"shouldn\",\n \"shouldn't\",\n \"so\",\n \"some\",\n \"such\",\n \"t\",\n \"than\",\n \"that\",\n \"that'll\",\n \"the\",\n \"their\",\n \"theirs\",\n \"them\",\n \"themselves\",\n \"then\",\n \"there\",\n \"these\",\n \"they\",\n \"this\",\n \"those\",\n \"through\",\n \"to\",\n \"too\",\n \"under\",\n \"until\",\n \"up\",\n \"ve\",\n \"very\",\n \"was\",\n \"wasn\",\n \"wasn't\",\n \"we\",\n \"were\",\n \"weren\",\n \"weren't\",\n \"what\",\n \"when\",\n \"where\",\n \"which\",\n \"while\",\n \"who\",\n \"whom\",\n \"why\",\n \"will\",\n \"with\",\n \"won\",\n \"won't\",\n \"wouldn\",\n \"wouldn't\",\n \"y\",\n \"you\",\n \"you'd\",\n \"you'll\",\n \"you're\",\n \"you've\",\n \"your\",\n \"yours\",\n \"yourself\",\n \"yourselves\",\n ]\n\n stopwords = nlp_utils.create_stopwords()\n assert stopwords_answer == stopwords\n assert preprocess.text_remove_numbers_stopwords(example, stopwords) == answer\n\n\ndef test_get_dates():\n df = pd.DataFrame(\n [\n {\n \"Date\": \"2020/01/23 12:34:56\",\n \"Document\": \"Find this date 2020/01/23 12:34:56\",\n },\n {\n \"Date\": np.nan,\n \"Document\": \"Find this date March 5 
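A router like the menu one above can be exercised in-process with FastAPI's TestClient instead of a live server. A sketch with a trivial stand-in route, since the record's schemas and service wiring are not reproduced here:

```python
from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()

@app.get("/menus/{menu_id}")
def read_menu(menu_id: str):
    # Stand-in for the record's service-backed handler.
    return {"id": menu_id, "title": "stub"}

client = TestClient(app)
resp = client.get("/menus/abc")
print(resp.status_code, resp.json())  # 200 {'id': 'abc', 'title': 'stub'}
```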
2021 and April 7 2022\",\n },\n ]\n )\n\n answer = [datetime.datetime.strptime(\n \"2020/01/23 12:34:56\", \"%Y/%m/%d %H:%M:%S\")]\n assert answer == preprocess.get_dates(\n df[\"Document\"].iloc[0], df, 0, {\n \"data\": \"Document\", \"eventstart\": \"Date\"}, False\n )\n\n answer = [\n datetime.datetime.strptime(\"2021/03/05 00:00:00\", \"%Y/%m/%d %H:%M:%S\"),\n datetime.datetime.strptime(\"2022/04/07 00:00:00\", \"%Y/%m/%d %H:%M:%S\"),\n ]\n assert answer == preprocess.get_dates(\n df[\"Document\"].iloc[1], df, 1, {\n \"data\": \"Document\", \"eventstart\": \"Date\"}, False\n )\n\n\ndef test_visualize_attribute_timeseries():\n\n dates = pd.Series(\n [\n \"2020/01/23 12:34:56\",\n \"2020/01/24 12:34:56\",\n \"2020/01/25 12:34:56\",\n ]\n )\n\n dates = pd.to_datetime(dates).tolist()\n\n df = pd.DataFrame(\n {\"labels\": [\"A word\", \"B word\", \"C word\"], \"date\": dates})\n\n fig = visualize.visualize_attribute_timeseries(\n df, {\"label\": \"labels\", \"date\": \"date\"}, date_structure=\"%Y-%m-%d\"\n )\n assert isinstance(fig, matplotlib.figure.Figure)\n\n\ndef xtest_visualize_word_frequency_plot():\n # Decommissioned because nltk's freqplot automatically shows\n # the rendered plot, meaning the test will get caught up\n documents = [\"A word\", \"B word\", \"C word\"]\n words = \" \".join(documents)\n tokenized_words = nltk.word_tokenize(words)\n\n fig = visualize.visualize_word_frequency_plot(tokenized_words)\n\n assert isinstance(fig, nltk.FreqDist)\n\n\ndef test_visualize_attribute_connectivity():\n Attribute1 = [\"A\", \"B\", \"C\", \"C\"]\n Attribute2 = [\"X\", \"X\", \"Y\", \"Z\"]\n\n df = pd.DataFrame({\"Attr1\": Attribute1, \"Attr2\": Attribute2})\n\n om_col_dict = {\"attribute1_col\": \"Attr1\", \"attribute2_col\": \"Attr2\"}\n\n fig, edges = visualize.visualize_attribute_connectivity(\n df,\n om_col_dict,\n figsize=(10, 8),\n edge_width_scalar=2,\n graph_aargs={\n \"with_labels\": True,\n \"font_weight\": \"bold\",\n \"node_size\": 30,\n \"font_size\": 35,\n },\n )\n\n assert isinstance(fig, matplotlib.pyplot.Figure)\n assert list(edges) == [(\"A\", \"X\"), (\"X\", \"B\"), (\"C\", \"Y\"), (\"C\", \"Z\")]\n\n matplotlib.pyplot.close()\n\n\ndef test_summarize_text_data():\n\n df = pd.DataFrame(\n [\n {\n \"Date\": \"2020/01/23 12:34:56\",\n \"Document\": \"Find this date 2020/01/23 12:34:56\",\n },\n {\n \"Date\": np.nan,\n \"Document\": \"Find this date March 5 2021 and April 7 2022\",\n },\n ]\n )\n\n answer = {\n \"n_samples\": 2,\n \"n_nan_docs\": 0,\n \"n_words_doc_average\": 7.50,\n \"n_unique_words\": 12,\n \"n_total_words\": 15.00,\n }\n\n info = nlp_utils.summarize_text_data(df, \"Document\")\n\n assert answer == info\n","repo_name":"sandialabs/pvOps","sub_path":"pvops/tests/test_text.py","file_name":"test_text.py","file_ext":"py","file_size_in_byte":6986,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"27166051154","text":"import json\nimport os\ntry:\n # Linux expects the latest package version of 3.31.1 (as of p)\n import pysqlite3 as sqlite3\nexcept ModuleNotFoundError:\n # MacOS expects the latest brew version of 3.32.1 (as of 2020-07-10).\n # Windows builds use the official Python 3.9.13 builds and version of 3.37.2.\n import sqlite3 # type: ignore\n\n\nfrom electrumsv.constants import DATABASE_EXT, MIGRATION_CURRENT, MIGRATION_FIRST\nfrom electrumsv.exceptions import DatabaseMigrationError\n\n\ndef _get_migration(db: sqlite3.Connection) -> int:\n cursor = db.execute(\"SELECT value FROM WalletData 
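The stopword tests above hand a list to `text_remove_numbers_stopwords`; whatever pvops does internally, checking membership against a `set` keeps filtering at O(1) per token. A minimal sketch of the idea:

```python
stopwords = {"this", "is", "a", "on"}

def remove_stopwords(text):
    # Keep tokens that are not stopwords (case-insensitive match).
    return " ".join(t for t in text.split() if t.lower() not in stopwords)

print(remove_stopwords("This is a test example executed on 2/4/2020"))
# -> 'test example executed 2/4/2020'
```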
WHERE key='migration'\")\n row = cursor.fetchone()\n if row is None:\n raise DatabaseMigrationError(\"wallet database migration metadata not present\")\n return json.loads(row[0])\n\ndef _ensure_matching_migration(db: sqlite3.Connection, expected_migration: int):\n migration = _get_migration(db)\n if migration != expected_migration:\n raise DatabaseMigrationError(\"wallet database migration mismatch, expected \"\n f\"{expected_migration}, got {migration}\")\n\n\ndef create_database(db: sqlite3.Connection) -> None:\n from . import migrations\n with db:\n migrations.migration_0022_create_database.execute(db)\n _ensure_matching_migration(db, MIGRATION_FIRST)\n\n\ndef create_database_file(wallet_path: str) -> None:\n if wallet_path.endswith(DATABASE_EXT):\n raise DatabaseMigrationError(\"wallet path is not base path\")\n if 22 != MIGRATION_FIRST:\n raise DatabaseMigrationError(\"constant MIGRATION_FIRST differs from local version\")\n db_path = wallet_path + DATABASE_EXT\n if os.path.exists(db_path):\n raise DatabaseMigrationError(\"wallet database already exists\")\n\n # Python sqlite bindings automatically enter a transaction which prevents the PRAGMA from\n # exiting, which is why we use no isolation level.\n db = sqlite3.connect(db_path, check_same_thread=False, isolation_level=None)\n db.execute(f\"PRAGMA journal_mode=WAL;\")\n create_database(db)\n db.close()\n\n update_database_file(wallet_path)\n\ndef update_database(db: sqlite3.Connection) -> None:\n # This will error if the database has not been created correctly with the metadata.\n version = _get_migration(db)\n\n from . import migrations\n with db:\n if version == 22:\n migrations.migration_0023_add_wallet_events.execute(db)\n version += 1\n if version == 23:\n migrations.migration_0024_account_transactions.execute(db)\n version += 1\n if version == 24:\n migrations.migration_0025_invoices.execute(db)\n version += 1\n if version == 25:\n migrations.migration_0026_txo_coinbase_flag.execute(db)\n version += 1\n\n if version != MIGRATION_CURRENT:\n db.rollback()\n assert version == MIGRATION_CURRENT, \\\n f\"Expected migration {MIGRATION_CURRENT}, got {version}\"\n\n _ensure_matching_migration(db, MIGRATION_CURRENT)\n\ndef update_database_file(wallet_path: str) -> None:\n if wallet_path.endswith(DATABASE_EXT):\n raise DatabaseMigrationError(\"wallet path is not base path\")\n\n db_path = wallet_path + DATABASE_EXT\n if not os.path.exists(db_path):\n raise DatabaseMigrationError(\"wallet database does not exist\")\n\n db = sqlite3.connect(db_path)\n update_database(db)\n db.close()\n","repo_name":"electrumsv/electrumsv","sub_path":"electrumsv/wallet_database/migration.py","file_name":"migration.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"77"} +{"seq_id":"7206716407","text":"import threading\nfrom db.db import delete_from_scheduler, get_frontier_len, get_pages_len\nimport time\n\nclass Manager(threading.Thread):\n def __init__(self, thread_id):\n threading.Thread.__init__(self)\n self.thread_id = thread_id\n\n def run(self):\n while True:\n delete_from_scheduler()\n time.sleep(0.5)","repo_name":"gallindic/WIER-course","sub_path":"pa1/crawler/crawler/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35703817901","text":"TC = int(input())\n\nfor _ in range(TC):\n n, m = [int(i) for i in input().split()]\n 
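The migration record reads a stored version and applies numbered steps until it reaches `MIGRATION_CURRENT`. The same pattern reduced to its skeleton, using SQLite's built-in `user_version` pragma in place of the record's WalletData metadata table:

```python
import sqlite3

MIGRATIONS = {
    0: "CREATE TABLE t (id INTEGER PRIMARY KEY)",
    1: "ALTER TABLE t ADD COLUMN name TEXT",
}

def migrate(db):
    # Read the stored schema version, apply each step in order, bump the version.
    version = db.execute("PRAGMA user_version").fetchone()[0]
    while version in MIGRATIONS:
        db.execute(MIGRATIONS[version])
        version += 1
        db.execute(f"PRAGMA user_version = {version}")
    return version

db = sqlite3.connect(":memory:")
print(migrate(db))  # 2
```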
for i in range(m):\n input()\n if n % 2 == 0:\n print(\"yes\")\n else:\n print(\"no\")\n","repo_name":"npkhang99/Competitive-Programming","sub_path":"CodeChef/TEAMFORM.py","file_name":"TEAMFORM.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"26184854829","text":"# https://www.geeksforgeeks.org/check-if-any-king-is-unsafe-on-the-chessboard-or-not/\n\n#Função para checar se algum rei está sob ataque\ndef checkBoard(board):\n # Acha a posição dos dois reis\n for i in range(8):\n for j in range(8):\n # Procura alguma peça que pode atacar o rei branco\n if board[i][j] == 'k':\n \n # Cavalo\n if lookForn(board, 'N', i, j):\n return 1\n \n # Peão\n if lookForp(board, 'P', i, j):\n return 1\n \n # Torre\n if lookForr(board, 'R', i, j):\n return 1\n \n # Bispo\n if lookForb(board, 'B', i, j):\n return 1\n \n # Rainha\n if lookForq(board, 'Q', i, j):\n return 1\n \n # Rei\n if lookFork(board, 'K', i, j):\n return 1\n \n # Procura alguma peça que pode atacar o rei preto\n if board[i][j] == 'K':\n # Cavalo\n if lookForn(board, 'n', i, j):\n return 2\n \n # Peão\n if lookForp(board, 'p', i, j):\n return 2\n \n # Torre\n if lookForr(board, 'r', i, j):\n return 2\n \n # Bispo\n if lookForb(board, 'b', i, j):\n return 2\n \n # Rainha\n if lookForq(board, 'q', i, j):\n return 2\n \n # Rei\n if lookFork(board, 'k', i, j):\n return 2\n return 1\n\n#Confere se o rei está atacando o outro\ndef lookFork(board, c, i, j):\n # Armazena todas possíveis movimentações do rei\n x = [ -1, -1, -1, 0, 0, 1, 1, 1 ]\n y = [ -1, 0, 1, -1, 1, -1, 0, 1 ]\n \n for k in range(8):\n # Incrementa o indice\n m = i + x[k]\n n = j + y[k]\n \n # Verifica se não ultrapassou o limite do tabuleiro\n if inBounds(m, n) and board[m][n] == c:\n return True\n return False\n \n# Checa se a rainha está atacando o rei\ndef lookForq(board, c, i, j):\n \n # A rainha se move como uma\n # combinação do bispo e da torre\n if lookForb(board, c, i, j) or lookForr(board, c, i, j):\n return True\n return False\n \n# Checa se o bispo ataca o rei\ndef lookForb(board, c, i, j):\n # Diagonal inferior direita\n k = 0\n while inBounds(i + ++k, j + k):\n if board[i + k][j + k] == c:\n return True\n if board[i + k][j + k] != '-':\n break\n \n # Diagonal inferior esquerda\n k = 0\n while inBounds(i + ++k, j - k):\n if board[i + k][j - k] == c:\n return True\n if board[i + k][j - k] != '-':\n break\n \n # Diagonal superior direita\n k = 0\n while inBounds(i - ++k, j + k):\n if board[i - k][j + k] == c:\n return True\n if board[i - k][j + k] != '-':\n break\n \n # Diagonal superior esquerda\n k = 0\n while inBounds(i - ++k, j - k):\n if board[i - k][j - k] == c:\n return True\n if board[i - k][j - k] != '-':\n break\n \n return False\n \n# Confere se a torre está atacando o rei\ndef lookForr(board, c, i, j):\n # Abaixo\n k = 0\n while inBounds(i + ++k, j):\n if board[i + k][j] == c:\n return True\n if board[i + k][j] != '-':\n break\n \n # Acima\n k = 0\n while inBounds(i + --k, j):\n if board[i + k][j] == c:\n return True\n if board[i + k][j] != '-':\n break\n \n # Direita\n k = 0\n while inBounds(i, j + ++k):\n if board[i][j + k] == c:\n return True\n if board[i][j + k] != '-':\n break\n \n # Esquerda\n k = 0\n while inBounds(i, j + --k):\n if board[i][j + k] == c:\n return True\n if board[i][j + k] != '-':\n break\n return False\n \n# Confere se o cavalo está atacando o rei\ndef lookForn(board, c, i, j):\n # Todas as possibilidades de movimentação do 
cavalo\n x = [ 2, 2, -2, -2, 1, 1, -1, -1 ]\n y = [ 1, -1, 1, -1, 2, -2, 2, -2 ]\n \n for k in range(8):\n # Incrementa os índices\n m = i + x[k]\n n = j + y[k]\n \n # Verifica se não ultrapassou o limite do tabuleiro\n if inBounds(m, n) and board[m][n] == c:\n return True\n return False\n \n# Confere se o peão está atacando o rei\ndef lookForp(board, c, i, j):\n if ord(c) >= 65 and ord(c) <= 90:\n # Para peões brancos\n lookFor = 'P'\n if inBounds(i + 1, j - 1) and board[i + 1][j - 1] == lookFor:\n return True\n \n if inBounds(i + 1, j + 1) and board[i + 1][j + 1] == lookFor:\n return True\n else:\n # Para peões pretos\n lookFor = 'p'\n if inBounds(i - 1, j - 1) and board[i - 1][j - 1] == lookFor:\n return True\n if inBounds(i - 1, j + 1) and board[i - 1][j + 1] == lookFor:\n return True\n return False\n \n# Checa se os índices não ultrapassam o limite do tabuleiro\ndef inBounds(i, j):\n return i >= 0 and i < 8 and j >= 0 and j < 8\n \n# Testes\nboard1 = [\n [ '-', '-', 'k', '-', '-', '-', '-', '-' ],\n [ 'p', 'p', 'p', '-', 'p', 'p', 'p', 'p' ],\n [ '-', '-', '-', '-', '-', '-', '-', '-' ],\n [ '-', 'R', '-', '-', '-', 'B', '-', '-' ],\n [ '-', '-', '-', '-', '-', '-', '-', '-' ],\n [ '-', '-', '-', '-', '-', '-', '-', '-' ],\n [ 'P', 'P', 'P', 'P', 'P', 'P', 'P', 'P' ],\n [ 'K', '-', '-', '-', '-', '-', '-', '-' ] \n]\n\nboard2 = [\n [ '-', '-', '-', 'k', '-', '-', '-', '-' ],\n [ 'p', 'p', 'p', '-', 'p', 'p', 'p', 'p' ],\n [ '-', '-', '-', '-', '-', 'b', '-', '-' ],\n [ '-', '-', '-', 'R', '-', '-', '-', '-' ],\n [ '-', '-', '-', '-', '-', '-', '-', '-' ],\n [ '-', '-', '-', '-', '-', '-', '-', '-' ],\n [ 'P', '-', 'P', 'P', 'P', 'P', 'P', 'P' ],\n [ 'K', '-', '-', '-', '-', '-', '-', '-' ] \n]\n\nboard3 = [\n [ 'r', 'n', 'b', 'q', 'k', '-', 'n', 'r' ],\n [ 'p', 'p', 'p', '-', '-', 'p', 'p', 'p' ],\n [ '-', '-', '-', '-', 'p', '-', '-', '-' ],\n [ '-', '-', '-', 'p', '-', '-', '-', '-' ],\n [ '-', '-', '-', '-', '-', '-', '-', '-' ],\n [ '-', '-', '-', '-', '-', '-', '-', '-' ],\n [ 'P', 'P', '-', '-', 'P', 'P', 'P', 'P' ],\n [ 'R', 'N', 'B', 'Q', 'K', 'B', '-', 'R' ] \n]\n\n\nboard = board1\nif checkBoard(board) == 0:\n print(\"No king in danger\")\nelif checkBoard(board) == 1:\n print(\"White king in danger\")\nelse:\n print(\"Black king in danger\")","repo_name":"Ramosfiliped/BCC402-Prog-Avancada","sub_path":"atv-01.py","file_name":"atv-01.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72584735930","text":"import httpx\nimport asyncio\n\nfrom loguru import logger\nimport phonenumbers\n\nfrom user.utils.token import Method, get_ncp_signature\nfrom user.db.rdb.schema import User\n\nfrom common.config.settings import conf\nfrom common.config.consts import NCP_SENS_SMS_URL, SMS_VERIFY_MSG\n\n\nasync def request(client, user: User, verify_code: str):\n phone = phonenumbers.parse(user.phone)\n\n url = NCP_SENS_SMS_URL.format(service_id = conf().NCP_SENS_SERVICE_ID)\n timestamp, signature = get_ncp_signature(method=Method.POST, url=url)\n\n headers = {\n 'accept': 'application/json',\n 'Content-Type': 'application/json; charset=UTF-8',\n 'x-ncp-apigw-timestamp': timestamp,\n 'x-ncp-iam-access-key': conf().NCP_ACCESS_KEY,\n 'x-ncp-apigw-signature-v2': signature\n }\n\n body = {\n 'type' : 'SMS',\n 'countryCode' : str(phone.country_code),\n 'from': '01025201513',\n 'content' : SMS_VERIFY_MSG.format(verify_code = verify_code),\n 'messages' : [{'to': str(phone.national_number)}]\n }\n\n 
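`lookForr` and `lookForb` in the chess record each repeat four nearly identical scan loops; one ray-walking helper parameterized by direction vectors covers rook, bishop, and queen lines. (Note that the record's `checkBoard` falls through to `return 1`, so its "No king in danger" branch is unreachable; a `return 0` fallthrough would match the test harness.) A sketch on the same 8x8 board representation:

```python
ROOK_DIRS = [(1, 0), (-1, 0), (0, 1), (0, -1)]
BISHOP_DIRS = [(1, 1), (1, -1), (-1, 1), (-1, -1)]

def ray_attacks(board, piece, i, j, directions):
    # Walk each direction from (i, j) until a piece or the edge stops the ray.
    for di, dj in directions:
        m, n = i + di, j + dj
        while 0 <= m < 8 and 0 <= n < 8:
            if board[m][n] == piece:
                return True
            if board[m][n] != '-':
                break
            m, n = m + di, n + dj
    return False

board = [['-'] * 8 for _ in range(8)]
board[0][0], board[0][5] = 'k', 'R'
print(ray_attacks(board, 'R', 0, 0, ROOK_DIRS))  # True: the rook sees the king
```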
res = await client.post(url=url, headers=headers, json=body)\n\n    return res.json()\n\n\nasync def task(user: User, verify_code: str):\n    async with httpx.AsyncClient() as client:\n        task = request(client, user=user, verify_code=verify_code)\n        result = await asyncio.gather(task)\n\n    logger.info(result)\n","repo_name":"online-blacksmiths/rocket_uppercut_backend","sub_path":"user/tasks/send_verify_code_sms.py","file_name":"send_verify_code_sms.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"12420501423","text":"#다른 방법은 없나?\nimport sys\ninput = sys.stdin.readline\nres = []\nfor i in range(10):\n    res.append(int(input()))\n\nx = 0\nidx = 0\nwhile True:\n    x += res[idx]\n    if x>100 or idx==9: #idx==9가 없으면 런타임 에러 (IndexError)가 남\n        break\n    #반례) 1 2 3 4 5 6 ... 10인 경우\n    idx += 1\nx = sum(res[:idx]); y = sum(res[:idx+1]) #합이 100을 넘어가는 index를 찾아서 그 값과 그다음 값을 비교한다. 답이 100을 넘어갈 수도 있다.\nif(100-x>y-100):\n    print(y)\nelif(100-x/',views.deleteroom,name='deleteroom'),\n    path('room/update/<int:pk>/',views.updateroom,name='updateroom'),\n    \n    # URL For Staff\n    path('staff/',views.staff,name='staff'),\n    path('addstaff/', views.addstaff, name='addstaff'),\n    path('staff/delete/<int:pk>/',views.deletestaff,name='deletestaff'),\n    path('staff/update/<int:pk>/',views.updatestaff,name='updatestaff'),\n\n    # URL For Customer\n    path('customer/',views.customer,name='customer'),\n    path('addcustomer/',views.addcustomer,name='addcustomer'),\n    path('customer/delete/<int:pk>/',views.deletecustomer,name='deletecustomer'),\n    path('customer/update/<int:pk>/',views.updatecustomer,name='updatecustomer'),\n\n    # URL For Reservation\n    path('reservation/',views.reservation,name='reservation'),\n    path('addreservation/',views.addreservation,name='addreservation'),\n    path('reservation/delete/<int:pk>/',views.deletereservation,name='deletereservation'),\n    path('reservation/update/<int:pk>/',views.updatereservation,name='updatereservation'),\n\n]\n\n","repo_name":"Dipti158/Django_Hotel_Management_System","sub_path":"app_hotel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"17534944627","text":"import sys\ninput = sys.stdin.readline\n\nT = int(input())\n\n# '('이면 스택을 하나 쌓는다.\n# ')'이면 바로 전 스택이 '('이고 스택 길이가 0이 아닌경우 스택에서 마지막 '('를 제거한다.\n# 위 경우가 아니라면 스택에 ')'를 추가한다.\n# 스택의 길이가 0이면 VPS이고 0보다 길다면 VPS가 아니다.\n\nfor _ in range(0,T):\n    data = input()\n    stack =[]\n\n    for i in data:\n        if i == '(':\n            stack.append(i)\n        elif i == ')':\n            if len(stack) != 0 and stack[-1] == '(':\n                stack.pop()\n            else:\n                stack.append(')')\n                break\n    \n    if len(stack) == 0:\n        print('YES')\n    else:\n        print('NO')","repo_name":"lsy7Git/BOJ","sub_path":"9012_괄호.py","file_name":"9012_괄호.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"18092804848","text":"import fauxfactory\nimport pytest\n\nfrom cfme import test_requirements\nfrom cfme.utils.rest import assert_response\nfrom cfme.utils.rest import delete_resources_from_collection\nfrom cfme.utils.wait import wait_for\n\npytestmark = [\n    pytest.mark.long_running,\n    pytest.mark.meta(server_roles=['+embedded_ansible']),\n    pytest.mark.ignore_stream('upstream', \"5.11\"),\n    test_requirements.rest,\n]\n\n\n@pytest.fixture(scope='module')\ndef ansible(appliance):\n    appliance.wait_for_embedded_ansible()\n    provider, __ = wait_for(\n        lambda: 
appliance.rest_api.collections.providers.find_by(\n name='Embedded Ansible Automation Manager') or False,\n num_sec=200,\n delay=5\n )\n return provider[0]\n\n\n@pytest.fixture(scope='function')\ndef repository(appliance, ansible):\n collection = appliance.rest_api.collections.configuration_script_sources\n uniq = fauxfactory.gen_alphanumeric(5)\n repo_name = f\"test_repo_{uniq}\"\n data = {\n \"name\": repo_name,\n \"description\": f\"Test Repo {uniq}\",\n \"manager_resource\": {\"href\": ansible.href},\n \"related\": {},\n \"scm_type\": \"git\",\n \"scm_url\": \"https://github.com/quarckster/ansible_playbooks\",\n \"scm_branch\": \"\",\n \"scm_clean\": False,\n \"scm_delete_on_update\": False,\n \"scm_update_on_launch\": False\n }\n\n collection.action.create(data)\n assert_response(appliance)\n\n repo_rest, __ = wait_for(\n lambda: collection.find_by(name=repo_name) or False, num_sec=300, delay=5)\n repo_rest = repo_rest[0]\n\n yield repo_rest\n\n if repo_rest.exists:\n repo_rest.action.delete()\n\n\nclass TestReposRESTAPI:\n @pytest.mark.parametrize(\n 'from_collection', [False, True], ids=['from_detail', 'from_collection'])\n def test_edit_repository(self, appliance, repository, from_collection):\n \"\"\"Tests editing repositories using REST API.\n\n Metadata:\n test_flag: rest\n\n Polarion:\n assignee: pvala\n casecomponent: Ansible\n caseimportance: medium\n initialEstimate: 1/4h\n endsin: 5.10\n \"\"\"\n new_description = fauxfactory.gen_alphanumeric(21, start=\"Test Repository \")\n\n if from_collection:\n repository.reload()\n repository_data_edited = {\n \"href\": repository.href,\n \"description\": new_description,\n }\n appliance.rest_api.collections.configuration_script_sources.action.edit(\n repository_data_edited)\n else:\n repository.action.edit(description=new_description)\n\n assert_response(appliance)\n record, __ = wait_for(\n lambda: appliance.rest_api.collections.configuration_script_sources.find_by(\n description=new_description) or False,\n num_sec=180,\n delay=10,\n )\n repository.reload()\n assert repository.description == record[0].description\n\n @pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])\n def test_delete_repository_from_detail(self, appliance, repository, method):\n \"\"\"Deletes repository from detail using REST API\n\n Metadata:\n test_flag: rest\n\n Polarion:\n assignee: pvala\n casecomponent: Ansible\n caseimportance: medium\n initialEstimate: 1/4h\n endsin: 5.10\n\n Bugzilla:\n 1477520\n \"\"\"\n del_action = getattr(repository.action.delete, method.upper())\n del_action()\n assert_response(appliance)\n repository.wait_not_exists(num_sec=300, delay=5)\n\n with pytest.raises(Exception, match='ActiveRecord::RecordNotFound'):\n del_action()\n assert_response(appliance, http_status=404)\n\n def test_delete_repository_from_collection(self, appliance, repository):\n \"\"\"Deletes repository from collection using REST API\n\n Metadata:\n test_flag: rest\n\n Polarion:\n assignee: pvala\n casecomponent: Ansible\n caseimportance: medium\n initialEstimate: 1/4h\n \"\"\"\n delete_resources_from_collection([repository], not_found=False, num_sec=300, delay=5)\n\n\nclass TestPayloadsRESTAPI:\n def test_payloads_collection(self, appliance, repository):\n \"\"\"Checks the configuration_script_payloads collection using REST API.\n\n Metadata:\n test_flag: rest\n\n Polarion:\n assignee: pvala\n casecomponent: Ansible\n caseimportance: medium\n initialEstimate: 1/4h\n endsin: 5.10\n \"\"\"\n collection = 
appliance.rest_api.collections.configuration_script_payloads\n collection.reload()\n assert collection.all\n for payload in collection.all:\n assert 'AutomationManager::Playbook' in payload.type\n\n def test_authentications_subcollection(self, appliance, repository):\n \"\"\"Checks the authentications subcollection using REST API.\n\n Metadata:\n test_flag: rest\n\n Polarion:\n assignee: pvala\n casecomponent: Ansible\n caseimportance: medium\n initialEstimate: 1/4h\n endsin: 5.10\n \"\"\"\n script_payloads = appliance.rest_api.collections.configuration_script_payloads\n script_payloads.reload()\n assert script_payloads[-1].authentications.name\n\n def test_payloads_subcollection(self, appliance, repository):\n \"\"\"Checks the configuration_script_payloads subcollection using REST API.\n\n Metadata:\n test_flag: rest\n\n Polarion:\n assignee: pvala\n casecomponent: Ansible\n caseimportance: medium\n initialEstimate: 1/4h\n endsin: 5.10\n \"\"\"\n script_sources = appliance.rest_api.collections.configuration_script_sources\n script_sources.reload()\n assert script_sources[-1].configuration_script_payloads\n","repo_name":"ManageIQ/integration_tests","sub_path":"cfme/tests/ansible/test_embedded_ansible_rest.py","file_name":"test_embedded_ansible_rest.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"77"} +{"seq_id":"2355995550","text":"\nasync def resolve_services(stack, info):\n return stack.services.values()\n\n\nasync def resolve_status(service, info) -> int:\n docker = info.context['app']['docker']\n st = info.context['app']['stacker']\n\n states = {\n 'running': 1,\n 'exited': 0,\n 'created': 0,\n 'paused': -1,\n 'not_exist': 0\n }\n\n for cont in await docker.ps(all=True):\n labels = cont['Labels']\n if 'com.docker.compose.project' in labels and labels['com.docker.compose.project'] == st.stack_name('%s_%s' % (service.stack.uid, service.stack.id)):\n if labels['com.docker.compose.service'] == service.name:\n return states[cont['State']]\n return states['not_exist']\n\n\n","repo_name":"aheadWorks/pystacker","sub_path":"pystacker-backend/pystacker/api/schema/query/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41857771248","text":"while(True):\n def welcome():\n print('\\n[만나서 반갑습니다. 계산기의 사용방법은 먼저 두 수를 입력받고 원하는 연산에 대한 번호를 입력하면 됩니다.]')\n\n welcome()\n a = input('두 수를 입력하세요:(q를 입력하면 종료)(,구분) ')\n if( a == 'q'):\n print('종료합니다')\n break\n else:\n a = a.split(',')\n b = input('원하는 연산을 선택하세요: ')\n b = int(b)\n if(b == 1):\n result = int(a[0]) + int(a[1])\n print('덧셈을 선택하셨습니다. 결과는', result )\n elif(b == 2):\n result = int(a[0]) - int(a[1])\n print('뺄셈을 선택하셨습니다. 결과는', result )\n elif (b == 3):\n result = int(a[0]) * int(a[1])\n print('곱셈을 선택하셨습니다. 결과는', result)\n elif (b == 4):\n result = int(a[0]) / int(a[1])\n print('나눗셈을 선택하셨습니다. 
결과는', result)\n","repo_name":"ru5199/dev_ruproject_web","sub_path":"Python/source/1206_Wed/quiz56.py","file_name":"quiz56.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12181182774","text":"import pandas\nimport copy\n\ndef parse_interactions(file_path):\n df = pandas.read_csv(file_path, sep=\"\\t\")\n interactions = set()\n for row in df.itertuples():\n interactions.add(frozenset([row.protA, row.protB]))\n return interactions\n\ndef shuffle(interactions):\n interactions = copy.copy(interactions)\n shuffles = set()\n while len(interactions) > 1:\n a = set(interactions.pop())\n b = set(interactions.pop())\n aa = frozenset((a.pop(), b.pop()))\n bb = frozenset((a.pop(), b.pop()))\n if aa not in interactions:\n shuffles.add(aa)\n if bb not in interactions:\n shuffles.add(bb)\n return shuffles\n\n\ndef get_line(interaction, category):\n interaction = list(interaction)\n print(interaction)\n return interaction[0] + \"\\t\" + interaction[1] + \"\\t\" + category\n\n\ndef write_interactome(positome, negatome, output_file_path):\n with open(output_file_path, \"w\") as output_file:\n output_file.write(\"protA\\tprotB\\tinteraction\\n\")\n for neg, pos in zip(positome, negatome):\n line = get_line(neg, \"YES\") + \"\\n\"\n output_file.write(line)\n line = get_line(pos, \"NO\") + \"\\n\"\n output_file.write(line)\n\nif __name__ == \"__main__\":\n i = parse_interactions(\"data/trivial_data.tsv\")\n s = shuffle(i)\n write_interactome(i, s, \"data/interactome.tsv\")\n","repo_name":"ZacharieMenetrier/tagc-predaction","sub_path":"purg/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"40747562453","text":"#!/usr/bin/env python\n\nimport os\nimport re\nimport vtk\nimport argparse\nimport pdb\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom collections import defaultdict\n\nfrom vtk.util.numpy_support import numpy_to_vtk as n2v\nfrom vtk.util.numpy_support import vtk_to_numpy as v2n\n\nfrom get_database import Database\nfrom vtk_functions import read_geo, write_geo, get_all_arrays\n\n\nfsize = 20\nplt.rcParams.update({'font.size': fsize})\n\n\ndef plot_area():\n db = Database()\n f_in = db.get_centerline_path('0003_0001')\n f_out = os.path.join(db.img_path, 'radius_vs_area', 'OSMSC_0003_0001_branch0')\n\n # get centerline\n geo = read_geo(f_in).GetOutput()\n arrays, _ = get_all_arrays(geo)\n\n # extract branch\n br = 0\n mask = arrays['BranchId'] == br\n\n # get plot quantities\n path = arrays['Path'][mask]\n area_slice = arrays['CenterlineSectionArea'][mask]\n area_vmtk = arrays['MaximumInscribedSphereRadius'][mask] ** 2 * np.pi\n\n print('factor', area_slice[0] / area_vmtk[0])\n\n # make plot\n fig, ax = plt.subplots(dpi=300, figsize=(6, 6))\n ax.plot(path, area_slice, 'r-')\n ax.plot(path, area_vmtk, 'b-')\n ax.legend(['Area from slicing', 'Area from MISR'])\n ax.set_xlim(left=0)\n ax.set_xticks([0, 2, 4])\n ax.set_xticklabels(['Inlet', '2', '4'])\n plt.xlabel('Branch path [cm]')\n plt.ylabel('Area [cm^2]')\n plt.grid()\n fig.savefig(f_out, bbox_inches='tight')\n\n\ndef plot_model_statistics():\n db = Database()\n geometries_paper = db.get_geometries_select('paper')\n\n pie = defaultdict(lambda: defaultdict(int))\n cats = ['deliverable_category', 'vascular_state', 'treatment', 'image_data_modality', 'paper_reference', 'gender']\n\n names = 
{'deliverable_category': 'Vascular anatmoy',\n 'vascular_state': 'Vascular state',\n 'treatment': 'Treatment',\n 'image_data_modality': 'Imaging',\n 'paper_reference': 'Literature reference',\n 'gender': 'Gender'}\n\n # count all categories\n for geo in geometries_paper:\n _, err = db.get_bc_type(geo)\n if not err:\n pie['has_bc']['yes'] += 1\n params = db.get_params(geo)\n for cat in cats:\n name = params[cat].capitalize()\n if name == '' or 'Unpublished' in name:\n name = 'None'\n pie[cat][name] += 1\n else:\n pie['has_bc']['no'] += 1\n\n # make plots\n fig, axs = plt.subplots(2, 2, dpi=300, figsize=(30, 20))\n\n selection = ['deliverable_category', 'vascular_state', 'treatment', 'paper_reference']\n for cat, ax in zip(selection, axs.ravel()):\n labels = np.array([re.sub(r'\\([^)]*\\)', '', c) for c in pie[cat].keys()])\n sizes = np.array(list(pie[cat].values()))\n order = np.argsort(sizes)\n\n print('num', np.sum(sizes))\n abs_size = lambda p: '{:.0f}'.format(p * np.sum(sizes) / 100)\n ax.pie(sizes[order], labels=labels[order], autopct=abs_size)\n ax.axis('equal')\n ax.set_title(names[cat], fontsize=40, pad=20)\n\n f_out = os.path.join(db.img_path, 'repository', 'repo_statistics')#.pgf\n fig.savefig(f_out, bbox_inches='tight')\n plt.close(fig)\n\n\ndef main():\n plot_model_statistics()\n plot_area()\n\n\nif __name__ == '__main__':\n descr = 'Make plots for 3D-1D-0D paper'\n main()\n","repo_name":"StanfordCBCL/DataCuration","sub_path":"plots/paper_plots.py","file_name":"paper_plots.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41575019904","text":"from random import randint\n\nrand_num = randint(1, 100)\n\ndef guess():\n print(\"Zgadnij liczbę od 1 do 100!\")\n try:\n num = int(input(\"Twoja liczba: \"))\n except ValueError:\n print(\"Wprowadź poprawną liczbę\")\n guess()\n if num > 100:\n print(\"Liczba jest większa od 100!\")\n guess()\n if num > rand_num:\n print(\"Za dużo\")\n guess()\n elif num < rand_num:\n if num <= 0:\n print(\"Wprowadź liczbę większą od 0\")\n else:\n print(\"Za mało\")\n guess()\n else:\n print(\"Zgadłeś\")\n\nguess()","repo_name":"Ogorodnyk/Warsztat_1_Guess_the_number","sub_path":"Guess_the_number.py","file_name":"Guess_the_number.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40019847575","text":"\"\"\"\nCreated on Mar 18, 2013\n\n@author: menelaos\nCreated with Python2.6\nCompatible with >Python2.6. \n\nExample main for running ArabicToRomanNumerals module\n\nrun as:\npython example_main.py \n options: \n -h (--help) -> prints this help message\n -i (--interactive_extend)-> applies interactive numeral update\n when numbers are >3999. By an order of magnitude each request.\n \n a sequence of integers, separated by space, for\n which the Roman numerals will be evaluated. 
\n \n\"\"\"\n\nimport sys\nimport getopt\nfrom ArabicToRomanNumerals import ArabicToRomanNumerals\n\ndef main():\n #parse command line options\n try:\n opts, args = getopt.gnu_getopt(sys.argv[1:], \"hi\", [\"help\",\n \"interactive_extend\"])\n except getopt.error as msg:\n print(msg)\n print (\"for help use --help\")\n sys.exit(2)\n #process options\n auto = True\n for o, a in opts:\n if o in (\"-h\",\"--help\"):\n print(__doc__)\n sys.exit(0)\n elif o in (\"-i\",\"--interactive_extend\"):\n auto = False\n print (\"Will apply interactive extend.\")\n else:\n print (\"Provide some input!\")\n print(__doc__)\n \n #work with arguments\n atr = ArabicToRomanNumerals()\n output = (' ================\\n Arabic to Roman: \\n ================ \\n')\n for arg in args:\n while True:\n try:\n number=int(arg)\n roman = atr.create_roman(number, auto)\n output = output + (5*' '+(\"%s = %s \\n\") %(number,roman)) \n break\n except ValueError:\n print (\"Argument %s, cannot be transformed to integer. Input must be integers. Replace!\") % (arg)\n print (\"===================\")\n arg = raw_input(\"Type (h)elp, (s)kip or (e)xit for the obvious, or enter an integer:...\") \n while arg in (\"h\",\"help\"):\n print(__doc__)\n arg = raw_input(\"Type (h)elp, (s)kip or (e)xit for the obivous, or enter an integer:...\")\n if arg in (\"s\", \"skip\"): break\n if arg in (\"e\",\"exit\"): sys.exit(0)\n \n print(output) \n print(\" ----------------\\n Dictionary info:\\n ----------------\")\n print(atr.info)\n \nif __name__==\"__main__\":\n sys.exit(main())\n","repo_name":"MenelaosT/ArabicToRomanNumerals","sub_path":"ArabicToRomanNumerals_main.py","file_name":"ArabicToRomanNumerals_main.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35065887132","text":"from random import random\nimport sys\nimport threading\nimport time\n\ndef ox():\n print(max([random() for x in range(20000000)]))\n\ndef go():\n r = threading.Thread(target=ox)\n r.start()\n ox()\n\n\nprint(\"Python version:\")\nprint(sys.version)\n\nprint(\"Single thread - sequential execution:\")\nstart_time_single_thread = time.time()\nox()\nox()\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time_single_thread))\n\n\nprint(\"Two threads - 'parallel' execution:\")\nstart_time_multi_thread = time.time()\ngo()\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time_multi_thread))\n","repo_name":"anderskm/python-multithreading","sub_path":"main_multithreading.py","file_name":"main_multithreading.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36117994554","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom model_back import TCN\nimport pandas as pd\n\nfrom pathlib import Path\nimport yaml\nimport re\nfrom torchsummary import summary\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\n\nconfigPath = 'config.yml'\nwith open(configPath) as f:\n conf = yaml.load(f, Loader=yaml.FullLoader)\n\nCELLID = conf['columns'][0]\nTIMESTAMP = conf['columns'][1]\nTRAFFIC = conf['columns'][2]\n\ndevice = None\nif torch.cuda.is_available():\n device = torch.device(\"cuda\") # 使用 GPU\nelse:\n device = torch.device(\"cpu\") # 如果没有 GPU,使用 CPU\n\n# 定义 scaler\nscaler = StandardScaler()\n\n# 定义数据预处理函数,将时间序列数据转换为窗口特征\ndef getDataset(window_size):\n\n df = pd.read_csv(conf['the_three_area_data_fullpath'])\n # 准备时间序列数据\n 
data = df[TRAFFIC].values\n data = data.reshape(-1, 3)\n data = scaler.fit_transform(data)\n\n\n trainX = []\n trainY = []\n testX = []\n testY = []\n\n\n for i in range(len(data) - window_size - 2*conf['outputSize'] + 1):\n trainX.append(data[i: i+window_size])\n trainY.append(data[i+window_size: i+window_size+conf['outputSize']])\n\n testX.append(data[len(data) - window_size - conf['outputSize']: len(data) - conf['outputSize']])\n testY.append(data[len(data) - conf['outputSize']: len(data)+1])\n\n trainX = np.array(trainX)\n trainY = np.array(trainY)\n testX = np.array(testX)\n testY = np.array(testY)\n\n # 将数据转换为PyTorch张量\n trainX = torch.Tensor(trainX).transpose(1,2).to(device)\n trainY = torch.Tensor(trainY).transpose(1,2).to(device)\n testX = torch.Tensor(testX).transpose(1,2).to(device)\n testY = torch.Tensor(testY).transpose(1,2).to(device)\n\n # 将数据划分为训练集和测试集\n train_dataset = torch.utils.data.TensorDataset(trainX, trainY)\n test_dataset = torch.utils.data.TensorDataset(testX, testY)\n\n return train_dataset, test_dataset\n\ndef train(windowSize):\n train_dataset, test_dataset = getDataset(windowSize)\n batch_size = conf['batchSize']\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\n # 创建TCN模型和优化器\n input_size = train_dataset[0][0].shape[1]\n output_size = train_dataset[0][1].shape[1]\n num_channels = conf['num_channels']\n output_channels = train_dataset[0][1].shape[0]\n kernel_size = conf['kernel_size']\n dropout = 0.2\n model = TCN(input_size, output_size, num_channels, kernel_size, dropout, output_channels).to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=conf['lr'])\n summary(model,\n input_size=(train_dataset[0][0].shape[0], train_dataset[0][0].shape[1]),\n device='cuda' if torch.cuda.is_available() else 'cpu'\n )\n\n # 训练TCN模型\n for epoch in range(conf['num_epochs']):\n for i, (inputs, targets) in enumerate(train_loader):\n # 将数据传递到模型中进行训练\n outputs = model(inputs)\n\n loss = nn.functional.mse_loss(outputs, targets)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # 输出训练信息\n # if (i+1) % 100 == 0:\n print(f'Epoch [{epoch+1}/{conf[\"num_epochs\"]}], Loss: {loss.item():.4f}')\n\n torch.save(model, conf['model_fullpath'])\n\ndef getTheThreeData():\n\n files = list(Path(conf['minutely_data_path']).iterdir())\n files.sort()\n\n total = pd.DataFrame()\n for f in files:\n df = pd.read_csv(f)\n total = pd.concat(\n [total,\n df[df[CELLID].isin(conf['cellIdList'])]]\n )\n if re.search(conf['test_end_date'], str(f)):\n break\n\n\n total = total.sort_values(by=[TIMESTAMP, CELLID], ascending=True)\\\n .reindex(columns=[TIMESTAMP, CELLID, TRAFFIC])\\\n .reset_index(drop=True)\n total.to_csv(conf['the_three_area_data_fullpath'], index=False)\n\ndef predict(windowSize):\n model = torch.load(conf['model_fullpath']).to(device)\n train_dataset, test_dataset = getDataset(windowSize)\n batch_size = conf['batchSize']\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\n\n # 在测试集上评估TCN模型\n model.eval()\n with torch.no_grad():\n for inputs, targets in test_loader: # batch, channel, traffic\n outputs = model(inputs)\n loss = nn.functional.mse_loss(outputs, targets)\n\n rmse = np.sqrt(loss.item())\n print(f'Test RMSE: {rmse:.4f}')\n\n outputs = outputs.to('cpu')\n targets = targets.to('cpu')\n outputs = outputs.T.squeeze()\n targets = targets.T.squeeze()\n\n outputs = 
scaler.inverse_transform(outputs)\n    targets = scaler.inverse_transform(targets)\n\n    idlist=sorted(conf['cellIdList'])\n    fig, axs = plt.subplots(3, figsize=(12, 6))\n    for i in range(len(idlist)):\n        y = targets[:, i].squeeze()\n        y_pred = outputs[:, i].squeeze()\n\n        axs[i].plot(y, label=f'Truth %s'%(idlist[i]))\n        axs[i].plot(y_pred, label=f'Prediction %s'%(idlist[i]))\n        axs[i].set_title(idlist[i])\n    # plt.legend()\n    fig.suptitle(f'Prediction and Truth Value (input_len = %s, loss={rmse:.4f})'\n                 %(windowSize)\n                 )\n    fig.savefig(conf['pred_result_fullpath']%(windowSize))\n    fig.show()\n\n\n\nif __name__ == '__main__':\n\n    # getTheThreeData()\n    for windowSize in conf['windowSize']:\n        train(windowSize)\n        predict(windowSize)\n\n","repo_name":"mengdaxing/milano_network_trafficflow","sub_path":"forcaster.py","file_name":"forcaster.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"5203244831","text":"import math\n\nimport numpy as np \n\nimport sympy as sym \n\nimport numpy.linalg as npl \n\nimport matplotlib.pyplot as plt\n\nfrom scipy.optimize import fsolve\n\nfrom sympy.utilities import lambdify \n\n# def funzione \n\nx = sym.symbols('x')\n\nfx = sym.exp(x) - 4 * x**2 \n\nf = lambdify(x, fx, np)\n\na = -1 \n\nb = 5\n \n# a) \n\nxx = np.linspace(a, b, 100)\n\nplt.plot(xx, f(xx))\n\nplt.axhline(y=0)\n\nplt.axvline(x=0)\n\nplt.show()\n\n# il grafico sembra avere 3 radici reali, di cui 1 negativa e 2 positive \n\n# le 3 radice sono negli intervalli [-1, 0], [0, 1] e [4, 5] con fsolve trovo le intersezioni con asse X \n# uso come x0 valori che rientrano dentro quell intervallo \n\nx0 = -.8\nalfa1 = fsolve(f, x0)\nx0 = 0.5\nalfa2 = fsolve(f, x0) \nx0 = 4.3\nalfa3 = fsolve(f, x0) \n\nprint(alfa1, alfa2, alfa3)\n\n# b) \n\ngx = 0.5 * sym.exp(x/2)\n\ng = lambdify(x, gx, np)\n\nplt.plot(xx, g(xx))\n\nplt.axhline(y=0)\n\nplt.axvline(x=0)\n\nplt.show()\n\n# dal grafico non credo che gx possa essere usata per determinare tutte le radici di fx \n\n# c) \n\ndef iterazione(gname,x0,tolx,nmax):\n    xk=[]\n    xk.append(x0)\n    x1=gname(x0)\n    d=x1-x0\n    xk.append(x1)\n    it=1\n    while it<nmax and abs(d)>=tolx*abs(x1) :\n        x0=x1\n        x1=gname(x0)\n        d=x1-x0\n        it=it+1\n        xk.append(x1)\n    \n    \n    if it==nmax:\n        print('Raggiunto numero max di iterazioni \\n')\n    \n    return x1, it,xk\n\n# d) \n\ntoll = 1e-6 \n\nx, it, xk = iterazione(g, .5, toll, 100)\n\nprint('iterazioni= {:d}, soluzione={:e} \\n\\n'.format(it,x))\n\n# risulta convergente a 0.7 quindi vicino alla soluzione\n\nx, it, xk = iterazione(g, 4.5, toll, 100)\n\nprint('iterazioni= {:d}, soluzione={:e} \\n\\n'.format(it,x))\n\n# non risulta convergente la soluzione non esiste...\n\n# e) mi scoccia......","repo_name":"mega2799/pyLab","sub_path":"Esami/17-Giugno2021Parte2.py","file_name":"17-Giugno2021Parte2.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"it","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"38237830109","text":"import sys\nsys.path.append('./')\n\nfrom tqdm import tqdm\nimport os\nimport json\nfrom dataset.dataset import DatasetSQUAD\nfrom preprocess.squad_processors import SquadProcessor\nfrom transformers import AutoTokenizer\nfrom preprocess.squad_features import squad_convert_examples_to_features\nfrom torch.utils.data import DataLoader, SequentialSampler\nimport torch \nfrom preprocess.squad_objects import *\nimport numpy as np\nfrom utils.squad_eval_utils import compute_predictions_logits, squad_evaluate\nfrom collections 
import OrderedDict\nimport copy\nimport torch\nfrom torch import nn\nfrom models.model import MetaLearner\nimport torch.nn.functional as F\nfrom models.model import QAModel\n\n\ndef get_meta_features(mod, batch, features, examples, n_best_val, tokenizer, batch_count, lower_case, eval):\n    all_results = []\n    with torch.no_grad():\n        inputs = {\n            \"input_ids\": batch[0],\n            \"attention_mask\": batch[1],\n            \"token_type_ids\": batch[2],\n        }\n        feature_indices = batch[3]\n        # print('feature_indices: ',feature_indices)\n        outputs = mod(**inputs)\n\n        for i, feature_index in enumerate(feature_indices):\n            eval_feature = features[feature_index.item()]\n            unique_id = int(eval_feature.unique_id)\n            start_logits = outputs['start_logits'][i].tolist()\n            end_logits = outputs['end_logits'][i].tolist()\n            result = SquadResult(unique_id, start_logits, end_logits)\n            # print(result.start_logits, result.end_logits, result.unique_id)\n            all_results.append(result)\n    \n    pred_file = os.path.join(os.getcwd(),'output',\"ensemble_test_pred.json\")\n    nbest_file = os.path.join(os.getcwd(),'output',\"ensemble_nbest_pred.json\")\n    null_log_file = os.path.join(os.getcwd(),'output',\"ensemble_nbest_nulllog.json\")\n    # print(all_results[0].start_logits, all_results[0].end_logits)\n    # exit(0)\n    ## logic for post_processing as input to ensemble\n    predictions, all_nbest_json, example_subset = compute_predictions_logits(\n        examples,\n        features,\n        all_results,\n        n_best_val, # nbest\n        30, # max answer length\n        lower_case, # do lower case\n        pred_file,\n        nbest_file,\n        null_log_file,\n        False, # verbose logging\n        True, # version with negatives\n        0, tokenizer)\n    \n    \n    # print('\\nBefore: all_nbest_json',all_nbest_json)\n    ##### for creating dummies when nbest less than 8\n    dummy = OrderedDict([('probability', 0.0), \n                         ('start_logit', 0.0), \n                         ('end_logit', 0.0),\n                         ('text', 'empty')])\n    \n    for qas_id in all_nbest_json:\n        total_nbest_vals = len(all_nbest_json[qas_id])\n        for i in range(0,n_best_val-total_nbest_vals):\n            all_nbest_json[qas_id].append(copy.deepcopy(dummy))\n\n\n    nbest = {i:OrderedDict() for i in range(0,n_best_val,1)}\n    for key in all_nbest_json:\n        # ind loops through n_best indexes that are populated\n        for ind, item in enumerate(all_nbest_json[key]):\n            if ind < n_best_val:\n                nbest[ind][key] = item\n\n    # features: 2 models x (b, n_best, 2) -> (b, 2*n_best, 2) -> meta model-> (1 x m*n)\n    # gt for (b, n_best, 2) -> (b, n_best)\n    \n    # for first model get (b, n_best, 2)\n    batch_features = []\n    batch_gt = []\n    for batch_elem_key in all_nbest_json:\n        batch_elem = all_nbest_json[batch_elem_key]\n        n_b_features = []\n        n_b_gt = []\n        for n_b in batch_elem:\n            prob = n_b['probability']\n            # normalize by maximum answer length\n            text_len = float(len(n_b['text'].split())/30.0)\n            start_logit = n_b['start_logit']\n            end_logit = n_b['end_logit']\n            \n            # feature_nb = np.expand_dims(np.array([prob, text_len]),axis=0)\n            feature_nb = np.expand_dims(np.array([prob, start_logit, end_logit, text_len]),axis=0)\n            n_b_features = np.concatenate((n_b_features, feature_nb), axis=0) if isinstance(n_b_features, np.ndarray) else feature_nb\n\n            if not eval:\n                f1 = n_b['f1']\n                n_b_gt.append(f1)\n        \n        n_b_features = np.expand_dims(n_b_features, axis=0)\n        batch_features = np.concatenate((batch_features, n_b_features), axis=0) if isinstance(batch_features, np.ndarray) else n_b_features\n        \n        # for gt\n        if not eval:\n            n_b_gt = np.expand_dims(np.array(n_b_gt), axis=0)\n            batch_gt = np.concatenate((batch_gt, n_b_gt), axis=0) if isinstance(batch_gt, np.ndarray) else n_b_gt\n\n    # b*n_best*2, b*n_best\n    if not eval:\n        return [batch_features, batch_gt]\n    else:\n        return 
[batch_features, all_nbest_json]\n\ndef ensembleTrain():\n # get from config\n n_best_val = 2\n feat_dim = 4\n\n ds = DatasetSQUAD(download_url=None)\n processor = SquadProcessor(train_file=ds.train_file, dev_file=ds.test_file)\n \n examples = processor.get_dev_examples(ds.dataset_root,'train-v2.0.json')\n # reducing to 20 % of train set for training the meta model\n sub_sample = int(len(examples)*0.2)\n examples = examples[:sub_sample]\n\n # get subset of examples\n device = torch.device('cuda')\n\n # bert base trained 1 epochs\n tokenizer_b0 = AutoTokenizer.from_pretrained('roberta-base',use_fast=False)\n mod_b0 = QAModel(base_model_name='roberta-base')\n mod_b0.load_state_dict(torch.load('../../NLP-Project/QAModel_roberta-base.pt'))\n mod_b0.to(device)\n mod_b0.eval()\n # bert base trained 2 epochs #albert-base-v2 ../../NLP-Project/QAModel_albert-base-v2.pt\n tokenizer_b1 = AutoTokenizer.from_pretrained('roberta-base',use_fast=False)\n mod_b1 = QAModel(base_model_name='roberta-base')\n mod_b1.load_state_dict(torch.load('./output/QA_roberta_full\\QAModel_roberta-base.pt'))\n mod_b1.to(device)\n mod_b1.eval()\n\n features_b0, dataset_b0 = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer_b0,\n max_seq_length=384,\n doc_stride=128,\n max_query_length=64,\n is_training=False\n )\n # samplers\n eval_sampler_b0 = SequentialSampler(dataset_b0)\n eval_dataloader_b0 = DataLoader(dataset_b0, sampler=eval_sampler_b0, batch_size=2)\n\n features_b1, dataset_b1 = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer_b1,\n max_seq_length=384,\n doc_stride=128,\n max_query_length=64,\n is_training=False\n )\n # samplers\n eval_sampler_b1 = SequentialSampler(dataset_b1)\n eval_dataloader_b1 = DataLoader(dataset_b1, sampler=eval_sampler_b1, batch_size=2)\n\n \n\n kl_loss = nn.KLDivLoss(reduction=\"batchmean\")\n\n # load the meta learner model\n meta_learner_mod = MetaLearner(n_best=n_best_val, num_base_models=2, feat_dim=feat_dim)\n meta_learner_mod.to(device)\n # print(meta_learner_mod)\n\n count = 0\n total_epochs = 1\n for epoch in range(total_epochs):\n print(f'\\n Epoch {epoch}')\n for batch_b0, batch_b1 in tqdm(zip(eval_dataloader_b0, eval_dataloader_b1), desc='Training Meta Learner'):\n\n count+=1\n # layer-0 models\n mod_b0.eval()\n mod_b1.eval()\n\n batch_b0 = tuple(t.to(device) for t in batch_b0)\n batch_b1 = tuple(t.to(device) for t in batch_b1)\n\n # b*n_best*2, b*n_best\n X_b0, y_b0 = get_meta_features(mod_b0, batch_b0, features_b0, examples, n_best_val, tokenizer_b0, batch_count=count, lower_case = False, eval=False)\n X_b1, y_b1 = get_meta_features(mod_b1, batch_b1, features_b1, examples, n_best_val, tokenizer_b1, batch_count=count, lower_case = True, eval=False)\n\n try:\n inp_meta = torch.from_numpy(np.concatenate((X_b0, X_b1), axis=1)).float()\n target_meta = torch.from_numpy(np.concatenate((y_b0, y_b1), axis=1)).float()\n target_meta = F.softmax(target_meta, dim=1)\n except Exception:\n print('x_b0 shape: ',X_b0.shape, 'x_b1 shape: ', X_b1.shape)\n print('y_b0 shape: ',y_b0.shape, 'y_b1 shape: ', y_b1.shape)\n continue\n\n # move meta train batch to gpu\n inp_meta = inp_meta.to(device)\n target_meta = target_meta.to(device)\n\n # print(inp_meta)\n meta_out = meta_learner_mod(inp_meta)\n # print(out.shape, out)\n # print(meta_out.shape, target_meta.shape)\n loss_out = kl_loss(meta_out, target_meta)\n\n loss_out.backward()\n if count%15 == 0:\n print('\\nTotal loss: ',loss_out.item())\n meta_learner_mod.zero_grad()\n print('\\nSaving 
model')\n save_path = f'../../NLP-Project/ensemble_train1_e{epoch}.pt'\n torch.save(meta_learner_mod.state_dict(),f'../../NLP-Project/ensemble_train1_e{epoch}.pt')\n return save_path\n\ndef ensembleEvaluate(ensemble_path):\n # get from config\n n_best_val = 2\n feat_dim = 4\n\n ds = DatasetSQUAD(download_url=None)\n processor = SquadProcessor(train_file=ds.train_file, dev_file=ds.test_file)\n \n examples = processor.get_dev_examples(ds.dataset_root,'dev-v2.0.json')\n # reducing to 20 % of train set for training the meta model\n sub_sample = int(len(examples)*1.0)\n examples = examples[:sub_sample]\n\n device = torch.device('cuda')\n\n # bert base trained 1 epochs\n tokenizer_b0 = AutoTokenizer.from_pretrained('roberta-base',use_fast=False)\n mod_b0 = QAModel(base_model_name='roberta-base')\n mod_b0.load_state_dict(torch.load('../../NLP-Project/QAModel_roberta-base.pt'))\n mod_b0.to(device)\n mod_b0.eval()\n # bert base trained 2 epochs\n # bert base trained 2 epochs #albert-base-v2 ../../NLP-Project/QAModel_albert-base-v2.pt\n tokenizer_b1 = AutoTokenizer.from_pretrained('roberta-base',use_fast=False)\n mod_b1 = QAModel(base_model_name='roberta-base')\n mod_b1.load_state_dict(torch.load('./output/QA_roberta_full\\QAModel_roberta-base.pt'))\n mod_b1.to(device)\n mod_b1.eval()\n\n features_b0, dataset_b0 = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer_b0,\n max_seq_length=384,\n doc_stride=128,\n max_query_length=64,\n is_training=False\n )\n # samplers\n eval_sampler_b0 = SequentialSampler(dataset_b0)\n eval_dataloader_b0 = DataLoader(dataset_b0, sampler=eval_sampler_b0, batch_size=2)\n\n features_b1, dataset_b1 = squad_convert_examples_to_features(\n examples=examples,\n tokenizer=tokenizer_b1,\n max_seq_length=384,\n doc_stride=128,\n max_query_length=64,\n is_training=False\n )\n # samplers\n eval_sampler_b1 = SequentialSampler(dataset_b1)\n eval_dataloader_b1 = DataLoader(dataset_b1, sampler=eval_sampler_b1, batch_size=2)\n\n # load the meta learner model\n meta_learner_mod = MetaLearner(n_best=n_best_val, num_base_models=2, feat_dim=feat_dim)\n meta_learner_mod.load_state_dict(torch.load(ensemble_path))\n meta_learner_mod.to(device)\n meta_learner_mod.eval()\n\n count = 0\n all_nbest_selected = OrderedDict()\n for batch_b0, batch_b1 in tqdm(zip(eval_dataloader_b0, eval_dataloader_b1), desc='Evaluating Meta Learner'):\n count+=1\n # layer-0 models\n mod_b0.eval()\n mod_b1.eval()\n\n batch_b0 = tuple(t.to(device) for t in batch_b0)\n batch_b1 = tuple(t.to(device) for t in batch_b1)\n\n # print(\"examples length before: \",len(examples))\n\n # b*n_best*2, b*n_best\n X_b0, all_nbest_json_b0 = get_meta_features(mod_b0, batch_b0, features_b0, examples, n_best_val, tokenizer_b0, batch_count=count, lower_case=True, eval=True)\n X_b1, all_nbest_json_b1 = get_meta_features(mod_b1, batch_b1, features_b1, examples, n_best_val, tokenizer_b1, batch_count=count, lower_case=True, eval=True)\n\n \n with torch.no_grad():\n try:\n inp_meta = torch.from_numpy(np.concatenate((X_b0, X_b1), axis=1)).float()\n except Exception:\n print('x_b0 shape: ',X_b0.shape, 'x_b1 shape: ', X_b1.shape)\n continue\n # inp_meta = torch.from_numpy(np.concatenate((X_b0, X_b1), axis=1)).float()\n\n # move meta train batch to gpu\n inp_meta = inp_meta.to(device)\n\n # print(inp_meta.shape)\n meta_out = meta_learner_mod(inp_meta)\n\n sorted, indices = torch.sort(meta_out, descending=True)\n models_order = torch.split(indices, split_size_or_sections=1, dim=0)\n\n for bat_num, bat_indices in 
enumerate(models_order):\n np_bat_indices = bat_indices.cpu().detach().numpy()[0]\n top_nbest = np_bat_indices[:n_best_val]\n top_nbest = [0,1,2,3]\n bat_qas_id = list(all_nbest_json_b0.items())[bat_num][0]\n m0_nbest = list(all_nbest_json_b0.items())[bat_num][1]\n m1_nbest = list(all_nbest_json_b1.items())[bat_num][1]\n combined = m0_nbest + m1_nbest\n combined_nbest = [combined[int(ind)] for ind in top_nbest]\n all_nbest_selected[bat_qas_id] = combined_nbest\n \n # create an all n_best json from all base-0 models (by sorting on argmax)\n\n with open(os.path.join(os.getcwd(),'output',f\"ensemble_selected_nbest.json\"), \"w\") as writer:\n writer.write(json.dumps(all_nbest_selected, indent=4) + \"\\n\")\n\n # taking top-1 prediction for prediction\n \n\n pred_selected = OrderedDict()\n for qas_id in all_nbest_selected:\n pred_selected[qas_id] = all_nbest_selected[qas_id][0]['text']\n\n # save pred_selected as best predictions of the ensemble\n with open(os.path.join(os.getcwd(),'output',f\"ensemble_best_preds.json\"), \"w\") as writer:\n writer.write(json.dumps(pred_selected, indent=4) + \"\\n\")\n \n # evaluate and get results\n results = squad_evaluate(examples, pred_selected)\n print(\"\\nResults: \",results)\n \n\ndef main():\n # training the ensemble\n # save_path = ensembleTrain()\n\n save_path = '../../NLP-Project/ensemble_train1_e0.pt'\n # evaluate the ensemble\n ensembleEvaluate(ensemble_path = save_path)\n \n\nif __name__ == '__main__':\n main()","repo_name":"edwinthomas444/natural-language-understanding","sub_path":"driver/driver_ensemble.py","file_name":"driver_ensemble.py","file_ext":"py","file_size_in_byte":17584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31740654282","text":"import dgl\nimport numpy as np\nimport torch\nfrom scipy.sparse import csc_matrix\n\n\ndef build_inter_graph_from_links(dataset, saved_relation2id=None):\n files = {\n 'train': {\n 'pos': f'data/{dataset}/train_pos.txt',\n 'neg': f'data/{dataset}/train_neg.txt'\n },\n 'valid': {\n 'pos': f'data/{dataset}/valid_pos.txt',\n 'neg': f'data/{dataset}/train_neg.txt'\n }\n }\n if dataset == 'default_drugbank':\n # biodrug_list = list(pd.read_csv(f'data/{dataset}/drug_seqs.csv', header=None).iloc[:, 0])\n dt_types = {'targets', 'enzymes', 'carriers', 'transporters'}\n tt_types = {}\n else:\n dt_types = {'dt'}\n tt_types = {}\n\n drug2id, target2id = {}, {}\n relation2id = {} if not saved_relation2id else None\n drug_cnt, target_cnt = 0, 0\n rel = 0\n triplets = {}\n\n for file_type, file_paths in files.items():\n triplets[file_type] = {}\n for y, path in file_paths.items():\n data = []\n with open(path) as f:\n file_data = [line.split(',') for line in f.read().split('\\n')[:-1]]\n for [u, r, v] in file_data:\n if r == 'dt':\n u_is_d, v_is_d = True, False\n else:\n u_is_d, v_is_d = u.startswith('DB'), v.startswith('DB')\n if u_is_d and u not in drug2id:\n drug2id[u] = drug_cnt\n drug_cnt += 1\n if not u_is_d and u not in target2id:\n target2id[u] = target_cnt\n target_cnt += 1\n if v_is_d and v not in drug2id:\n drug2id[v] = drug_cnt\n drug_cnt += 1\n if not v_is_d and v not in target2id:\n target2id[v] = target_cnt\n target_cnt += 1\n if not saved_relation2id and r not in relation2id:\n relation2id[r] = rel\n rel += 1\n # Save the triplets corresponding to only the known relations\n if r in relation2id:\n data.append([drug2id[u] if u_is_d else target2id[u],\n drug2id[v] if v_is_d else target2id[v],\n relation2id[r]])\n triplets[file_type][y] = 
np.array(data, dtype=np.uint16)\n\n id2drug = {v: k for k, v in drug2id.items()}\n id2target = {v: k for k, v in target2id.items()}\n id2relation = {v: k for k, v in relation2id.items()}\n\n # Construct the list of adjacency matrix each corresponding to each relation.\n # Note that this is constructed only from the train data.\n adj_dict = {}\n for i in range(len(relation2id)):\n idx = np.argwhere(triplets['train']['pos'][:, 2] == i)\n rel = id2relation[i]\n rel_tuple = (\n \"target\" if (rel in tt_types or rel in dt_types) else \"drug\",\n rel,\n \"drug\" if rel not in tt_types else \"target\"\n ) if rel != 'dt' else ('drug', 'dt', 'target')\n shape = (\n target_cnt if (rel in tt_types or rel in dt_types) else drug_cnt,\n drug_cnt if rel not in tt_types else target_cnt\n )\n if rel == 'dt':\n shape = (drug_cnt, target_cnt)\n print(rel, shape)\n adj_dict[rel_tuple] = csc_matrix(\n (\n np.ones(len(idx), dtype=np.uint8),\n (\n triplets['train']['pos'][:, 0][idx].squeeze(1),\n triplets['train']['pos'][:, 1][idx].squeeze(1)\n )\n ), shape=shape)\n print(drug_cnt, target_cnt)\n return adj_dict, triplets, \\\n drug2id, target2id, relation2id, \\\n id2drug, id2target, id2relation\n\n\ndef ssp_multigraph_to_dgl(adjs, relation2id):\n adjs = {k: v.tocoo() for k, v in adjs.items()}\n # g_dgl = dgl.heterograph({\n # k: (torch.from_numpy(v.row), torch.from_numpy(v.col)) for k, v in adjs.items()\n # })\n # return dgl.to_bidirected(g_dgl)\n graph_dict = {}\n for k, v in adjs.items():\n print(k)\n if k[0] != k[2]:\n graph_dict[k] = (torch.from_numpy(v.row), torch.from_numpy(v.col))\n graph_dict[(k[2], f\"~{k[1]}\", k[0])] = (torch.from_numpy(v.col), torch.from_numpy(v.row))\n relation2id[f\"~{k[1]}\"] = len(relation2id)\n else:\n # graph_dict[k] = (torch.from_numpy(np.hstack((v.row, v.col))),\n # torch.from_numpy(np.hstack((v.col, v.row))))\n graph_dict[k] = (torch.from_numpy(v.row),\n torch.from_numpy(v.col))\n # g_dgl = dgl.heterograph({\n # k: (torch.from_numpy(np.hstack((v.row, v.col))),\n # torch.from_numpy(np.hstack((v.col, v.row))))\n # for k, v in adjs.items()\n # })\n g_dgl = dgl.heterograph(graph_dict)\n return g_dgl, relation2id\n","repo_name":"ZillaRU/ChemBioTIP_old","sub_path":"utils/hete_data_utils.py","file_name":"hete_data_utils.py","file_ext":"py","file_size_in_byte":4920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21236414213","text":"\"\"\"Forced incompressible hydrodynamics on the Mobius strip.\"\"\"\n\nimport numpy as np\nimport dedalus.public as d3\nimport logging\nlogger = logging.getLogger(__name__)\n\n\n# Parameters\nN = 96 # points per unit length\nLx = 12 # strip length\nLy = 1 # strip width\nk_drag = 0.25 # drag scale\nk_diss = 128 # dissipation scale\nk_force = 32 # forcing scale\ndk_force = 2 # forcing width\nfω_std = k_force # forcing amplitude\ndealias = 3 / 2\nstop_sim_time = 10\nsnapshot_sim_dt = 0.05\ntimestepper = d3.RK222\nsafety = 0.25\nmax_dt = 0.01\n\n# Derived parameters\nepsilon = (fω_std / k_force)**2 # energy injection rate\neta = epsilon * k_force**2 # enstrophy injection rate\na = (epsilon * k_drag**2)**(1/3) # drag coefficient\nnu = eta**(1/3) / k_diss**2 # viscosity\n\n# Bases\ncoords = d3.CartesianCoordinates('x', 'y')\ndist = d3.Distributor(coords, dtype=np.float64)\nxbasis = d3.RealFourier(coords['x'], 2*Lx*N, bounds=(0, 2*Lx), dealias=dealias)\nybasis = d3.Chebyshev(coords['y'], Ly*N, bounds=(-Ly/2, Ly/2), dealias=dealias)\n\n# Fields\nψ = dist.Field(name='ψ', bases=(xbasis, ybasis))\nω = 
dist.Field(name='ω', bases=(xbasis, ybasis))\nfω = dist.Field(name='fω', bases=(xbasis, ybasis))\ntau_1 = dist.Field(name='tau_1', bases=xbasis)\ntau_2 = dist.Field(name='tau_2', bases=xbasis)\ntau_3 = dist.Field(name='tau_3', bases=xbasis)\ntau_4 = dist.Field(name='tau_4', bases=xbasis)\n\n# Substitutions\nu = - d3.skew(d3.grad(ψ))\nlift_basis = ybasis.clone_with(a=-1/2+2, b=-1/2+2) # Second derivative basis\nlift = lambda A, n: d3.Lift(A, lift_basis, n)\ntau_ψ = lift(tau_1, -1) + lift(tau_2, -2)\ntau_ω = lift(tau_3, -1) + lift(tau_4, -2)\n\n# Forcing\nnx, ny = dist.coeff_layout.local_group_arrays(fω.domain, scales=1)\nkx = 2 * np.pi * nx / (2 * Lx)\nky_eff = np.pi * ny / Ly\nk_eff = (kx**2 + ky_eff**2)**0.5\nfilter = np.exp(-(k_eff - k_force)**2/2/dk_force**2)\ndef fill_forcing(timestep):\n fω.fill_random(layout='g')\n fω['c'] *= filter\n fω.normalize()\n fω.data *= fω_std / np.sqrt(timestep)\n\n# Filtering\nnx_ny_even = (nx + ny) % 2 == 0\nnx_ny_odd = (nx + ny) % 2 == 1\ndef clean_scalar(*fields):\n for field in fields:\n field['c'][nx_ny_odd] = 0\ndef clean_pseudo(*fields):\n for field in fields:\n field['c'][nx_ny_even] = 0\n\n# Problem\nproblem = d3.IVP([ψ, ω, tau_1, tau_2, tau_3, tau_4], namespace=locals())\nproblem.add_equation(\"ω + lap(ψ) + tau_ψ = 0\")\nproblem.add_equation(\"dt(ω) - nu*lap(ω) + a*ω + tau_ω = - dot(u, grad(ω)) + fω\")\nproblem.add_equation(\"ψ(y='left') = 0\")\nproblem.add_equation(\"ψ(y='right') = 0\")\nproblem.add_equation(\"ω(y='left') = 0\")\nproblem.add_equation(\"ω(y='right') = 0\")\n\n# Solver\nsolver = problem.build_solver(timestepper)\nsolver.stop_sim_time = stop_sim_time\n\n# Analysis\nsnapshots = solver.evaluator.add_file_handler('snapshots', sim_dt=snapshot_sim_dt, max_writes=100)\nsnapshots.add_task(ω, name=\"vorticity\", scales=2)\nscalars = solver.evaluator.add_file_handler('scalars', iter=10)\nE = d3.Average(d3.dot(u, u)) / 2\nZ = d3.Average(ω**2) / 2\nscalars.add_task(E, name=\"energy\")\nscalars.add_task(Z, name=\"enstrophy\")\n\n# CFL\nCFL = d3.CFL(solver, initial_dt=max_dt, cadence=10, safety=safety,\n max_change=1.5, min_change=0.5, max_dt=max_dt, threshold=0.05)\nCFL.add_velocity(u)\n\n# Flow properties\nflow = d3.GlobalFlowProperty(solver, cadence=10)\nflow.add_property(E, name='E')\nflow.add_property(Z, name='Z')\n\n# Main loop\ntry:\n logger.info('Starting loop')\n while solver.proceed:\n timestep = CFL.compute_timestep()\n fill_forcing(timestep)\n clean_pseudo(ψ, ω, fω)\n solver.step(timestep)\n if (solver.iteration-1) % 10 == 0:\n logger.info('Iteration: %i, Time: %.2e, dt: %.2e' %(solver.iteration, solver.sim_time, timestep))\n logger.info('E: %.2e, Z: %.2e' %(flow.max('E'), flow.max('Z')))\nexcept:\n logger.error('Exception raised, triggering end of main loop.')\n raise\nfinally:\n solver.log_stats()\n","repo_name":"kburns/nonorientable_surfaces","sub_path":"mobius_strip/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"70564150329","text":"\"\"\"Script to encourage documentation addition of changes incurred\n\nMethodology:\n\n Getting latest commit from merging branch and feature branch\n Getting all differences in files under lib directory\n Checking if documentation status is missing, not_updated or updated\n\n This script is to help better document the functionality\n\nNOTE:\n\n This script complies with our python3 coding and documentation standards\n and should be used as a reference 
guide. It complies with:\n\n 1) Pylint\n 2) Pydocstyle\n 3) Pycodestyle\n 4) Flake8\n\n Run these commands from the CLI to ensure the code is compliant for all\n your pull requests.\n\"\"\"\n\n# Standard imports\nimport argparse\nimport os\nimport sys\n\nimport git\nimport enum\n\n\nclass DocumentationStatus(enum.Enum):\n unknown = 0\n updated = 1\n not_updated = 2\n missing = 3\n\n\ndef _arg_parser_resolver():\n \"\"\"Resolve the CLI arguments provided by the user.\n\n Args:\n None\n\n Returns:\n result: Parsed argument object\n\n \"\"\"\n # Initialize parser and add the CLI options we should expect\n parser = argparse.ArgumentParser()\n # getting merge branch name\n parser.add_argument(\n '--merge_branch_name', type=str, required=True,\n help='Name of the merging to branch')\n # Github repository\n parser.add_argument(\n '--repository', type=str, required=True,\n help='Name of the GitHub repository in the format \"/\"')\n # getting root directory of repository\n parser.add_argument(\n '--directory', type=str, required=False,\n default=os.getcwd(),\n help='The parent directory of files to analyze.')\n # Return parser\n result = parser.parse_args()\n return result\n\n\ndef check_for_documentation(diff_item):\n \"\"\"Determine the documentation status\n\n Args:\n diff_item: Diff to check\n\n Returns:\n doc_status: DocumentationStatus\n\n \"\"\"\n # Extracting the changes made\n file_diffs = diff_item.diff.decode(\"utf-8\")\n # Setting documentation status flag to unknown\n doc_status = DocumentationStatus.unknown\n # Splitting the changes for line by line iteration\n lines = file_diffs.split('\\n')\n # Setting updated doc line count\n edited_doc_line_count = 0\n # Looping over differences\n for line in lines:\n # checking if the line was updated and contains documentation\n if line.strip() and line.startswith('+') and line.__contains__('///'):\n # updating the flag by one\n edited_doc_line_count += 1\n # Checking if no doc was changed\n if edited_doc_line_count == 0:\n # Setting the flag to not_updated\n doc_status = DocumentationStatus.not_updated\n\n # Reading complete file to check if not documentation exist\n # Reading the complete file\n file = diff_item.b_blob.data_stream.read().decode('utf-8')\n # Splitting the line to check if documentation is present or not\n lines = file.split('\\n')\n # Setting the documentation line count flag\n doc_lines = 0\n # Looping over the file lines\n for line in lines:\n # Checking if the line contains any documentation or not\n if line.strip() and line.__contains__('///'):\n # updating the flag by 1\n doc_lines += 1\n # Checking if the documentation lines were present or not\n if doc_lines == 0:\n # Updating the flag to missing\n doc_status = DocumentationStatus.missing\n # Checking if the doc was updated\n elif edited_doc_line_count > 0:\n # Setting the flag to documentation updated\n doc_status = DocumentationStatus.updated\n # return the file documentation status\n return doc_status\n\n\ndef main():\n \"\"\"Analyze dart files.\n\n This function finds, and prints the files that exceed the CLI\n defined defaults.\n\n Returns:\n None\n\n \"\"\"\n # Parsing the command line arguments\n args = _arg_parser_resolver()\n # Getting the git repo\n repo_feature = git.Repo(args.directory)\n (_, repository_directory) = args.repository.split(\"/\")\n repo_merge = git.Repo.clone_from(\"https://github.com/{}.git\".format(args.repository), \"{}/{}\".format(args.directory, repository_directory))\n\n # Do nothing if the branch has a \"/\" in it\n if '/' in 
args.merge_branch_name:\n return\n \n # Getting latest commit on latest branch\n commit_dev = repo_merge.commit(args.merge_branch_name)\n # Getting latest commit on feature branch\n feature_commit = repo_feature.commit()\n # Loading differences between the two commits\n diff_index = commit_dev.diff(feature_commit, create_patch=True)\n # Setting a flag to keep record of files and their documentation\n lookup = {}\n # Lopping over differences in modified files\n for diff_item in diff_index.iter_change_type('M'):\n # Getting file path of difference\n file_path = diff_item.b_path\n # Checking if a file under codebase(lib) directory was modified\n if file_path.startswith('lib'):\n # Getting file documentation status\n lookup[file_path] = check_for_documentation(diff_item)\n # Lopping over differences in added files\n for diff_item in diff_index.iter_change_type('A'):\n # Getting file path of difference\n file_path = diff_item.b_path\n # Checking if a file under codebase(lib) directory was added\n if file_path.startswith('lib'):\n # Getting file documentation status\n lookup[file_path] = check_for_documentation(diff_item)\n # Filtering files whose documentation status != updated\n filtered_lookup = {k: v for (k, v) in lookup.items() if DocumentationStatus.updated != v}\n # Checking if documentation was updated for all changed files\n if len(filtered_lookup) == 0:\n print('''🚀 {} Hurrah! documentation was updated in all modified/added files'''.format('\\033[92m'))\n sys.exit(0)\n else:\n print(\n '''🔍 {}DOCUMENTATION NOT UPDATED: Files with missing or not updated DartDoc documentation found'''.format(\n '\\033[91m'))\n for failing_file in filtered_lookup:\n print('''>>> File name: {}\\n\\t{}\\n'''.format(failing_file, filtered_lookup[failing_file]))\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"PalisadoesFoundation/talawa","sub_path":".github/workflows/documentationcheck.py","file_name":"documentationcheck.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","stars":246,"dataset":"github-code","pt":"77"} +{"seq_id":"31041972512","text":"import os\r\nimport time\r\nfrom pydhcplib.dhcp_packet import *\r\nfrom pydhcplib.dhcp_network import *\r\n\r\n# DNS Configuration\r\ndef set_dns(server):\r\n # Determine the operating system\r\n if os.name == 'nt':\r\n # Windows\r\n command = f'netsh interface ip set dns name=\"Wi-Fi\" static {server} primary'\r\n else:\r\n # Linux/macOS\r\n command = f'networksetup -setdnsservers Wi-Fi {server}'\r\n\r\n # Execute the command\r\n os.system(command)\r\n print(f'DNS server set to {server}')\r\n\r\n\r\n# DHCP Configuration\r\nclass MyDhcpApp(DhcpNetworkApp):\r\n def __init__(self):\r\n DhcpNetworkApp.__init__(self, \"dhcp.conf\")\r\n\r\n def handleDhcpDiscover(self, packet):\r\n print(\"DHCP Discover received\")\r\n print(f\"Client MAC: {packet.GetHardwareAddress()}\")\r\n username = packet.GetOptionValue('user_class') # Extract username from user_class option\r\n\r\n # Check if the username matches the desired name\r\n if username == 'specific_name':\r\n print(f\"User with specific name '{username}' discovered!\")\r\n print(\"Sending DHCP Offer\")\r\n\r\n dhcp_offer = DhcpPacketOffer()\r\n dhcp_offer.SetOption('server_id', '192.168.1.1')\r\n dhcp_offer.SetOption('lease_time', 86400)\r\n dhcp_offer.SetOption('subnet_mask', '255.255.255.0')\r\n dhcp_offer.SetOption('router', '192.168.1.1')\r\n dhcp_offer.SetOption('domain_name_server', '8.8.8.8')\r\n\r\n self.SendDhcpPacket(dhcp_offer)\r\n\r\n def 
handleDhcpRequest(self, packet):\r\n print(\"DHCP Request received\")\r\n print(f\"Client MAC: {packet.GetHardwareAddress()}\")\r\n print(f\"Offered IP: {packet.GetOptionValue('requested_ip')}\")\r\n print(\"Sending DHCP Ack\")\r\n\r\n dhcp_ack = DhcpPacketAck()\r\n dhcp_ack.SetOption('server_id', '192.168.1.1')\r\n dhcp_ack.SetOption('lease_time', 86400)\r\n dhcp_ack.SetOption('subnet_mask', '255.255.255.0')\r\n dhcp_ack.SetOption('router', '192.168.1.1')\r\n dhcp_ack.SetOption('domain_name_server', '8.8.8.8')\r\n\r\n self.SendDhcpPacket(dhcp_ack)\r\n\r\n\r\ndef start_dhcp_server():\r\n print(\"Starting DHCP server...\")\r\n my_dhcp_app = MyDhcpApp()\r\n my_dhcp_app.Start()\r\n try:\r\n while True:\r\n time.sleep(1)\r\n except KeyboardInterrupt:\r\n print(\"Stopping DHCP server...\")\r\n my_dhcp_app.Stop()\r\n\r\n\r\n# Main Program\r\ndef main():\r\n # Replace 'x.x.x.x' with the desired DNS server IP address\r\n dns_server = 'x.x.x.x'\r\n set_dns(dns_server)\r\n\r\n start_dhcp_server()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"harpazofek/AutoDNS-DHCP","sub_path":"dhcp_server.py","file_name":"dhcp_server.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20970502223","text":"import pandas as pd\nimport numpy as np\n\nimport os\nimport sys\nimport logging\nfrom datetime import datetime\nimport time\n\nfrom abc import ABCMeta, abstractmethod\n\nTS_TYPE = {\n \"daily\",\n \"weekly\",\n \"monthly\"\n} # Types for time series\n\nINTRA_DAY_TYPE = {\n \"1min\",\n \"5min\",\n \"15min\",\n \"30min\",\n \"60min\"\n} # Types of intervals for intra day\n\nINTERVAL_TYPE = TS_TYPE.union(INTRA_DAY_TYPE)\n\nTI_TYPE = {\n \"SMA\", # simple moving average\n \"EMA\", # exponential moving average\n \"VWAP\", # volume weighted average price. 
INTRA DAY ONLY!!\n \"MACD\", # moving average convergence divergence\n \"STOCH\", # stochastic oscillator\n \"RSI\", # relative strength index\n \"ADX\", # average directional movement index\n \"CCI\", # commodity channel index\n \"AROON\", # aroon\n \"BBANDS\", # Bollinger bands\n \"AD\", # Chaikin A/D line\n \"OBV\", # on-balance volume\n} # Types of technical indicators\n\nclass DataAPIBase(metaclass=ABCMeta): # Python 3 metaclass syntax; __metaclass__ is ignored in Python 3\n \n def __init__(self):\n self.log = logging.getLogger(\"Data API\")\n self._interval = None\n self._symbol = None\n # Initialize all data to empty\n self.ts_data = pd.DataFrame()\n self.ti_data = pd.DataFrame()\n\n @abstractmethod\n def update_symbol_interval(self, symbol=None, interval=None, force_update=False):\n pass\n\n @abstractmethod\n def fetch_ts_data(self):\n pass\n\n @abstractmethod\n def fetch_ti_data(self):\n pass\n\n def get_combined_data(self):\n if self._interval not in INTERVAL_TYPE:\n self.log.error(\"Interval type %s is not supported!\", self._interval)\n raise Exception(\"Interval type not supported!\")\n assert self.ts_data.index.equals(self.ti_data.index), \\\n \"The time series and technical indicator data are not consistent!\"\n data = pd.concat([self.ts_data, self.ti_data], axis=1)\n return data\n","repo_name":"shuailiz/option_analyzer","sub_path":"data_utils/data_api_base.py","file_name":"data_api_base.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"4928528579","text":"import cv2 as cv\n\n\ndef main():\n # Read the image\n src = cv.imread(\"../imgs/opencv.png\")\n # Convert to the HSV color space\n hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)\n # Split the channels\n h, s, v = cv.split(hsv)\n cv.namedWindow(\"v\", cv.WINDOW_NORMAL)\n cv.imshow(\"v\", v)\n # Decrease the brightness (cv.subtract saturates at 0 instead of wrapping around on uint8)\n v1 = cv.subtract(v, 30)\n cv.namedWindow(\"v1\", cv.WINDOW_NORMAL)\n cv.imshow(\"v1\", v1)\n\n cv.waitKey(0)\n cv.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"xiawei20161308104/xv_opencv_tutorials","sub_path":"xv_opencv_tutorials/ImageProcessinginOpenCV/split_colorspaces.py","file_name":"split_colorspaces.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
{"seq_id":"26577570616","text":"import json\r\nimport pathlib\r\n\r\nclass ResponseContainer:\r\n\t\"\"\"\r\n\troute Response\r\n\t:author: Jajoya\r\n\t:date: 2022-11-23\r\n\t\"\"\"\r\n\tdef __init__(self, className):\r\n\t\tself.name = className\r\n\t\tself.status = None\r\n\t\tself.message = None\r\n\t\tself.is_ok = None\r\n\t\tself.object = None\r\n\t\tself.object_ = None\r\n\t\tself.data_list = []\r\n\t\t\r\n\t\twith open(\"{0}/object.json\".format(pathlib.Path(__file__).parent.resolve()), 'r') as j:\r\n\t\t\tself.structure = json.loads(j.read())\r\n\t\tself.restarObject()\r\n\t\t\"\"\"\r\n\t\t\"\"\"\r\n\r\n\tdef clearObject(self):\r\n\t\tself.object = None\r\n\r\n\tdef restarObject(self):\r\n\t\tself.object = self.structure\r\n\r\n\tdef alertCallContext(self, endpoint):\r\n\t\tprint(\"\"\"\r\n\t\t\tclass: {}\\n\r\n\t\t\tendpoint: {}\\n\r\n\t\t\"\"\".format(self.name, endpoint))\r\n\r\n\tdef getObject(self):\r\n\t\tself.object[\"name_autor\"] = self.name\t\t\r\n\t\tself.object[\"message\"] = self.message\t\t\r\n\t\tself.object[\"is_ok\"] = self.is_ok\t\t\r\n\t\tself.object[\"status\"] = self.status\t\t\r\n\t\tself.object[\"data_object\"] = self.object_\t\t\r\n\t\tself.object[\"data_list\"] = self.data_list\t\t\r\n\t\treturn 
self.object","repo_name":"Tuzonagamer/tzg_server","sub_path":"app/application/routes/tools/ResponseContainer.py","file_name":"ResponseContainer.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1032391809","text":"import tkinter as tk\nimport numpy as np\n\n\n\nroot = tk.Tk()\n\n#create a canvas window\ncanvas1 = tk.Canvas(root, width = 400, height = 300)\ncanvas1.pack()\n\n#create labels\nsource_label = tk.Label(root,text = \"Source\")\ncanvas1.create_window(100,60,window=source_label)\ntarget_label = tk.Label(root,text = \"Target\")\ncanvas1.create_window(300,60,window=target_label)\n\n#create textbox --> get input\nsource_input = tk.Entry (root) \ncanvas1.create_window(100, 80, window=source_input)\n\ntarget_input = tk.Entry (root) \ncanvas1.create_window(300, 80, window=target_input)\n\n\n\ndef Calculate_Minimum_edit_distance(): \n source = source_input.get()\n target = target_input.get()\n del_cost, ins_cost, sub_cost = costs\n n= len(source)\n m= len(target)\n MED_Matrix = np.zeros((n + 1, m + 1), dtype = 'int32')\n for i in range(1, n + 1):\n MED_Matrix[i][0] = MED_Matrix[i - 1][0] + del_cost\n for i in range(1, m + 1):\n MED_Matrix[0][i] = MED_Matrix[0][i - 1] + del_cost \n for i in range(1, n + 1):\n for j in range(1, m + 1):\n if(source[i - 1] == target[j - 1]):\n MED_Matrix[i][j] = min(\n [MED_Matrix[i - 1][j] + del_cost,\n MED_Matrix[i-1][j - 1] + 0,\n MED_Matrix[i][j - 1] + ins_cost])\n else:\n MED_Matrix[i][j] = min(\n [MED_Matrix[i - 1][j] + del_cost,\n MED_Matrix[i - 1][j - 1] + sub_cost,\n MED_Matrix[i][j - 1] + ins_cost])\n # print(np.matrix(MED_Matrix))\n # print(MED_Matrix[n][m])\n # return MED_Matrix[n][m]\n Score_Med = tk.Label(root, text = \"Med: %d\" % MED_Matrix[n][m], font=('helvetica', 20, 'bold'))\n canvas1.create_window(200, 160, window = Score_Med)\n btn_ShowChart = tk.Button(text='Show Chart')\n canvas1.create_window(200, 200, window=btn_ShowChart)\n return MED_Matrix\n\n\n\n# matrix_result = Calculate_Minimum_edit_distance(source.get(), target.get(), cost)\n# print(matrix_result,\"\\n\",matrix_result[-1,-1])\n\n\ncosts = (1, 1, 2)\nbtn_Cal_Med = tk.Button(text='Calculate Minimun Edit Distance', \n command=Calculate_Minimum_edit_distance,\n font=('helvetica', 9, 'bold'))\ncanvas1.create_window(200, 120, window=btn_Cal_Med)\n\n\n\n\nroot.mainloop()","repo_name":"trinhvanminh/NLPs","sub_path":"Minimun edit distance/previous version/v3.py","file_name":"v3.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12000896284","text":"from json.tool import main\nimport random\nfrom text_classification import main as m\n\n\ndef test_main():\n rand1 = random.seed(1)\n rand2 = random.seed(2)\n \n eval1 = m(rand1)\n eval2 = m(rand2)\n \n if(abs(eval1 - eval2) > 0.1):\n print(\"test failed\")\n else:\n print(\"test succeeded\")\n \n \n\nif __name__ == \"__main__\":\n main()","repo_name":"JulianBiesheuvel/REMLA","sub_path":"src/tests/test_simple.py","file_name":"test_simple.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74661113848","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom sqlalchemy.pool import NullPool\n\nimport config\n\nengine = create_engine(\n config.ETH_DATABASE_URI,\n poolclass=NullPool,\n 
connect_args={'connect_timeout': 5},\n pool_pre_ping=True,\n echo=False,\n echo_pool=False,\n)\nsession_factory = sessionmaker(autocommit=False, autoflush=True, bind=engine)\nsession = scoped_session(session_factory)","repo_name":"teamsempo/SempoBlockchain","sub_path":"eth_worker/eth_src/sql_persistence/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"77"}
{"seq_id":"9911707851","text":"import matplotlib.pyplot as plt\n\ndef create_chart(points, filename):\n '''\n points - points after transposition\\n\n filename - name of the file it will be saved as\n '''\n # draw axis lines at 0,0\n plt.axhline(0, color='black')\n plt.axvline(0, color='black')\n\n # draw the points\n plt.plot(points[0], points[1], \"bo\", label=\"różnica wektora różnic\")\n plt.plot(points[0], points[2], \"r.\", label=\"różnica składowych\")\n plt.legend(loc=\"upper right\")\n\n # show\n # plt.show()\n\n # save\n plt.savefig(\"wykresy/\" + filename + \".png\")","repo_name":"TheVivent-Politechnika-Lodzka/numerki_zad2_w4_gauss_seidl","sub_path":"kod/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"27911493151","text":"import manim as M\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\nclass Plot(M.Scene):\r\n    def __init__(self):\r\n        # Path of the files\r\n        signal_path = \"data/signal.csv\"\r\n        features_path = \"data/features.csv\"\r\n\r\n        # Loading all the required data\r\n        self.signal_frame = pd.read_csv(signal_path)\r\n        self.features_frame = pd.read_csv(features_path)\r\n        self.times = self.signal_frame[\"time\"].values\r\n        self.lead1 = self.signal_frame[\"lead1\"].values\r\n        self.lead2 = self.signal_frame[\"lead2\"].values\r\n        self.beat_idx = self.features_frame[\"position\"].values\r\n\r\n        # NOTE: Change the constants below as required\r\n\r\n        # Time in seconds to create one cycle\r\n        self.CYCLE_CREATION_RUNTIME = 1\r\n        # A float between 0 and 1 determining the delay in\r\n        # creation of a cycle and removal of previous cycle\r\n        self.DELAY_BETWEEN_CYCLES = 0.1\r\n\r\n        super().__init__()\r\n\r\n    def setup_axes(self):\r\n        \"\"\"[Add the required axes to the scene]\"\"\"\r\n\r\n        def get_grid(\r\n            axes,\r\n            color=M.WHITE,\r\n            stroke_width=0.2,\r\n        ):\r\n            \"\"\"[Given an axes, it returns the lines of a grid]\r\n\r\n            Returns:\r\n                [VDict]: [Return a VDict containing the grid lines]\r\n            \"\"\"\r\n            vertical_lines = M.VGroup()\r\n            horizontal_lines = M.VGroup()\r\n\r\n            x_start, x_end, x_step = axes.x_range\r\n            y_start, y_end, y_step = axes.y_range\r\n\r\n            for x in np.arange(x_start + x_step, x_end, x_step):\r\n                start_point = axes.coords_to_point(x, y_start)\r\n                end_point = axes.coords_to_point(x, y_end)\r\n                line = M.Line(start_point, end_point).set_stroke(\r\n                    color=color, width=stroke_width\r\n                )\r\n                vertical_lines.add(line)\r\n\r\n            for y in np.arange(y_start + y_step, y_end, y_step):\r\n                start_point = axes.coords_to_point(x_start, y)\r\n                end_point = axes.coords_to_point(x_end, y)\r\n                line = M.Line(start_point, end_point).set_stroke(\r\n                    color=color, width=stroke_width\r\n                )\r\n                horizontal_lines.add(line)\r\n\r\n            mappings = [\r\n                (\"vertical_lines\", vertical_lines),\r\n                (\"horizontal_lines\", horizontal_lines),\r\n            ]\r\n            grid = M.VDict(mappings)\r\n\r\n            return grid\r\n\r\n        # Axes for data of lead1\r\n        self.lead1_axes = M.Axes(\r\n            x_range=[0, 10, 1],\r\n            y_range=[-0.2, 0.5, 
0.1],\r\n x_length=6,\r\n y_length=3,\r\n axis_config={\"include_tip\": False, \"number_scale_value\": 0.3},\r\n x_axis_config={\r\n \"numbers_to_include\": np.arange(0, 10 + 1, 1),\r\n },\r\n y_axis_config={\r\n \"decimal_number_config\": {\"num_decimal_places\": 1},\r\n \"numbers_to_include\": np.arange(-0.2, 0.51, 0.1),\r\n \"numbers_to_exclude\": [],\r\n },\r\n tips=False,\r\n ).to_edge(M.UL)\r\n\r\n # Axes for data of lead2\r\n self.lead2_axes = M.Axes(\r\n x_range=[0, 10, 1],\r\n y_range=[-0.5, 0.2, 0.1],\r\n x_length=6,\r\n y_length=3,\r\n axis_config={\"include_tip\": False, \"number_scale_value\": 0.3},\r\n x_axis_config={\r\n \"numbers_to_include\": np.arange(0, 10 + 1, 1),\r\n },\r\n y_axis_config={\r\n \"decimal_number_config\": {\"num_decimal_places\": 1},\r\n \"numbers_to_include\": np.arange(-0.5, 0.21, 0.1),\r\n \"numbers_to_exclude\": [],\r\n },\r\n tips=False,\r\n ).to_edge(M.DL)\r\n\r\n # Adding the axes to the scene\r\n self.add(self.lead1_axes)\r\n self.add(get_grid(self.lead1_axes))\r\n self.add(self.lead2_axes)\r\n self.add(get_grid(self.lead2_axes))\r\n\r\n def setup_points(self):\r\n \"\"\"[Sets up all the required data points for the scene]\"\"\"\r\n lead1_axes_x_max = self.lead1_axes.x_range[1]\r\n self.lead1_all_points = [\r\n self.lead1_axes.coords_to_point(\r\n self.times[i] % (lead1_axes_x_max), self.lead1[i]\r\n )\r\n for i in range(len(self.times))\r\n ]\r\n\r\n lead2_axes_x_max = self.lead2_axes.x_range[1]\r\n self.lead2_all_points = [\r\n self.lead2_axes.coords_to_point(\r\n self.times[i] % (lead2_axes_x_max), self.lead2[i]\r\n )\r\n for i in range(len(self.times))\r\n ]\r\n\r\n self.beats = [\r\n M.Dot(\r\n self.lead1_axes.coords_to_point(\r\n self.times[i] % (lead2_axes_x_max), 0.5\r\n )\r\n )\r\n if i in self.beat_idx\r\n else None\r\n for i in range(len(self.times))\r\n ]\r\n\r\n # An array containing indexes where the graph should wrap around\r\n # along with the start and end index\r\n self.checkpoints = [0]\r\n num_lines = 0\r\n for i in range(1, len(self.times)):\r\n if self.times[i] % (lead2_axes_x_max) != 0:\r\n num_lines += 1\r\n else:\r\n self.checkpoints.append(i - 1)\r\n self.checkpoints = self.checkpoints + [num_lines]\r\n\r\n def construct(self):\r\n self.setup_axes()\r\n self.setup_points()\r\n\r\n # Keeping storage of the cycle for removal from scene later on\r\n # Used in the inner function (lines_with_beats)\r\n lead1_cycle = None\r\n lead2_cycle = None\r\n beats = None\r\n\r\n def lines_with_beats(start, end, line_color, create=True):\r\n \"\"\"[Returns the animations for the graph and the beats]\r\n\r\n Args:\r\n start ([int]): [index of the checkpoint array to start at]\r\n end ([int]): [index of the checkpoint array to end at]\r\n line_color ([string]): [color of the graph]\r\n create (bool, optional):\r\n [boolean to determine if graph should be created or removed].\r\n Defaults to True.\r\n\r\n Returns:\r\n [list]: [contains the animations to be played out for a cycle]\r\n \"\"\"\r\n animate_func = M.Create if create else M.Uncreate\r\n\r\n if create:\r\n start_idx = self.checkpoints[start] + 1\r\n end_idx = self.checkpoints[end]\r\n\r\n lead1_cycle_points = self.lead1_all_points[start_idx:end_idx]\r\n lead2_cycle_points = self.lead2_all_points[start_idx:end_idx]\r\n\r\n nonlocal lead1_cycle\r\n nonlocal lead2_cycle\r\n nonlocal beats\r\n\r\n lead1_cycle = M.VGroup().set_points_smoothly(\r\n lead1_cycle_points\r\n )\r\n lead2_cycle = M.VGroup().set_points_smoothly(\r\n lead2_cycle_points\r\n )\r\n beats = M.VGroup()\r\n\r\n for 
beat in self.beats[start_idx:end_idx]:\r\n if beat:\r\n beats.add(beat)\r\n else:\r\n lead1_cycle.reverse_points()\r\n lead2_cycle.reverse_points()\r\n\r\n animations = [\r\n animate_func(lead1_cycle.set_color(line_color)),\r\n animate_func(lead2_cycle.set_color(line_color)),\r\n animate_func(beats.set_color(M.RED)),\r\n ]\r\n\r\n return animations\r\n\r\n # Animating the creation of the graphs and beats\r\n for i in range(len(self.checkpoints) - 1):\r\n # Create the first cycle\r\n if i == 0:\r\n self.play(\r\n M.AnimationGroup(\r\n *lines_with_beats(i, i + 1, M.BLUE),\r\n run_time=self.CYCLE_CREATION_RUNTIME,\r\n rate_func=M.rate_functions.ease_in_out_expo,\r\n )\r\n )\r\n # Creating the next cycle and removing the previous\r\n else:\r\n self.play(\r\n M.LaggedStart(\r\n M.AnimationGroup(\r\n *lines_with_beats(i - 1, i, M.TEAL, create=False),\r\n ),\r\n M.AnimationGroup(\r\n *lines_with_beats(i, i + 1, M.BLUE),\r\n ),\r\n lag_ratio=self.DELAY_BETWEEN_CYCLES,\r\n run_time=2 * self.CYCLE_CREATION_RUNTIME,\r\n rate_func=M.rate_functions.ease_in_out_expo,\r\n )\r\n )\r\n\r\n self.wait()\r\n","repo_name":"hbarua05/ECG","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":8743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73540493690","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport soundfile as sf\nimport os\n\n#path = 'C:\\\\Users\\\\Nadav\\\\Google Drive\\\\Data Projects\\\\WaveCatching\\\\'\n#file1 = 'Violin-up-down\\\\all_down\\\\chunk827-164.55225.wav'\n#file2 = 'Violin-up-down\\\\all_up\\\\chunk380-164.55225.wav'\n\npath = os.path.realpath(os.path.join(os.path.dirname(__file__)))\nfile1 = os.path.join(path, \"dataset\", \"dataset1\", \"all_down\", \"chunk456-147.0.wav\") # 'Violin-up-down\\\\all_down\\\\'\nfile2 = os.path.join(path, \"dataset\", \"dataset1\", \"all_up\", \"chunk350-147.0.wav\") # 'Violin-up-down\\\\all_down\\\\'\n\ny1, fs = sf.read(file1)\ny1 = y1/np.max(y1)\nN1 = len(y1)\nT = 1/fs\n#time1 = T*np.arange(N1)\n\ny2, fs = sf.read(file2)\ny2 = y2/np.max(y2)\nN2 = len(y2)\n#T = 1/fs\n#time2 = T*np.arange(N2)\n#\n#plt.plot(time1, y1, label=\"down\")\n#plt.plot(time2, y2, label=\"up\")\n#plt.show\n\ncutoff = 60 # Hz, a bit over 50Hz, which is noise\n\nf = np.arange(N1//2)*fs/N1 # Get frequency\nind = [i for i,e in enumerate(f) if e>cutoff and icutoff and i \"C:\\\\..\\\\chromedriver\"\r\npath_to_chromedriver = sys.argv[1]\r\n#path_to_chromedriver = \"D:\\\\Documentos\\\\1. Cuoka\\\\Scraping\\\\chromedriver\"\r\n#path_to_chromedriver = \"C:\\\\Users\\\\lux_f\\\\Documents\\\\chromedriver\"\r\n#path_to_chromedriver = \"C:\\\\Users\\\\administrator\\\\Cuoka\\Chromedriver\\\\chromedriver\"\r\n\r\n# Nombre de la seccion\r\nsection = sys.argv[2]\r\n#section = \"Vestidos\"\r\n\r\n# Path donde se encuentra el script -> \"C:\\\\..\\\\false\\\\\"\r\npath = sys.argv[3]\r\n#path = \"D:\\\\Documentos\\\\1. 
Cuoka\\\\Scraping\\\\shops\\\\MANGO_true\\\\false\\\\\"\r\n#path = \"C:\\\\Users\\\\lux_f\\\\OneDrive\\\\Documentos\\\\shops\\\\Mango_false\\\\true\\\\\"\r\n#path = \"C:\\\\Users\\\\administrator\\\\Cuoka\\\\shops\\\\Mango_true\\\\false\\\\\"\r\n\r\n# Read the links file and store the links in a list\r\nlistOfLinks = []\r\n\r\nfile = open(path + \"Seccion_\" + section + \".txt\", 'r')\r\nfor link in file:\r\n # Strip the newline characters\r\n listOfLinks.append(link.rstrip())\r\n \r\n# Chrome driver\r\nchrome_options = Options()\r\nchrome_options.add_argument(\"--lang=es\")\r\nchrome_options.add_argument(\"--start-maximized\")\r\n\r\ndr = webdriver.Chrome(executable_path = path_to_chromedriver, chrome_options = chrome_options)\r\n\r\n# Create the products output files\r\nresult = open(path + \"Productos_\" + section + \".txt\", 'w')\r\nfile_error = open(path + \"Productos_Error_\" + section + \".txt\", 'w')\r\n\r\nfor link in listOfLinks:\r\n # Dashed line to separate each product\r\n result.write(\"-----------------------------------------------------------\" + \"\\n\")\r\n\r\n connected = False\r\n retries = 3\r\n while not connected and retries > 0:\r\n try:\r\n # Open the product page\r\n dr.get(link)\r\n \r\n connected = True\r\n \r\n except:\r\n retries -= 1\r\n time.sleep(2)\r\n continue\r\n\r\n if not connected:\r\n file_error.write(\"No se ha podido abrir el link: \" + link + \"\\n\")\r\n continue\r\n \r\n try:\r\n # Wait up to 60 seconds for the image to appear.\r\n WebDriverWait(dr, 60).until(\r\n EC.presence_of_element_located((By.CLASS_NAME, \"ficha_foto\"))\r\n )\r\n\r\n time.sleep(1)\r\n \r\n except:\r\n file_error.write(\"Imagen no encontrada en: \" + link + \"\\n\")\r\n continue\r\n\r\n # Click the icon that dismisses the province selector\r\n try:\r\n dr.find_element_by_css_selector(\"#FormSeleccionProvincia > div > div > div > div > div > div.icon__close\").click()\r\n \r\n except:\r\n pass\r\n \r\n try:\r\n # ****** N O M B R E ****** #\r\n name = dr.find_element_by_class_name(\"nombreProducto\").text\r\n if (len(name) == 0):\r\n raise Exception(\"Nombre vacio\")\r\n \r\n result.write(\"Nombre: \" + name + \"\\n\")\r\n \r\n except:\r\n result.write(\"Nombre: null\\n\")\r\n file_error.write(\"Nombre no encontrado en: \" + link + \"\\n\")\r\n continue \r\n\r\n try:\r\n # ****** D E S C R I P T I O N ****** #\r\n description_list = dr.find_elements_by_css_selector(\"div.panel_descripcion > span\")\r\n full_descr = \"\" \r\n for description_elem in description_list:\r\n description = description_elem.find_element_by_css_selector(\"span\").text[:255]\r\n if full_descr != \"\":\r\n full_descr = full_descr + \". 
\" + description\r\n else:\r\n full_descr = description\r\n \r\n result.write(\"Descripcion: \" + full_descr + \"\\n\")\r\n \r\n except:\r\n result.write(\"Descripcion: null\\n\")\r\n\r\n try:\r\n # ****** P R E C I O ****** #\r\n price_container = dr.find_element_by_css_selector(\"div.precio_cabecera_producto > div > span\").text.replace(\",\", \".\")\r\n prices_list = price_container.split(\"€\",1)\r\n price = prices_list[0]\r\n result.write(\"Precio: \" + price + \"\\n\")\r\n \r\n except:\r\n result.write(\"Precio: null\\n\")\r\n file_error.write(\"Precio no encontrado en: \" + link + \"\\n\")\r\n continue\r\n\r\n try:\r\n # ****** D E S C U E N T O ****** #\r\n discount = prices_list[1].replace(\"€\", \"\")\r\n result.write(\"Descuento: \" + discount + \"\\n\")\r\n \r\n except:\r\n result.write(\"Descuento: \\n\")\r\n\r\n result.write(\"Link: \" + link + \"\\n\")\r\n\r\n # Colors\r\n try:\r\n # ****** C O L O R E S ****** #\r\n colors = dr.find_elements_by_class_name(\"productColors__buttonContainer\")\r\n \r\n except:\r\n result.write(\"*********************************************************\\n\")\r\n result.write(\" Color: null\\n\")\r\n result.write(\" Icono: null\\n\")\r\n result.write(\" Referencia: null\\n\")\r\n file_error.write(\"Colores no encontrados en: \" + link + \"\\n\")\r\n continue\r\n\r\n for color in colors:\r\n try:\r\n if (len(colors) > 1): \r\n # Click each color icon\r\n color.click()\r\n time.sleep(2)\r\n\r\n WebDriverWait(dr, 60).until(\r\n EC.presence_of_element_located((By.CLASS_NAME, \"ficha_foto\"))\r\n )\r\n \r\n except Exception as ex:\r\n result.write(\"*********************************************************\\n\")\r\n result.write(\" Color: null\\n\")\r\n result.write(\" Icono: null\\n\")\r\n result.write(\" Referencia: null\\n\")\r\n file_error.write(\"Color no encontrado en (click): \" + link + \"\\n\")\r\n continue\r\n\r\n try:\r\n # ****** C O L O R N O M B R E ****** #\r\n colorName = dr.find_element_by_class_name(\"producto_color_texto\").text.upper().replace(\"/\", \"-\").replace(\"COLOR: \", \"\")\r\n result.write(\"*********************************************************\\n\")\r\n result.write(\" Color: \" + colorName + \"\\n\")\r\n \r\n except:\r\n result.write(\"*********************************************************\\n\")\r\n result.write(\" Color: null\\n\")\r\n result.write(\" Icono: null\\n\")\r\n result.write(\" Referencia: null\\n\")\r\n file_error.write(\"Nombre de color no encontrado en: \" + link + \"\\n\")\r\n continue\r\n\r\n try:\r\n # ****** C O L O R I C O N O ****** #\r\n colorIcon = color.find_element_by_xpath(\".//img\").get_attribute(\"src\")\r\n result.write(\" Icono: \" + colorIcon + \"\\n\")\r\n \r\n except:\r\n result.write(\" Icono: null\\n\")\r\n\r\n try:\r\n # ****** C O L O R R E F E R E N C I A ****** #\r\n reference = dr.find_element_by_class_name(\"referenciaProducto\").text.rstrip()\r\n\r\n reference = ''.join(ch for ch in reference if ch.isdigit())\r\n \r\n result.write(\" Referencia: \" + reference + \"\\n\")\r\n \r\n except:\r\n result.write(\" Referencia: null\\n\")\r\n file_error.write(\"Referencia no encontrada en: \" + link + \"\\n\")\r\n continue \r\n\r\n # Extract the images\r\n try:\r\n images = dr.find_elements_by_id('tableFoto')\r\n\r\n except:\r\n file_error.write(\"Imagenes no encontradas en: \" + link + \"\\n\")\r\n continue\r\n\r\n # ****** I M A G E N E S ****** #\r\n for image in images:\r\n try:\r\n image.click()\r\n time.sleep(1)\r\n image_big = 
dr.find_element_by_css_selector(\"#panelZoomImagen > div.span12 > img\")\r\n result.write(\" Imagen: \" + image_big.get_attribute(\"src\") + \"\\n\")\r\n\r\n except:\r\n # There are many ghost images, so we don't write null\r\n pass\r\n \r\n # Scroll back to the top so the color icons can be clicked\r\n dr.execute_script(\"window.scrollTo(0, 0);\")\r\n time.sleep(1)\r\n \r\n# Create an empty file to signal that we have finished.\r\nopen(path + section + '_done.dat', 'w')\r\n\r\nfile_error.close()\r\nresult.close()\r\nfile.close()\r\n\r\ndr.quit()\r\n\r\n\r\n","repo_name":"dani3/Cuoka","sub_path":"WebScraper/scripts/Shop rendering/Mango/false/renderProducts.py","file_name":"renderProducts.py","file_ext":"py","file_size_in_byte":8368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"12347844482","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.db import IntegrityError\nfrom eds.models import Surtidores, Companias, Registroseriales, Cierreseriales, Facturas, RegistroTanques, MedidasTanques, Tanques, Aplicaciones, CierreVentasDia\nfrom .forms import RegistroserialesForm, FacturasForm, RegistroTanquesForm, VolumenTanquesForm, BusVolumenTanquesForm\nfrom django.contrib import messages\nfrom datetime import datetime, timedelta, time, date\n\n\n\ndef eds (request):\n return render(request, 'eds.html')\n\ndef home (request):\n return render(request, 'home.html')\n\n\ndef panel (request):\n listas = Aplicaciones.objects.all()\n return render(request, 'panel.html', { 'listas':listas })\n\ndef autenticar (request):\n if request.method == 'GET':\n return render(request, 'login.html', {'form': UserCreationForm})\n else: \n if request.POST['password1'] == request.POST['password2']:\n # register the user\n try:\n user = User.objects.create_user(username=request.POST['username'], password=request.POST['password1'])\n user.save()\n login(request, user)\n return redirect('panel') \n except IntegrityError: \n return render(request, 'login.html', {\n 'form': UserCreationForm,\n 'error': 'Usuario ya Existe' \n }) \n \n return render(request, 'login.html', {\n 'form': UserCreationForm,\n 'error': 'Contrasena no coincide' \n })\n \n \ndef cerrarsesion(request):\n logout(request)\n return redirect('/') \n \ndef iniciar(request):\n if request.method == 'GET':\n return render(request, 'iniciar.html', {'form': AuthenticationForm})\n else:\n user = authenticate(request, \n username=request.POST['username'], \n password=request.POST['password'] )\n if user is None:\n messages.warning(request, 'Usuario o Password Incorreccto')\n\n return render(request, 'iniciar.html', {\n 'form': AuthenticationForm \n })\n else: \n login(request, user)\n \n return redirect('panel')\n \ndef registroserialdiario(request):\n #messages.success(request, 'Your password was updated successfully!') \n cia = Companias.objects.get(usuario=request.user) \n surtidores = Surtidores.objects.filter(cia = cia.pk) \n #seriales = Registroseriales.objects.filter(cia = cia.pk ) \n seriales = Registroseriales.objects.filter(cia = cia.pk, estatus=False ).order_by('-id')\n \n if request.method == 'GET':\n return render(request, 'registroserialdiario.html', {\n 'form' : RegistroserialesForm,\n 'seriales': seriales,\n 'surtidores': surtidores\n\n } ) \n 
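    # POST branch (below): validate the submitted serial against the pump's last recorded reading before saving the litres sold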
else:\n try:\n mensaje = \"No se pudo guardar\"\n identificador_surtidor = request.POST['identificador']\n ultimoserial = Registroseriales.objects.filter(cia = cia.pk, identificador= identificador_surtidor ).last()\n compania = Companias.objects.get(usuario=request.user) \n formulario = RegistroserialesForm(request.POST)\n nuevo = formulario.save(commit=False)\n nuevo.usuario = request.user \n nuevo.cia = Companias.objects.get(usuario=request.user)\n nuevo.serialinicio = ultimoserial.serialfinal \n serfinal = request.POST['serialfinal']\n \n #if not(ultimoserial):\n # mensaje = \"error\"\n \n # check that the closing serial is greater than the previous one\n if int(serfinal) > int(ultimoserial.serialfinal): \n cambiar_estado = Surtidores.objects.filter(cia = cia.pk, id = identificador_surtidor).update(cierre=True) \n litros = int(serfinal) - ultimoserial.serialfinal\n nuevo.totallitros = litros\n nuevo.save()\n \n messages.success(request, 'Serial Registrado')\n\n return redirect('registroserialdiario')\n \n #return render(request, 'registroserialdiario.html', {\n #'form' : RegistroserialesForm,\n #'error': litros\n #} ) \n \n else:\n messages.warning(request, 'El serial debe ser mayor al inicial')\n \n return render(request, 'registroserialdiario.html', {\n 'form' : RegistroserialesForm,\n 'seriales': seriales,\n 'surtidores': surtidores\n \n } ) \n \n \n except ValueError: \n return render(request, 'registroserialdiario.html', {\n 'form' : RegistroserialesForm,\n 'seriales': seriales,\n 'surtidores': surtidores\n\n } ) \n \n \n \ndef listaseriales(request):\n cia = Companias.objects.get(usuario=request.user) \n surtidores = Surtidores.objects.filter(cia = cia.pk) \n seriales = Registroseriales.objects.filter(cia = cia.pk ).order_by('-id')\n #seriales = Registroseriales.objects.filter(cia = cia.pk, estatus=False )\n return render(request, 'listaseriales.html', \n { 'seriales': seriales, \n 'surtidores': surtidores\n } \n ) \n \n\ndef cierreserial(request):\n cia = Companias.objects.get(usuario=request.user) \n surtidores = Surtidores.objects.filter(cia = cia.pk) \n seriales = Registroseriales.objects.filter(cia = cia.pk, estatus=False) \n\n \n if request.method == 'GET':\n return render(request, 'cierreserial.html', {\n 'seriales': seriales,\n 'surtidores': surtidores\n\n } ) \n else:\n try:\n mensaje = \"No se pudo guardar\"\n current_date = date.today() \n \n id_surtidor = request.POST['id_serial']\n serial_inicial = Registroseriales.objects.filter(cia = cia.pk, identificador= id_surtidor, estatus=True ).last()\n serial_final = Registroseriales.objects.filter(cia = cia.pk, identificador= id_surtidor, estatus=False ).last()\n datos_surtidor = Surtidores.objects.get(cia = cia.pk, id= id_surtidor) \n\n \n guardar_cierre = Cierreseriales(serial_inicio = serial_inicial.serialfinal,fecha_incio=serial_inicial.creado, serial_final= serial_final.serialfinal )\n #guardar_cierre = Cierreseriales.objects.create(identificador=id_surtidor, cia = cia.pk, , ,)\n guardar_cierre.identificador = Surtidores.objects.get(id=id_surtidor)\n guardar_cierre.cia = Companias.objects.get(usuario=request.user)\n guardar_cierre.usuario = request.user \n guardar_cierre.totallitros = serial_final.serialfinal - serial_inicial.serialfinal\n guardar_cierre.save()\n\n ejecutar_cierre = Registroseriales.objects.filter(cia = cia.pk, identificador= id_surtidor).update(estatus=True)\n cambiar_estado = Surtidores.objects.filter(cia = cia.pk, id = id_surtidor).update(cierre=False) \n \n # Insert or update the day's sales closing record\n cierrediario, created 
= CierreVentasDia.objects.get_or_create(producto=datos_surtidor.tipo, fecha_cierre=current_date, cia= Companias.objects.get(usuario=request.user), \n usuario=request.user)\n if created:\n cierrediario.litros = serial_final.serialfinal - serial_inicial.serialfinal\n cierrediario.precio = 0.50\n cierrediario.total = cierrediario.litros * cierrediario.precio\n cierrediario.save()\n #messages.success(request, 'Cierre Ejecutado') \n \n\n else:\n #messages.success(request, 'Cierre Actualizado') \n cierrediario.litros = cierrediario.litros + (serial_final.serialfinal - serial_inicial.serialfinal)\n cierrediario.total = cierrediario.litros * 0.50 \n cierrediario.save()\n \n return render(request, 'cierreserial.html', {\n 'seriales': seriales,\n 'surtidores': surtidores,\n 'mensaje': serial_final.serialfinal\n })\n \n except ValueError: \n return render(request, 'cierreserial.html', {\n 'seriales': seriales,\n 'surtidores': surtidores\n\n } ) \n \n \n# List of daily closings \ndef cierrediario(request):\n cia = Companias.objects.get(usuario=request.user) \n lista = CierreVentasDia.objects.filter(cia = cia.pk)\n return render(request, 'listacierrediario.html', \n { 'listas': lista } \n ) \n\ndef eliminarserial(request,id):\n registroserial = Registroseriales.objects.get(id=id)\n registroserial.delete()\n messages.success(request, 'Serial eliminado correctamente') \n return redirect(\"/registroserialdiario/\")\n \n \ndef listacierres(request):\n cia = Companias.objects.get(usuario=request.user) \n cierres = Cierreseriales.objects.filter(cia = cia.pk).order_by('-id')\n return render(request, 'listacierres.html', \n { 'cierres': cierres } \n ) \n\ndef compras(request):\n cia = Companias.objects.get(usuario=request.user) \n lista_facturas = Facturas.objects.filter(cia = cia.pk)\n return render(request, 'compras.html', {'listas': lista_facturas} ) \n \n\ndef nuevafactura(request):\n cia = Companias.objects.get(usuario=request.user) \n \n if request.method == 'GET':\n return render(request, 'nuevafactura.html', {\n 'form' : FacturasForm\n\n } ) \n else:\n try:\n formulario = FacturasForm(request.POST)\n nuevo = formulario.save(commit=False)\n nuevo.usuario = request.user \n nuevo.cia = Companias.objects.get(usuario=request.user)\n nuevo.save()\n messages.success(request, 'Registro guardado exitosamente') \n\n return redirect('compras')\n \n except ValueError: \n messages.warning(request, 'Registro no se pudo guardar') \n return render(request, 'nuevafactura.html', {'form' : FacturasForm}) \n\ndef eliminarfactura(request,id):\n registro = Facturas.objects.get(id=id)\n registro.delete()\n messages.success(request, 'Registro eliminado correctamente') \n return redirect(\"compras\")\n\ndef editarfactura(request, id):\n cia = Companias.objects.get(usuario=request.user) \n registro = Facturas.objects.get(id=id)\n form = FacturasForm(instance=registro) \n\n if request.method == 'POST':\n try:\n form = FacturasForm(request.POST, instance=registro) \n if form.is_valid(): \n form.save()\n messages.success(request, 'Registro guardado exitosamente') \n return redirect('compras')\n \n except ValueError: \n messages.warning(request, form.id) \n return render(request, 'nuevafactura.html', {'form' : form}) \n \n return render(request, 'nuevafactura.html', {\n 'form' : form\n\n } ) \n\n \ndef registrotanques(request):\n #today = datetime.date.today()\n cia = Companias.objects.get(usuario=request.user) \n listas = RegistroTanques.objects.filter(cia = cia.pk ).order_by('-id')[:100]\n \n if request.method == 'POST':\n try: 
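            # POST branch: the submitted dipstick reading (medida) is converted to litres via the MedidasTanques table before the record is saved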
\n # look up the litres for this tank measurement\n litros = MedidasTanques.objects.get(cia=cia.pk, tanque=request.POST['tanque'], medida=request.POST['medida']).litros \n reserva = Tanques.objects.get(cia=cia.pk, id=request.POST['tanque']).reserva_estrategica \n\n form = RegistroTanquesForm(request.POST) \n if form.is_valid(): \n guardar = form.save(commit=False)\n guardar.usuario = request.user \n guardar.cia = Companias.objects.get(usuario=request.user) \n guardar.litros = litros \n guardar.disponible = litros - reserva\n\n guardar.save()\n messages.success(request, 'Registro guardado exitosamente') \n return redirect('registrotanques')\n \n except ValueError: \n messages.warning(request, form.id) \n return render(request, 'nuevafactura.html', {'form' : form}) \n \n return render(request, 'registrotanques.html', \n { 'form': RegistroTanquesForm, \n 'listas': listas } \n ) \n \ndef eliminarregistrotanque(request,id):\n registro = RegistroTanques.objects.get(id=id)\n registro.delete()\n messages.success(request, 'Registro eliminado correctamente') \n return redirect(\"registrotanques\")\n\n \ndef volumentanques(request): \n cia = Companias.objects.get(usuario=request.user) \n listas = MedidasTanques.objects.filter(cia = cia.pk ).order_by('-medida')[:500]\n if request.method == 'POST':\n try: \n # CHECK WHETHER THIS MEASUREMENT IS ALREADY REGISTERED FOR THE TANK\n verificar = MedidasTanques.objects.filter(cia=cia.pk, tanque=request.POST['tanque'], medida = request.POST['medida']) \n if not verificar :\n # register the tank measurement\n form = VolumenTanquesForm(request.POST) \n if form.is_valid(): \n guardar = form.save(commit=False)\n guardar.usuario = request.user \n guardar.cia = Companias.objects.get(usuario=request.user) \n guardar.save()\n messages.success(request, 'Registro guardado exitosamente') \n return redirect('volumentanques')\n else: \n messages.warning(request, 'Registro Duplicado') \n\n \n except ValueError: \n messages.warning(request, form.id) \n return render(request, 'volumentanques', {'form' : VolumenTanquesForm}) \n \n\n \n return render(request, 'volumentanques.html',\n {'form': VolumenTanquesForm,\n 'formbuscar' : BusVolumenTanquesForm,\n 'listas': listas }\n )\n \ndef elivolumenmedida(request,id):\n registro = MedidasTanques.objects.get(id=id)\n registro.delete()\n messages.success(request, 'Registro eliminado correctamente') \n return redirect(\"volumentanques\")\n \n # automatic assignment of a tank's volume table (copies measurements to another tank)\ndef autvolumenmedida(request):\n cia = Companias.objects.get(usuario=request.user) \n tanque1 = MedidasTanques.objects.filter(cia = cia.pk, tanque=int(request.POST['tanque'] ))\n \n for listadoMedidas in tanque1:\n tanque2 = MedidasTanques()\n tanque2.cia = Companias.objects.get(usuario=request.user) \n tanque2.usuario = request.user\n tanque2.tanque = Tanques.objects.get(id=request.POST['tanquedestino']) \n tanque2.medida = listadoMedidas.medida\n tanque2.litros = listadoMedidas.litros\n tanque2.save()\n messages.success(request, 'Automatizacion completada') \n \n return redirect(\"volumentanques\")\n \n\n# search measurements by tank\ndef buscarmedidastanques(request): \n cia = Companias.objects.get(usuario=request.user) \n if request.method == 'POST': \n listas = MedidasTanques.objects.filter(cia = cia.pk, tanque = request.POST['tanque'] ).order_by('-medida')[:500]\n else: \n listas = MedidasTanques.objects.filter(cia = cia.pk ).order_by('-medida')[:500]\n \n return render(request, 'volumentanques.html',\n {'form': VolumenTanquesForm,\n 'formbuscar' : BusVolumenTanquesForm,\n 
'listas': listas }\n )\n \n ","repo_name":"josevicenteocana/oksistemprivate","sub_path":"eds/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16411,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9665715417","text":"\"\"\"This script is modified from [PARE](https://github.com/\nmkocabas/PARE/tree/master/pare/models/layers).\n\nOriginal license please see docs/additional_licenses.md.\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.runner.base_module import BaseModule\nfrom torch.nn.modules.utils import _pair\n\nfrom mmhuman3d.utils.geometry import rot6d_to_rotmat\n\n\nclass LocallyConnected2d(nn.Module):\n \"\"\"Locally Connected Layer.\n\n Args:\n in_channels (int):\n the in channel of the features.\n out_channels (int):\n the out channel of the features.\n output_size (List[int]):\n the output size of the features.\n kernel_size (int):\n the size of the kernel.\n stride (int):\n the stride of the kernel.\n Returns:\n attended_features (torch.Tensor):\n attended feature maps\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n output_size,\n kernel_size,\n stride,\n bias=False):\n super(LocallyConnected2d, self).__init__()\n output_size = _pair(output_size)\n self.weight = nn.Parameter(\n torch.randn(1, out_channels, in_channels, output_size[0],\n output_size[1], kernel_size**2),\n requires_grad=True,\n )\n if bias:\n self.bias = nn.Parameter(\n torch.randn(1, out_channels, output_size[0], output_size[1]),\n requires_grad=True)\n else:\n self.register_parameter('bias', None)\n self.kernel_size = _pair(kernel_size)\n self.stride = _pair(stride)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n _, c, h, w = x.size()\n kh, kw = self.kernel_size\n dh, dw = self.stride\n x = x.unfold(2, kh, dh).unfold(3, kw, dw)\n x = x.contiguous().view(*x.size()[:-2], -1)\n # Sum in in_channel and kernel_size dims\n out = (x.unsqueeze(1) * self.weight).sum([2, -1])\n if self.bias is not None:\n out += self.bias\n return out\n\n\nclass KeypointAttention(nn.Module):\n \"\"\"Keypoint Attention Layer.\n\n Args:\n use_conv (bool):\n whether to use conv for the attended feature map.\n Default: False\n in_channels (List[int]):\n the in channel of shape_cam features and pose features.\n Default: (256, 64)\n out_channels (List[int]):\n the out channel of shape_cam features and pose features.\n Default: (256, 64)\n Returns:\n attended_features (torch.Tensor):\n attended feature maps\n \"\"\"\n\n def __init__(self,\n use_conv=False,\n in_channels=(256, 64),\n out_channels=(256, 64),\n act='softmax',\n use_scale=False):\n super(KeypointAttention, self).__init__()\n self.use_conv = use_conv\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.act = act\n self.use_scale = use_scale\n if use_conv:\n self.conv1x1_pose = nn.Conv1d(\n in_channels[0], out_channels[0], kernel_size=1)\n self.conv1x1_shape_cam = nn.Conv1d(\n in_channels[1], out_channels[1], kernel_size=1)\n\n def forward(self, features, heatmaps):\n \"\"\"Forward function.\"\"\"\n batch_size, num_joints, height, width = heatmaps.shape\n\n if self.use_scale:\n scale = 1.0 / np.sqrt(height * width)\n heatmaps = heatmaps * scale\n\n if self.act == 'softmax':\n normalized_heatmap = F.softmax(\n heatmaps.reshape(batch_size, num_joints, -1), dim=-1)\n elif self.act == 'sigmoid':\n normalized_heatmap = torch.sigmoid(\n heatmaps.reshape(batch_size, num_joints, -1))\n features = 
features.reshape(batch_size, -1, height * width)\n\n attended_features = torch.matmul(normalized_heatmap,\n features.transpose(2, 1))\n attended_features = attended_features.transpose(2, 1)\n\n if self.use_conv:\n if attended_features.shape[1] == self.in_channels[0]:\n attended_features = self.conv1x1_pose(attended_features)\n else:\n attended_features = self.conv1x1_shape_cam(attended_features)\n\n return attended_features\n\n\ndef interpolate(feat, uv):\n \"\"\"\n Args:\n feat (torch.Tensor): [B, C, H, W] image features\n uv (torch.Tensor): [B, 2, N] uv coordinates\n in the image plane, range [-1, 1]\n Returns:\n samples[:, :, :, 0] (torch.Tensor):\n [B, C, N] image features at the uv coordinates\n \"\"\"\n if uv.shape[-1] != 2:\n uv = uv.transpose(1, 2) # [B, N, 2]\n uv = uv.unsqueeze(2) # [B, N, 1, 2]\n # NOTE: for newer PyTorch, it seems that training\n # results are degraded due to implementation diff in F.grid_sample\n # for old versions, simply remove the align_corners argument.\n if int(torch.__version__.split('.')[1]) < 4:\n samples = torch.nn.functional.grid_sample(feat, uv) # [B, C, N, 1]\n else:\n samples = torch.nn.functional.grid_sample(\n feat, uv, align_corners=True) # [B, C, N, 1]\n return samples[:, :, :, 0] # [B, C, N]\n\n\ndef _softmax(tensor, temperature, dim=-1):\n return F.softmax(tensor * temperature, dim=dim)\n\n\ndef softargmax2d(\n heatmaps,\n temperature=None,\n normalize_keypoints=True,\n):\n \"\"\"Softargmax layer for heatmaps.\"\"\"\n dtype, device = heatmaps.dtype, heatmaps.device\n if temperature is None:\n temperature = torch.tensor(1.0, dtype=dtype, device=device)\n batch_size, num_channels, height, width = heatmaps.shape\n x = torch.arange(\n 0, width, device=device,\n dtype=dtype).reshape(1, 1, 1, width).expand(batch_size, -1, height, -1)\n y = torch.arange(\n 0, height, device=device,\n dtype=dtype).reshape(1, 1, height, 1).expand(batch_size, -1, -1, width)\n # Should be Bx2xHxW\n points = torch.cat([x, y], dim=1)\n normalized_heatmap = _softmax(\n heatmaps.reshape(batch_size, num_channels, -1),\n temperature=temperature.reshape(1, -1, 1),\n dim=-1)\n\n # Should be BxJx2\n keypoints = (\n normalized_heatmap.reshape(batch_size, -1, 1, height * width) *\n points.reshape(batch_size, 1, 2, -1)).sum(dim=-1)\n\n if normalize_keypoints:\n # Normalize keypoints to [-1, 1]\n keypoints[:, :, 0] = (keypoints[:, :, 0] / (width - 1) * 2 - 1)\n keypoints[:, :, 1] = (keypoints[:, :, 1] / (height - 1) * 2 - 1)\n\n return keypoints, normalized_heatmap.reshape(batch_size, -1, height, width)\n\n\nclass PareHead(BaseModule):\n\n def __init__(\n self,\n num_joints=24,\n num_input_features=480,\n softmax_temp=1.0,\n num_deconv_layers=3,\n num_deconv_filters=(256, 256, 256),\n num_deconv_kernels=(4, 4, 4),\n num_camera_params=3,\n num_features_smpl=64,\n final_conv_kernel=1,\n pose_mlp_num_layers=1,\n shape_mlp_num_layers=1,\n pose_mlp_hidden_size=256,\n shape_mlp_hidden_size=256,\n bn_momentum=0.1,\n use_heatmaps='part_segm',\n use_keypoint_attention=False,\n use_postconv_keypoint_attention=False,\n keypoint_attention_act='softmax', # softmax, sigmoid\n use_scale_keypoint_attention=False,\n backbone='hrnet_w32-conv', # hrnet, resnet\n smpl_mean_params=None,\n deconv_with_bias=False,\n ):\n \"\"\"PARE parameters regressor head. This class is modified from\n\n [PARE](https://github.com/\n mkocabas/PARE/blob/master/pare/models/head/pare_head.py). Original
Original\n license please see docs/additional_licenses.md.\n\n Args:\n num_joints (int):\n Number of joints, should be 24 for smpl.\n num_input_features (int):\n Number of input featuremap channels.\n softmax_temp (float):\n Softmax tempreture\n num_deconv_layers (int):\n Number of deconvolution layers.\n num_deconv_filters (List[int]):\n Number of filters for each deconvolution layer,\n len(num_deconv_filters) == num_deconv_layers.\n num_deconv_kernels (List[int]):\n Kernel size for each deconvolution layer,\n len(num_deconv_kernels) == num_deconv_layers.\n num_camera_params (int):\n Number of predicted camera parameter dimension.\n num_features_smpl (int):\n Number of feature map channels.\n final_conv_kernel (int):\n Kernel size for the final deconvolution feature map channels.\n pose_mlp_num_layers (int):\n Number of mpl layers for pose parameter regression.\n shape_mlp_num_layers (int):\n Number of mpl layers for pose parameter regression.\n pose_mlp_hidden_size (int):\n Hidden size for pose mpl layers.\n shape_mlp_hidden_size (int):\n Hidden size for pose mpl layers.\n bn_momemtum (float):\n Momemtum for batch normalization.\n use_heatmaps (str):\n Types of heat maps to use.\n use_keypoint_attention (bool)\n Whether to use attention based on heat maps.\n keypoint_attention_act (str):\n Types of activation function for attention layers.\n use_scale_keypoint_attention (str):\n Whether to scale the attention\n according to the size of the attention map.\n deconv_with_bias (bool)\n Whether to deconv with bias.\n backbone (str):\n Types of the backbone.\n smpl_mean_params (str):\n File name of the mean SMPL parameters\n \"\"\"\n\n super(PareHead, self).__init__()\n self.backbone = backbone\n self.num_joints = num_joints\n self.deconv_with_bias = deconv_with_bias\n self.use_heatmaps = use_heatmaps\n self.pose_mlp_num_layers = pose_mlp_num_layers\n self.shape_mlp_num_layers = shape_mlp_num_layers\n self.pose_mlp_hidden_size = pose_mlp_hidden_size\n self.shape_mlp_hidden_size = shape_mlp_hidden_size\n self.use_keypoint_attention = use_keypoint_attention\n\n self.num_input_features = num_input_features\n self.bn_momentum = bn_momentum\n if self.use_heatmaps == 'part_segm':\n\n self.use_keypoint_attention = True\n\n if backbone.startswith('hrnet'):\n\n self.keypoint_deconv_layers = self._make_conv_layer(\n num_deconv_layers,\n num_deconv_filters,\n (3, ) * num_deconv_layers,\n )\n self.num_input_features = num_input_features\n self.smpl_deconv_layers = self._make_conv_layer(\n num_deconv_layers,\n num_deconv_filters,\n (3, ) * num_deconv_layers,\n )\n else:\n # part branch that estimates 2d keypoints\n\n conv_fn = self._make_deconv_layer\n\n self.keypoint_deconv_layers = conv_fn(\n num_deconv_layers,\n num_deconv_filters,\n num_deconv_kernels,\n )\n # reset inplanes to 2048 -> final resnet layer\n self.num_input_features = num_input_features\n self.smpl_deconv_layers = conv_fn(\n num_deconv_layers,\n num_deconv_filters,\n num_deconv_kernels,\n )\n\n pose_mlp_inp_dim = num_deconv_filters[-1]\n smpl_final_dim = num_features_smpl\n shape_mlp_inp_dim = num_joints * smpl_final_dim\n\n self.keypoint_final_layer = nn.Conv2d(\n in_channels=num_deconv_filters[-1],\n out_channels=num_joints +\n 1 if self.use_heatmaps in ('part_segm',\n 'part_segm_pool') else num_joints,\n kernel_size=final_conv_kernel,\n stride=1,\n padding=1 if final_conv_kernel == 3 else 0,\n )\n\n self.smpl_final_layer = nn.Conv2d(\n in_channels=num_deconv_filters[-1],\n out_channels=smpl_final_dim,\n kernel_size=final_conv_kernel,\n 
stride=1,\n padding=1 if final_conv_kernel == 3 else 0,\n )\n\n # temperature for softargmax function\n self.register_buffer('temperature', torch.tensor(softmax_temp))\n mean_params = np.load(smpl_mean_params)\n init_pose = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)\n init_shape = torch.from_numpy(\n mean_params['shape'][:].astype('float32')).unsqueeze(0)\n init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)\n self.register_buffer('init_pose', init_pose)\n self.register_buffer('init_shape', init_shape)\n self.register_buffer('init_cam', init_cam)\n\n self.pose_mlp_inp_dim = pose_mlp_inp_dim\n self.shape_mlp_inp_dim = shape_mlp_inp_dim\n\n self.shape_mlp = self._get_shape_mlp(output_size=10)\n self.cam_mlp = self._get_shape_mlp(output_size=num_camera_params)\n\n self.pose_mlp = self._get_pose_mlp(\n num_joints=num_joints, output_size=6)\n\n self.keypoint_attention = KeypointAttention(\n use_conv=use_postconv_keypoint_attention,\n in_channels=(self.pose_mlp_inp_dim, smpl_final_dim),\n out_channels=(self.pose_mlp_inp_dim, smpl_final_dim),\n act=keypoint_attention_act,\n use_scale=use_scale_keypoint_attention,\n )\n\n def _get_shape_mlp(self, output_size):\n \"\"\"mlp layers for shape regression.\"\"\"\n if self.shape_mlp_num_layers == 1:\n return nn.Linear(self.shape_mlp_inp_dim, output_size)\n\n module_list = []\n for i in range(self.shape_mlp_num_layers):\n if i == 0:\n module_list.append(\n nn.Linear(self.shape_mlp_inp_dim,\n self.shape_mlp_hidden_size))\n elif i == self.shape_mlp_num_layers - 1:\n module_list.append(\n nn.Linear(self.shape_mlp_hidden_size, output_size))\n else:\n module_list.append(\n nn.Linear(self.shape_mlp_hidden_size,\n self.shape_mlp_hidden_size))\n return nn.Sequential(*module_list)\n\n def _get_pose_mlp(self, num_joints, output_size):\n \"\"\"mlp layers for pose regression.\"\"\"\n if self.pose_mlp_num_layers == 1:\n\n return LocallyConnected2d(\n in_channels=self.pose_mlp_inp_dim,\n out_channels=output_size,\n output_size=[num_joints, 1],\n kernel_size=1,\n stride=1,\n )\n\n module_list = []\n for i in range(self.pose_mlp_num_layers):\n if i == 0:\n module_list.append(\n LocallyConnected2d(\n in_channels=self.pose_mlp_inp_dim,\n out_channels=self.pose_mlp_hidden_size,\n output_size=[num_joints, 1],\n kernel_size=1,\n stride=1,\n ))\n elif i == self.pose_mlp_num_layers - 1:\n module_list.append(\n LocallyConnected2d(\n in_channels=self.pose_mlp_hidden_size,\n out_channels=output_size,\n output_size=[num_joints, 1],\n kernel_size=1,\n stride=1,\n ))\n else:\n module_list.append(\n LocallyConnected2d(\n in_channels=self.pose_mlp_hidden_size,\n out_channels=self.pose_mlp_hidden_size,\n output_size=[num_joints, 1],\n kernel_size=1,\n stride=1,\n ))\n return nn.Sequential(*module_list)\n\n def _get_deconv_cfg(self, deconv_kernel):\n \"\"\"get deconv padding, output padding according to kernel size.\"\"\"\n if deconv_kernel == 4:\n padding = 1\n output_padding = 0\n elif deconv_kernel == 3:\n padding = 1\n output_padding = 1\n elif deconv_kernel == 2:\n padding = 0\n output_padding = 0\n\n return deconv_kernel, padding, output_padding\n\n def _make_conv_layer(self, num_layers, num_filters, num_kernels):\n \"\"\"make convolution layers.\"\"\"\n assert num_layers == len(num_filters), \\\n 'ERROR: num_conv_layers is different len(num_conv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_conv_layers is different len(num_conv_filters)'\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n 
self._get_deconv_cfg(num_kernels[i])\n\n planes = num_filters[i]\n layers.append(\n nn.Conv2d(\n in_channels=self.num_input_features,\n out_channels=planes,\n kernel_size=kernel,\n stride=1,\n padding=padding,\n bias=self.deconv_with_bias))\n layers.append(nn.BatchNorm2d(planes, momentum=self.bn_momentum))\n layers.append(nn.ReLU(inplace=True))\n self.num_input_features = planes\n\n return nn.Sequential(*layers)\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n \"\"\"make deconvolution layers.\"\"\"\n assert num_layers == len(num_filters), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n assert num_layers == len(num_kernels), \\\n 'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n self._get_deconv_cfg(num_kernels[i])\n\n planes = num_filters[i]\n layers.append(\n nn.ConvTranspose2d(\n in_channels=self.num_input_features,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=self.deconv_with_bias))\n layers.append(nn.BatchNorm2d(planes, momentum=self.bn_momentum))\n layers.append(nn.ReLU(inplace=True))\n # if self.use_self_attention:\n # layers.append(SelfAttention(planes))\n self.num_input_features = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, features):\n \"\"\"Forward function.\"\"\"\n batch_size = features.shape[0]\n\n init_pose = self.init_pose.expand(batch_size, -1) # N, Jx6\n init_shape = self.init_shape.expand(batch_size, -1)\n init_cam = self.init_cam.expand(batch_size, -1)\n\n output = {}\n\n part_feats = self._get_2d_branch_feats(features)\n\n part_attention = self._get_part_attention_map(part_feats, output)\n\n smpl_feats = self._get_3d_smpl_feats(features, part_feats)\n\n point_local_feat, cam_shape_feats = self._get_local_feats(\n smpl_feats, part_attention, output)\n\n pred_pose, pred_shape, pred_cam = self._get_final_preds(\n point_local_feat, cam_shape_feats, init_pose, init_shape, init_cam)\n\n pred_rotmat = rot6d_to_rotmat(pred_pose).reshape(batch_size, 24, 3, 3)\n\n output.update({\n 'pred_pose': pred_rotmat,\n 'pred_cam': pred_cam,\n 'pred_shape': pred_shape,\n })\n return output\n\n def _get_local_feats(self, smpl_feats, part_attention, output):\n # 1x1 conv\n \"\"\"get keypoints and camera features from backbone features.\"\"\"\n\n cam_shape_feats = self.smpl_final_layer(smpl_feats)\n\n if self.use_keypoint_attention:\n point_local_feat = self.keypoint_attention(smpl_feats,\n part_attention)\n cam_shape_feats = self.keypoint_attention(cam_shape_feats,\n part_attention)\n else:\n point_local_feat = interpolate(smpl_feats, output['pred_kp2d'])\n cam_shape_feats = interpolate(cam_shape_feats, output['pred_kp2d'])\n return point_local_feat, cam_shape_feats\n\n def _get_2d_branch_feats(self, features):\n \"\"\"get part features from backbone features.\"\"\"\n part_feats = self.keypoint_deconv_layers(features)\n\n return part_feats\n\n def _get_3d_smpl_feats(self, features, part_feats):\n \"\"\"get smpl feature maps from backbone features.\"\"\"\n\n smpl_feats = self.smpl_deconv_layers(features)\n\n return smpl_feats\n\n def _get_part_attention_map(self, part_feats, output):\n \"\"\"get attention map from part feature map.\"\"\"\n heatmaps = self.keypoint_final_layer(part_feats)\n\n if self.use_heatmaps == 'part_segm':\n\n output['pred_segm_mask'] = heatmaps\n # remove the the background channel\n heatmaps = heatmaps[:, 1:, :, :]\n else:\n pred_kp2d, 
_ = softargmax2d(heatmaps, self.temperature)\n output['pred_kp2d'] = pred_kp2d\n output['pred_heatmaps_2d'] = heatmaps\n return heatmaps\n\n def _get_final_preds(self, pose_feats, cam_shape_feats, init_pose,\n init_shape, init_cam):\n \"\"\"get final preds.\"\"\"\n return self._pare_get_final_preds(pose_feats, cam_shape_feats,\n init_pose, init_shape, init_cam)\n\n def _pare_get_final_preds(self, pose_feats, cam_shape_feats, init_pose,\n init_shape, init_cam):\n \"\"\"get final preds.\"\"\"\n pose_feats = pose_feats.unsqueeze(-1) #\n\n if init_pose.shape[-1] == 6:\n # This means init_pose comes from a previous iteration\n init_pose = init_pose.transpose(2, 1).unsqueeze(-1)\n else:\n # This means init pose comes from mean pose\n init_pose = init_pose.reshape(init_pose.shape[0], 6,\n -1).unsqueeze(-1)\n\n shape_feats = cam_shape_feats\n\n shape_feats = torch.flatten(shape_feats, start_dim=1)\n\n pred_pose = self.pose_mlp(pose_feats)\n pred_cam = self.cam_mlp(shape_feats)\n pred_shape = self.shape_mlp(shape_feats)\n\n pred_pose = pred_pose.squeeze(-1).transpose(2, 1) # N, J, 6\n return pred_pose, pred_shape, pred_cam\n","repo_name":"open-mmlab/mmhuman3d","sub_path":"mmhuman3d/models/heads/pare_head.py","file_name":"pare_head.py","file_ext":"py","file_size_in_byte":23009,"program_lang":"python","lang":"en","doc_type":"code","stars":1016,"dataset":"github-code","pt":"77"} +{"seq_id":"10453062208","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\n\nimport tornado.ioloop\nimport tornado.web\nimport tornado.autoreload\nfrom handlers.media_handler import MediaHandler\nfrom handlers.notification_handler import NotificationHandler\n\nclass Application(tornado.web.Application):\n def __init__(self):\n\n handlers = [\n (r\"/media\", MediaHandler),\n (r\"/notify\", NotificationHandler)\n ]\n\n settings = {\"static_path\": os.path.join(os.path.dirname(__file__), \"static\")}\n tornado.web.Application.__init__(self, handlers, **settings)\n\n\ndef main():\n\n app = Application()\n app.listen(os.environ.get(\"PORT\", 5000))\n\n tornado.ioloop.IOLoop.instance().start()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rgan/media_upload","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"71720232888","text":"#!/usr/bin/env python3\n\nimport os\nimport json\nimport tkinter as tk\nimport tkinter.ttk as ttk\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename\nfrom tkinter import font\n\nimport get_result\nimport create_coding\nimport construct_report\nimport create_view\nimport create_table\n\n# The master GUI suite programm thing.\n\nclass ReportGenForm(tk.Frame):\n\tquestionsfilename = \"\"\n\tanswersfilename = \"\"\n\tdbfilename = \"\"\n\treportdeffilename = \"\"\n\n\tdef __init__(self, master=None):\n\t\t\n\t\tttk.Frame.__init__(self, master)\n\t\tself.pack()\n\t\t\n\t\t#self.grid_columnconfigure(2, {\"minsize\": 200})\n\t\t\n\t\tttk.Label(self, text=\"Question list (JSON):\").grid(row=0, column=0, sticky=\"E\")\n\t\tttk.Button(self, width=0, text = \"...\", command = self.openquestions).grid(row=0, column=1)\n\t\tself.questionsfilename_label = ttk.Label(self, text=\"\", font=\"TkFixedFont\")\n\t\tself.questionsfilename_label.grid(row=0, column=2, sticky=\"W\")\n\t\t\n\t\tttk.Label(self, text=\"Answers list (JSON):\").grid(row=1, column=0, sticky=\"E\")\n\t\tttk.Button(self, width=0, text = \"...\", command = 
self.openanswers).grid(row=1, column=1)\n\t\tself.answersfilename_label = ttk.Label(self, text=\"\", font=\"TkFixedFont\")\n\t\tself.answersfilename_label.grid(row=1, column=2, sticky=\"W\")\n\t\t\n\t\ttk.Frame(self, height=2, bd=1, relief=\"sunken\").grid(row=2, column=0, columnspan=3, sticky=\"EW\", padx=5, pady=5)\n\t\t\n\t\tttk.Label(self, text=\"Result database (sqlite db):\").grid(row=3, column=0, sticky=\"E\")\n\t\tttk.Button(self, width=0, text = \"...\", command = self.opendb).grid(row=3, column=1)\n\t\tself.dbfilename_label = ttk.Label(self, text=\"\", font=\"TkFixedFont\")\n\t\tself.dbfilename_label.grid(row=3, column=2, sticky=\"W\")\n\t\t\n\t\tttk.Label(self, text=\"Course (from DB):\").grid(row=4, column=0, sticky=\"E\")\n\t\tself.coursevar = tk.StringVar(master)\n\t\tself.table_box = ttk.OptionMenu(self, self.coursevar, \"-- No db loaded --\")\n\t\tself.table_box.grid(row=4, column=2, sticky=\"W\")\n\t\t\n\t\ttk.Frame(self, height=2, bd=1, relief=\"sunken\").grid(row=5, column=0, columnspan=3, sticky=\"EW\", padx=5, pady=5)\n\t\t\n\t\tttk.Label(self, text=\"Report definition (JSON):\").grid(row=6, column=0, sticky=\"E\")\n\t\tttk.Button(self, width=0, text = \"...\", command = self.openreportdef).grid(row=6, column=1)\n\t\tself.reportdeffilename_label = ttk.Label(self, text=\"\", font=\"TkFixedFont\")\n\t\tself.reportdeffilename_label.grid(row=6, column=2, sticky=\"W\")\n\t\t\n\t\ttk.Frame(self, height=2, bd=1, relief=\"sunken\").grid(row=7, column=0, columnspan=3, sticky=\"EW\", padx=5, pady=5)\n\t\t\n\t\tttk.Button(self, width=0, text = \"Create PDF\", command = self.create_pdf).grid(row=8, column=0, columnspan=3, sticky=\"EW\")\n\t\n\tdef openquestions(self):\n\t\tfilename = askopenfilename()\n\t\tif not filename == \"\":\n\t\t\tself.questionsfilename = filename\n\t\t\tself.questionsfilename_label[\"text\"] = os.path.basename(filename)\n\t\n\tdef openanswers(self):\n\t\tfilename = askopenfilename()\n\t\tif not filename == \"\":\n\t\t\tself.answersfilename = filename\n\t\t\tself.answersfilename_label[\"text\"] = os.path.basename(filename)\n\t\n\tdef opendb(self):\n\t\tfilename = askopenfilename()\n\t\tif not filename == \"\":\n\t\t\tself.dbfilename = filename\n\t\t\tself.dbfilename_label[\"text\"] = os.path.basename(filename)\n\t\t\t\n\t\t\tself.coursevar.set(\"-- Please choose --\")\n\t\t\tself.table_box[\"menu\"].delete(0, \"end\")\n\t\t\t\n\t\t\ttables = get_result.get_courses(filename)\n\t\t\t\n\t\t\tfor choice in tables:\n\t\t\t\tself.table_box['menu'].add_command(label=choice, command=tk._setit(self.coursevar, choice))\n\t\n\tdef openreportdef(self):\n\t\tfilename = askopenfilename()\n\t\tif not filename == \"\":\n\t\t\tself.reportdeffilename = filename\n\t\t\tself.reportdeffilename_label[\"text\"] = os.path.basename(filename)\n\t\n\tdef create_pdf(self):\n\t\tfilename = asksaveasfilename(title=\"Save as PDF (additional .json file will be created)\", filetypes=[(\"PDF\", \"*.pdf\")])\n\t\tif not filename == \"\":\n\t\t\t# Create report first...\n\t\t\twith open(self.reportdeffilename, 'r') as f:\n\t\t\t\treportdef = json.load(f)\n\t\t\twith open(self.questionsfilename, 'r') as f:\n\t\t\t\tquestionsdef = json.load(f)\n\t\t\twith open(self.answersfilename, 'r') as f:\n\t\t\t\tanswersdef = json.load(f)\n\t\t\t\n\t\t\treport = construct_report.construct_report(reportdef, questionsdef, answersdef, self.dbfilename, self.coursevar.get())\n\t\t\treport_json = create_coding.prettyprint_json(report)\n\t\t\t\n\t\t\tjsonfilename = \"{}.json\".format(filename)\n\t\t\twith 
open(jsonfilename, 'w') as f:\n\t\t\t\tf.write(report_json)\n\t\t\t\t\n\t\t\t# ...then render it.\n\t\t\tcreate_view.create_report_pdf(jsonfilename, filename)\n\nclass CreateDbForm(tk.Frame):\n\tdbname = \"\"\n\t\n\tdef __init__(self, master=None):\n\t\t\n\t\tttk.Frame.__init__(self, master)\n\t\tself.pack()\n\t\t\n\t\tttk.Label(self, text=\"Database to create/fill:\").grid(row=0, column=0, sticky=\"E\")\n\t\tttk.Button(self, width=0, text = \"...\", command = self.choose_db).grid(row=0, column=1)\n\t\tself.dbname_label = ttk.Label(self, text=\"\", font=\"TkFixedFont\")\n\t\tself.dbname_label.grid(row=0, column=2, sticky=\"W\")\n\t\t\n\t\ttk.Frame(self, height=2, bd=1, relief=\"sunken\").grid(row=1, column=0, columnspan=3, sticky=\"EW\", padx=5, pady=5)\n\t\t\n\t\tttk.Button(self, width=0, text = \"Choose CSV to add into database\",\n\t\t\tcommand = self.add_file_to_db).grid(row=2, column=0, columnspan=3, sticky=\"EW\")\n\t\t\n\tdef choose_db(self):\n\t\tfilename = asksaveasfilename()\n\t\tif not filename == \"\":\n\t\t\tself.dbname = filename\n\t\t\tself.dbname_label[\"text\"] = os.path.basename(filename)\n\t\n\tdef add_file_to_db(self):\n\t\tfilename = askopenfilename()\n\t\tif not filename == \"\":\n\t\t\tcreate_table.create_table(filename, self.dbname)\n\nroot = tk.Tk()\n\nnotebook = ttk.Notebook(root)\nnotebook.pack()\nnotebook.add(ReportGenForm(master=notebook), text=\"Report generation\")\nnotebook.add(CreateDbForm(master=notebook), text=\"Database creation\")\n\nroot.mainloop()\n","repo_name":"fsr/adam","sub_path":"adam_tk_gui.py","file_name":"adam_tk_gui.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"73603595770","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport csv\n\ngraph_file = \"graph.txt\"\n\nlines = [line.rstrip('\\n') for line in open(graph_file)]\n\nservice_entries = [lines[i:i+11] for i in range(0, len(lines), 12)]\n\ndef get_list(s):\n\ttemp = s.split()\n\tci_minus = float(temp[0][1:-2])\n\tci_plus = float(temp[1][0:-1])\n\treturn [ci_minus, ci_plus]\n\nservice_time = [int(entry[0]) for entry in service_entries]\ngrand_mean_t = [float(entry[1]) for entry in service_entries]\nci_mean_t = np.array([get_list(entry[2]) for entry in service_entries])\ngrand_mean_t_95 = [float(entry[3]) for entry in service_entries]\nci_mean_t_95 = np.array([get_list(entry[4]) for entry in service_entries])\ngrand_mean_d = [float(entry[5]) for entry in service_entries]\nci_mean_d = np.array([get_list(entry[6]) for entry in service_entries])\ngrand_mean_d_95 = [float(entry[7]) for entry in service_entries]\nci_mean_d_95 = np.array([get_list(entry[8]) for entry in service_entries])\nmean_p = [float(entry[9]) for entry in service_entries]\nci_p = np.array([get_list(entry[10]) for entry in service_entries])\n\n# Plot for T\naxes = plt.gca()\nplt.plot(service_time, grand_mean_t)\nplt.plot(service_time, ci_mean_t[:,0], linestyle = \"--\")\nplt.plot(service_time, ci_mean_t[:,1], linestyle = \"--\")\naxes.set_xlim([10,18])\nplt.xlabel(\"Service Time\")\nplt.ylabel(\"Grand Mean T and CI\")\nplt.title(\"Graph for T\")\nplt.savefig('grand_mean_t.jpg')#, bbox_inches='tight')\nplt.plot(service_time, grand_mean_t_95, 'black')\nplt.plot(service_time, ci_mean_t_95[:,0], 'y', linestyle = \"--\")\nplt.plot(service_time, ci_mean_t_95[:,1], 'y', linestyle = \"--\")\n# plt.show()\nplt.savefig('grand_mean_t_and_t95.jpg')#, bbox_inches='tight')\nplt.clf()\n\n# Plot for D\naxes = 
plt.gca()\nplt.plot(service_time, grand_mean_d)\nplt.plot(service_time, ci_mean_d[:,0], linestyle = \"--\")\nplt.plot(service_time, ci_mean_d[:,1], linestyle = \"--\")\nplt.xlabel(\"Service Time\")\nplt.ylabel(\"Grand Mean D and CI\")\nplt.title(\"Graph for D\")\naxes.set_xlim([10,18])\nplt.savefig('grand_mean_d.jpg')#, bbox_inches='tight')\nplt.plot(service_time, grand_mean_d_95, 'black')\nplt.plot(service_time, ci_mean_d_95[:,0], 'y', linestyle = \"--\")\nplt.plot(service_time, ci_mean_d_95[:,1], 'y', linestyle = \"--\")\n# plt.show()\nplt.savefig('grand_mean_d_and_d95.jpg')#, bbox_inches='tight')\nplt.clf()\n\n# Plot for P\naxes = plt.gca()\nplt.plot(service_time, mean_p)\nplt.plot(service_time, ci_p[:,0], linestyle = \"--\")\nplt.plot(service_time, ci_p[:,1], linestyle = \"--\")\nplt.xlabel(\"Service Time\")\nplt.ylabel(\"Mean P and CI\")\nplt.title(\"Graph for P\")\naxes.set_xlim([10,18])\n# plt.show()\nplt.savefig('mean_p.jpg')#, bbox_inches='tight')\n\nwith open(\"stats.csv\", 'w') as csvf:\n\twriter = csv.writer(csvf)\n\twriter.writerow((str(service_time).strip('[]').split(',')))\n\twriter.writerow((str(grand_mean_t).strip('[]').split(',')))\n\twriter.writerow((str(grand_mean_d).strip('[]').split(',')))\n\twriter.writerow((str(mean_p).strip('[]').split(',')))\n\n'''\nReadings for Service Time : \nGrand Mean T : \nCI Mean T : \nGrand Mean of 95% T : \nCI of Mean 95% T : \nGrand Mean D : \nCI D : \nGrand Mean of 95% D : \nCI of Mean 95% D : \nMean P : \nCI P : \n'''","repo_name":"avshirod/projects","sub_path":"Fall_2016_-_CSC591_-_IoT_Analytics_-_Projects/1/1c/codes/plot_graph.py","file_name":"plot_graph.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9578279591","text":"import cv2\nimport numpy as np\nimport time\nfrom scipy import signal\n\ndef expand(img):\n w, h = img.shape\n nw = int(w)*2\n nh = int(h)*2\n new_img = np.zeros((nw, nh))\n new_img[::2, ::2] = img\n G = create_gaussian_window(5)\n for i in range(2, new_img.shape[0]-2, 2):\n for j in range(2, new_img.shape[1]-2, 2):\n m = new_img[i-2:i+3, j-2:j+3]\n new_img[i, j] = np.sum(m*G)\n return new_img\n\ndef expand_level(img, level):\n if level == 0:\n return img\n else:\n i = 0\n new_img = img.copy()\n while i < level:\n new_img = expand(new_img)\n i += 1\n return new_img\n \ndef reduce(img):\n w, h = img.shape\n nw = int(w//2)\n nh = int(h//2)\n new_img = np.zeros((nw, nh))\n G = create_gaussian_window(5)\n for i in range(2, img.shape[0]-2, 2):\n for j in range(2, img.shape[1]-2, 2):\n m = img[i-2:i+3, j-2:j+3]\n new_img[i//2, j//2] = np.sum(m*G)\n return new_img\n\ndef reduce_level(img, level):\n if level == 0:\n return img\n else:\n i = 0\n new_img = img.copy()\n while i < level:\n new_img = reduce(new_img)\n i += 1\n return new_img\n\ndef create_gaussian_window(window_size):\n ax, ay = np.meshgrid(np.linspace(-1,1,5), np.linspace(-1,1,5))\n ad = np.sqrt(ax*ax+ay*ay)\n sigma, mu = 1.5, 0.0\n gaussian_window = 1/(np.sqrt(2*np.pi)*sigma)*(np.exp(-((ad-mu)**2/(2.0*sigma**2))))\n return gaussian_window\n\ndef findCorners(img_gray, gaussian_window, k, threshold):\n dy, dx = np.gradient(img_gray)\n Ixx = dx**2\n Iyy = dy**2\n Ixy = dx*dy\n Sxx = signal.convolve2d(Ixx, gaussian_window)\n Syy = signal.convolve2d(Iyy, gaussian_window)\n Sxy = signal.convolve2d(Ixy, gaussian_window)\n det = (Sxx * Syy) - (Sxy**2)\n trace = Sxx + Syy\n r = det - k*(trace**2)\n cv2.normalize(r, r, 0, 1, cv2.NORM_MINMAX)\n loc = np.where(r > 
threshold)\n    return loc\n\nk = 0.055  # Harris response constant\nthreshold = 0.7\nwindow_size = 11\ngaussian_window = create_gaussian_window(window_size)\nlevel = 0\n\nGx = np.array([[-1, 0, 1],\n               [-2, 0, 2],\n               [-1, 0, 1]])\nGy = np.array([[1, 2, 1],\n               [0, 0, 0],\n               [-1, -2, -1]])\nGt1 = np.array([[-1, -1, -1],\n                [-1, -1, -1],\n                [-1, -1, -1]])\nGt2 = np.array([[1, 1, 1],\n                [1, 1, 1],\n                [1, 1, 1]])\n\n#cap = cv2.VideoCapture('C:/Users/USER/Downloads/videoplayback (2).mp4')\ncap = cv2.VideoCapture(0)\nret, prev_frame = cap.read()\nprev_frame_gray1 = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)\nprev_frame_gray1 = prev_frame_gray1/255.\nprev_frame_gray = reduce_level(prev_frame_gray1, level)\n#prev_corners = get_corners(prev_frame_gray)\nprev_corners = findCorners(prev_frame_gray1, gaussian_window, k, threshold)\nprev_corners = np.array(prev_corners)\nprev_corners = prev_corners.reshape(-1)\nprev_corners = np.int32(prev_corners/(2**level))\nprint(prev_corners.shape)\nmask = np.zeros_like(prev_frame)\ncount = 0\nwhile True:\n    \n    ret, frame = cap.read()\n    frame_gray1 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    frame_gray1 = frame_gray1/255.\n    frame_gray = reduce_level(frame_gray1, level)\n    Ix = (signal.convolve2d(prev_frame_gray, Gx) + signal.convolve2d(frame_gray, Gx))/2\n    Iy = (signal.convolve2d(frame_gray, Gy) + signal.convolve2d(prev_frame_gray, Gy))/2\n    It = signal.convolve2d(prev_frame_gray, Gt1) + signal.convolve2d(frame_gray, Gt2)\n    u = np.zeros((Ix.shape[0]+100, Ix.shape[1]+100))\n    v = np.zeros((Ix.shape[0]+100, Ix.shape[1]+100))\n    A = np.zeros((2, 2))\n    b = np.zeros((2, 1))\n    #mask = np.zeros_like(prev_frame)\n    new_corners = np.zeros_like(prev_corners)\n    \n    for i in range(prev_corners.shape[0]//2):\n        y = prev_corners[i]\n        x = prev_corners[i+prev_corners.shape[0]//2]\n        A[0, 0] = np.sum(Ix[y-3:y+4, x-3:x+4]**2)\n        A[0, 1] = np.sum(Ix[y-3:y+4, x-3:x+4]*Iy[y-3:y+4, x-3:x+4])\n        A[1, 0] = A[0, 1]\n        A[1, 1] = np.sum(Iy[y-3:y+4, x-3:x+4]**2)\n        A_inv = np.linalg.pinv(A)\n        b[0, 0] = -np.sum(Ix[y-3:y+4, x-3:x+4]*It[y-3:y+4, x-3:x+4])\n        b[1, 0] = -np.sum(Iy[y-3:y+4, x-3:x+4]*It[y-3:y+4, x-3:x+4])\n        c = np.matmul(A_inv, b)\n        u[y, x] = c[0]\n        v[y, x] = c[1]\n        new_corners[i] = y + u[y, x]\n        new_corners[i + prev_corners.shape[0]//2] = x + v[y, x]\n    \n    new_corners = np.int32(new_corners * (2**level))\n    prev_corners = np.int32(prev_corners * (2**level))\n    \n    for i in range(prev_corners.shape[0]//2):\n        f, g = prev_corners[i], prev_corners[i+prev_corners.shape[0]//2]\n        h, j = new_corners[i], new_corners[i+prev_corners.shape[0]//2]\n        mask = cv2.line(mask, (g, f), (j, h), (0, 255, 0), 3)\n        frame = cv2.circle(frame, (j, h), 8, (0, 0, 255), -1)\n    img = cv2.add(frame, mask)\n    count += 1\n    cv2.imshow('frame', img)\n    \n    key = cv2.waitKey(1)  # keyboard poll; kept separate from the Harris constant k\n    if key == 27:\n        break\n    elif key == ord('q'):\n        ret, prev_frame = cap.read()\n        prev_frame_gray1 = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)\n        prev_frame_gray1 = prev_frame_gray1/255.\n        prev_frame_gray = reduce_level(prev_frame_gray1, level)\n        #prev_corners = get_corners(prev_frame_gray)\n        prev_corners = findCorners(prev_frame_gray1, gaussian_window, k, threshold)\n        prev_corners = np.array(prev_corners)\n        prev_corners = prev_corners.reshape(-1)\n        prev_corners = np.int32(prev_corners/(2**level))\n        mask = np.zeros_like(prev_frame)\n        count = 0\n\n    if count == 50:\n        ret, prev_frame = cap.read()\n        prev_frame_gray1 = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)\n        prev_frame_gray1 = prev_frame_gray1/255.\n        prev_frame_gray = reduce_level(prev_frame_gray1, level)\n        #prev_corners = get_corners(prev_frame_gray)\n        
prev_corners = findCorners(prev_frame_gray1, gaussian_window, k, threshold)\n prev_corners = np.array(prev_corners)\n prev_corners = prev_corners.reshape(-1)\n prev_corners = np.int32(prev_corners/(2**level))\n mask = np.zeros_like(prev_frame)\n count = 0\n else:\n new_corners = np.int32(new_corners/(2**level))\n prev_corners = new_corners.copy()\n prev_frame_gray = frame_gray.copy()\n prev_frame = frame.copy()\n \ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"sm823zw/Harris-Corner-tracking-using-LK-method","sub_path":"Harris Corner tracking using LK method.py","file_name":"Harris Corner tracking using LK method.py","file_ext":"py","file_size_in_byte":6380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72314137848","text":"# Результаты тестирования\n# .\n# ----------------------------------------------------------------------\n# Ran 1 test in 1.207s\n\n# OK\n\nimport unittest\nfrom geopy import Nominatim, GoogleV3\nfrom geopy.distance import geodesic\n\n\nnom = Nominatim(user_agent=\"podvezisoseda, school project\")\n\ndef get_geocode_osm(usplace):\n '''Геокодер osm, возвращает географические координаты объекта по его адресу'''\n try:\n return nom.geocode(usplace)\n except:\n return None\n\n\naddress_request = [\"Москва, Солянка 14а\", \"Москва, бунинские луга, д4\", \"Москва, Кремль\"]\naddress_response =[\"14А с3, улица Солянка, Таганский район, Москва, Центральный федеральный округ, 109074, Россия\", \n \"Жилой квартал «Бунинские луга», Коммунарка, поселение Сосенское, Москва, Центральный федеральный округ, 142770, Россия\",\n \"Кремль, Северо-Восточная хорда, Кусково, район Вешняки, Москва, Центральный федеральный округ, 109456, Россия\"]\nclass TestGeocodeOSM(unittest.TestCase):\n '''Тестирование функции геокодирования по адресу'''\n def test_area(self):\n '''Проверка значений на эквивалентность'''\n for i in range(len(address_request)):\n self.assertEqual(get_geocode_osm(address_request[i]).address, address_response[i])","repo_name":"Domaestro/podvezi-soseda-ivr","sub_path":"PodveziSosedaSource/tests/test_geocode.py","file_name":"test_geocode.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"74812143287","text":"\"\"\"\nMingi Kang \nJanuary 16, 2022 \nProfessor Kumar \nProject : Lexicon Lab Coding Exercises\n\"\"\"\n\nimport pandas as pd \nimport matplotlib as mp \nimport numpy as np\nfrom numpy.linalg import norm \nfrom sklearn.manifold import TSNE \nimport matplotlib.pyplot as plt \nimport matplotlib.cm as cm \nfrom sklearn import datasets \nfrom switch import switch_simdrop \n\n\nclass Similarity: \n \n def __init__(self): \n \"\"\"Dictionary with ID and Animals Produced as Keys and Values from data-cochlear.txt\"\"\"\n self.data_cochlear = {} \n with open('data-cochlear.txt', 'r') as contents: \n data = contents.readlines() \n for lines in data: \n lines = lines.strip()\n lines = lines.replace('\\t', ' ') \n lines = lines.split() \n if lines[0] in self.data_cochlear.keys(): \n self.data_cochlear[lines[0]] += [lines[1]]\n else: \n self.data_cochlear[lines[0]] = [lines[1]]\n \n \"\"\"Word2vec dictionary with Word and Embedding as Keys and Values from word2vec.txt\"\"\"\n self.word2vec = {} \n with open('word2vec.txt', 'r') as contents: \n data = contents.readlines()\n data = data[1:]\n for lines in data: \n lines = lines.strip() \n lines = lines.split() \n self.word2vec[lines[0]] = [eval(i) for i in 
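Each corner update in the tracking loop above solves the 2x2 Lucas-Kanade normal equations over a 7x7 window. The same solve, isolated into a small numpy-only helper (a sketch for reference, not part of the script's API):

# Hedged sketch of the per-corner Lucas-Kanade solve used above:
# accumulate gradient products over a window and solve A [u v]^T = b.
import numpy as np

def lk_flow_at(Ix, Iy, It, y, x, r=3):
    wIx = Ix[y-r:y+r+1, x-r:x+r+1].ravel()
    wIy = Iy[y-r:y+r+1, x-r:x+r+1].ravel()
    wIt = It[y-r:y+r+1, x-r:x+r+1].ravel()
    A = np.array([[wIx @ wIx, wIx @ wIy],
                  [wIx @ wIy, wIy @ wIy]])
    b = -np.array([wIx @ wIt, wIy @ wIt])
    # pinv tolerates the rank-deficient (aperture-problem) case,
    # just as np.linalg.pinv does in the loop above.
    return np.linalg.pinv(A) @ b  # (u, v)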
list(lines[1:])] \n        \n        \"\"\"New Dictionaries of participant data without words that are not in word2vec.txt and speech2vec.txt\"\"\"\n        self.w2v_words = {} \n        for id in self.data_cochlear: \n            words = [] \n            for word in self.data_cochlear[id]: \n                try: \n                    self.word2vec[word]\n                    words.append(word) \n                except KeyError: \n                    pass \n            self.w2v_words[id] = words\n        \n        self.s2v_words = {} \n        for id in self.data_cochlear: \n            words = [] \n            for word in self.data_cochlear[id]: \n                try: \n                    self.speech2vec[word]\n                    words.append(word)\n                except KeyError: \n                    pass \n            self.s2v_words[id] = words \n        \n        \"\"\"Dictionary with ID and Embeddings as Keys and Values\"\"\"\n        self.w2v_embeddings = {} \n        for id in self.w2v_words: \n            embeddings = [] \n            for word in self.w2v_words[id]: \n                embeddings.append(self.word2vec[word]) \n            self.w2v_embeddings[id] = embeddings \n        \n        self.s2v_embeddings = {} \n        for id in self.s2v_words: \n            embeddings = [] \n            for word in self.s2v_words[id]: \n                embeddings.append(self.speech2vec[word]) \n            self.s2v_embeddings[id] = embeddings \n    \n    def cosine_similarity(self, word1, word2, model): \n        if word1 == word2: \n            return 1\n        else: \n            if model == 'word2vec': \n                A = np.array(self.word2vec[word1]) \n                B = np.array(self.word2vec[word2]) \n                return np.dot(A,B) / (norm(A) * norm(B))\n            else: \n                A = np.array(self.speech2vec[word1]) \n                B = np.array(self.speech2vec[word2])\n                return np.dot(A,B) / (norm(A) * norm(B)) \n    \n    def visualize_items(self, ID): \n        tsne_2d = TSNE(perplexity= 20, n_components= 2, init= 'pca', n_iter= 3500, random_state= 32) \n        w2v_embeddings_2d = tsne_2d.fit_transform(np.array(self.w2v_embeddings[ID]))\n        s2v_embeddings_2d = tsne_2d.fit_transform(np.array(self.s2v_embeddings[ID])) \n        \n        words = self.w2v_words[ID]  # annotate the word2vec points with the produced words\n        plt.figure(figsize = (16, 9)) \n        plt.title(f\"{ID} T-SNE Plot\")\n        x = w2v_embeddings_2d[:,0] \n        y = w2v_embeddings_2d[:,1] \n        plt.scatter(x, y, c= 'r', alpha= 0.8, label = f\"{ID} Word2Vec\") \n        a = s2v_embeddings_2d[:,0] \n        b = s2v_embeddings_2d[:,1] \n        plt.scatter(a, b, c= 'k', alpha= 0.8, label = f\"{ID} Speech2Vec\") \n        for i, word in enumerate(words): \n            plt.annotate(word, alpha = 0.5, xy= (x[i], y[i]), xytext= (5,2), textcoords= 'offset points', ha= 'right', va= 'bottom', size = 10) \n        plt.legend(loc= 4) \n        plt.grid(True) \n        plt.savefig(f'{ID}_visualize_items.png', format= 'png', dpi= 150, bbox_inches= 'tight') \n        plt.show() \n    \n    def pairwise_similarity(self): \n        \n        self.w2v_scores = {} \n        self.s2v_scores = {} \n        \n        for ID in self.w2v_words: \n            num = [2] \n            idx = 1 \n            while idx != len(self.w2v_words[ID]):\n                num += [self.cosine_similarity(self.w2v_words[ID][idx -1], self.w2v_words[ID][idx], 'word2vec')] \n                idx += 1 \n            self.w2v_scores[ID] = num \n        \n        for ID in self.s2v_words: \n            num = [2] \n            idx = 1 \n            while idx != len(self.s2v_words[ID]): \n                num += [self.cosine_similarity(self.s2v_words[ID][idx-1], self.s2v_words[ID][idx], 'speech2vec')]\n                idx += 1\n            self.s2v_scores[ID] = num \n        \n        data = {'ID': self.data_cochlear.keys(), 'Word2Vec Similarity': self.w2v_scores.values(), 
'Speech2Vec Similarity': self.s2v_scores.values()}\n self.df = pd.DataFrame(data) \n file_name = 'pairwise_similarity.csv'\n self.df.to_csv(file_name) \n return self.df \n \nclass Clusters(Similarity): \n \n def compute_clusters(self): \n \n a = Similarity()\n self.df = a.pairwise_similarity()\n self.w2v_clusters = {} \n self.w2v_switches = {} \n self.s2v_clusters = {} \n self.s2v_switches = {} \n \n for index, row in self.df.iterrows(): \n simdrop = switch_simdrop(self.w2v_words[row['ID']], row['Word2Vec Similarity']) \n clusters = [] \n switches = [] \n idx = 0 \n while idx < len(simdrop): \n if simdrop[idx] == 0: \n clusters.append(row['Word2Vec Similarity'][idx])\n idx += 1 \n elif simdrop[idx] == 1: \n switches.append(row['Word2Vec Similarity'][idx]) \n idx += 1\n else: \n idx += 1\n self.w2v_clusters[row['ID']] = clusters \n self.w2v_switches[row['ID']] = switches \n \n for index, row in self.df.iterrows(): \n simdrop = switch_simdrop(self.s2v_words[row['ID']], row['Speech2Vec Similarity'])\n clusters = [] \n switches = [] \n idx = 0 \n while idx < len(simdrop):\n if simdrop[idx] == 0: \n clusters.append(row['Speech2Vec Similarity'][idx]) \n idx += 1\n elif simdrop[idx] == 1: \n switches.append(row['Speech2Vec Similarity'][idx])\n idx += 1\n else: \n idx += 1\n self.s2v_clusters[row['ID']] = clusters \n self.s2v_switches[row['ID']] = switches \n \n data = {'ID': self.data_cochlear.keys(), 'W2V Words' : self.w2v_words.values(), 'W2V Clusters' : self.w2v_clusters.values(), \"W2V Switches\" : self.w2v_switches.values(), 'S2V Words' : self.s2v_words.values(), 'S2V Clusters' : self.s2v_clusters.values(), 'S2V Switches' : self.s2v_switches.values()}\n self.df = pd.DataFrame(data) \n file_name = 'compute_clusters.csv'\n self.df.to_csv(file_name) \n return self.df \n \n def visualize_clusters(self, ID): \n a = Clusters()\n df = a.compute_clusters()\n \n n = 2\n mean_switches = (float(df.loc[df['ID'] == ID][\"W2V Switches\"].apply(len)) / float(df.loc[df['ID'] == ID][\"W2V Words\"].apply(len)), float(df.loc[df['ID'] == ID][\"S2V Switches\"].apply(len)) / float(df.loc[df['ID'] == ID][\"S2V Words\"].apply(len)))\n mean_clusters = (float(df.loc[df['ID'] == ID][\"W2V Clusters\"].apply(len)) / float(df.loc[df['ID'] == ID][\"W2V Words\"].apply(len)), float(df.loc[df['ID'] == ID][\"S2V Clusters\"].apply(len)) / float(df.loc[df['ID'] == ID][\"S2V Words\"].apply(len)))\n \n fig, ax = plt.subplots()\n index = np.arange(n) \n bar_width = 0.35 \n opacity = 0.50\n \n plot1 = plt.bar(index, mean_switches, bar_width, alpha= opacity, color= 'b', label= 'Switches') \n plot2 = plt.bar(index + bar_width, mean_clusters, bar_width, alpha= opacity, color= 'r', label= 'Clusters') \n plt.ylabel('mean')\n plt.title('Mean Number of Switches and Clusters')\n plt.xticks(index + bar_width, ('Word2Vec', 'Speech2Vec'))\n plt.legend()\n plt.tight_layout()\n plt.savefig(f'{ID}_visualize_clusters.png', format = 'png', dpi = 150, bbox_inches= 'tight')\n plt.show()\n \n \na = Similarity() \na.cosine_similarity('the', 'or', 'word2vec') \na.visualize_items('CAF-657') \na.pairwise_similarity()\n\nb = Clusters()\nb.compute_clusters()\nb.visualize_clusters('CAF-657')\n","repo_name":"mkang817415/Mingi_cochlear-","sub_path":"lexicon_lab_mingi.py","file_name":"lexicon_lab_mingi.py","file_ext":"py","file_size_in_byte":10020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18127535485","text":"'''\nsimulation of barnsley fern (fractal) using 
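A quick worked check of the cosine formula used by `cosine_similarity` above, on toy 3-d vectors (hypothetical values, just to make the number concrete):

import numpy as np
from numpy.linalg import norm

# Toy "embeddings"; real word2vec/speech2vec vectors are much longer.
cat = np.array([1.0, 0.5, 0.0])
dog = np.array([0.9, 0.6, 0.1])
car = np.array([0.0, 0.2, 1.0])

cos = lambda a, b: np.dot(a, b) / (norm(a) * norm(b))
print(cos(cat, dog))  # close to 1: near-neighbours stay in one cluster
print(cos(cat, car))  # much lower: a likely "switch" between clusters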
python3.\nAuthor-Ashutosh(0Pixel0)\n'''\n\nimport turtle\nimport random\n\npen = turtle.Turtle()\npen.speed(0)\npen.color(\"green\")\npen.penup()\n\nx = 0\ny = 0\nfor n in range(11000):\n pen.goto(65 * x, 37 * y - 252) # scaling\n pen.pendown()\n pen.dot(3)\n pen.penup()\n r = random.random()\n if r < 0.01:\n x, y = 0.00 * x + 0.00 * y, 0.00 * x + 0.16 * y + 0.00\n elif r < 0.86:\n x, y = 0.85 * x + 0.04 * y, -0.04 * x + 0.85 * y + 1.60\n elif r < 0.93:\n x, y = 0.20 * x - 0.26 * y, 0.23 * x + 0.22 * y + 1.60\n else:\n x, y = -0.15 * x + 0.28 * y, 0.26 * x + 0.24 * y + 0.44","repo_name":"Arindam200/Python_Projects","sub_path":"Projects/Barnsley_fern/barnsley_fern.py","file_name":"barnsley_fern.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"77"} +{"seq_id":"28892638365","text":"# Standard library imports\nimport random\nimport os\nimport sys\nimport re\n\n# Related third party imports\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nfrom scipy import io\nfrom pandas import DataFrame\n\n# Local application/library specific imports\nimport classify_covs as my_classif\nfrom manifold import random_spd\nfrom connectivity import cov_to_corr\nsys.path.append(\"/home/sb238920/CODE/servier2\")\nfrom conn_utilities import get_structurals, get_conditions, extract_rois\n\n\ndef test_corr_to_Z():\n \"\"\"Testing function corr_to_Z\"\"\"\n n_subjects = random.randint(2, 60)\n shape = random.randint(2, 100)\n input_corrs = np.empty((n_subjects, shape, shape))\n for input_corr in input_corrs:\n input_corr = random_spd(shape, shape)\n input_corr = cov_to_corr(input_corr)\n Z = my_classif.corr_to_Z(input_corrs)\n output_corrs = np.tanh(Z)\n assert_almost_equal(output_corrs, input_corrs)\n\n\ndef test_statistical_test():\n \"\"\"Testing function statistical_test\"\"\"\n # Prepare the dataframe\n conn_file = \"/volatile/new/salma/subject1to40/\" + \\\n \"conn_servier2_1to40sub_RS1-Nback2-Nback3-RS2_Pl-D_1_1_1.mat\"\n if not os.path.isfile(conn_file):\n raise IOError(\"file {0} not found\".format(conn_file))\n\n conn_class = io.loadmat(conn_file, struct_as_record=False,\n squeeze_me=True)['CONN_x']\n structurals = get_structurals(conn_class)\n subjects = [re.findall(\"([A-Z]{2}[0-9]{6})\", file_name)[0] for file_name in\n structurals]\n conditions = get_conditions(conn_class)\n rois = ['IPL', 'LMFG_peak1', 'RCPL_peak1', 'LCPL_peak3', 'LT',\n 'vIPS_big', 'pIPS_big', 'MT_big', 'FEF_big', 'RTPJ', 'RDLPFC',\n 'AG_big', 'SFG_big', 'PCC', 'MPFC', 'FP']\n conn_folder = os.path.splitext(conn_file)[0]\n df_list = list()\n for s, subject in enumerate(subjects):\n for c, condition in enumerate(conditions):\n file_name = \"ROI_Subject%03d_Condition%03d.mat\" % (s + 1, c + 1)\n regions_signal = extract_rois(os.path.join(\n conn_folder, \"results/preprocessing\", file_name), rois)\n df_list.append({\"condition\": condition,\n \"subj_id\": subject,\n \"region_signals\": regions_signal})\n df = DataFrame(df_list)\n\n # Launch statistics\n t_test = my_classif.statistical_test(df, conditions, p_correction=\"fdr\",\n estimators={'kind': 'tangent',\n 'cov_estimator': None})\n # Visualize results","repo_name":"rphlypo/parietalretreat","sub_path":"test_classify_covs.py","file_name":"test_classify_covs.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70749217209","text":"from inspect import isawaitable\nfrom typing 
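The four branches above are the standard Barnsley IFS maps chosen with probabilities 0.01/0.85/0.07/0.07. A hedged alternative rendering of the same attractor with numpy and matplotlib instead of turtle, which is considerably faster for large point counts:

# Same iterated function system, drawn as a scatter plot instead of turtle dots.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
pts = np.zeros((50000, 2))
x = y = 0.0
for n in range(1, len(pts)):
    r = rng.random()
    if r < 0.01:
        x, y = 0.0, 0.16 * y
    elif r < 0.86:
        x, y = 0.85 * x + 0.04 * y, -0.04 * x + 0.85 * y + 1.60
    elif r < 0.93:
        x, y = 0.20 * x - 0.26 * y, 0.23 * x + 0.22 * y + 1.60
    else:
        x, y = -0.15 * x + 0.28 * y, 0.26 * x + 0.24 * y + 0.44
    pts[n] = (x, y)

plt.scatter(pts[:, 0], pts[:, 1], s=0.1, c='green')
plt.axis('off')
plt.show()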
import Any, Callable, Optional, Union\nfrom dataclasses import dataclass\n\n\nclass SagaError(Exception):\n def __init__(self, action_exception, compensation_exceptions):\n self.action = action_exception\n self.compensations = compensation_exceptions\n\n\n@dataclass\nclass Action:\n action: Callable[..., Any]\n compensation: Callable[..., Any]\n compensation_args: Optional[Union[tuple[Any], list[Any]]] = None\n result: Optional[Any] = None\n\n async def act(self, *args):\n result = self.action(*(args if self.action.__code__.co_varnames else []))\n if isawaitable(result):\n result = await result\n\n return result\n\n async def compensate(self):\n result = self.compensation(\n *(self.compensation_args if self.compensation.__code__.co_varnames else []) # pyright:ignore\n )\n if isawaitable(result):\n result = await result\n\n return result\n\n\n@dataclass\nclass Saga:\n \"\"\"\n The Saga class provides a way to manage Saga-style transactions using a sequence of steps,\n where each step consists of an operation and a compensation function. Transactions will be\n executed sequentially, and step-by-step compensation is supported.\n\n Methods:\n execute(self) -> Any:\n Execute the saga, sequentially executing each action and storing the result for\n compensation use in case of failure. If any action fails, compensation functions will\n be called in reverse order for each executed action.\n \"\"\"\n steps: list[Action]\n\n async def execute(self):\n args = []\n for index, action in enumerate(self.steps):\n if isinstance(action, Action):\n try:\n actioned_result = await action.act(*args)\n if actioned_result is None:\n args = []\n elif isinstance(actioned_result, (list, tuple)):\n args = actioned_result\n else:\n args = (actioned_result,)\n action.compensation_args = args\n action.result = actioned_result\n except Exception as action_exception:\n compensation_exceptions = await self._run_compensations(index)\n raise SagaError(action_exception, compensation_exceptions)\n\n return self\n\n async def _run_compensations(self, last_action_index: int):\n compensation_exceptions = []\n for compensation_index in range(last_action_index - 1, -1, -1):\n try:\n action = self.steps[compensation_index]\n await action.compensate()\n except Exception as ex:\n compensation_exceptions.append(ex)\n\n return compensation_exceptions\n\n\nclass OrchestrationBuilder:\n \"\"\"\n OrchestrationBuilder is a utility class for building a saga-style transaction using a series of\n steps, where each step consists of an action and a compensation function. The transaction will be\n executed in sequence and support compensation on a per-step basis.\n\n Usage:\n ```\n builder = OrchestrationBuilder()\n builder.add_step(action_1, compensation_1)\n builder.add_step(action_2, compensation_2)\n ...\n builder.add_step(action_n, compensation_n)\n saga = await builder.execute()\n ```\n\n Methods:\n - add_step(action: Callable[..., Any], compensation: Callable[..., Any]) -> OrchestrationBuilder:\n Adds a step to the transaction, consisting of an action and a compensation function.\n Both action and compensation functions can be synchronous or asynchronous. Returns\n the current OrchestrationBuilder instance.\n\n - execute() -> Saga:\n Builds and executes a Saga instance representing the transaction. 
When an action function\n completes successfully, its response will be passed to the next action function as a parameter.\n If an action function fails, the Saga will compensate for the previously executed actions.\n\n For example, if action_n fails, the compensations will be executed in the following order:\n compensation_n-1, compensation_n-2, ..., compensation_1. Finally raises a SagaError.\n\n OrchestrationBuilder instance methods should be chained together to build up the desired\n sequence of actions and compensations.\n\n When the action function completes, its response will be passed to the corresponding compensation\n function as a parameter.\n\n See also:\n - Saga\n \"\"\"\n\n def __init__(self):\n self.steps: list[Action] = []\n\n def add_step(self, action: Callable[..., Any], compensation: Callable[..., Any]) -> 'OrchestrationBuilder':\n action_ = Action(action, compensation)\n self.steps.append(action_)\n\n return self\n\n async def execute(self) -> Saga:\n return await Saga(self.steps).execute()\n\n","repo_name":"cdddg/py-saga-orchestration","sub_path":"saga.py","file_name":"saga.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29297289692","text":"import pygame\nimport os\nimport sys\n\nos.environ['SDL_VIDEO_CENTERED'] = '1' # Force static position of screen\n\n# Constants\nWIN_W = 8 * 100\nWIN_H = 8 * 100\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nBOX_DIM = WIN_W / 8\nPIECEWIDTH = 100\nPIECEHEIGHT = 100\n\n# Global Variables\ncheck_for_50_move_rule = 0\nloop = 0 # How many iterations of the while loop\ninit_mx, init_my = (-1, -1) # First mouse click for selecting piece\ndest_mx, dest_my = (-1, -1) # Second mouse click for placing piece\nend = (-1, -1) # Not used yet\nwhiteturn = True # White or Black turn\nwaitforsecondbutton = False # Check if first button or second button\nblocked = False # piece between initial and destination\ncounter = 0\nindex = 0\n\n\nclass Entity(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n\n\nclass Player:\n def __init__(self, name):\n self.castle_long = False\n self.castle_short = False\n\n\nclass Game(Entity):\n def __init__(self):\n Entity.__init__(self)\n\n\nclass Block(Entity):\n def __init__(self, xpos, ypos, l):\n Entity.__init__(self)\n self.image = self.set_image(l)\n self.image.convert()\n self.rect = pygame.Rect(xpos, ypos, BOX_DIM, BOX_DIM)\n\n def set_image(self, letter):\n if letter == \"B\":\n # print \"black\"\n sur = pygame.image.load(\"blocks/black_square.png\").convert_alpha()\n sur = pygame.transform.scale(sur, (PIECEWIDTH, PIECEHEIGHT))\n return sur\n else:\n # print \"white\"\n sur = pygame.image.load(\"blocks/white_square.png\").convert_alpha()\n sur = pygame.transform.scale(sur, (PIECEWIDTH, PIECEHEIGHT))\n return sur\n\n\n\nclass PieceBoard(Entity):\n def __init__(self, xpos, ypos, piece_object):\n Entity.__init__(self)\n self.image = self.set_image(piece_object)\n self.rect = pygame.Rect(xpos, ypos, BOX_DIM, BOX_DIM)\n\n def set_image(self, piece_object):\n sur = piece_object.image\n sur = pygame.transform.scale(sur, (BOX_DIM, BOX_DIM))\n return sur\n\n\ndef blit_pieces(piece, col, row, screen):\n piece = pygame.transform.scale(piece, (PIECEWIDTH, PIECEHEIGHT))\n piece_rect = pygame.Rect(col * PIECEHEIGHT, row * PIECEWIDTH, BOX_DIM, BOX_DIM)\n screen.blit(piece, piece_rect)\n\n\ndef piece_move(init_mousepos, dest_mousepos, board_pieces):\n global whiteturn\n 
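A runnable end-to-end use of `OrchestrationBuilder` above, with hypothetical reserve/charge step names (it assumes the classes above are importable). The failing second step triggers the first step's compensation, and each action's return value is forwarded both to the next action and to its own compensation:

import asyncio

async def reserve_stock():
    print("stock reserved")
    return "order-1"          # forwarded to charge_card and to release_stock

async def release_stock(order_id):
    print(f"stock released for {order_id}")

async def charge_card(order_id):
    raise RuntimeError("payment declined")  # forces the rollback path

async def refund_card():
    pass  # never runs: the charge itself failed

async def main():
    builder = (OrchestrationBuilder()
               .add_step(reserve_stock, release_stock)
               .add_step(charge_card, refund_card))
    try:
        await builder.execute()
    except SagaError as e:
        print("rolled back:", e.action)

asyncio.run(main())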
board_pieces[dest_mousepos[0]][dest_mousepos[1]] = board_pieces[init_mousepos[0]][init_mousepos[1]]\n board_pieces[init_mousepos[0]][init_mousepos[1]] = 'e'\n whiteturn = not whiteturn\n\n\n# Rules of pieces\ndef pawn_check(init_mousepos, dest_mousepos, board_pieces, whiteturn):\n if whiteturn:\n if board_pieces[dest_mousepos[0]][dest_mousepos[1]] == 'e' and dest_mousepos[1] == init_mousepos[1]:\n if init_mousepos[0] - dest_mousepos[0] == 1:\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n elif init_mousepos[0] == 6 and init_mousepos[0] - dest_mousepos[0] < 3:\n if clear_line(init_mousepos, dest_mousepos, board_pieces):\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n elif board_pieces[dest_mousepos[0]][dest_mousepos[1]] != 'e' and (dest_mousepos[1] - init_mousepos[1]) ** 2 == 1:\n if init_mousepos[0] - dest_mousepos[0] == 1:\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n elif not whiteturn:\n if board_pieces[dest_mousepos[0]][dest_mousepos[1]] == 'e' and dest_mousepos[1] == init_mousepos[1]:\n if dest_mousepos[0] - init_mousepos[0] == 1:\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n elif init_mousepos[0] == 1 and dest_mousepos[0] - init_mousepos[0] < 3:\n if clear_line(init_mousepos, dest_mousepos, board_pieces):\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n elif board_pieces[dest_mousepos[0]][dest_mousepos[1]] != 'e' and (dest_mousepos[1] - init_mousepos[1]) ** 2 == 1:\n if dest_mousepos[0] - init_mousepos[0] == 1:\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n\n\ndef knight_check(init_mousepos, dest_mousepos, board_pieces):\n if (dest_mousepos[1] - init_mousepos[1]) ** 2 + (dest_mousepos[0] - init_mousepos[0]) ** 2 == 5:\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n\n\ndef bishop_check(init_mousepos, dest_mousepos, board_pieces):\n if clear_diagonal(init_mousepos, dest_mousepos, board_pieces):\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n\n\ndef rook_check(init_mousepos, dest_mousepos, board_pieces):\n if clear_line(init_mousepos, dest_mousepos, board_pieces):\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n\n\ndef queen_check(init_mousepos, dest_mousepos, board_pieces):\n if clear_diagonal(init_mousepos, dest_mousepos, board_pieces):\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n elif clear_line(init_mousepos, dest_mousepos, board_pieces):\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n\n\ndef king_check(init_mousepos, dest_mousepos, board_pieces):\n if (dest_mousepos[1] - init_mousepos[1]) ** 2 + (dest_mousepos[0] - init_mousepos[0]) ** 2 in (1, 2):\n piece_move(init_mousepos, dest_mousepos, board_pieces)\n\n\ndef clear_diagonal(init_mousepos, dest_mousepos, board_piece):\n if abs(dest_mousepos[1] - init_mousepos[1]) != abs(dest_mousepos[0] - init_mousepos[0]):\n return False\n\n if (dest_mousepos[0]-init_mousepos[0])**2 + (dest_mousepos[1]-init_mousepos[1])**2 == 2:\n return True\n\n if dest_mousepos[0] > init_mousepos[0]:\n if dest_mousepos[1] > init_mousepos[1]:\n tmp = [init_mousepos[0]+1, init_mousepos[1]+1]\n else:\n tmp = [init_mousepos[0]+1, init_mousepos[1]-1]\n else:\n if dest_mousepos[1] > init_mousepos[1]:\n tmp = [init_mousepos[0]-1, init_mousepos[1]+1]\n else:\n tmp = [init_mousepos[0]-1, init_mousepos[1]-1]\n\n if board_piece[tmp[0]][tmp[1]] != 'e':\n return False\n else:\n return clear_diagonal(tmp, dest_mousepos, board_piece)\n\n\ndef clear_line(init_mousepos, dest_mousepos, board_piece):\n if dest_mousepos[0] != init_mousepos[0] and 
dest_mousepos[1] != init_mousepos[1]:\n return False\n\n if (dest_mousepos[0]-init_mousepos[0])**2 + (dest_mousepos[1]-init_mousepos[1])**2 == 1:\n return True\n\n if init_mousepos[0] == dest_mousepos[0]:\n if dest_mousepos[1] > init_mousepos[1]:\n tmp = [init_mousepos[0], init_mousepos[1]+1]\n else:\n tmp = [init_mousepos[0], init_mousepos[1]-1]\n else:\n if dest_mousepos[0] > init_mousepos[0]:\n tmp = [init_mousepos[0]+1, init_mousepos[1]]\n else:\n tmp = [init_mousepos[0]-1, init_mousepos[1]]\n\n if board_piece[tmp[0]][tmp[1]] != 'e':\n return False\n else:\n return clear_line(tmp, dest_mousepos, board_piece)\n\n\n# main\ndef main():\n global init_mx, init_my, dest_mx, dest_my, end, waitforsecondbutton, whiteturn\n pygame.init()\n\n # Create Game Variables\n\n fps = 60\n clock = pygame.time.Clock()\n play = True\n pygame.display.set_caption('Chess')\n screen = pygame.display.set_mode((WIN_W, WIN_H), pygame.SRCALPHA)\n\n # Create Groups\n board_group = pygame.sprite.Group()\n # Load Background\n board = [\n \"WBWBWBWB\",\n \"BWBWBWBW\",\n \"WBWBWBWB\",\n \"BWBWBWBW\",\n \"WBWBWBWB\",\n \"BWBWBWBW\",\n \"WBWBWBWB\",\n \"BWBWBWBW\", ]\n\n # Load Initial Pieces on Board\n board_pieces = [\n ['r', 'n', 'b', 'q', 'k', 'b', 'n', 'r'],\n ['p', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e'],\n ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],\n ['R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R']]\n\n # Create Game Objects, Update\n\n # Build Background\n x = y = 0\n for row in board:\n for letter in row:\n b = Block(x, y, letter)\n board_group.add(b)\n x += BOX_DIM\n y += BOX_DIM\n x = 0\n\n # Game loop\n\n while play:\n global loop\n\n loop += 1 # Not sure if useful\n\n # Checks if button pressed\n for event in pygame.event.get():\n # Checks if window exit button pressed\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n # Checks if mouse button pressed\n if event.type is pygame.MOUSEBUTTONDOWN:\n if not waitforsecondbutton:\n init_my, init_mx = pygame.mouse.get_pos()\n print(init_my, init_mx)\n init_mousepos = (int(init_mx / 100), int(init_my / 100))\n if whiteturn:\n if board_pieces[init_mousepos[0]][init_mousepos[1]] in ['P', 'N', 'B', 'R', 'Q', 'K']:\n waitforsecondbutton = True\n\n else:\n if board_pieces[init_mousepos[0]][init_mousepos[1]] in ['p', 'n', 'b', 'r', 'q', 'k']:\n waitforsecondbutton = True\n\n else:\n dest_my, dest_mx = pygame.mouse.get_pos()\n dest_mousepos = (int(dest_mx / 100), int(dest_my / 100))\n if whiteturn:\n if board_pieces[dest_mousepos[0]][dest_mousepos[1]] not in ['P', 'N', 'B', 'R', 'Q', 'K']:\n\n if board_pieces[init_mousepos[0]][init_mousepos[1]] == 'P':\n pawn_check(init_mousepos, dest_mousepos, board_pieces, whiteturn)\n\n elif board_pieces[init_mousepos[0]][init_mousepos[1]] == 'N':\n knight_check(init_mousepos, dest_mousepos, board_pieces)\n\n elif board_pieces[init_mousepos[0]][init_mousepos[1]] == 'B':\n bishop_check(init_mousepos, dest_mousepos, board_pieces)\n\n elif board_pieces[init_mousepos[0]][init_mousepos[1]] == 'R':\n rook_check(init_mousepos, dest_mousepos, board_pieces)\n\n elif board_pieces[init_mousepos[0]][init_mousepos[1]] == 'Q':\n queen_check(init_mousepos, dest_mousepos, board_pieces)\n\n elif board_pieces[init_mousepos[0]][init_mousepos[1]] == 'K':\n king_check(init_mousepos, dest_mousepos, board_pieces)\n\n 
else:\n if board_pieces[dest_mousepos[0]][dest_mousepos[1]] not in ['p', 'n', 'b', 'r', 'q', 'k']:\n\n if board_pieces[init_mousepos[0]][init_mousepos[1]] == 'p':\n pawn_check(init_mousepos, dest_mousepos, board_pieces, whiteturn)\n\n elif board_pieces[init_mousepos[0]][init_mousepos[1]] == 'n':\n knight_check(init_mousepos, dest_mousepos, board_pieces)\n\n elif board_pieces[init_mousepos[0]][init_mousepos[1]] == 'b':\n bishop_check(init_mousepos, dest_mousepos, board_pieces)\n\n elif board_pieces[init_mousepos[0]][init_mousepos[1]] == 'r':\n rook_check(init_mousepos, dest_mousepos, board_pieces)\n\n elif board_pieces[init_mousepos[0]][init_mousepos[1]] == 'q':\n queen_check(init_mousepos, dest_mousepos, board_pieces)\n\n elif board_pieces[init_mousepos[0]][init_mousepos[1]] == 'k':\n king_check(init_mousepos, dest_mousepos, board_pieces)\n\n waitforsecondbutton = False\n\n # Draw Everything\n screen.fill(WHITE)\n for b in board_group:\n screen.blit(b.image, b.rect)\n\n if waitforsecondbutton:\n #if board[row][col] = 'W' :\n piece = pygame.image.load(\"blocks/white_square_highlight.png\").convert_alpha()\n blit_pieces(piece, init_mousepos[1], init_mousepos[0], screen)\n\n for row in range(0, 8):\n for col in range(0, 8):\n if board_pieces[row][col] != 'e':\n if board_pieces[row][col] == 'p':\n piece = pygame.image.load(\"chesspiecesimages/blackpawn.png\").convert_alpha()\n elif board_pieces[row][col] == 'P':\n piece = pygame.image.load(\"chesspiecesimages/whitepawn.png\").convert_alpha()\n elif board_pieces[row][col] == 'n':\n piece = pygame.image.load(\"chesspiecesimages/blackknight.png\").convert_alpha()\n elif board_pieces[row][col] == 'N':\n piece = pygame.image.load(\"chesspiecesimages/whiteknight.png\").convert_alpha()\n elif board_pieces[row][col] == 'b':\n piece = pygame.image.load(\"chesspiecesimages/blackbishop.png\").convert_alpha()\n elif board_pieces[row][col] == 'B':\n piece = pygame.image.load(\"chesspiecesimages/whitebishop.png\").convert_alpha()\n elif board_pieces[row][col] == 'r':\n piece = pygame.image.load(\"chesspiecesimages/blackrook.png\").convert_alpha()\n elif board_pieces[row][col] == 'R':\n piece = pygame.image.load(\"chesspiecesimages/whiterook.png\").convert_alpha()\n elif board_pieces[row][col] == 'q':\n piece = pygame.image.load(\"chesspiecesimages/blackqueen.png\").convert_alpha()\n elif board_pieces[row][col] == 'Q':\n piece = pygame.image.load(\"chesspiecesimages/whitequeen.png\").convert_alpha()\n elif board_pieces[row][col] == 'k':\n piece = pygame.image.load(\"chesspiecesimages/blackking.png\").convert_alpha()\n elif board_pieces[row][col] == 'K':\n piece = pygame.image.load(\"chesspiecesimages/whiteking.png\").convert_alpha()\n blit_pieces(piece, col, row, screen)\n\n # Limits frames per iteration of while loop\n clock.tick(fps)\n # Writes to main surface\n pygame.display.flip()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TheRealKinglogic/Chess_GUI_Python","sub_path":"Chess1/Chess.py","file_name":"Chess.py","file_ext":"py","file_size_in_byte":15177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26982176158","text":"import PyGnuplot as gp\nimport numpy as np\nX = np.arange(42)\nY = np.zeros(42)\n# Load file\nfile1 = open('plots/plot00', 'r')\nLines = file1.readlines()\n\ni=0\nfor line in Lines:\n nums = line.split(' ')\n for i in range(len(nums)):\n Y[i] = nums[i]\ngp.s([X, Y])\ngp.c('set terminal pngcairo size 1100,1000 enhanced')\ngp.c('set output 
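A small sanity check of the recursive `clear_line`/`clear_diagonal` helpers above on the opening position (a sketch; it assumes those two functions are in scope):

board = [
    ['r', 'n', 'b', 'q', 'k', 'b', 'n', 'r'],
    ['p', 'p', 'p', 'p', 'p', 'p', 'p', 'p'],
    ['e'] * 8, ['e'] * 8, ['e'] * 8, ['e'] * 8,
    ['P', 'P', 'P', 'P', 'P', 'P', 'P', 'P'],
    ['R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R'],
]
print(clear_line((6, 0), (4, 0), board))      # True: the pawn's double step is open
print(clear_line((7, 0), (4, 0), board))      # False: own pawn blocks the rook
print(clear_diagonal((7, 2), (5, 4), board))  # False: pawn on (6, 3) blocks the bishop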
\"plot1.png\"')\ngp.c('plot \"tmp.dat\" u 1:2 w lp')\n","repo_name":"BenKarcher/Computer_Physik","sub_path":"5/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37815944027","text":"from datetime import datetime\nfrom typing import List, Optional\n\nfrom opennem.db import SessionLocal\nfrom opennem.db.models.opennem import NetworkRegion\nfrom opennem.schema.network import (\n NETWORKS,\n NetworkAPVI,\n NetworkAU,\n NetworkNEM,\n NetworkRegionSchema,\n NetworkSchema,\n NetworkWEM,\n)\nfrom opennem.utils.timezone import is_aware\n\nNEM_STATES = [\"QLD\", \"NSW\", \"VIC\", \"ACT\", \"TAS\", \"SA\", \"NT\"]\n\n\ndef state_from_network_region(network_region: str) -> str:\n _state = network_region\n\n if _state.endswith(\"1\"):\n _state = _state[:-1]\n\n _state = _state.strip().upper()\n\n if _state in NEM_STATES:\n return _state\n\n raise Exception(\"State {} not found\".format(network_region))\n\n\ndef network_from_state(state: str) -> NetworkSchema:\n state = state.upper().strip()\n\n if state in [\"WA\"]:\n return NetworkWEM\n\n if state in [\"QLD\", \"NSW\", \"VIC\", \"ACT\", \"TAS\", \"SA\", \"NT\"]:\n return NetworkNEM\n\n raise Exception(\"Unknown network {}\".format(state))\n\n\ndef network_from_network_region(\n network_region: str,\n) -> NetworkSchema:\n network_region = network_region.upper()\n\n if network_region in [\"WEM\", \"WA1\"]:\n return NetworkWEM\n if network_region in [\"NEM\", \"NSW1\", \"QLD1\", \"SA1\", \"VIC1\", \"TAS1\"]:\n return NetworkNEM\n\n raise Exception(\"Unknown network {}\".format(network_region))\n\n\ndef network_from_network_code(network_code: str) -> NetworkSchema:\n network_code = network_code.upper().strip()\n\n if network_code in [\"AU\"]:\n return NetworkAU\n\n if network_code in [\"WEM\"]:\n return NetworkWEM\n\n if network_code in [\"NEM\"]:\n return NetworkNEM\n\n if network_code in [\"APVI\"]:\n return NetworkAPVI\n\n network_lookup = list(filter(lambda n: n.code == network_code, NETWORKS))\n\n if len(network_lookup):\n return network_lookup.pop()\n\n raise Exception(\"Unknown network {}\".format(network_code))\n\n\ndef get_network_region_schema(\n network: NetworkSchema, network_region_code: Optional[str] = None\n) -> List[NetworkRegionSchema]:\n \"\"\"Return regions for a network\"\"\"\n s = SessionLocal()\n regions_query = s.query(NetworkRegion).filter_by(network_id=network.code)\n\n if network_region_code:\n regions_query = regions_query.filter_by(code=network_region_code)\n\n regions_result = regions_query.all()\n\n regions = [NetworkRegionSchema.from_orm(i) for i in regions_result]\n\n return regions\n\n\ndef datetime_add_network_timezone(dt: datetime, network: NetworkSchema) -> datetime:\n \"\"\" Returns a datetime in network timezone \"\"\"\n return dt.astimezone(network.get_fixed_offset())\n","repo_name":"bje-/opennem","sub_path":"opennem/core/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"6669278736","text":"# 포도주 시식\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nwine = [int(input()) for _ in range(n)]\ndp = []\n\nif n < 3 :\n dp.append(sum(wine))\nelse:\n dp.append(wine[0])\n dp.append(wine[0]+wine[1])\n dp.append(max(dp[1], dp[0]+wine[2], wine[1]+wine[2]))\n\n for i in range(3, n):\n dp.append(max(dp[i-1], dp[i-3]+wine[i-1]+wine[i], 
dp[i-2]+wine[i]))\n\nprint(dp[-1])","repo_name":"surpmh/algorithms","sub_path":"BaekJoon/동적 계획법 1/11_2156.py","file_name":"11_2156.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20494837707","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\nimport argparse\nimport pickle\nimport random\nimport numpy as np\nfrom pathlib import Path\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\nfrom torch.autograd import Variable\nimport torch.optim as optim\n\nfrom models import DNN\nfrom utils import *\n\nX_DIM = 28\nY_DIM = 2\ngoals = ['leaving', 'blocking']\n\n\ndef prepare_testing_data(trajs, max_episode_length, agent_id, dT=1):\n X = []\n Y = []\n N = len(trajs)\n for episode_id in range(N):\n traj1, traj2 = trajs[episode_id][0], trajs[episode_id][1]\n T = min(len(traj1), args.max_episode_length * dT)\n if agent_id == 0:\n X.append([np.array(phi(traj1[t], traj2[t])) for t in range(0,T-dT,dT)])\n else:\n X.append([np.array(phi(traj2[t], traj1[t])) for t in range(0,T-dT,dT)])\n return X\n\n\ndef test_single_video(X_all, model, args):\n log_prod = []\n for X in X_all:\n T = len(X)\n if args.network_type == 'LSTM':\n if args.cuda:\n hx = Variable(torch.zeros(1, args.latent_dim).cuda())\n cx = Variable(torch.zeros(1, args.latent_dim).cuda())\n else:\n hx = Variable(torch.zeros(1, args.latent_dim))\n cx = Variable(torch.zeros(1, args.latent_dim))\n hidden = (hx, cx)\n else:\n hidden = None\n for t in range(T):\n x_tensor = torch.from_numpy(X[t]).float().unsqueeze(0)\n if args.cuda:\n x_var = Variable(x_tensor.cuda())\n else:\n x_var = Variable(x_tensor)\n y_pred, hidden = model(x_var, hidden)\n log_prod.append(y_pred.data.cpu().numpy()[0][0])\n return log_prod\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data-dir', type=str, default='./data', help='Data directory')\nparser.add_argument('--max-episode-length', type=int, default=31, help='Maximum episode length')\nparser.add_argument('--network-type', type=str, default='LSTM', help='Network type (MLP, LSTM)')\nparser.add_argument('--latent-dim', type=int, default=128, help='Hidden size of LSTM cell')\nparser.add_argument('--checkpoint-dir', type=str, default='./checkpoints', help='Checkpoint directory')\nparser.add_argument('--train-size', type=int, default=50, help='The size of the training set')\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n args.cuda = torch.cuda.is_available()\n vid_dirs = [\n args.data_dir + '/animations_all/HH/Style1/7',\n args.data_dir + '/animations_all/HH/Style1/10',\n args.data_dir + '/animations_all/HH/Style1/20',\n args.data_dir + '/animations_all/HH/Style1/50',\n args.data_dir + '/animations_all/HH/Style1/100',\n args.data_dir + '/animations_all/HH/Style2/7',\n args.data_dir + '/animations_all/HH/Style2/10',\n args.data_dir + '/animations_all/HH/Style2/20',\n args.data_dir + '/animations_all/HH/Style2/50',\n args.data_dir + '/animations_all/HH/Style2/100',\n args.data_dir + '/animations_all/HO/Style1',\n args.data_dir + '/animations_all/HO/Style2',\n args.data_dir + '/animations_all/HO/Style3',\n args.data_dir + '/animations_all/OO/collision',\n args.data_dir + '/animations_all/OO/rod',\n args.data_dir + '/animations_all/OO/rope',\n args.data_dir + '/animations_all/OO/spring',\n ]\n \n checkpoint_dir = args.checkpoint_dir + '/{}_{}'.format(args.network_type, 
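Tracing the recurrence above on a small sample (worked by hand in the comments) makes the three cases concrete: skip glass i; drink i after skipping i-1; or drink i-1 and i after skipping i-2. Wrapped as a function for the trace:

def max_wine(wine):
    # dp[i]: best total over glasses[0..i] with no three consecutive drinks.
    n = len(wine)
    if n < 3:
        return sum(wine)
    dp = [wine[0], wine[0] + wine[1],
          max(wine[0] + wine[1], wine[0] + wine[2], wine[1] + wine[2])]
    for i in range(3, n):
        dp.append(max(dp[i-1],                        # skip glass i
                      dp[i-3] + wine[i-1] + wine[i],  # skip i-2, drink i-1 and i
                      dp[i-2] + wine[i]))             # skip i-1, drink i
    return dp[-1]

# dp trace for [6, 10, 13, 9, 8, 1]: 6, 16, 23, 28, 33, 33
print(max_wine([6, 10, 13, 9, 8, 1]))  # 33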
goals[0])\n p = Path(checkpoint_dir)\n model0 = DNN(X_DIM, Y_DIM, args.latent_dim, args.network_type, activation='log_softmax')\n if args.cuda: model0.cuda()\n load_model(model0, checkpoint_dir + '/model_{}'.format(args.train_size))\n print(checkpoint_dir)\n model0.eval()\n\n checkpoint_dir = args.checkpoint_dir + '/{}_{}'.format(args.network_type, goals[1])\n p = Path(checkpoint_dir)\n model1 = DNN(X_DIM, Y_DIM, args.latent_dim, args.network_type, activation='log_softmax')\n if args.cuda: model1.cuda()\n load_model(model1, checkpoint_dir + '/model_{}'.format(args.train_size))\n print(checkpoint_dir)\n model1.eval()\n\n\n raw_trajs = dict()\n results = dict()\n\n for i, vid_dir in enumerate(vid_dirs):\n print(vid_dir)\n loss = []\n trajs = []\n vid_id_list = list(range(1, 100, 2)) # only test the videos used as stimuli\n for vid_id in vid_id_list:\n trajs_file_path = vid_dir + '/{}.txt'.format(vid_id)\n traj1, traj2 = [], []\n with open(trajs_file_path) as f:\n for line in f:\n values = [float(x) for x in line.split()]\n traj1.append((values[0], values[1]))\n traj2.append((values[2], values[3]))\n trajs.append([traj1, traj2])\n X_all0 = prepare_testing_data(trajs, args.max_episode_length, 0, 1 if i < 13 else 5) # testing entity 0\n X_all1 = prepare_testing_data(trajs, args.max_episode_length, 1, 1 if i < 13 else 5) # testing entity 1\n\n L00 = test_single_video(X_all0, model0, args)\n L01 = test_single_video(X_all0, model1, args)\n L10 = test_single_video(X_all1, model0, args)\n L11 = test_single_video(X_all1, model1, args)\n # print(np.array([L00, L01, L10, L11]).transpose())\n\n results[vid_dir] = [L00, L01, L10, L11] \n\n pickle.dump(results, \n open('./data/BaselineDNN_L_{}.pik'.format(args.train_size), 'wb'), \n protocol=pickle.HIGHEST_PROTOCOL)\n","repo_name":"MicroSTM/PSF","sub_path":"experiments/test_social_dnn.py","file_name":"test_social_dnn.py","file_ext":"py","file_size_in_byte":5511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29475779263","text":"\"\"\"\nЗагрузка top_3 локаций\n\"\"\"\nimport logging\n\nfrom airflow import DAG\nfrom airflow.utils.dates import days_ago\n\nfrom airflow.operators.python import PythonOperator\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\nfrom airflow.providers.postgres.operators.postgres import PostgresOperator\nfrom airflow.operators.python import BranchPythonOperator\nfrom k_smirnov_plugins.k_smirnov_ram_location_operator import RAMLocationOperator\nfrom k_smirnov_plugins.k_smirnov_fission_sensor import FissionSensor\n\nDEFAULT_ARGS = {\n 'start_date': days_ago(2),\n 'owner': 'k-smirnov',\n 'poke_interval': 600\n}\n\ndag = DAG(\"k-smirnov_top3\",\n schedule_interval='@daily',\n default_args=DEFAULT_ARGS,\n max_active_runs=1,\n tags=['k-smirnov']\n )\n\ngeiger_counter = FissionSensor(\n task_id='geiger_counter',\n mode='reschedule',\n range_number=2,\n dag=dag\n)\n\n\ndef check_table():\n pg_hook = PostgresHook(postgres_conn_id='conn_greenplum_write')\n conn = pg_hook.get_conn()\n cursor = conn.cursor(\"check_table_cursor\")\n cursor.execute(\"\"\"SELECT EXISTS (SELECT 1\n FROM information_schema.tables\n WHERE table_name = 'k_smirnov_ram_location' \n AND table_schema = 'public') AS table_exists;\n \"\"\")\n return cursor.fetchone()[0]\n\n\ncheck_table = PythonOperator(\n task_id='check_table',\n python_callable=check_table,\n dag=dag\n)\n\n\ndef is_table_exist(table_exists):\n next_task = 'create_table'\n if table_exists == 'True':\n next_task = 
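The evaluation loop above threads an explicit `(hx, cx)` pair through every timestep; in modern PyTorch the `autograd.Variable` wrapper is no longer needed, since plain tensors carry autograd state. The same state-threading pattern with a stock `nn.LSTMCell` (hypothetical dims and stand-in inputs; the script's own DNN class is not assumed):

import torch
import torch.nn as nn

latent_dim, x_dim = 128, 28
cell = nn.LSTMCell(x_dim, latent_dim)

hx = torch.zeros(1, latent_dim)   # hidden state, reset once per trajectory
cx = torch.zeros(1, latent_dim)   # cell state
for t in range(31):               # max_episode_length in the script above
    x_t = torch.randn(1, x_dim)   # stand-in for the phi(...) feature vector
    hx, cx = cell(x_t, (hx, cx))
print(hx.shape)                   # torch.Size([1, 128])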
'get_top3_location'\n return next_task\n\n\nis_table_exist = BranchPythonOperator(\n task_id='is_table_exist',\n op_kwargs={\"table_exists\": \"{{ti.xcom_pull('check_table')}}\"},\n python_callable=is_table_exist,\n dag=dag\n)\n\ncreate_table = PostgresOperator(\n task_id='create_table',\n postgres_conn_id='conn_greenplum_write',\n sql=\"\"\"\n CREATE TABLE public.k_smirnov_ram_location (\n id int NOT NULL,\n \"name\" varchar(200) NULL,\n \"type\" varchar(200) NULL,\n dimension varchar(200) NULL,\n resident_cnt int NULL,\n CONSTRAINT k_smirnov_ram_location_pkey PRIMARY KEY (id)\n )\n DISTRIBUTED BY (id);\n \"\"\",\n dag=dag\n)\n\nget_top3_location = RAMLocationOperator(\n task_id='get_top3_location',\n trigger_rule='one_success',\n dag=dag\n)\n\n\ndef write_to_gp(top3_location):\n pg_hook = PostgresHook(postgres_conn_id='conn_greenplum_write')\n sql = \"\"\"\n INSERT INTO public.k_smirnov_ram_location (id, name, type, dimension, resident_cnt) \n SELECT id, name, type, dimension, resident_cnt\n FROM (\n VALUES \n {}\n ) tmp(id, name, type, dimension, resident_cnt)\n WHERE NOT EXISTS (SELECT 1 FROM public.k_smirnov_ram_location WHERE id = tmp.id);\n \"\"\"\n values = \"\"\n values_temp = \"({}, '{}', '{}', '{}', {}),\"\n top3_location_dic = eval(top3_location)\n for location in top3_location_dic:\n values += values_temp.format(location['id'], location['name'], location['type'], location['dimension'],\n location['residents'])\n sql = sql.format(values[:-1])\n pg_hook.run(sql)\n\n\nwrite_to_gp = PythonOperator(\n task_id='write_to_gp',\n python_callable=write_to_gp,\n op_kwargs={\"top3_location\": \"{{ti.xcom_pull('get_top3_location')}}\"},\n dag=dag\n)\n\ngeiger_counter >> check_table >> is_table_exist >> create_table >> get_top3_location >>write_to_gp\n\n","repo_name":"skarfex/education.courses_data_engineer","sub_path":"karpov_airflow_fullrep/dags/k-smirnov/k-smirnov_top3.py","file_name":"k-smirnov_top3.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32770456338","text":"'''\n4. Realizar un función que reciba una lista y devuelva una nueva lista cuyo contenido \nsea igual a la original pero invertida. Así, dada la lista [‘Di’, ‘buen’, ‘día’, ‘a’, ‘papa’],\n deberá devolver [‘papa’, ‘a’, ‘día’, ‘buen’, ‘Di’]. Llamar a dicha función\nCreated on 17 dic. 
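One caveat on `write_to_gp` above: it splices values directly into the SQL text. PostgresHook inherits `insert_rows` from Airflow's DB-API hook, which passes values as parameters instead; a hedged sketch (note it does not replicate the DAG's NOT EXISTS duplicate check, which would still be needed or replaced by an upsert):

# Parameterized alternative to the string-formatted INSERT above.
from airflow.providers.postgres.hooks.postgres import PostgresHook

def write_locations(locations):
    pg_hook = PostgresHook(postgres_conn_id='conn_greenplum_write')
    pg_hook.insert_rows(
        table='public.k_smirnov_ram_location',
        rows=[(loc['id'], loc['name'], loc['type'],
               loc['dimension'], loc['residents']) for loc in locations],
        target_fields=['id', 'name', 'type', 'dimension', 'resident_cnt'],
    )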
 +{"seq_id":"32770456338","text":"'''\n4. Write a function that receives a list and returns a new list whose contents\nare the same as the original but inverted. Thus, given the list ['Di', 'buen', 'día', 'a', 'papa'],\nit must return ['papa', 'a', 'día', 'buen', 'Di']. Call that function.\nCreated on 17 Dec. 2020\n\n@author: Javier\n'''\ndef pideLista():\n    lista=[\"HOLA\", \"ADIOS\", \"KASE\"]\n    return lista\ndef invierteLista(lista):\n    listaInvertida=[]\n    for i in range (len(lista)-1,-1,-1):\n        listaInvertida.append(lista[i])\n    return listaInvertida\n    \nprint(invierteLista(pideLista()))\n    ","repo_name":"JavierCamposCuesta/repoJavierCampos","sub_path":"1ºDaw/ProgramacionPython/Python/Listas/ej4.py","file_name":"ej4.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"382576577","text":"import json\nfrom flask import Flask, render_template, request, redirect, flash, url_for\nfrom datetime import datetime\n\n\ndef loadClubs():\n    with open(\"clubs.json\") as c:\n        listOfClubs = json.load(c)[\"clubs\"]\n        return listOfClubs\n\n\ndef loadCompetitions():\n    with open(\"competitions.json\") as comps:\n        listOfCompetitions = json.load(comps)[\"competitions\"]\n        for competitions in listOfCompetitions:\n            if datetime.strptime(competitions[\"date\"], \"%Y-%m-%d %H:%M:%S\") < datetime.now():\n                competitions[\"valid\"] = False\n            else:\n                competitions[\"valid\"] = True\n        return listOfCompetitions\n\n\napp = Flask(__name__)\napp.secret_key = \"something_special\"\n\n\ndef config_app(config):\n    app.config.from_object(config)\n    return app\n\n\ncompetitions = loadCompetitions()\nclubs = loadClubs()\n\n\n@app.route(\"/\")\ndef index():\n    return render_template(\"index.html\")\n\n\n@app.route(\"/showSummary\", methods=[\"POST\"])\ndef showSummary():\n    if not any(email[\"email\"] == request.form[\"email\"] for email in clubs):\n        flash(\"this email doesn't exist in our club list\")\n        return render_template(\n            \"index.html\",\n        )\n    else:\n        club = [club for club in clubs if club[\"email\"] == request.form[\"email\"]][0]\n        return render_template(\n            \"welcome.html\",\n            club=club,\n            competitions=competitions,\n        )\n\n\n@app.route(\"/book/<competition>/<club>\")\ndef book(competition, club):\n\n    foundClub = [c for c in clubs if c[\"name\"] == club]\n    foundCompetition = [c for c in competitions if c[\"name\"] == competition]\n    if foundClub and foundCompetition:\n        return render_template(\n            \"booking.html\",\n            club=foundClub[0],\n            competition=foundCompetition[0],\n        )\n    else:\n        flash(\"Something went wrong-please try again\")\n        return render_template(\"welcome.html\", club=foundClub[0], competitions=competitions)\n\n\n@app.route(\"/purchasePlaces\", methods=[\"POST\"])\ndef purchasePlaces():\n    competition = [c for c in competitions if c[\"name\"] == request.form[\"competition\"]][0]\n    club = [c for c in clubs if c[\"name\"] == request.form[\"club\"]][0]\n    placesRequired = int(request.form[\"places\"])\n    if placesRequired > 12:\n        flash(\"No more than 12 places by club!\")\n        return render_template(\"booking.html\", club=club, competition=competition)\n    elif placesRequired > int(competition[\"numberOfPlaces\"]):\n        flash(\"not enough places in this competition!\")\n        return render_template(\"booking.html\", club=club, competition=competition)\n    elif placesRequired > int(club[\"points\"]):\n        flash(\"you don't have enough points\")\n        flash(\"points : \" + str(club[\"points\"]))\n        return render_template(\"booking.html\", club=club, competition=competition)\n    else:\n        competition[\"numberOfPlaces\"] = int(competition[\"numberOfPlaces\"]) - placesRequired\n        club[\"points\"] = int(club[\"points\"]) - placesRequired\n        flash(\"Great-booking complete!\")\n        return render_template(\"welcome.html\", club=club, competitions=competitions)\n\n\n
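# Editor's sketch (not part of the original app): purchasePlaces can be exercised\n# with Flask's built-in test client; the form values below are hypothetical\n# placeholders, not names from the real clubs.json / competitions.json fixtures.\ndef _example_purchase_places_request():\n    with app.test_client() as client:\n        return client.post(\"/purchasePlaces\", data={\n            \"club\": \"Example Club\",                # hypothetical fixture name\n            \"competition\": \"Example Competition\",  # hypothetical fixture name\n            \"places\": \"3\",\n        })\n\n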
# TODO: Add route for points display\n@app.route(\"/display\")\ndef display():\n    return render_template(\"display.html\", public_list=clubs)\n\n\n@app.route(\"/logout\")\ndef logout():\n    return redirect(url_for(\"index\"))\n","repo_name":"Airdeon/P11-Am-liorez-une-application-Web-Python-par-des-tests-et-du-d-bogage","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34283393033","text":"import numpy as np\n\n# Helper Functions\n\nrotY = lambda x: np.array([\n    [np.cos(x), 0, np.sin(x)],\n    [0, 1, 0],\n    [-np.sin(x), 0, np.cos(x)]\n])\n\nrotZ = lambda x: np.array([\n    [np.cos(x), -np.sin(x), 0],\n    [np.sin(x), np.cos(x), 0],\n    [0, 0, 1]\n])\n\nrot_view = lambda z, y: rotY(y) @ rotZ(z)\n\nrot_degrees = lambda z, y: rot_view(np.deg2rad(z), np.deg2rad(y))\n","repo_name":"artem-tkachuk/cs182-project-pointnet-solutions","sub_path":"visualize/rotation.py","file_name":"rotation.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"14643934330","text":"\"\"\"\n# Definition for a Node.\nclass Node(object):\n    def __init__(self, val, children):\n        self.val = val\n        self.children = children\n\"\"\"\n\n\n\n# Implemented iteratively with a stack (list) √\nclass Solution(object):\n    def postorder(self, root):\n        ret, stack = [], root and [root]\n        while stack:\n            node = stack.pop()\n            ret.append(node.val)\n            stack += [child for child in node.children if child]  # reverse of root-right-left is left-right-root\n        return ret[::-1]\n
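\n# Editor's sketch (not part of the original solution): a tiny walk-through of the\n# iterative version above on a hypothetical 3-node tree 1 -> [3, 2]; the stack\n# yields root-right-left order [1, 2, 3], which reversed is the postorder [3, 2, 1].\n# Left as comments because the Node class is only given in the docstring above:\n#\n#     root = Node(1, [Node(3, []), Node(2, [])])\n#     assert Solution().postorder(root) == [3, 2, 1]\n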
\n# Recursive implementation\nclass Solution(object):\n    def __init__(self):\n        self.list = []\n    def postorder(self, root):\n        if root == None:\n            return []\n        children = root.children\n        for c in children:\n            self.postorder(c)\n        self.list.append(root.val)\n        return self.list\n\n","repo_name":"whoisalan/leetcode","sub_path":"easy/590N-aryTreePostorderTraversal.py","file_name":"590N-aryTreePostorderTraversal.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"71646011128","text":"import boost_histogram as bh\nimport mplhep as hep\n\n# Make 1-d histogram with 5 logarithmic bins from 1e0 to 1e5\nh = bh.Histogram(\n    bh.axis.Regular(5, 1e0, 1e5, metadata=\"x\", transform=bh.axis.transform.log),\n    storage=bh.storage.Weight(),\n)\n\n# Fill histogram with numbers\nx = (2e0, 2e1, 2e2, 2e3, 2e4)\nh.fill(x, weight=2)\n\nprint(h.view().value)\nprint(type(h.view().value))\nhep.histplot(h.view().value, bins=h.axes[0].edges)\n","repo_name":"KeanuGh/myROOT_analysis_framework","sub_path":"quick_scripts/mplhep_bh_weights_test.py","file_name":"mplhep_bh_weights_test.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14854004428","text":"import warnings\n\nimport dateutil.parser\nfrom qiskit.providers import BackendV1 as Backend\nfrom qiskit.providers.models import BackendConfiguration\nfrom qiskit.providers.models.backendstatus import BackendStatus\nfrom qiskit.providers import Options\n\nfrom . import exceptions, ionq_client, ionq_job\nfrom .helpers import ionq_basis_gates\n\n\nclass Calibration:\n    \"\"\"\n    IonQ backend calibration data.\n\n    This class is a simple wrapper for IonQ hardware calibration data.\n    \"\"\"\n\n    def __init__(self, data):\n        self._data = data\n\n    @property\n    def num_qubits(self):\n        \"\"\"The number of qubits available.\n\n        Returns:\n            int: A number of qubits.\n        \"\"\"\n        return int(self._data[\"qubits\"])\n\n    @property\n    def target(self):\n        \"\"\"The target calibrated hardware.\n\n        Returns:\n            str: The name of the target hardware backend.\n        \"\"\"\n        return self._data[\"target\"]\n\n    @property\n    def calibration_time(self):\n        \"\"\"Time of the measurement, in UTC.\n\n        Returns:\n            datetime.datetime: A datetime object with the time.\n        \"\"\"\n        return dateutil.parser.isoparse(self._data[\"date\"])\n\n    @property\n    def fidelities(self):\n        \"\"\"Fidelity for single-qubit (1q) and two-qubit (2q) gates, and State\n        Preparation and Measurement (spam) operations.\n\n        Currently provides only mean fidelity; additional statistical data will\n        be added in the future.\n\n        Returns:\n            dict: A dict containing fidelity data for 1q, 2q, and spam.\n        \"\"\"\n        return self._data[\"fidelity\"]\n\n    @property\n    def timings(self):\n        \"\"\"Various system property timings. All times expressed as seconds.\n\n        Timings currently include::\n\n            * ``t1``\n            * ``t2``\n            * ``1q``\n            * ``2q``\n            * ``readout``\n            * ``reset``\n\n        Returns:\n            dict: A dictionary of timings.\n        \"\"\"\n        return self._data[\"timing\"]\n\n    @property\n    def connectivity(self):\n        \"\"\"Returns connectivity data.\n\n        Returns:\n            list[tuple[int, int]]: An array of valid, unordered tuples of\n            possible qubits for executing two-qubit gates.\n        \"\"\"\n        return self._data[\"connectivity\"]\n\n\nclass IonQBackend(Backend):\n    \"\"\"IonQ Backend base class.\"\"\"\n\n    _client = None\n\n    @classmethod\n    def _default_options(cls):\n        return Options(shots=1024)\n\n    @property\n    def client(self):\n        \"\"\"A lazily populated IonQ API Client.\n\n        Returns:\n            IonQClient: An instance of a REST API client\n        \"\"\"\n        if self._client is None:\n            self._client = self.create_client()\n        return self._client\n\n    def create_client(self):\n        \"\"\"Create an IonQ REST API Client using provider credentials.\n\n        Raises:\n            IonQCredentialsError: If the provider's\n                :attr:`credentials ` does not have\n                a ``\"token\"`` or ``\"url\"`` key, or if their values are ``None``.\n\n        Returns:\n            IonQClient: An instance of a REST API client.\n        \"\"\"\n        credentials = self._provider.credentials\n\n        try:\n            token = credentials[\"token\"]\n        except KeyError as ex:\n            raise exceptions.IonQCredentialsError(\n                \"Credentials `token` not present in provider.\"\n            ) from ex\n\n        if token is None:\n            raise exceptions.IonQCredentialsError(\"Credentials `token` may not be None!\")\n\n        try:\n            url = credentials[\"url\"]\n        except KeyError as ex:\n            raise exceptions.IonQCredentialsError(\n                \"Credentials `url` not present in provider.\"\n            ) from ex\n\n        if url is None:\n            raise exceptions.IonQCredentialsError(\"Credentials `url` may not be None!\")\n\n        return ionq_client.IonQClient(token, url)\n\n    # pylint: disable=missing-type-doc,missing-param-doc,arguments-differ\n    def run(self, circuit, **kwargs):\n        \"\"\"Create and run a job on an IonQ Backend.\n\n        .. NOTE::\n\n            IonQ backends do not support multi-experiment jobs.\n            If ``circuit`` is provided as a list with more than one element\n            then this method will raise a RuntimeError.\n\n        Args:\n            circuit (:class:`QuantumCircuit `):\n                A Qiskit QuantumCircuit object.\n\n        Returns:\n            IonQJob: A reference to the job that was submitted.\n\n        Raises:\n            RuntimeError: If a multi-experiment circuit was provided.\n        \"\"\"\n        if isinstance(circuit, (list, tuple)):\n            if len(circuit) > 1:\n                raise RuntimeError(\"Multi-experiment jobs are not supported!\")\n            circuit = circuit[0]\n\n        for kwarg in kwargs:\n            if not hasattr(self.options, kwarg):\n                warnings.warn(\n                    \"Option %s is not used by this backend\" % kwarg, UserWarning, stacklevel=2\n                )\n        if \"shots\" not in kwargs:\n            kwargs[\"shots\"] = self.options.shots\n        passed_args = kwargs\n\n        job = ionq_job.IonQJob(\n            self,\n            None,\n            self.client,\n            circuit=circuit,\n            passed_args=passed_args,\n        )\n        job.submit()\n        return job\n\n    def retrieve_job(self, job_id):\n        \"\"\"Get a job from a specific backend, by job id.\"\"\"\n        return ionq_job.IonQJob(self, job_id, self.client)\n\n    def retrieve_jobs(self, job_ids):\n        \"\"\"Get a list of jobs from a specific backend, by job ids.\"\"\"\n\n        return [ionq_job.IonQJob(self, job_id, self.client) for job_id in job_ids]\n\n    # TODO: Implement backend status checks.\n    def status(self):\n        \"\"\"Return a backend status object to the caller.\n\n        Returns:\n            BackendStatus: the status of the backend.\n        \"\"\"\n        return BackendStatus(\n            backend_name=self.name(),\n            backend_version=\"1\",\n            operational=True,\n            pending_jobs=0,\n            status_msg=\"\",\n        )\n\n    def calibration(self):\n        \"\"\"Fetch the most recent calibration data for this backend.\n\n        Returns:\n            Calibration: A calibration data wrapper.\n        \"\"\"\n        backend_name = self.name().replace(\"_\", \".\")\n        calibration_data = self.client.get_calibration_data(backend_name)\n        if calibration_data is None:\n            return None\n        return Calibration(calibration_data)\n\n\nclass IonQSimulatorBackend(IonQBackend):\n    \"\"\"\n    IonQ Backend for running simulated jobs.\n\n    .. ATTENTION::\n\n        The maximum shot-count for a state vector sim is always ``1``.\n\n    .. ATTENTION::\n\n        Calling :meth:`get_counts `\n        on a job processed by this backend will return counts expressed as\n        probabilities, rather than a multiple of shots.\n    \"\"\"\n\n    @classmethod\n    def _default_options(cls):\n        return Options(shots=1024, sampler_seed=None)\n\n    # pylint: disable=missing-type-doc,missing-param-doc,arguments-differ,useless-super-delegation\n    def run(self, circuit, **kwargs):\n        \"\"\"Create and run a job on IonQ's Simulator Backend.\n\n
        .. WARNING::\n\n            The maximum shot-count for a state vector sim is always ``1``.\n            As a result, the ``shots`` keyword argument in this method is ignored.\n\n        Args:\n            circuit (:class:`QuantumCircuit `):\n                A Qiskit QuantumCircuit object.\n\n        Returns:\n            IonQJob: A reference to the job that was submitted.\n        \"\"\"\n        return super().run(circuit, **kwargs)\n\n    def calibration(self):\n        \"\"\"Simulators have no calibration data.\n\n        Returns:\n            NoneType: None\n        \"\"\"\n        return None\n\n    def __init__(self, provider):\n        \"\"\"Base class for interfacing with an IonQ backend\"\"\"\n        config = BackendConfiguration.from_dict(\n            {\n                \"backend_name\": \"ionq_simulator\",\n                \"backend_version\": \"0.0.1\",\n                \"simulator\": True,\n                \"local\": False,\n                \"coupling_map\": None,\n                \"description\": \"IonQ simulator\",\n                \"basis_gates\": ionq_basis_gates,\n                \"memory\": False,\n                \"n_qubits\": 29,\n                \"conditional\": False,\n                \"max_shots\": 1,\n                \"max_experiments\": 1,\n                \"open_pulse\": False,\n                \"gates\": [{\"name\": \"TODO\", \"parameters\": [], \"qasm_def\": \"TODO\"}],\n            }\n        )\n        super().__init__(configuration=config, provider=provider)\n\n\nclass IonQQPUBackend(IonQBackend):\n    \"\"\"IonQ Backend for running qpu-based jobs.\"\"\"\n\n    def __init__(self, provider):\n        config = BackendConfiguration.from_dict(\n            {\n                \"backend_name\": \"ionq_qpu\",\n                \"backend_version\": \"0.0.1\",\n                \"simulator\": False,\n                \"local\": False,\n                \"coupling_map\": None,\n                \"description\": \"IonQ QPU\",\n                \"basis_gates\": ionq_basis_gates,\n                \"memory\": False,\n                \"n_qubits\": 11,\n                \"conditional\": False,\n                \"max_shots\": 10000,\n                \"max_experiments\": 1,\n                \"open_pulse\": False,\n                \"gates\": [{\"name\": \"TODO\", \"parameters\": [], \"qasm_def\": \"TODO\"}],\n            }\n        )\n        super().__init__(configuration=config, provider=provider)\n\n\n__all__ = [\"IonQBackend\", \"IonQQPUBackend\", \"IonQSimulatorBackend\"]\n","repo_name":"nadinem100/iQuHackUnityGame","sub_path":"Library/PythonInstall/lib/python3.7/site-packages/qiskit_ionq/ionq_backend.py","file_name":"ionq_backend.py","file_ext":"py","file_size_in_byte":9640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19497189218","text":"from __future__ import (print_function, absolute_import)\n\nimport io\nimport collections\n\nfrom smoke.io.wrap import demo as io_wrp_dm\nfrom smoke.replay import demo as rply_dm\nfrom smoke.replay.const import Data\nimport pandas as pd\nimport config\n\nwith io.open('1481687622.dem', 'rb') as infile:\n    demo_io = io_wrp_dm.Wrap(infile)\n    demo_io.bootstrap()\n\n    # parse = Data.UserMessages | Data.GameEvents\n    demo = rply_dm.Demo(demo_io, parse=Data.All)\n    demo.bootstrap()\n\n    class_info = demo.match.class_info\n    messages_found = collections.Counter()\n    events_found = collections.Counter()\n\n    game_meta_tables = demo.match.recv_tables.by_dt['DT_DOTAGamerulesProxy']\n    game_status_index = game_meta_tables.by_name['dota_gamerules_data.m_nGameState']\n    \n    points = []\n    time_offset = None\n    for i, match in enumerate(demo.play()):\n        game_meta = match.entities.by_cls[class_info['DT_DOTAGamerulesProxy']][0].state\n        current_game_status = game_meta.get(game_status_index)\n        if current_game_status != 5:\n            continue\n        \n        world_data = match.entities.by_cls[class_info['DT_DOTA_PlayerResource']]\n        current_data = world_data[0].state\n\n        npc_info_table = match.recv_tables.by_dt['DT_DOTA_BaseNPC']\n\n        position_offset_x = npc_info_table.by_name['m_cellX']\n        position_offset_y = npc_info_table.by_name['m_cellY']\n        position_origin_v = npc_info_table.by_name['m_vecOrigin']\n
\n        player_resource = match.recv_tables.by_dt['DT_DOTA_PlayerResource']\n\n\n\n        time = game_meta.get(game_meta_tables.by_name['dota_gamerules_data.m_fGameTime'])\n\n        if time_offset is None:\n            time_offset = time\n\n\n        time -= time_offset\n\n\n        \n        for idx in range(10):\n            team_id = player_resource.by_name['m_iPlayerTeams.%04d' % idx]\n            hero_id = player_resource.by_name['m_hSelectedHero.%04d' % idx]\n            team, hero_handle = current_data.get(team_id), current_data.get(hero_id)\n\n            try:\n                if hero_handle:\n                    hero = match.entities.by_ehandle[hero_handle].state\n\n                    x = hero.get(position_offset_x) + hero.get(position_origin_v)[0] / 128.\n                    y = hero.get(position_offset_y) + hero.get(position_origin_v)[1] / 128.\n                    #print(hero_handle)\n                    data_vector = (time,hero_handle,x,y)\n                    #print(data_vector)\n                    points.append(data_vector)\n\n            except KeyError:\n                pass\n\n            # lh = current_data.get(player_resource.by_name['m_iLastHitCount.{:04d}'.format(idx)])\n            # deaths = current_data.get(player_resource.by_name['m_iDeaths.{:04d}'.format(idx)])\n            # print(lh, '/', deaths, end=', ')\n        # print('')\n        \n        messages_found.update(match.user_messages.keys())\n        for k, v in match.user_messages.items():\n            pass\n            # messages_found.add(k)\n            # if k in (67, 68): # (77, 78, 83, 85, 86, 88, 89, 97): # 68\n            #     print('FRAME', i, 'KEY', k, 'VALUE', dir(v[0]))\n            # break\n        \n        events_found.update(match.game_events.keys()) \n        for idx, lst in match.game_events.items():\n            # match.game_event_descriptors.by_eventid[idx]\n            # print(match.game_event_descriptors.by_eventid[idx])\n            break\n\n\n    df = pd.DataFrame(points, columns=[\"timestamp\",\"hero_id\",\"position_x\",\"position_y\"])\n    df.to_csv(config.data_file, index=None)\n\n\n\n\n","repo_name":"aigamedev/nuclai15","sub_path":"dota2/extract_data_vectors.py","file_name":"extract_data_vectors.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"11433621078","text":"\"\"\"\nInverse kinematics of a two-joint arm\nLeft-click the plot to set the goal position of the end effector\n\nAuthor: Daniel Ingram (daniel-s-ingram)\n        Atsushi Sakai (@Atsushi_twi)\n\nRef: P. I. Corke, \"Robotics, Vision & Control\", Springer 2017, ISBN 978-3-319-54413-7 p102\n- [Robotics, Vision and Control \\| SpringerLink](https://link.springer.com/book/10.1007/978-3-642-20144-8)\n\n\nAuthor: Karthik \n\tupdate-1:Dec-06-2020 :Converted the Original function implementation to class based implementation\n\"\"\"\nimport json\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport logging\nimport asyncio\nimport os\nimport yaml,threading\n\nfrom commons.mqttclient import MessageTelemetryClient\nfrom config_file_list import CONFIG_FILES, CONFIG_DIR\n\nlogging.basicConfig( level=logging.WARNING, format='%(levelname)-8s [%(filename)s:%(lineno)d] %(message)s' )\n\n\nclass RobotArm2:\n    \"\"\"This class implements Robot Arm with 2 joint ARM\n    \"\"\"\n\n    def __init__(self, robot_id, mode=\"motion\", base_x_coordinate=0, base_y_coordinate=0, len_shoulder_to_elbow=0.5, len_elbow_to_gripper=0.5,\n                 sample_rate=0.01, proportional_gain=10, show_animation=False, motion_sequence=\"seq-1\", pub_topic=None, sub_topic=None):\n        \"\"\"Initializes the Robot ARM\n\n        Args:\n            len_shoulder_to_elbow (float, optional): Length of arm from shoulder to elbow. Defaults to 0.5.\n            len_elbow_to_gripper (float, optional): Length of arm from elbow to gripper. Defaults to 0.5.\n            base_x_coordinate (int, optional): start x coordinate. Defaults to 0.\n            base_y_coordinate (int, optional): start y coordinate. Defaults to 0.\n            sample_rate (float, optional): Sample rate for data acquisition from robot ARM. Defaults to 0.01.\n            proportional_gain (int, optional): Gain value, this defines settling time near destination coordinate. Defaults to 10.\n        \"\"\"\n        if mode == \"motion\":\n            pass  # publisher of position\n        elif mode == \"tracker\":\n            pass  # subscriber of position\n        else:\n            pass  # raise exception\n        self.robot_id = robot_id\n        self.kp = proportional_gain\n        self.dt = sample_rate\n        self.l1 = len_shoulder_to_elbow\n        self.l2 = len_elbow_to_gripper\n        self.theta1 = 0.0\n        self.theta2 = 0.0\n        self.GOAL_TH = 0.01\n        self.dest_x = 0\n        self.dest_y = 0\n        self.prev_dest_x = self.dest_x\n        self.prev_dest_y = self.dest_y\n        self.shoulder = np.array([base_x_coordinate, base_y_coordinate])\n        self.show_animation = show_animation\n        self.sequence_count = 0\n        self.motion_sequence = motion_sequence\n        if self.show_animation:\n            plt.ion()\n        self.topics = dict( position_pub=pub_topic, position_sub=sub_topic )\n        self.telemetry_client = MessageTelemetryClient()\n        if self.topics[\"position_sub\"] is not None:\n            self.telemetry_client.subscribe( topic=self.topics[\"position_sub\"] )\n        self.telemetry_client_thread = threading.Thread( target=self.telemetry_client.start_service )\n        self.telemetry_client_thread.start();\n\n    def get_sample_time(self):\n        return self.dt\n\n    def publish(self, msg_type, msg):\n        if msg_type in self.topics.keys():\n            if self.topics[msg_type] is not None:\n                self.telemetry_client.publish( topic=self.topics[msg_type], payload=msg )\n\n    def get_telemetry_data(self):\n        pass\n\n    def generate_motion(self):\n        \"\"\"Computes the inverse kinematics for a planar 2DOF arm. When out of bounds, rewrite x and y with last correct values\n\n        Returns:\n            [type]: [description]\n        \"\"\"\n        try:\n            if math.sqrt( (self.dest_x ** 2) + (self.dest_y ** 2) ) > (self.l1 + self.l2):\n                raise RuntimeError(\n                    \"Coordinates cannot be reached by the Robot\" )\n\n            theta2_inner = (self.dest_x ** 2 + self.dest_y ** 2 -\n                            self.l1 ** 2 - self.l2 ** 2) / (2 * self.l1 * self.l2)\n            if (theta2_inner > 1) or (theta2_inner < -1):\n                raise RuntimeError(\n                    \"Coordinates cannot be reached by the Robot\" )\n\n            theta2_goal = np.arccos( theta2_inner )\n            if theta2_goal < 0:\n                theta1_goal = np.math.atan2( self.dest_y, self.dest_x ) + np.math.atan2(\n                    self.l2 * np.sin( theta2_goal ), (self.l1 + self.l2 * np.cos( theta2_goal )) )\n            else:\n                theta1_goal = np.math.atan2( self.dest_y, self.dest_x ) - np.math.atan2(\n                    self.l2 * np.sin( theta2_goal ), (self.l1 + self.l2 * np.cos( theta2_goal )) )\n\n            ang_diff = lambda theta1, theta2: (theta1 - theta2 + np.pi) % (2 * np.pi) - np.pi\n\n            self.theta1 = self.theta1 + self.kp * \\\n                ang_diff( theta1_goal, self.theta1 ) * self.dt\n            self.theta2 = self.theta2 + self.kp * \\\n                ang_diff( theta2_goal, self.theta2 ) * self.dt\n\n            self.prev_dest_x = self.dest_x\n            self.prev_dest_y = self.dest_y\n\n            wrist = self.update_joint_coordinates()\n\n            # check goal\n            if self.dest_x is not None and self.dest_y is not None:\n                d2goal = np.hypot( wrist[0] - self.dest_x,\n                                   wrist[1] - self.dest_y )\n\n            if abs( d2goal ) < self.GOAL_TH and self.dest_x is not None:\n                # return theta1, theta2\n                self.get_motion_sequence()\n\n        except ValueError as e:\n            logging.critical( e )\n            exit( -1 )\n        except TypeError as e:\n            logging.critical( e )\n            exit( -1 )\n        except RuntimeError as e:\n            logging.critical( e )\n            self.dest_x = self.prev_dest_x\n            self.dest_y = self.prev_dest_y\n\n
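    # Editor's note (worked check, not part of the original class): for the IK in\n    # generate_motion above with hypothetical link lengths l1 = l2 = 0.5 and target\n    # (0.5, 0.5): cos(theta2) = (x**2 + y**2 - l1**2 - l2**2) / (2*l1*l2)\n    #           = (0.5 - 0.5) / 0.5 = 0, so theta2 = pi/2, and\n    # theta1 = atan2(0.5, 0.5) - atan2(l2*sin(theta2), l1 + l2*cos(theta2))\n    #        = pi/4 - pi/4 = 0; forward kinematics then puts the elbow at (0.5, 0)\n    # and the wrist at (0.5, 0.5), exactly on the target.\n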
    def get_motion_sequence(self):\n        if self.motion_sequence == \"seq-1\":\n            if self.sequence_count == 1:\n                self.dest_x = 0.6\n                self.dest_y = 0.6\n            elif self.sequence_count == 2:\n                self.dest_x = 1\n                self.dest_y = 1\n            else:\n                self.sequence_count = 0\n        elif self.motion_sequence == \"seq-2\":\n            if self.sequence_count == 1:\n                self.dest_x = -0.6\n                self.dest_y = -0.6\n            elif self.sequence_count == 2:\n                self.dest_x = 0.1\n                self.dest_y = 0.1\n            elif self.sequence_count == 3:\n                self.dest_x = 0.5\n                self.dest_y = 0.3\n                self.sequence_count += 1\n            else:\n                self.sequence_count = 0\n\n        self.sequence_count += 1\n\n    def update_joint_coordinates(self):  # pragma: no cover\n        \"\"\"Plotting arm\n\n        Returns:\n            [type]: [description]\n        \"\"\"\n        result = dict()\n\n        elbow = self.shoulder + \\\n            np.array( [self.l1 * np.cos( self.theta1 ), self.l1 * np.sin( self.theta1 )] )\n        wrist = elbow + \\\n            np.array( [self.l2 * np.cos( self.theta1 + self.theta2 ),\n                       self.l2 * np.sin( self.theta1 + self.theta2 )] )\n        result.update({\n            \"robot_id\": self.robot_id,\n            \"shoulder\":np.array2string(self.shoulder),\n            \"elbow\": np.array2string( elbow ),\n            \"wrist\": np.array2string( wrist )\n        })\n        self.publish( msg_type=\"position_pub\", msg=json.dumps( result ) )\n\n        self.animate( shoulder=self.shoulder, elbow=elbow, wrist=wrist )\n\n        wrist[0] -= self.shoulder[0]\n        wrist[1] -= self.shoulder[1]\n        return wrist\n\n    def get_joint_coordinates(self):\n        elbow = self.shoulder + \\\n            np.array( [self.l1 * np.cos( self.theta1 ), self.l1 * np.sin( self.theta1 )] )\n        wrist = elbow + \\\n            np.array( [self.l2 * np.cos( self.theta1 + self.theta2 ),\n                       self.l2 * np.sin( self.theta1 + self.theta2 )] )\n        return dict(\n            shoulder=(self.shoulder[0], self.shoulder[1]),\n            elbow=(elbow[0], elbow[1]),\n            wrist=(wrist[0], wrist[1])\n        )\n\n    def animate(self, shoulder, elbow, wrist):\n        if self.show_animation:\n            plt.cla()\n            plt.plot( [shoulder[0], elbow[0]], [shoulder[1], elbow[1]], 'k-' )\n            plt.plot( [elbow[0], wrist[0]], [elbow[1], wrist[1]], 'k-' )\n\n            plt.plot( shoulder[0], shoulder[1], 'ro' )  # base of the robot\n            plt.plot( elbow[0], elbow[1], 'go' )  # joint 1\n            plt.plot( wrist[0], wrist[1], 'bo' )  # Tool tip\n\n            # plt.plot( [wrist[0], self.x], [wrist[1], self.y], 'g--' )\n            # plt.plot( self.x, self.y, 'g*' )\n            plt.xlim( shoulder[0] - 2, shoulder[0] + 2 )\n            plt.ylim( shoulder[1] - 2, shoulder[1] + 2 )\n            plt.show()\n            plt.pause( self.dt )\n\n\ndef create_robots():\n    try:\n        filename = CONFIG_DIR + CONFIG_FILES[\"robot\"]\n        robots = []\n        if os.path.exists( filename ):\n            with open( filename, 'r' ) as json_file:\n                robot_config = yaml.load( json_file, Loader=yaml.FullLoader )\n                for robot in robot_config[\"robots\"]:\n                    if robot[\"arm_count\"] == 2:\n                        specs = robot[\"specs\"]\n                        base_coordinate = robot[\"base_coordinate\"]\n                        robot = RobotArm2( robot_id=robot[\"robot_id\"],\n                                           mode=robot[\"mode\"],\n                                           base_x_coordinate=base_coordinate[\"x\"],\n                                           base_y_coordinate=base_coordinate[\"y\"],\n                                           len_shoulder_to_elbow=specs[\"length_shoulder_to_elbow\"],\n                                           len_elbow_to_gripper=specs[\"length_elbow_to_gripper\"],\n                                           sample_rate=robot[\"sample_rate\"],\n                                           proportional_gain=robot[\"proportional_gain\"],\n                                           show_animation=robot[\"show_animation\"],\n                                           motion_sequence=robot[\"motion_sequence\"],\n                                           pub_topic=robot[\"telemetry_topics\"][\"position_publish\"],\n                                           sub_topic=None )\n                        robots.append( robot )\n                return robots\n        else:\n            raise FileNotFoundError( \"File not found. 
Check filename and filepath\" )\n except FileNotFoundError as e:\n logging.critical( e )\n exit( -1 )\n except yaml.YAMLError as e:\n logging.critical( e )\n exit( -1 )\n except Exception as e:\n logging.critical( e )\n exit( -1 )\n\n\nasync def test():\n machines = create_robots()\n while True:\n for robot in machines:\n robot.generate_motion()\n await asyncio.sleep( 0.01 )\n\n\nif __name__ == \"__main__\":\n asyncio.run( test() )\n","repo_name":"eternalamit5/Indoor-Localisation","sub_path":"hrc/robot/arm2/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5392575095","text":"infile = open(\"h.txt\")\n\n#\ndata = []\nfor line in infile:\n line = line.strip()\n data.append(line.split())\n\n#\nnumbers = []\nfor i in range(len(data)):\n if i % 2 == 0:\n numbers.append([data[i], data[i+1]])\n\nfor i in numbers:\n for j in i[1]:\n print(j[::-1], end=\" \")\n print()\n\n\ninfile.close()\n","repo_name":"abd-ui/Makeen","sub_path":"week 2/day5/h.py","file_name":"h.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43373123618","text":"# Given two strings s and t of lengths m and n respectively, return the minimum window substring\n# of s such that every character in t (including duplicates) is included in the window.\n# If there is no such substring, return the empty string \"\".\n# The testcases will be generated such that the answer is unique.\n# --------------------------\n# m == s.length , n == t.length , 1 <= m, n <= 10 ** 5\n# s and t consist of uppercase and lowercase English letters.\n# --------------------------\n# Follow up: Could you find an algorithm that runs in O(m + n) time?\n\n\ndef min_window(s: str, t: str) -> str:\n # working_sol (21.69%, 65.61%) -> (301ms, 17mb) time: O(n + (n - j) * m) | space: O(m)\n if len(t) > len(s):\n return \"\"\n chars: dict[str] = {}\n cur_chars: dict[str] = {}\n for char in t:\n if char not in chars:\n chars[char] = 1\n cur_chars[char] = 0\n continue\n chars[char] += 1\n left: int = 0\n right: int = 0\n min_sub: str = \"\"\n whole: bool = False\n used: int = 0\n while right != len(s) + 1 and left < len(s):\n if used >= len(t):\n for key in cur_chars:\n if cur_chars[key] >= chars[key]:\n whole = True\n continue\n if chars[key] > cur_chars[key]:\n whole = False\n break\n if whole:\n if len(min_sub) > len(s[left: right]) or len(min_sub) == 0:\n min_sub = s[left: right]\n if len(min_sub) == len(t):\n return min_sub\n if s[left] in cur_chars:\n cur_chars[s[left]] -= 1\n used -= 1\n whole = False\n left += 1\n continue\n while left < len(s) and ((s[left] not in cur_chars) or ((cur_chars[s[left]] - chars[s[left]]) > 0)):\n if (s[left] in cur_chars) and ((cur_chars[s[left]] - chars[s[left]]) > 0):\n cur_chars[s[left]] -= 1\n used -= 1\n whole = False\n left += 1\n if (right < len(s)) and (s[right] in cur_chars):\n cur_chars[s[right]] += 1\n used += 1\n right += 1\n return min_sub\n\n\n# Time complexity: O(n + (n - j) * m) ->\n# -> traversing whole symbols_string, and creating 3 dictionaries with equal size => O(m) ->\n# -> traversing whole input_string from 0 to end with right_pointer, in the worst case,\n# and (n - m) with left_pointer => O(n + (n - m)) ->\n# -> for every index on this path checking every KEY in cur_chars + chars + cur_chars, every check is O(1) but\n# for every KEY is still should be O(m) => O((n - j) * 
3m) ->\n# -> O(m + n + (n - m) + (n - j) * 3m) => O(2n + (n - j) * 3m) => O(n + (n - j) * m)\n# ^^ m - len of symbols_string | n - len of input_string | j - number of unique subs with WHOLE in it ^^\n# --------------------------\n# ^^After sleeping on this task, it's not so bad in the end if I just add extra check to cull some calls\n# on check if WHOLE or not, now it's 308ms instead of 800, still not 120ms like top_tiers, but I'm fine with that.\n# And now it's not (n * m) -> ((n - j) * m) <- where's j is number of unique substrings\n# with correct number of search_values.\n# Don't know how to calculate this j. Worst case should be like -> s = \"aacbcaacbcaacbca\", t = \"aba\" ->\n# -> \"aacb\" gives us WHOLE but not same length as t, so we're going to check more ->\n# -> so after that we're checking every index from 3 to 15 is calling check on WHOLE, so it's still O((n - j) * m).\n# But in the best case, like -> s = \"aacbcddddddddd\", t = \"aba\" -> we're getting \"aacb\" ->\n# -> and after shifting left side by 1 index, it's never going to get (used >= len(t)), so it's just 1 check.\n# Better than it was, but still can be changed to check less, maybe I will come up with something later.\n# --------------------------\n# Auxiliary space: O(m) -> 3 dictionaries of the same size == m => O(3m) -> extra constants,\n# and string with the size == m in the worst case => O(m) -> O(4m) -> O(m)\n# --------------------------\n# Sheesh. Hard without extra info and only using HINT as guidance, evolving.\n# Dunno about O(n + m), but at least I made a correctly working solution.\n# Actually we're making it in ONE_WAY walk O(n), but with every step we're checking too much data\n# in dictionaries, so if we can't call dictionary operations as O(1), in this case I doubt that.\n# Because search in dictionary is O(1) but we're checking every KEY. Plus there's top solutions with <200ms,\n# when mine is 801ms. So if they're O(n + m) mine is something O(n + m * n) ->\n# -> we're checking every index once, but every KEY in dictionaries will be checked, on every index step.\n# --------------------------\n# Don't think changing extras from list to dict is going to change anything, but if there's\n# a 100 extra it's going to be faster. Well let's try this one as well.\n# Otherwise, I need to find how to check all_symbols used or not faster.\n# Well well well, working in this 266 test_case, 1 to go :)\n# --------------------------\n# Sadly 265/267 cases passed, now it's TimeGate, but at least I made working solution,\n# without extra info(google/gpt). There's a lot of extra checks, and maybe I will find how to cull them.\n# What about length?\n# While doing heavy_lifting I forgot that we can just cull any input with incorrect length.\n# Nah, not the case, but still good to implement.\n# Another one I see is that we can insta return if we found min_sub with equal length to t,\n# because t == min_sub is IDEAL option, no reasons to search anything else.\n# --------------------------\n# Ok. I need to fail, because I'm kinda stuck with thinking that's correct need more test_cases.\n# --------------------------\n# Creating two dictionaries because copying is taking the same time, and I want to use counter and limiter.\n# And without copying should be O(m) not O(2m).\n# --------------------------\n# !\n# s and t consist of uppercase and lowercase English letters. !\n# Should we differentiate them? 
Because \"a\" can be equal to \"A\" or not, no info on that.\n# !\n# such that every character in t (including duplicates) is included in the window !\n# Only this. Hmm. Guess there's would be no reasons to put UPPER and LOWER cases as constraints,\n# if we shouldn't differentiate them.\n# I will stick to this, and search for UPPER and LOWER cases as different characters.\n# Tested with test case s = \"A\" and t = \"a\", correct output from them is \"\".\n# So yes we should differentiate them.\n\n\ntest1 = \"ADOBECODEBANC\"\ntest1_t = \"ABC\"\ntest1_out = \"BANC\"\nprint(min_window(test1, test1_t))\nassert test1_out == min_window(test1, test1_t)\n\ntest2 = \"a\"\ntest2_t = \"a\"\ntest2_out = \"a\"\nprint(min_window(test2, test2_t))\nassert test2_out == min_window(test2, test2_t)\n\ntest3 = \"a\"\ntest3_t = \"aa\"\ntest3_out = \"\"\nprint(min_window(test3, test3_t))\nassert test3_out == min_window(test3, test3_t)\n\ntest4 = \"A\"\ntest4_t = \"a\"\ntest4_out = \"\"\nprint(min_window(test4, test4_t))\nassert test4_out == min_window(test4, test4_t)\n\ntest5 = \"acbbaca\"\ntest5_t = \"aba\"\ntest5_out = \"baca\"\nprint(min_window(test5, test5_t))\nassert test5_out == min_window(test5, test5_t)\n","repo_name":"Massprod/leetcode-testing","sub_path":"leetcode_problems/p76_minimum_window_substring.py","file_name":"p76_minimum_window_substring.py","file_ext":"py","file_size_in_byte":7264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34419471954","text":"from datetime import date\nfrom django.http import HttpResponseServerError\nfrom rest_framework import serializers, status\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom rareapi.models import Post, Rare_User, Category, Subscription, Tag\nfrom django.contrib.auth.models import User\nfrom rest_framework.decorators import action\n\n\nclass PostView(ViewSet):\n\n def retrieve(self, request, pk):\n\n post = Post.objects.get(pk=pk)\n serializer = PostSerializer(post)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def list(self, request):\n\n posts = []\n\n if 'status' in request.query_params:\n \n rare_user = Rare_User.objects.get(user=request.auth.user)\n\n if request.query_params['status'] == \"created\":\n posts = Post.objects.filter(user=rare_user.id)\n\n if request.query_params['status'] == \"subscribed\":\n subscriptions = Subscription.objects.filter(follower=rare_user.id)\n \n for subscription in subscriptions:\n author_posts = Post.objects.filter(user=subscription.author)\n posts.extend(author_posts)\n else:\n posts = Post.objects.all()\n\n serializer = PostSerializer(posts, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def create(self, request):\n\n\n post = Post()\n post.user = Rare_User.objects.get(user=request.auth.user)\n post.category = Category.objects.get(pk=request.data[\"category_id\"])\n post.title = request.data[\"title\"]\n post.publication_date = date.today()\n post.image_url = request.data[\"image_url\"]\n post.content = request.data[\"content\"]\n\n if request.auth.user.is_staff:\n post.approved=True\n else:\n post.approved=False\n\n post.save()\n\n serializer = PostSerializer(post)\n return Response(serializer.data)\n\n\n @action(methods=['post'], detail=True)\n def addTag(self, request, pk):\n \n tag = Tag.objects.get(pk=request.data[\"tag_id\"])\n post = Post.objects.get(pk=request.data[\"post_id\"])\n post.tags.add(tag)\n\n return Response({'message': 'Tag added'}, 
status=status.HTTP_201_CREATED)\n\n    def destroy(self, request, pk):\n        post = Post.objects.get(pk=pk)\n        post.delete()\n        return Response(None, status=status.HTTP_204_NO_CONTENT)\n\nclass RareUserSerializer(serializers.ModelSerializer):\n    class Meta:\n\n        model = Rare_User\n        fields = ('id', 'user', 'active', 'profile_image_url',\n                  'created_on', 'bio', 'full_name', 'username', 'email')\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = Category\n        fields = ('id', 'label')\n\nclass PostTagSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = Tag\n        fields = ('id', 'label')\n\nclass PostSerializer(serializers.ModelSerializer):\n\n    user = RareUserSerializer(many=False)\n    category = CategorySerializer(many=False)\n    tags = PostTagSerializer(many=True)\n\n    class Meta:\n        model = Post\n        fields = ('id', 'user', 'category', 'title',\n                  'publication_date', 'image_url', 'content', 'approved', 'tags')\n
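\n# Editor's sketch (not part of the original module): with the nested serializers\n# above, a single post serializes to nested dicts for user/category/tags; this\n# assumes at least one Post row already exists in the database:\ndef _example_serialize_first_post():\n    post = Post.objects.first()\n    return PostSerializer(post).data\n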
title\")\n \n stories = schema.List(title=u\"Stories\", value_type=schema.Object(schema=IStory))\n\nclass IRequirementsCatalogue(Interface):\n \"\"\"A requirements catalogue comprising multiple epics\n \"\"\"\n \n extractTime = schema.Datetime(title=u\"Extract time\")\n testTime = schema.Datetime(title=u\"Test time\")\n project = schema.TextLine(title=u\"Project\")\n epics = schema.List(title=u\"Epic\", value_type=schema.Object(schema=IEpic))\n\n def populate(input):\n \"\"\"Populate from XML representation in the file-like object input\n \"\"\"\n \n def serialize():\n \"\"\"Return a serialisation of this catalogue as an lxml ElementTree\n \"\"\"\n \n def write(output):\n \"\"\"Write XML representation to the file-like object output\n \"\"\"\n\n# Fix schemata we can't set immediately due to circular dependencies\nIStory['epic'].schema = IEpic\nIScenario['story'].schema = IStory\n","repo_name":"optilude/corejet.core","sub_path":"corejet/core/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"8521510400","text":"from fileinput import filename\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QToolTip, QWidget, QPushButton, QFileDialog\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtCore import QCoreApplication\nfrom PatranCommandSession import modeling\nfrom PatranCommandSession import Fields\nfrom PatranCommandSession import p3Utilities\nfrom PatranCommandSession import Loads\nfrom PatranCommandSession import LoadCases\nfrom PatranCommandSession import Result\n\n#from Loads import Load\n\nclass Xession(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle('Patran Session Generator')\n self.setWindowIcon(QIcon('patran.png'))\n self.move(300, 300)\n self.resize(400, 400)\n\n btnModeling = QPushButton('Modeling', self)\n btnFEMField = QPushButton('FEM Field', self)\n btnLoad = QPushButton('Loads', self)\n btnLoadCase = QPushButton('Load Cases', self)\n btnResultCombine = QPushButton('Rsult Combine', self)\n btnResultSum = QPushButton('Result Sum', self)\n btnQuit = QPushButton('Quit', self)\n\n # btnLoad.setToolTip('This is a QWidget widget1')\n # btnLoad = QPushButton('Button2', self)\n # btn2.setToolTip('This is a QWidget widget2')\n # btn.setToolTip('This is a QWidget widget')\n\n btnModeling.move(50, 50)\n btnFEMField.move(50, 100)\n btnLoad.move(50, 150)\n btnLoadCase.move(50,200)\n btnResultCombine.move(50, 250)\n btnResultSum.move(50,300)\n btnQuit.move(50, 350)\n\n btnModeling.resize(300, 30)\n btnFEMField.resize(300, 30)\n btnLoad.resize(300, 30)\n btnLoadCase.resize(300,30)\n btnResultCombine.resize(300, 30)\n btnResultSum.resize(300, 30)\n\n btnQuit.resize(300, 30)\n\n btnModeling.clicked.connect(self.btnModelingClick)\n btnFEMField.clicked.connect(self.btnFEMFieldClick)\n btnLoad.clicked.connect(self.btnLoadClick)\n btnLoadCase.clicked.connect(self.btnLoadCaseClick) \n btnResultCombine.clicked.connect(self.btnResultCombineClick)\n btnResultSum.clicked.connect(self.btnResultSumClick)\n btnQuit.clicked.connect(QApplication.instance().quit)\n\n #btn.resize(btn.sizeHint())\n\n self.show()\n\n def btnModelingClick(self):\n filename = QFileDialog.getOpenFileName(self, 'Open file', './*fem input.xlsx')\n if filename[0] !='':\n modeling.joint_modeling(filename[0])\n else:\n return -1\n\n def btnFEMFieldClick(self):\n filename = 
    def btnFEMFieldClick(self):\n        filename = QFileDialog.getOpenFileName(self, 'Open file', './*field input.xlsx')\n        print(filename[0])\n        if filename[0] != '':\n            Fields.FEMField(filename[0])\n        else:\n            return -1\n\n\n    def btnLoadClick(self):\n        filename = QFileDialog.getOpenFileName(self, 'Open file', './*load input.xlsx')\n        print('==============')\n        print(filename[0])\n        print('==============')\n        if filename[0] != '':\n            Loads.load_gen(filename[0])\n        else:\n            return -1\n    \n    def btnLoadCaseClick(self):\n        filename = QFileDialog.getOpenFileName(self, 'Open file', './*loadcases input.xlsx')\n        \n        if filename[0] != '':\n            LoadCases.LoadCase(filename[0])\n        else:\n            return -1\n\n    def btnResultCombineClick(self):\n        filename = QFileDialog.getOpenFileName(self, 'Open file', './*result input.xlsx')\n        if filename[0] != '':\n            Result.result_combine(filename[0])\n        else:\n            return -1\n\n    def btnResultSumClick(self):\n        filename = QFileDialog.getOpenFileName(self, 'Open file', './*result input.xlsx')\n        if filename[0] != '':\n            Result.result_sum(filename[0])\n        else:\n            return -1\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    ex = Xession()\n    sys.exit(app.exec_())\n\n\n","repo_name":"wjeongx/PatranCommandSession","sub_path":"p3Xession.py","file_name":"p3Xession.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16913605617","text":"from django.urls import path, include\nfrom django.urls.resolvers import URLPattern\nfrom ninja import NinjaAPI\n\napi = NinjaAPI()\n\n\n@api.get(\"/add\")\ndef add(request, a: int, b: int):\n    return {\"result\": a + b}\n\n\nninja_url = [\n]\nurlpatterns=[\n]","repo_name":"jungzi1/starting-django-ninja","sub_path":"apidemo/apidemo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"31835522512","text":"def convert(a=\"python.py\"):\n    # Naive Python-to-Java converter: emits a String declaration for quoted\n    # assignments and an empty println for print statements; everything else\n    # is ignored.\n    file = open(\"/home/kaushik/PROGRAMMING/python/Projects/Python_into_Java/d.py\",\"r\")\n    demo = open(\"d.java\",\"w\")\n    demo.write(\"class D{\\npublic static void main(String[] args){\\n\")\n    for i in file.readlines():\n        if ('\"' in i or \"'\" in i) and \"=\" in i:\n            j = \"\".join(i.strip().split(\" \"))\n            demo.write(f\"String {j};\\n\")\n        if \"print(\" in i and \"end=\" not in i:\n            demo.write(\"System.out.println();\\n\")\n    demo.write(\"\\n}\\n}\")\n    file.close()\n    demo.close()\nconvert()\n","repo_name":"sage-kanishq/PythonFiles","sub_path":"Projects/Python_into_Java/JavaConverterPython.py","file_name":"JavaConverterPython.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"74070137208","text":"import markdown\nfrom django import template\nfrom django.template.defaultfilters import stringfilter\n\nregister = template.Library()\n\n\n@register.filter\n@stringfilter\ndef markdown2html(value):\n    if value is None:\n        value = ''\n    return markdown.markdown(value)\n","repo_name":"pythonindia/pssi.org.in","sub_path":"apps/common/templatetags/markdown_tags.py","file_name":"markdown_tags.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"}
 +{"seq_id":"19075117579","text":"\r\nfrom __future__ import print_function\r\nfrom imutils.object_detection import non_max_suppression\r\nfrom imutils import paths\r\nimport numpy as np\r\nimport argparse\r\nimport imutils\r\nimport cv2\r\n\r\ndef pedestrian_detection(imagePath):\r\n    hog = cv2.HOGDescriptor()\t#load histogram of oriented gradients descriptor\r\n    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\t#apply it to people\r\n    image = cv2.imread(imagePath)\t#open image\r\n    image = cv2.resize(image, (400, 300))\t#resize\r\n\t#rects: array of positions\r\n\t#weights: probabilities\r\n\t#winStride steps 4,4 in x and y\r\n    (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),padding=(8, 8), scale=1.05)\r\n    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])\r\n\t#remove overlapping rectangles\r\n    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)\r\n    for (xA, yA, xB, yB) in pick:\r\n\t#draw rectangles\r\n        if (yB > 250):\r\n            cv2.rectangle(image, (xA, yA), (xB, yB), (0, 0, 255), 2)\r\n        else:\r\n            cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)\r\n    print (\"Found {0} Pedestrians!\".format(len(pick)))\r\n    cv2.imshow(\"Pedestrians Found\", image)\r\n\r\ndef car_detection(imagePath):\r\n    cascPath = 'cars.xml'\t#load the training set file\r\n    carCascade = cv2.CascadeClassifier(cascPath)\t#load the pretrained classifier\r\n    image = cv2.imread(imagePath)\t#read the image\r\n    image = cv2.resize(image, (500, 400))\t#resize\r\n    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) \t#turn it to gray\r\n    cars = carCascade.detectMultiScale(gray, 1.1, 1)\t#detect the cars in the image\r\n    for (x, y, w, h) in cars:\r\n        if (w > 200):\r\n            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n        else:\r\n            cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n    print(\"Found {0} Cars!\".format(len(cars)))\r\n    cv2.imshow(\"Cars Found\", image)\r\n\r\nimagepath = 'test.bmp'\r\npedestrian_detection(imagepath)\r\ncar_detection(imagepath)\r\ncv2.waitKey(0)","repo_name":"7alawanii/Cars-and-Pedestrians-Detector","sub_path":"Cars&Pedestrian's_Detector.py","file_name":"Cars&Pedestrian's_Detector.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"25364200343","text":"# -*- coding: utf-8 -*-\n\"\"\" Decorators for FluidIntegrates. 
\"\"\"\n\n\nimport functools\nimport re\n\nimport rollbar\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.cache.backends.base import DEFAULT_TIMEOUT\nfrom django.http import HttpRequest, HttpResponse\nfrom django.views.decorators.csrf import csrf_protect\nfrom graphql import GraphQLError\nfrom promise import Promise\nfrom rediscluster.nodemanager import RedisClusterException\nfrom simpleeval import AttributeDoesNotExist\n\nfrom backend.dal import finding as finding_dal, project as project_dal\n\nfrom backend.domain import (\n user as user_domain, event as event_domain, finding as finding_domain\n)\nfrom backend.services import (\n get_user_role, has_valid_access_token, project_exists\n)\n\nfrom backend import util\nfrom backend.exceptions import InvalidAuthorization\n\nCACHE_TTL = getattr(settings, 'CACHE_TTL', DEFAULT_TIMEOUT)\n\nENFORCER_BASIC = getattr(settings, 'ENFORCER_BASIC')\nENFORCER_ACTION = getattr(settings, 'ENFORCER_ACTION')\nENFORCER_ACTION_ASYNC = getattr(settings, 'ENFORCER_ACTION_ASYNC')\n\n\ndef authenticate(func):\n @functools.wraps(func)\n def authenticate_and_call(*args, **kwargs):\n request = args[0]\n if \"username\" not in request.session or request.session[\"username\"] is None:\n return HttpResponse('Unauthorized \\\n ')\n return func(*args, **kwargs)\n return authenticate_and_call\n\n\ndef authorize(roles):\n def wrapper(func):\n @functools.wraps(func)\n def authorize_and_call(*args, **kwargs):\n request = args[0]\n # Verify role if the user is logged in\n if 'username' in request.session and request.session['registered']:\n if request.session['role'] not in roles:\n return util.response([], 'Access denied', True)\n else:\n # The user is not even authenticated. Redirect to login\n return HttpResponse('')\n\n return func(*args, **kwargs)\n return authorize_and_call\n return wrapper\n\n\n# Access control decorators for GraphQL\ndef verify_csrf(func):\n \"\"\"\n Conditional CSRF decorator\n\n Enables django CSRF protection if using cookie-based authentication\n \"\"\"\n @functools.wraps(func)\n def verify_and_call(*args, **kwargs):\n request = args[0]\n if request.COOKIES.get(settings.JWT_COOKIE_NAME):\n ret = csrf_protect(func)(*args, **kwargs)\n else:\n ret = func(*args, **kwargs)\n\n if isinstance(ret, Promise):\n ret = ret.get()\n return ret\n return verify_and_call\n\n\ndef require_login(func):\n \"\"\"\n Require_login decorator\n\n Verifies that the user is logged in with a valid JWT\n \"\"\"\n @functools.wraps(func)\n def verify_and_call(*args, **kwargs):\n context = args[1].context\n try:\n user_data = util.get_jwt_content(context)\n if user_data.get('jti'):\n verify_jti(user_data['user_email'],\n context.META.get('HTTP_AUTHORIZATION'),\n user_data['jti'])\n except InvalidAuthorization:\n raise GraphQLError('Login required')\n return func(*args, **kwargs)\n return verify_and_call\n\n\ndef resolve_project_name(args, kwargs):\n \"\"\"Get project name based on args passed.\"\"\"\n if args[0] and hasattr(args[0], 'name'):\n project_name = args[0].name\n elif 'project_name' in kwargs:\n project_name = kwargs['project_name']\n elif 'finding_id' in kwargs:\n project_name = \\\n finding_dal.get_attributes(kwargs['finding_id'], ['project_name']).get('project_name')\n elif 'draft_id' in kwargs:\n project_name = \\\n finding_dal.get_attributes(kwargs['draft_id'], ['project_name']).get('project_name')\n elif 'event_id' in kwargs:\n project_name = \\\n event_domain.get_event(kwargs['event_id']).get('project_name')\n else:\n 
project_name = None\n return project_name\n\n\ndef resolve_project_data(project_name):\n \"\"\"Get project data or mock it if needed.\"\"\"\n if project_name:\n if not project_exists(project_name):\n project_data = {}\n else:\n project_data = project_dal.get(project_name)[0]\n else:\n project_data = {}\n\n if 'customeradmin' not in project_data:\n project_data['customeradmin'] = set()\n return project_data\n\n\ndef enforce_authz(func):\n \"\"\"\n Require_role decorator based on Casbin enforcer.\n\n Verifies that the current user's role is within the specified allowed roles\n \"\"\"\n @functools.wraps(func)\n def verify_and_call(*args, **kwargs):\n context = args[1].context\n user_data = util.get_jwt_content(context)\n user_data['role'] = get_user_role(user_data)\n project_name = resolve_project_name(args, kwargs)\n project_data = resolve_project_data(project_name)\n action = '{}.{}'.format(func.__module__, func.__qualname__)\n action = action.replace('.', '_')\n try:\n if not ENFORCER_ACTION.enforce(user_data, project_data, action):\n util.cloudwatch_log(context,\n 'Security: \\\nUnauthorized role attempted to perform operation')\n raise GraphQLError('Access denied')\n except AttributeDoesNotExist:\n util.cloudwatch_log(context,\n 'Security: \\\nUnauthorized role attempted to perform operation')\n raise GraphQLError('Access denied')\n return func(*args, **kwargs)\n return verify_and_call\n\n\ndef enforce_authz_async(func):\n \"\"\"\n Require_role decorator based on Casbin enforcer.\n\n Verifies that the current user's role is within the specified allowed roles\n \"\"\"\n @functools.wraps(func)\n def verify_and_call(*args, **kwargs):\n context = args[1].context\n user_data = util.get_jwt_content(context)\n user_data['role'] = get_user_role(user_data)\n project_name = resolve_project_name(args, kwargs)\n project_data = resolve_project_data(project_name)\n action = '{}.{}'.format(func.__module__, func.__qualname__)\n action = action.replace('.', '_')\n try:\n if not ENFORCER_ACTION_ASYNC.enforce(\n user_data, project_data, action\n ):\n util.cloudwatch_log(context,\n 'Security: \\\nUnauthorized role attempted to perform operation')\n raise GraphQLError('Access denied')\n except AttributeDoesNotExist:\n util.cloudwatch_log(context,\n 'Security: \\\nUnauthorized role attempted to perform operation')\n raise GraphQLError('Access denied')\n return func(*args, **kwargs)\n return verify_and_call\n\n\ndef verify_jti(email, context, jti):\n if not has_valid_access_token(email, context, jti):\n raise InvalidAuthorization()\n\n\ndef require_project_access(func):\n \"\"\"\n Require_project_access decorator\n\n Verifies that the current user has access to a given project\n \"\"\"\n @functools.wraps(func)\n def verify_and_call(*args, **kwargs):\n context = args[1].context\n project_name = kwargs.get('project_name')\n user_data = util.get_jwt_content(context)\n user_data['subscribed_projects'] = \\\n user_domain.get_projects(user_data['user_email'])\n user_data['subscribed_projects'] += \\\n user_domain.get_projects(user_data['user_email'], active=False)\n user_data['role'] = get_user_role(user_data)\n if not project_name:\n rollbar.report_message('Error: Empty fields in project',\n 'error', context)\n raise GraphQLError('Access denied')\n try:\n if not ENFORCER_BASIC.enforce(user_data, project_name.lower()):\n util.cloudwatch_log(context,\n 'Security: \\\nAttempted to retrieve {project} project info without permission'\n .format(project=kwargs.get('project_name')))\n raise GraphQLError('Access denied')\n 
util.cloudwatch_log(context,\n 'Security: Access to {project} project'\n .format(project=kwargs.get('project_name')))\n except AttributeDoesNotExist:\n raise GraphQLError('Access denied')\n return func(*args, **kwargs)\n return verify_and_call\n\n\ndef require_finding_access(func):\n \"\"\"\n Require_finding_access decorator.\n\n Verifies that the current user has access to a given finding\n \"\"\"\n @functools.wraps(func)\n def verify_and_call(*args, **kwargs):\n context = args[1].context\n finding_id = kwargs.get('finding_id') \\\n if kwargs.get('identifier') is None else kwargs.get('identifier')\n user_data = util.get_jwt_content(context)\n user_data['subscribed_projects'] = \\\n user_domain.get_projects(user_data['user_email'])\n user_data['subscribed_projects'] += \\\n user_domain.get_projects(user_data['user_email'], active=False)\n user_data['role'] = get_user_role(user_data)\n finding_project = finding_domain.get_finding(finding_id).get('projectName')\n\n if not re.match('^[0-9]*$', finding_id):\n rollbar.report_message('Error: Invalid finding id format',\n 'error', context)\n raise GraphQLError('Invalid finding id format')\n try:\n if not ENFORCER_BASIC.enforce(user_data, finding_project.lower()):\n util.cloudwatch_log(context,\n 'Security: \\\n Attempted to retrieve finding-related info without permission')\n raise GraphQLError('Access denied')\n except AttributeDoesNotExist:\n # Raise (not return) so the GraphQL layer actually denies access\n raise GraphQLError('Access denied')\n return func(*args, **kwargs)\n return verify_and_call\n\n\ndef require_event_access(func):\n \"\"\"\n Require_event_access decorator\n\n Verifies that the current user has access to a given event\n \"\"\"\n @functools.wraps(func)\n def verify_and_call(*args, **kwargs):\n context = args[1].context\n event_id = kwargs.get('event_id') \\\n if kwargs.get('identifier') is None else kwargs.get('identifier')\n user_data = util.get_jwt_content(context)\n user_data['subscribed_projects'] = \\\n user_domain.get_projects(user_data['user_email'])\n user_data['subscribed_projects'] += \\\n user_domain.get_projects(user_data['user_email'], active=False)\n user_data['role'] = get_user_role(user_data)\n event_project = event_domain.get_event(event_id).get('project_name')\n\n if not re.match('^[0-9]*$', event_id):\n rollbar.report_message('Error: Invalid event id format',\n 'error', context)\n raise GraphQLError('Invalid event id format')\n try:\n if not ENFORCER_BASIC.enforce(user_data, event_project.lower()):\n util.cloudwatch_log(context,\n 'Security: \\\n Attempted to retrieve event-related info without permission')\n raise GraphQLError('Access denied')\n except AttributeDoesNotExist:\n raise GraphQLError('Access denied: Missing attributes')\n return func(*args, **kwargs)\n return verify_and_call\n\n\ndef cache_content(func):\n \"\"\"Get cached content from a django view with a request object.\"\"\"\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n \"\"\"Get cached content from a django view with a request object.\"\"\"\n req = args[0]\n assert isinstance(req, HttpRequest)\n keys = ['username', 'company', 'role', 'findingid', 'project']\n uniq_id = '_'.join([req.session[x] for x in keys if x in req.session])\n uniq_id += '_'.join([req.GET[x] for x in keys if x in req.GET])\n uniq_id += '_'.join([req.POST[x] for x in keys if x in req.POST])\n if len(args) > 1:\n uniq_id += '_'.join([str(x) for x in args[1:]])\n if kwargs:\n uniq_id += '_'.join([str(kwargs[x]) for x in kwargs])\n key_name = \\\n f'{func.__module__.replace(\".\", \"_\")}_{func.__qualname__}_{uniq_id}'\n 
try:\n ret = cache.get(key_name)\n if ret:\n return ret\n ret = func(*args, **kwargs)\n cache.set(key_name, ret, timeout=CACHE_TTL)\n return ret\n except RedisClusterException:\n rollbar.report_exc_info()\n return func(*args, **kwargs)\n return decorated\n\n\ndef get_cached(func):\n \"\"\"Get cached response from function if it exists.\"\"\"\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n \"\"\"Get cached response from function if it exists.\"\"\"\n uniq_id = \"_\".join([str(kwargs[x])[:24] for x in kwargs])\n key_name = \\\n f'{func.__module__.replace(\".\", \"_\")}_{func.__qualname__}_{uniq_id}'\n key_name = key_name.lower()\n try:\n ret = cache.get(key_name)\n if ret:\n return ret\n ret = func(*args, **kwargs)\n if isinstance(ret, Promise):\n ret = ret.get()\n cache.set(key_name, ret, timeout=CACHE_TTL)\n return ret\n except RedisClusterException:\n rollbar.report_exc_info()\n return func(*args, **kwargs)\n return decorated\n\n\ndef get_entity_cache(func):\n \"\"\"Get cached response of a GraphQL entity if it exists.\"\"\"\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n \"\"\"Get cached response from function if it exists.\"\"\"\n gql_ent = args[0]\n uniq_id = str(gql_ent)\n params = '_'.join([kwargs[key] for key in kwargs]) + '_'\n complement = (params if kwargs else '') + uniq_id\n key_name = \\\n f'{func.__module__.replace(\".\", \"_\")}_{func.__qualname__}_{complement}'\n key_name = key_name.lower()\n try:\n ret = cache.get(key_name)\n if ret is None:\n ret = func(*args, **kwargs)\n if isinstance(ret, Promise):\n ret = ret.get()\n cache.set(key_name, ret, timeout=CACHE_TTL)\n return ret\n except RedisClusterException:\n rollbar.report_exc_info()\n return func(*args, **kwargs)\n return decorated\n","repo_name":"tom-vanbraband-sonarsource/integrates","sub_path":"django-apps/integrates-back/backend/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":14930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34387280832","text":"ghenv.Component.Name = \"ShrimpGIS UTM PRJ\"\nghenv.Component.NickName = \"shrimp_utm_prj\"\nghenv.Component.Message = \"1.0.0\"\nghenv.Component.Category = \"ShrimpGIS\"\nghenv.Component.SubCategory = \"2 || Utils\"\ntry: ghenv.Component.AdditionalHelpFromDocStrings = \"1\"\nexcept: pass\n\nimport scriptcontext as sc\nimport os\nimport sys\n##################ShrimpGIS#####################\ntry:\n user_path = os.getenv(\"APPDATA\")\n sys.path.append(user_path)\n from shrimp_gis import __version__\n from shrimp_gis.io import get_epsg_from_shp_point, get_prj_text_from_EPSG\n \n ghenv.Component.Message = __version__\nexcept ImportError as e:\n raise ImportError(\"\\nFailed to import ShrimpGIS: {0}\\n\\nCheck your 'shrimp_gis' folder in {1}\".format(e, os.getenv(\"APPDATA\")))\n################################################\n\ndef main():\n \n if _shp_point:\n EPSG = get_epsg_from_shp_point(_shp_point)\n prj_text = get_prj_text_from_EPSG(EPSG)\n \n return EPSG, prj_text\n return None, None\n\nEPSG, prj_text = main()\n\n\n","repo_name":"AntonelloDN/ShrimpGIS","sub_path":"src/ShrimpGIS UTM PRJ.py","file_name":"ShrimpGIS UTM PRJ.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"77"} +{"seq_id":"7981186311","text":"\"\"\"\nA tool to have auto sync obsidian notes from your repo vault\n\"\"\"\nfrom setuptools import find_packages, setup\n\ndependencies = ['pyobjc',\n 
'rumps']\n\nAPP = ['obsidiansync/sync.py']\nDATA_FILES = []\nOPTIONS = {\n 'argv_emulation': True,\n 'iconfile': 'assets/obsidian.png',\n 'plist': {\n 'CFBundleShortVersionString': '0.2.0',\n 'LSUIElement': True,\n },\n 'packages': ['rumps'],\n}\n\n\nsetup(\n name='obsidiansync',\n version='0.1.0',\n url='https://github.com/Vi-Sri/obsidiansync',\n license='BSD',\n author='Vishal Srinivas',\n author_email='srinivasvishal7@gmail.com',\n description='A tool to have auto sync obsidian notes from your repo vault',\n long_description=__doc__,\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n platforms='darwin',\n app=APP,\n data_files=DATA_FILES,\n options={'py2app': OPTIONS},\n setup_requires=['py2app'],\n install_requires=dependencies,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ]\n)\n","repo_name":"Vi-Sri/Obsidian-Sync","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6027974662","text":"import os\nimport subprocess\nimport cv2\nimport math\nimport utils\nfrom config import Config as cfg\n\nfrom gentle.gentle.transcriber import do_transcription\n\n\n@utils.traverser\n@utils.log_process\ndef do_video_alignment(**kwargs):\n source_file, file_name, kwargs = utils.extra_path(**kwargs)\n words = do_transcription(kwargs['source_path'] + source_file, kwargs['wav_path'] + file_name + '.wav',\n kwargs['transcription_and_phone_path'] + file_name + '.json')\n with open(kwargs['transcription_path'] + file_name + '.txt', 'w') as transcription_file:\n for item in words:\n if not item.word.startswith('<'):\n transcription_file.write(item.word + ' ')\n if not os.path.exists(kwargs['img_path'] + file_name):\n try:\n os.makedirs(kwargs['img_path'] + file_name)\n except FileExistsError:\n pass\n if item.word == cfg.trigger_word:\n cap = cv2.VideoCapture(kwargs['source_path'] + source_file)\n fps = cap.get(cv2.CAP_PROP_FPS)\n cap.set(cv2.CAP_PROP_POS_FRAMES, math.floor(item.start * fps))\n for i in range(math.ceil(item.end * fps) - math.floor(item.start * fps) + 1):\n success, frame = cap.read()\n if success:\n cv2.imwrite(kwargs['img_path'] + file_name + '/' + str(i) + '.jpg', frame)\n command = cfg.FFMPEG + ' -loglevel quiet -y -ss ' + str(item.start) + ' -to ' + str(\n item.end) + ' -accurate_seek -i ' + kwargs['source_path'] + source_file + ' -c copy ' + kwargs[\n 'video_path'] + file_name + '_cut.mp4'\n subprocess.call(command, shell = True)\n\n\ndef main():\n do_video_alignment(**cfg.param)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"shenmishajing/text_base_edition","sub_path":"video_alignment.py","file_name":"video_alignment.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11462724236","text":"from . 
import util, authentication_service, cache, application\n\n\n@cache.memoize()\ndef get_events(calendar, time_max=None, time_min=None):\n \"\"\"\n Method to retrieve a list of events from the Google Calendar API.\n :param calendar: the identifier of the calendar\n :param time_max: optional start time (upper bound for event retrieval)\n :param time_min: optional end time (lower bound for event retrieval)\n :return: a list of events\n \"\"\"\n service = authentication_service.get_service()\n\n events_result = service.events().list(calendarId=calendar, orderBy='startTime', singleEvents=True, timeMin=time_min,\n timeMax=time_max).execute()\n application.logger.debug('API call - events list')\n events = events_result.get('items', [])\n\n return events\n\n\ndef get_duration(event):\n \"\"\"\n Given an event, calculate its duration\n :param event\n :return: a timedelta, such that days, hours, minutes or seconds can be derived\n \"\"\"\n end = util.convert_date(event['end'])\n start = util.convert_date(event['start'])\n return end - start\n\n\ndef calculate_days_hours_minutes(event):\n \"\"\"\n Given an event, calculate the days, hours and minutes that it lasts\n :param event\n :return: the duration in days, hours and minutes\n \"\"\"\n try:\n duration = get_duration(event)\n days = duration.days\n hours = duration.seconds // 3600\n minutes = duration.seconds // 60 % 60\n return days, hours, minutes\n except TypeError as e:\n application.logger.error(e)\n return 0, 0, 0\n\n\ndef search(cal_id, query, sort):\n \"\"\"\n Filter a list of events to find matches to a given query.\n :param cal_id: the identifier of the calendar\n :param query: either a word, phrase or the empty string\n :param sort: ascending ('earliest') or descending ('latest')\n :return: a list of events matching the query, as well as the total number of days, hours and minutes spent in all\n of those events\n \"\"\"\n events = get_events(cal_id)\n matches = []\n days = 0\n hours = 0\n minutes = 0\n for ev in events:\n if query.lower() in ev['summary'].lower():\n if 'dateTime' in ev['start'] and 'dateTime' in ev['end']:\n duration = calculate_days_hours_minutes(ev)\n days += duration[0]\n hours += duration[1]\n minutes += duration[2]\n ev = format_event(ev)\n if sort == 'earliest':\n matches.append(ev)\n elif sort == 'latest':\n matches.insert(0, ev)\n\n hours += minutes // 60\n minutes -= (minutes // 60) * 60\n days += hours // 24\n hours -= (hours // 24) * 24\n\n return matches, days, hours, minutes\n\n\ndef format_event(event):\n \"\"\"\n Convert the event object into one that can be more easily used by the template to display results\n :param event\n :return: a dict with the required attributes correctly formatted\n \"\"\"\n start = util.convert_date(event['start'])\n end = util.convert_date(event['end'])\n start_time = start.strftime(\"%H:%M\") if start.hour != 0 else None\n end_time = end.strftime(\"%H:%M\") if end.hour != 0 else None\n\n formatted = {'summary': event['summary'],\n 'location': event['location'] if 'location' in event else None,\n 'description': event['description'] if 'description' in event else None,\n 'start_day_num': start.day,\n 'start_day_text': start.strftime(\"%A\"),\n 'end_day_text': end.strftime(\"%A\") if not end_time else None,\n 'start_month': start.strftime(\"%B\"),\n 'start_time': start_time,\n 'end_time': end_time}\n\n return 
formatted\n","repo_name":"kingarj/CalendarInsight","sub_path":"src/services/events_service.py","file_name":"events_service.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16851918546","text":"import streamlit as st\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n#import pickle\r\nimport seaborn as sns\r\n\r\ndef show_explore_page():\r\n st.title('Data Exploration')\r\n st.subheader('Training Set')\r\n # read the saved model\r\n #mdlPath = 'f_app/mdl.pickle'\r\n dfPath = 'f_app/traincleaned.csv'\r\n df = load_data(dfPath)\r\n st.write(df)\r\n\r\n st.subheader('Explore data!')\r\n selectPlot = st.selectbox('Select the chart you want to see', ['Correlation Plot', 'Boxplot', 'Barplot'])\r\n doPlot(selectPlot,df)\r\n\r\ndef doPlot(selectPlot,df):\r\n fullVars = ['KIDSDRIV', 'AGE', 'HOMEKIDS', 'YOJ', 'INCOME', 'HOME_VAL', 'TRAVTIME',\r\n 'BLUEBOOK', 'TIF', 'OLDCLAIM', 'CLM_FREQ', 'MVR_PTS', 'CAR_AGE',\r\n 'PARENT1', 'MSTATUS', 'RED_CAR', 'REVOKED', 'GENDER',\r\n 'COMMERCIAL_CAR_USE', 'URBAN_CAR', 'BACHELORS', 'ELEMENTARY_EDUCATION',\r\n 'MASTERS', 'PHD', 'HIGH_SCHOOL', 'CLERICAL', 'DOCTOR', 'HOME_MAKER',\r\n 'LAWYER', 'MANAGER', 'PROFESSIONAL', 'STUDENT', 'BLUE_COLLAR',\r\n 'MINIVAN', 'PANEL_TRUCK', 'PICKUP', 'SPORTS_CAR', 'VAN', 'SUV']\r\n\r\n if selectPlot == 'Correlation Plot':\r\n fig1 = plt.figure(figsize=(15,10))\r\n sns.heatmap(df.corr(), annot=True, cmap='YlGnBu')\r\n elif selectPlot == 'Boxplot':\r\n var1 = 'TARGET_FLAG'\r\n var2 = st.selectbox('Select a variable', fullVars)\r\n fig1 = plt.figure(figsize=(10,8))\r\n sns.boxplot(data=df, x=var1, y=var2)\r\n elif selectPlot == 'Barplot':\r\n fig1 = plt.figure(figsize=(10,8))\r\n df['TARGET_FLAG'].value_counts().plot(kind='bar', title='Unbalanced classes')\r\n\r\n plotButton = st.button('Plot!')\r\n if plotButton:\r\n st.pyplot(fig1)\r\n\r\n@st.cache_data\r\ndef load_data(dfPath):\r\n #mdl = pickle.load(open(mdlPath, \"rb\"))\r\n df = pd.read_csv(dfPath, index_col=\"INDEX\")\r\n return df\r\n","repo_name":"pietrodileo/TimMLproject","sub_path":"f_app/explore_page.py","file_name":"explore_page.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6486630310","text":"\"\"\"Common schema objects.\"\"\"\nfrom __future__ import annotations\n\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom inspect import signature\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n Generic,\n List,\n NamedTuple,\n Optional,\n Sequence,\n TypeVar,\n Union,\n)\nfrom uuid import UUID\n\nfrom pydantic import BaseModel, Field, root_validator\n\nfrom langchain.load.serializable import Serializable\n\nif TYPE_CHECKING:\n from langchain.callbacks.manager import (\n AsyncCallbackManagerForRetrieverRun,\n CallbackManagerForRetrieverRun,\n Callbacks,\n )\n\nRUN_KEY = \"__run\"\n\n\ndef get_buffer_string(\n messages: Sequence[BaseMessage], human_prefix: str = \"Human\", ai_prefix: str = \"AI\"\n) -> str:\n \"\"\"Convert sequence of Messages to strings and concatenate them into one string.\n\n Args:\n messages: Messages to be converted to strings.\n human_prefix: The prefix to prepend to contents of HumanMessages.\n ai_prefix: THe prefix to prepend to contents of AIMessages.\n\n Returns:\n A single string concatenation of all input messages.\n\n Example:\n .. 
code-block:: python\n\n from langchain.schema import AIMessage, HumanMessage\n\n messages = [\n HumanMessage(content=\"Hi, how are you?\"),\n AIMessage(content=\"Good, how are you?\"),\n ]\n get_buffer_string(messages)\n # -> \"Human: Hi, how are you?\\nAI: Good, how are you?\"\n \"\"\"\n string_messages = []\n for m in messages:\n if isinstance(m, HumanMessage):\n role = human_prefix\n elif isinstance(m, AIMessage):\n role = ai_prefix\n elif isinstance(m, SystemMessage):\n role = \"System\"\n elif isinstance(m, FunctionMessage):\n role = \"Function\"\n elif isinstance(m, ChatMessage):\n role = m.role\n else:\n raise ValueError(f\"Got unsupported message type: {m}\")\n message = f\"{role}: {m.content}\"\n if isinstance(m, AIMessage) and \"function_call\" in m.additional_kwargs:\n message += f\"{m.additional_kwargs['function_call']}\"\n string_messages.append(message)\n\n return \"\\n\".join(string_messages)\n\n\n@dataclass\nclass AgentAction:\n \"\"\"A full description of an action for an ActionAgent to execute.\"\"\"\n\n tool: str\n \"\"\"The name of the Tool to execute.\"\"\"\n tool_input: Union[str, dict]\n \"\"\"The input to pass in to the Tool.\"\"\"\n log: str\n \"\"\"Additional information to log about the action.\"\"\"\n\n\nclass AgentFinish(NamedTuple):\n \"\"\"The final return value of an ActionAgent.\"\"\"\n\n return_values: dict\n \"\"\"Dictionary of return values.\"\"\"\n log: str\n \"\"\"Additional information to log about the return value\"\"\"\n\n\nclass Generation(Serializable):\n \"\"\"A single text generation output.\"\"\"\n\n text: str\n \"\"\"Generated text output.\"\"\"\n\n generation_info: Optional[Dict[str, Any]] = None\n \"\"\"Raw response from the provider. May include things like the \n reason for finishing or token log probabilities.\n \"\"\"\n # TODO: add log probs as separate attribute\n\n @property\n def lc_serializable(self) -> bool:\n \"\"\"Whether this class is LangChain serializable.\"\"\"\n return True\n\n\nclass BaseMessage(Serializable):\n \"\"\"The base abstract Message class.\n\n Messages are the inputs and outputs of ChatModels.\n \"\"\"\n\n content: str\n \"\"\"The string contents of the message.\"\"\"\n\n additional_kwargs: dict = Field(default_factory=dict)\n \"\"\"Any additional information.\"\"\"\n\n @property\n @abstractmethod\n def type(self) -> str:\n \"\"\"Type of the Message, used for serialization.\"\"\"\n\n @property\n def lc_serializable(self) -> bool:\n \"\"\"Whether this class is LangChain serializable.\"\"\"\n return True\n\n\nclass HumanMessage(BaseMessage):\n \"\"\"A Message from a human.\"\"\"\n\n example: bool = False\n \"\"\"Whether this Message is being passed in to the model as part of an example \n conversation.\n \"\"\"\n\n @property\n def type(self) -> str:\n \"\"\"Type of the message, used for serialization.\"\"\"\n return \"human\"\n\n\nclass AIMessage(BaseMessage):\n \"\"\"A Message from an AI.\"\"\"\n\n example: bool = False\n \"\"\"Whether this Message is being passed in to the model as part of an example \n conversation.\n \"\"\"\n\n @property\n def type(self) -> str:\n \"\"\"Type of the message, used for serialization.\"\"\"\n return \"ai\"\n\n\nclass SystemMessage(BaseMessage):\n \"\"\"A Message for priming AI behavior, usually passed in as the first of a sequence\n of input messages.\n \"\"\"\n\n @property\n def type(self) -> str:\n \"\"\"Type of the message, used for serialization.\"\"\"\n return \"system\"\n\n\nclass FunctionMessage(BaseMessage):\n \"\"\"A Message for passing the result of executing a function 
back to a model.\"\"\"\n\n name: str\n \"\"\"The name of the function that was executed.\"\"\"\n\n @property\n def type(self) -> str:\n \"\"\"Type of the message, used for serialization.\"\"\"\n return \"function\"\n\n\nclass ChatMessage(BaseMessage):\n \"\"\"A Message that can be assigned an arbitrary speaker (i.e. role).\"\"\"\n\n role: str\n \"\"\"The speaker / role of the Message.\"\"\"\n\n @property\n def type(self) -> str:\n \"\"\"Type of the message, used for serialization.\"\"\"\n return \"chat\"\n\n\ndef _message_to_dict(message: BaseMessage) -> dict:\n return {\"type\": message.type, \"data\": message.dict()}\n\n\ndef messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]:\n \"\"\"Convert a sequence of Messages to a list of dictionaries.\n\n Args:\n messages: Sequence of messages (as BaseMessages) to convert.\n\n Returns:\n List of messages as dicts.\n \"\"\"\n return [_message_to_dict(m) for m in messages]\n\n\ndef _message_from_dict(message: dict) -> BaseMessage:\n _type = message[\"type\"]\n if _type == \"human\":\n return HumanMessage(**message[\"data\"])\n elif _type == \"ai\":\n return AIMessage(**message[\"data\"])\n elif _type == \"system\":\n return SystemMessage(**message[\"data\"])\n elif _type == \"chat\":\n return ChatMessage(**message[\"data\"])\n elif _type == \"function\":\n # FunctionMessage serializes with type \"function\"; without this branch the\n # dict round-trip raises ValueError for function messages\n return FunctionMessage(**message[\"data\"])\n else:\n raise ValueError(f\"Got unexpected type: {_type}\")\n\n\ndef messages_from_dict(messages: List[dict]) -> List[BaseMessage]:\n \"\"\"Convert a sequence of messages from dicts to Message objects.\n\n Args:\n messages: Sequence of messages (as dicts) to convert.\n\n Returns:\n List of messages (BaseMessages).\n \"\"\"\n return [_message_from_dict(m) for m in messages]\n\n\nclass ChatGeneration(Generation):\n \"\"\"A single chat generation output.\"\"\"\n\n text: str = \"\"\n \"\"\"*SHOULD NOT BE SET DIRECTLY* The text contents of the output message.\"\"\"\n message: BaseMessage\n \"\"\"The message output by the chat model.\"\"\"\n\n @root_validator\n def set_text(cls, values: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Set the text attribute to be the contents of the message.\"\"\"\n values[\"text\"] = values[\"message\"].content\n return values\n\n\nclass RunInfo(BaseModel):\n \"\"\"Class that contains metadata for a single execution of a Chain or model.\"\"\"\n\n run_id: UUID\n \"\"\"A unique identifier for the model or chain run.\"\"\"\n\n\nclass ChatResult(BaseModel):\n \"\"\"Class that contains all results for a single chat model call.\"\"\"\n\n generations: List[ChatGeneration]\n \"\"\"List of the chat generations. This is a List because an input can have multiple \n candidate generations.\n \"\"\"\n llm_output: Optional[dict] = None\n \"\"\"For arbitrary LLM provider specific output.\"\"\"\n\n\nclass LLMResult(BaseModel):\n \"\"\"Class that contains all results for a batched LLM call.\"\"\"\n\n generations: List[List[Generation]]\n \"\"\"List of generated outputs. This is a List[List[]] because\n each input could have multiple candidate generations.\"\"\"\n llm_output: Optional[dict] = None\n \"\"\"Arbitrary LLM provider-specific output.\"\"\"\n run: Optional[List[RunInfo]] = None\n \"\"\"List of metadata info for model call for each input.\"\"\"\n\n def flatten(self) -> List[LLMResult]:\n \"\"\"Flatten generations into a single list.\n\n Unpack List[List[Generation]] -> List[LLMResult] where each returned LLMResult\n contains only a single Generation. 
If token usage information is available,\n it is kept only for the LLMResult corresponding to the top-choice\n Generation, to avoid over-counting of token usage downstream.\n\n Returns:\n List of LLMResults where each returned LLMResult contains a single\n Generation.\n \"\"\"\n llm_results = []\n for i, gen_list in enumerate(self.generations):\n # Avoid double counting tokens in OpenAICallback\n if i == 0:\n llm_results.append(\n LLMResult(\n generations=[gen_list],\n llm_output=self.llm_output,\n )\n )\n else:\n if self.llm_output is not None:\n llm_output = deepcopy(self.llm_output)\n llm_output[\"token_usage\"] = dict()\n else:\n llm_output = None\n llm_results.append(\n LLMResult(\n generations=[gen_list],\n llm_output=llm_output,\n )\n )\n return llm_results\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Check for LLMResult equality by ignoring any metadata related to runs.\"\"\"\n if not isinstance(other, LLMResult):\n return NotImplemented\n return (\n self.generations == other.generations\n and self.llm_output == other.llm_output\n )\n\n\nclass PromptValue(Serializable, ABC):\n \"\"\"Base abstract class for inputs to any language model.\n\n PromptValues can be converted to both LLM (pure text-generation) inputs and\n ChatModel inputs.\n \"\"\"\n\n @abstractmethod\n def to_string(self) -> str:\n \"\"\"Return prompt value as string.\"\"\"\n\n @abstractmethod\n def to_messages(self) -> List[BaseMessage]:\n \"\"\"Return prompt as a list of Messages.\"\"\"\n\n\nclass BaseMemory(Serializable, ABC):\n \"\"\"Base abstract class for memory in Chains.\n\n Memory refers to state in Chains. Memory can be used to store information about\n past executions of a Chain and inject that information into the inputs of\n future executions of the Chain. For example, for conversational Chains Memory\n can be used to store conversations and automatically add them to future model\n prompts so that the model has the necessary context to respond coherently to\n the latest input.\n\n Example:\n .. code-block:: python\n\n class SimpleMemory(BaseMemory):\n memories: Dict[str, Any] = dict()\n\n @property\n def memory_variables(self) -> List[str]:\n return list(self.memories.keys())\n\n def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:\n return self.memories\n\n def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:\n pass\n\n def clear(self) -> None:\n pass\n \"\"\" # noqa: E501\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n @abstractmethod\n def memory_variables(self) -> List[str]:\n \"\"\"The string keys this memory class will add to chain inputs.\"\"\"\n\n @abstractmethod\n def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Return key-value pairs given the text input to the chain.\"\"\"\n\n @abstractmethod\n def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:\n \"\"\"Save the context of this chain run to memory.\"\"\"\n\n @abstractmethod\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n\n\nclass BaseChatMessageHistory(ABC):\n \"\"\"Abstract base class for storing chat message history.\n\n See `ChatMessageHistory` for default implementation.\n\n Example:\n .. 
code-block:: python\n\n class FileChatMessageHistory(BaseChatMessageHistory):\n storage_path: str\n session_id: str\n\n @property\n def messages(self):\n with open(os.path.join(storage_path, session_id), 'r', encoding='utf-8') as f:\n messages = json.loads(f.read())\n return messages_from_dict(messages)\n\n def add_message(self, message: BaseMessage) -> None:\n messages = messages_to_dict(self.messages)\n messages.append(_message_to_dict(message))\n with open(os.path.join(storage_path, session_id), 'w') as f:\n json.dump(messages, f)\n\n def clear(self):\n with open(os.path.join(storage_path, session_id), 'w') as f:\n f.write(\"[]\")\n \"\"\"\n\n messages: List[BaseMessage]\n \"\"\"A list of Messages stored in-memory.\"\"\"\n\n def add_user_message(self, message: str) -> None:\n \"\"\"Convenience method for adding a human message string to the store.\n\n Args:\n message: The string contents of a human message.\n \"\"\"\n self.add_message(HumanMessage(content=message))\n\n def add_ai_message(self, message: str) -> None:\n \"\"\"Convenience method for adding an AI message string to the store.\n\n Args:\n message: The string contents of an AI message.\n \"\"\"\n self.add_message(AIMessage(content=message))\n\n # TODO: Make this an abstractmethod.\n def add_message(self, message: BaseMessage) -> None:\n \"\"\"Add a Message object to the store.\n\n Args:\n message: A BaseMessage object to store.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def clear(self) -> None:\n \"\"\"Remove all messages from the store\"\"\"\n\n\nclass Document(Serializable):\n \"\"\"Class for storing a piece of text and associated metadata.\"\"\"\n\n page_content: str\n \"\"\"String text.\"\"\"\n metadata: dict = Field(default_factory=dict)\n \"\"\"Arbitrary metadata about the page content (e.g., source, relationships to other\n documents, etc.).\n \"\"\"\n\n\nclass BaseRetriever(ABC):\n \"\"\"Abstract base class for a Document retrieval system.\n\n A retrieval system is defined as something that can take string queries and return\n the most 'relevant' Documents from some source.\n\n Example:\n .. 
code-block:: python\n\n class TFIDFRetriever(BaseRetriever, BaseModel):\n vectorizer: Any\n docs: List[Document]\n tfidf_array: Any\n k: int = 4\n\n class Config:\n arbitrary_types_allowed = True\n\n def get_relevant_documents(self, query: str) -> List[Document]:\n from sklearn.metrics.pairwise import cosine_similarity\n\n # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)\n query_vec = self.vectorizer.transform([query])\n # Op -- (n_docs,1) -- Cosine Sim with each doc\n results = cosine_similarity(self.tfidf_array, query_vec).reshape((-1,))\n return [self.docs[i] for i in results.argsort()[-self.k :][::-1]]\n\n async def aget_relevant_documents(self, query: str) -> List[Document]:\n raise NotImplementedError\n\n \"\"\" # noqa: E501\n\n _new_arg_supported: bool = False\n _expects_other_args: bool = False\n\n def __init_subclass__(cls, **kwargs: Any) -> None:\n super().__init_subclass__(**kwargs)\n # Version upgrade for old retrievers that implemented the public\n # methods directly.\n if cls.get_relevant_documents != BaseRetriever.get_relevant_documents:\n warnings.warn(\n \"Retrievers must implement abstract `_get_relevant_documents` method\"\n \" instead of `get_relevant_documents`\",\n DeprecationWarning,\n )\n swap = cls.get_relevant_documents\n cls.get_relevant_documents = ( # type: ignore[assignment]\n BaseRetriever.get_relevant_documents\n )\n cls._get_relevant_documents = swap # type: ignore[assignment]\n if (\n hasattr(cls, \"aget_relevant_documents\")\n and cls.aget_relevant_documents != BaseRetriever.aget_relevant_documents\n ):\n warnings.warn(\n \"Retrievers must implement abstract `_aget_relevant_documents` method\"\n \" instead of `aget_relevant_documents`\",\n DeprecationWarning,\n )\n aswap = cls.aget_relevant_documents\n cls.aget_relevant_documents = ( # type: ignore[assignment]\n BaseRetriever.aget_relevant_documents\n )\n cls._aget_relevant_documents = aswap # type: ignore[assignment]\n parameters = signature(cls._get_relevant_documents).parameters\n cls._new_arg_supported = parameters.get(\"run_manager\") is not None\n # If a V1 retriever broke the interface and expects additional arguments\n cls._expects_other_args = (not cls._new_arg_supported) and len(parameters) > 2\n\n @abstractmethod\n def _get_relevant_documents(\n self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any\n ) -> List[Document]:\n \"\"\"Get documents relevant to a query.\n Args:\n query: String to find relevant documents for.\n run_manager: The callbacks handler to use.\n Returns:\n List of relevant documents\n \"\"\"\n\n @abstractmethod\n async def _aget_relevant_documents(\n self,\n query: str,\n *,\n run_manager: AsyncCallbackManagerForRetrieverRun,\n **kwargs: Any,\n ) -> List[Document]:\n \"\"\"Asynchronously get documents relevant to a query.\n Args:\n query: string to find relevant documents for\n run_manager: The callbacks handler to use\n Returns:\n List of relevant documents\n \"\"\"\n\n def get_relevant_documents(\n self, query: str, *, callbacks: Callbacks = None, **kwargs: Any\n ) -> List[Document]:\n \"\"\"Retrieve documents relevant to a query.\n Args:\n query: String to find relevant documents for.\n callbacks: Callback manager or list of callbacks.\n Returns:\n List of relevant documents\n \"\"\"\n from langchain.callbacks.manager import CallbackManager\n\n callback_manager = CallbackManager.configure(\n callbacks, None, verbose=kwargs.get(\"verbose\", False)\n )\n run_manager = callback_manager.on_retriever_start(\n query,\n **kwargs,\n )\n try:\n if 
self._new_arg_supported:\n result = self._get_relevant_documents(\n query, run_manager=run_manager, **kwargs\n )\n elif self._expects_other_args:\n result = self._get_relevant_documents(query, **kwargs)\n else:\n result = self._get_relevant_documents(query) # type: ignore[call-arg]\n except Exception as e:\n run_manager.on_retriever_error(e)\n raise e\n else:\n run_manager.on_retriever_end(\n result,\n **kwargs,\n )\n return result\n\n async def aget_relevant_documents(\n self, query: str, *, callbacks: Callbacks = None, **kwargs: Any\n ) -> List[Document]:\n \"\"\"Asynchronously get documents relevant to a query.\n Args:\n query: string to find relevant documents for\n callbacks: Callback manager or list of callbacks\n Returns:\n List of relevant documents\n \"\"\"\n from langchain.callbacks.manager import AsyncCallbackManager\n\n callback_manager = AsyncCallbackManager.configure(\n callbacks, None, verbose=kwargs.get(\"verbose\", False)\n )\n run_manager = await callback_manager.on_retriever_start(\n query,\n **kwargs,\n )\n try:\n if self._new_arg_supported:\n result = await self._aget_relevant_documents(\n query, run_manager=run_manager, **kwargs\n )\n elif self._expects_other_args:\n result = await self._aget_relevant_documents(query, **kwargs)\n else:\n result = await self._aget_relevant_documents(\n query, # type: ignore[call-arg]\n )\n except Exception as e:\n await run_manager.on_retriever_error(e)\n raise e\n else:\n await run_manager.on_retriever_end(\n result,\n **kwargs,\n )\n return result\n\n\n# For backwards compatibility\nMemory = BaseMemory\n\nT = TypeVar(\"T\")\n\n\nclass BaseLLMOutputParser(Serializable, ABC, Generic[T]):\n \"\"\"Abstract base class for parsing the outputs of a model.\"\"\"\n\n @abstractmethod\n def parse_result(self, result: List[Generation]) -> T:\n \"\"\"Parse a list of candidate model Generations into a specific format.\n\n Args:\n result: A list of Generations to be parsed. The Generations are assumed\n to be different candidate outputs for a single model input.\n\n Returns:\n Structured output.\n \"\"\"\n\n\nclass BaseOutputParser(BaseLLMOutputParser, ABC, Generic[T]):\n \"\"\"Class to parse the output of an LLM call.\n\n Output parsers help structure language model responses.\n\n Example:\n .. code-block:: python\n\n class BooleanOutputParser(BaseOutputParser[bool]):\n true_val: str = \"YES\"\n false_val: str = \"NO\"\n\n def parse(self, text: str) -> bool:\n cleaned_text = text.strip().upper()\n if cleaned_text not in (self.true_val.upper(), self.false_val.upper()):\n raise OutputParserException(\n f\"BooleanOutputParser expected output value to either be \"\n f\"{self.true_val} or {self.false_val} (case-insensitive). \"\n f\"Received {cleaned_text}.\"\n )\n return cleaned_text == self.true_val.upper()\n\n @property\n def _type(self) -> str:\n return \"boolean_output_parser\"\n \"\"\" # noqa: E501\n\n def parse_result(self, result: List[Generation]) -> T:\n \"\"\"Parse a list of candidate model Generations into a specific format.\n\n The return value is parsed from only the first Generation in the result, which\n is assumed to be the highest-likelihood Generation.\n\n Args:\n result: A list of Generations to be parsed. 
The Generations are assumed\n to be different candidate outputs for a single model input.\n\n Returns:\n Structured output.\n \"\"\"\n return self.parse(result[0].text)\n\n @abstractmethod\n def parse(self, text: str) -> T:\n \"\"\"Parse a single string model output into some structure.\n\n Args:\n text: String output of language model.\n\n Returns:\n Structured output.\n \"\"\"\n\n # TODO: rename 'completion' -> 'text'.\n def parse_with_prompt(self, completion: str, prompt: PromptValue) -> Any:\n \"\"\"Parse the output of an LLM call with the input prompt for context.\n\n The prompt is largely provided in the event the OutputParser wants\n to retry or fix the output in some way, and needs information from\n the prompt to do so.\n\n Args:\n completion: String output of language model.\n prompt: Input PromptValue.\n\n Returns:\n Structured output\n \"\"\"\n return self.parse(completion)\n\n def get_format_instructions(self) -> str:\n \"\"\"Instructions on how the LLM output should be formatted.\"\"\"\n raise NotImplementedError\n\n @property\n def _type(self) -> str:\n \"\"\"Return the output parser type for serialization.\"\"\"\n raise NotImplementedError(\n f\"_type property is not implemented in class {self.__class__.__name__}.\"\n \" This is required for serialization.\"\n )\n\n def dict(self, **kwargs: Any) -> Dict:\n \"\"\"Return dictionary representation of output parser.\"\"\"\n output_parser_dict = super().dict(**kwargs)\n output_parser_dict[\"_type\"] = self._type\n return output_parser_dict\n\n\nclass NoOpOutputParser(BaseOutputParser[str]):\n \"\"\"'No operation' OutputParser that returns the text as is.\"\"\"\n\n @property\n def lc_serializable(self) -> bool:\n \"\"\"Whether this class is LangChain serializable.\"\"\"\n return True\n\n @property\n def _type(self) -> str:\n \"\"\"Return the output parser type for serialization.\"\"\"\n return \"default\"\n\n def parse(self, text: str) -> str:\n \"\"\"Returns the input text with no changes.\"\"\"\n return text\n\n\nclass OutputParserException(ValueError):\n \"\"\"Exception that output parsers should raise to signify a parsing error.\n\n This exists to differentiate parsing errors from other code or execution errors\n that also may arise inside the output parser. OutputParserExceptions will be\n available to catch and handle in ways to fix the parsing error, while other\n errors will be raised.\n\n Args:\n error: The error that's being re-raised or an error message.\n observation: String explanation of error which can be passed to a\n model to try and remediate the issue.\n llm_output: String model output which is erroring.\n send_to_llm: Whether to send the observation and llm_output back to an Agent\n after an OutputParserException has been raised. 
This gives the underlying\n model driving the agent the context that the previous output was improperly\n structured, in the hopes that it will update the output to the correct\n format.\n \"\"\"\n\n def __init__(\n self,\n error: Any,\n observation: Optional[str] = None,\n llm_output: Optional[str] = None,\n send_to_llm: bool = False,\n ):\n super(OutputParserException, self).__init__(error)\n if send_to_llm:\n if observation is None or llm_output is None:\n raise ValueError(\n \"Arguments 'observation' & 'llm_output'\"\n \" are required if 'send_to_llm' is True\"\n )\n self.observation = observation\n self.llm_output = llm_output\n self.send_to_llm = send_to_llm\n\n\nclass BaseDocumentTransformer(ABC):\n \"\"\"Abstract base class for document transformation systems.\n\n A document transformation system takes a sequence of Documents and returns a\n sequence of transformed Documents.\n\n Example:\n .. code-block:: python\n\n class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel):\n embeddings: Embeddings\n similarity_fn: Callable = cosine_similarity\n similarity_threshold: float = 0.95\n\n class Config:\n arbitrary_types_allowed = True\n\n def transform_documents(\n self, documents: Sequence[Document], **kwargs: Any\n ) -> Sequence[Document]:\n stateful_documents = get_stateful_documents(documents)\n embedded_documents = _get_embeddings_from_stateful_docs(\n self.embeddings, stateful_documents\n )\n included_idxs = _filter_similar_embeddings(\n embedded_documents, self.similarity_fn, self.similarity_threshold\n )\n return [stateful_documents[i] for i in sorted(included_idxs)]\n\n async def atransform_documents(\n self, documents: Sequence[Document], **kwargs: Any\n ) -> Sequence[Document]:\n raise NotImplementedError\n\n \"\"\" # noqa: E501\n\n @abstractmethod\n def transform_documents(\n self, documents: Sequence[Document], **kwargs: Any\n ) -> Sequence[Document]:\n \"\"\"Transform a list of documents.\n\n Args:\n documents: A sequence of Documents to be transformed.\n\n Returns:\n A list of transformed Documents.\n \"\"\"\n\n @abstractmethod\n async def atransform_documents(\n self, documents: Sequence[Document], **kwargs: Any\n ) -> Sequence[Document]:\n \"\"\"Asynchronously transform a list of documents.\n\n Args:\n documents: A sequence of Documents to be transformed.\n\n Returns:\n A list of transformed Documents.\n \"\"\"\n","repo_name":"zaitianaoxiang/langchain","sub_path":"langchain/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":29211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"17417636416","text":"import pandas as pd\nimport numpy as np\nimport plotly.graph_objects as go\nfrom kaleido.scopes.plotly import PlotlyScope\n\nDATA_PATH = 'output/'\nFILE_1 = 'daily_cases_wtih_race.csv'\nIMAGE_PATH = 'images/correlation/'\nscope = PlotlyScope()\npd.set_option('display.max_rows', 3000)\n# pd.set_option('display.max_columns', 20)\n# pd.set_option('display.width', 20)\n\n\ndef read_data():\n covid_df = pd.read_csv(DATA_PATH + FILE_1, index_col=[0])\n covid_df['date'] = pd.to_datetime(covid_df['date'].astype(str), format='%Y%m%d')\n\n return covid_df\n\n\ndef compute_covariance_matrix(covid_df):\n \"\"\"\n States are CA, CO, WA\n \"\"\"\n covid_df = covid_df[covid_df['date'] >= '2020-05-03']\n headers = ['date',\n 'cases_white',\n 'cases_black',\n 'cases_asian',\n 'cases_aian',\n 'cases_nhpi',\n 'cases_multiracial',\n 'cases_ethnicity_hispanic',\n 'deaths_white',\n 'deaths_black',\n 
'deaths_asian',\n 'deaths_aian',\n 'deaths_nhpi',\n 'deaths_multiracial',\n 'deaths_ethnicity_hispanic',\n ]\n covid_df = covid_df[headers]\n covid_df = covid_df.dropna(thresh=len(headers) - 1).reset_index(drop=True)\n # There is an entry with a comma (object type) in cases_white which ends up getting dropped.\n covid_df['cases_white'] = covid_df['cases_white'].astype(float)\n covid_df = covid_df.groupby('date').sum().diff().iloc[1:]\n\n return covid_df\n\n\ndef create_heatmap(covid_df):\n\n covid_df = covid_df.reset_index(drop=True)\n\n covariance_matrix = covid_df.corr()\n cases_corr_matrix = covariance_matrix.iloc[0:7, 0:7]\n death_corr_matrix = covariance_matrix.iloc[7:, 7:]\n cases_death_corr_matrix = covariance_matrix.iloc[0:7, 7:]\n\n np.fill_diagonal(cases_corr_matrix.values, np.nan)\n np.fill_diagonal(death_corr_matrix.values, np.nan)\n\n cases_fig = go.Figure(\n data=go.Heatmap(\n z=cases_corr_matrix.values.tolist(),\n x=list(cases_corr_matrix.columns.values),\n y=list(cases_corr_matrix.columns.values),\n hoverongaps=False,\n colorbar=dict(title='Correlation')),\n )\n\n cases_fig.update_layout(\n title_text='Heatmap Cases by Race',\n )\n\n with open(IMAGE_PATH + \"cases_corr_matrix.png\", \"wb\") as file:\n file.write(scope.transform(cases_fig, format=\"png\"))\n\n death_fig = go.Figure(data=go.Heatmap(\n z=death_corr_matrix.values.tolist(),\n x=list(death_corr_matrix.columns.values),\n y=list(death_corr_matrix.columns.values),\n hoverongaps=False,\n colorbar=dict(title='Correlation')),\n )\n\n death_fig.update_layout(\n title_text='Heatmap Death by Race',\n )\n\n with open(IMAGE_PATH + \"death_corr_matrix.png\", \"wb\") as file:\n file.write(scope.transform(death_fig, format=\"png\"))\n\n cases_death_fig = go.Figure(data=go.Heatmap(\n z=cases_death_corr_matrix.values.tolist(),\n x=list(cases_death_corr_matrix.columns.values),\n y=list(cases_death_corr_matrix.index.values),\n hoverongaps=False,\n colorbar=dict(title='Correlation')),\n )\n\n cases_death_fig.update_layout(\n title_text='Heatmap Death and Cases by Race',\n )\n\n with open(IMAGE_PATH + \"cases_death_corr_matrix.png\", \"wb\") as file:\n file.write(scope.transform(cases_death_fig, format=\"png\"))\n\n\ndef main():\n covid_df = read_data()\n covid_df = compute_covariance_matrix(covid_df)\n covid_df = create_heatmap(covid_df)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MikeZ77/COVID19-Project","sub_path":"05-correlation.py","file_name":"05-correlation.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12959154983","text":"from django.db import models\n\n# Create your models here.\n\n\nclass Level (models.Model):\n level = models.CharField(max_length=255)\n\n class Meta:\n db_table = 'Level'\n\n def __str__(self):\n return self.level\n\n\nclass Subject(models.Model):\n level = models.ForeignKey(Level, on_delete=models.CASCADE)\n name = models.CharField(max_length=255)\n\n class Meta:\n db_table = 'Subject'\n\n def __str__(self):\n return self.name\n\n\nclass Note(models.Model):\n\n description = models.CharField(max_length= 255, blank=True)\n document = models.FileField(upload_to=\"documents/\")\n uploaded_at = models.DateTimeField(auto_now_add=True)\n level = models.ForeignKey(Level, on_delete=models.CASCADE)\n subject = models.ForeignKey(Subject, on_delete=models.CASCADE)\n\n class Meta:\n db_table = 
\"note_table\"\n\n","repo_name":"Casper94/SampleBlog","sub_path":"SampleBlog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33431817045","text":"import os\nimport numpy as np\nfrom multiprocessing import Process, shared_memory\nfrom get_input_from_cam import get_input_from_cam\nfrom face_detection import face_detection\nfrom head_pose_estimation import head_pose_estimation\n#from body_pose_estimation import body_pose_estimation\n#from action_recognition import action_recognition\nfrom networking import router_function\nfrom hand_gesture_recognition import hand_gesture_recognition\n\nif __name__ == \"__main__\":\n\t##################### shared memory initialization ########################\n\t# frame shared memory\n\ttest_array = np.zeros((640, 640, 3), dtype=np.uint8)\n\tframe_shm = shared_memory.SharedMemory(create=True, size=test_array.nbytes, name='frame')\n\tframe = np.ndarray(test_array.shape, dtype=np.uint8, buffer=frame_shm.buf)\n\n\t# depth shared memory\n\tdepth_array = np.zeros((640, 640), dtype=np.uint64)\n\tdepth_shm = shared_memory.SharedMemory(create=True, size=depth_array.nbytes, name='depth')\n\tdepth = np.ndarray(depth_array.shape, dtype=np.uint64, buffer=depth_shm.buf)\n\n\t# face box coordinate shared memory\n\tface_box_coordinate_shape = (10, 4) # for 20 peoples\n\tsize_array = np.zeros(face_box_coordinate_shape, dtype=np.int64)\n\tface_box_coordinate_shm = shared_memory.SharedMemory(create=True, size=size_array.nbytes, name = 'face_box_coordinate')\n\n\t# main user face box coordinate shared memory\n\tmain_user_face_box_coordinate_shape = (1, 4) # for 20 peoples\n\tsize_array = np.zeros(main_user_face_box_coordinate_shape, dtype=np.int64)\n\tmain_user_face_box_coordinate_shm = shared_memory.SharedMemory(create=True, size=size_array.nbytes, name = 'main_user_face_box_coordinate')\n\n\t# main user face center coordinate shared memory\n\tmain_user_face_center_coordinate_shape = (1, 3) # for 20 peoples\n\tsize_array = np.zeros(main_user_face_center_coordinate_shape, dtype=np.int64)\n\tmain_user_face_center_coordinate_shm = shared_memory.SharedMemory(create=True, size=size_array.nbytes, name = 'main_user_face_center_coordinate')\n\n\t# main user face center coordinate shared memory\n\tmain_user_calib_face_center_coordinate_shape = (1, 3) # for 20 peoples\n\tsize_array = np.zeros(main_user_calib_face_center_coordinate_shape, dtype=np.int64)\n\tmain_user_calib_face_center_coordinate_shm = shared_memory.SharedMemory(create=True, size=size_array.nbytes, name = 'main_user_calib_face_center_coordinate')\n\n # head pose shm\n\thead_pose_shape = (3) # for 1 people\n\tsize_array = np.zeros(head_pose_shape, dtype=np.int64)\n\thead_pose_shm = shared_memory.SharedMemory(create = True, size = size_array.nbytes, name = 'head_pose')\n\thead_pose_sh_array = np.ndarray(head_pose_shape, dtype=np.int64, buffer=head_pose_shm.buf)\n\n # body pose shm\n\tbody_pose_shape = (3) # for 1 people\n\tsize_array = np.zeros(body_pose_shape, dtype=np.int64)\n\tbody_pose_shm = shared_memory.SharedMemory(create = True, size = size_array.nbytes, name = 'body_pose')\n\tbody_pose_sh_array = np.ndarray(body_pose_shape, dtype=np.int64, buffer=body_pose_shm.buf)\n\n # body coordinates shm\n\tbody_coordinates_shape = (5, 3) # for 1 people\n\tsize_array = np.zeros(body_coordinates_shape, dtype=np.int64)\n\tbody_coordinates_shm = shared_memory.SharedMemory(create = True, size = 
size_array.nbytes, name = 'body_coordinates')\n\tbody_coordinates_sh_array = np.ndarray(body_coordinates_shape, dtype=np.int64, buffer=body_coordinates_shm.buf)\n\n # action shm\n\taction_shape = (1)\n\tsize_array = np.chararray(action_shape, itemsize=10)\n\taction_shm = shared_memory.SharedMemory(create = True, size = size_array.nbytes, name = 'action')\n\taction_sh_array = np.chararray(action_shape, itemsize=10, buffer=action_shm.buf)\n\n # network shm\n\tnetwork_shape = (1)\n\tsize_array = np.zeros(network_shape, dtype=np.int64)\n\tnetwork_shm = shared_memory.SharedMemory(create = True, size = size_array.nbytes, name = 'networking')\n\tnetwork_sh_array = np.ndarray(network_shape, dtype=np.int64, buffer=network_shm.buf)\n\tnetwork_sh_array[:] = 2\n\n # hand_gesture shm\n\thand_gesture_shape = (1)\n\tsize_array = np.chararray(hand_gesture_shape, itemsize=30)\n\thand_gesture_shm = shared_memory.SharedMemory(create = True, size = size_array.nbytes, name = 'hand_gesture')\n\thand_gesture_sh_array = np.chararray(hand_gesture_shape, itemsize=30, buffer=hand_gesture_shm.buf)\n\thand_gesture_sh_array[:] = 'standard'\n\n # hand_gesture \n\thand_val_shape = (3)\n\tsize_array = np.zeros(hand_val_shape, dtype=np.int64)\n\thand_val_shm = shared_memory.SharedMemory(create = True, size = size_array.nbytes, name = 'hand_val')\n\thand_val_sh_array = np.ndarray(hand_val_shape, dtype=np.int64, buffer=hand_val_shm.buf)\n\thand_val_sh_array[:] = [0, 0, 0]\n\n\t# multi renderer communication\n\t# if you want to change the port numbers or display positions, you must match the port number and display position correctly.\n\tport_numbers = [5551, 5552, 5553]\n\tdisplay_positions = [[0, 0, 0], [-730, 0, 0], [730, 0, 0]]\n\t#port_numbers = [5551]\n\t#display_positions = [[0, 0, 0]]\n\t\n # main_display_port\n\tmain_display_port_shape = (1)\n\tsize_array = np.zeros(main_display_port_shape, dtype=np.int64)\n\tmain_display_port_shm = shared_memory.SharedMemory(create = True, size = size_array.nbytes, name = 'main_display_port')\n\tmain_display_port_sh_array = np.ndarray(main_display_port_shape, dtype=np.int64, buffer=main_display_port_shm.buf)\n\tmain_display_port_sh_array[:] = 0\n\n # other display-human matching info\n\tdisplay_human_matching_shape = (70)\n\tsize_array = np.zeros(display_human_matching_shape, dtype=np.float64)\n\tdisplay_human_matching_shm = shared_memory.SharedMemory(create = True, size = size_array.nbytes, name = 'display_human_matching_info')\n\tdisplay_human_matching_sh_array = np.ndarray(display_human_matching_shape, dtype=np.float64, buffer=display_human_matching_shm.buf)\n\n\t#################### Multi processing #########################\n\n\tp1 = Process(target=get_input_from_cam)\n\tp2 = Process(target=face_detection)\n\tp3 = Process(target=head_pose_estimation, args=(display_positions, ))\n\t#p4 = Process(target=body_pose_estimation)\n\t#p5 = Process(target=action_recognition)\n\tfor port_number in port_numbers:\n\t\tp6 = Process(target=router_function, args=([port_number, port_numbers],))\n\t\tp6.start()\n\tp7 = Process(target=hand_gesture_recognition)\n\tp1.start()\n\tprint('p1 start')\n\tp2.start()\n\tprint('p2 start')\n\tp3.start()\n\tprint('p3 start')\n\t#p4.start()\n\t#print('p4 start')\n\t#p5.start()\n\t#print('p5 start')\n\t#p6.start()\n\tprint('p6 start')\n\tp7.start()\n\tprint('p7 start')\n\n\tp1.join()\n\tprint('p1 join')\n\tp2.join()\n\tprint('p2 join')\n\tp3.join()\n\tprint('p3 join')\n\t#p4.join()\n\t#print('p4 join')\n\t#p5.join()\n\t#print('p5 join')\n\tp6.join()\n\tprint('p6 
join')\n\tp7.join()\n\tprint('p7 join')","repo_name":"LeeChanHyuk/human_display_interaction","sub_path":"code/multi_processing.py","file_name":"multi_processing.py","file_ext":"py","file_size_in_byte":6621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16376740799","text":"#!/usr/bin/python3\n\"\"\" two 2d rotation of 90 deg\"\"\"\n\n\ndef rotate_2d_matrix(matrix):\n array_len = len(matrix)\n\n for i in range(array_len):\n for j in range(i, array_len):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n for i in range(array_len):\n matrix[i] = matrix[i][::-1]\n","repo_name":"egjallow10/alx-interview","sub_path":"0x07-rotate_2d_matrix/0-rotate_2d_matrix.py","file_name":"0-rotate_2d_matrix.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36482301212","text":"import urllib.request\nfrom bs4 import BeautifulSoup\nimport re\nimport csv\nimport chardet\nimport ssl\n\ncsvFile = open(\"/Users/zhangtibin/Downloads/数据存储/TPY.csv\", \"w\", newline=\"\")\nwriter = csv.writer(csvFile)\n#writer.writerow((\"店名称\", \"描述\", \"地址\", \"部门经理\", \"联系电话\"))\nwriter.writerow((\"店名称\", \"地址\", \"联系电话\"))\n\ntotal = 0\nsumPage = 9\npageIndex = 1\nwhile (pageIndex <= sumPage):\n\n url = 'http://www.pacific.sh.cn/shows.asp?base_id=2&second_id=&third_id=&pageIndex='+ str(pageIndex)\n res = urllib.request.urlopen(url)\n soup = BeautifulSoup(res, \"html.parser\")\n #获取页面相应的标签\n storeInfoList = soup.findAll(attrs={\"class\": \"txt\"})\n #本页店面的数量\n storeNum = len(storeInfoList)\n print('本页店面的数量' + str(storeNum))\n\n for storeInfo in storeInfoList:\n storeName = storeInfo.find(\"h6\").find(\"a\").get_text()\n #storeIntro = storeInfo.find(attrs={\"class\": \"intro\"}).get_text()\n storeBaseInfo = storeInfo.findAll(\"p\")\n storeAddress = storeBaseInfo[1].get_text()\n #storeManager = storeBaseInfo[3].get_text()\n storeMobile = storeBaseInfo[4].get_text()\n #writer.writerow((storeName, str(storeIntro.encode('utf-8')), storeAddress.encode('utf-8')[5:], storeManager.encode('utf-8')[5:],storeMobile.encode('utf-8')[5:]))\n writer.writerow((storeName, storeAddress[5:], storeMobile[5:]))\n\n pageIndex = pageIndex + 1\n total = total + storeNum\n\n\n\ncsvFile.close()\n\nprint(total)\n","repo_name":"zhangtibin/PythonLearning","sub_path":"PythonLearningProject/jimu.py","file_name":"jimu.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30579254848","text":"\"\"\"Print all data received on the serial port.\"\"\"\n\n# Builtins\n\nimport argparse\n\n# Packages\n\n\n# Parsing\n\ndef parse_args(*arg_adders, grouped_args, **parser_kwargs):\n \"\"\"Parse the command-line args in groups.\"\"\"\n parser = argparse.ArgumentParser(**parser_kwargs)\n for arg_adder in arg_adders:\n arg_adder(parser)\n for (group_adder, arg_adders) in grouped_args.items():\n group = group_adder(parser)\n for arg_adder in arg_adders:\n arg_adder(group)\n return parser.parse_args()\n","repo_name":"ethanjli/phyllo-python","sub_path":"phyllo/io/cli/args/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15594752535","text":"from collections import deque, ChainMap\nimport copy\nfrom dataclasses import dataclass\nfrom functools import singledispatch\nimport 
importlib\nimport inspect\nimport itertools\nimport more_itertools\nimport math\nimport os\nimport pathlib\nimport re\nfrom typing import Any, Union, Optional, Tuple, List\nimport pyparsing as pp\n\nfrom handcalcs.constants import GREEK_UPPER, GREEK_LOWER\nfrom handcalcs import global_config\nfrom handcalcs.integrations import DimensionalityError\n\n# Six basic line types\n@dataclass\nclass CalcLine:\n line: deque\n comment: str\n latex: str\n\n\n@dataclass\nclass SymbolicLine:\n line: deque\n comment: str\n latex: str\n\n\n@dataclass\nclass ConditionalLine:\n condition: deque\n condition_type: str\n expressions: deque\n raw_condition: str\n raw_expression: str\n true_condition: deque\n true_expressions: deque\n comment: str\n latex_condition: str\n latex_expressions: str\n latex: str\n\n\n@dataclass\nclass ParameterLine:\n line: deque\n comment: str\n latex: str\n\n\n@dataclass\nclass LongCalcLine:\n line: deque\n comment: str\n latex: str\n\n\n@dataclass\nclass NumericCalcLine:\n line: deque\n comment: str\n latex: str\n\n\n@dataclass\nclass IntertextLine:\n line: deque\n comment: str\n latex: str\n\n\n@dataclass\nclass BlankLine: # Attributes not used on BlankLine but still req'd\n line: deque\n comment: str\n latex: str\n\n\n# Five types of cell\n@dataclass\nclass CalcCell:\n source: str\n calculated_results: dict\n precision: Optional[int]\n scientific_notation: Optional[bool]\n lines: deque\n latex_code: str\n\n\n@dataclass\nclass ShortCalcCell:\n source: str\n calculated_results: dict\n precision: Optional[int]\n scientific_notation: Optional[bool]\n lines: deque\n latex_code: str\n\n\n@dataclass\nclass SymbolicCell:\n source: str\n calculated_results: dict\n precision: Optional[int]\n scientific_notation: Optional[bool]\n lines: deque\n latex_code: str\n\n\n@dataclass\nclass ParameterCell:\n source: str\n calculated_results: dict\n lines: deque\n precision: Optional[int]\n scientific_notation: Optional[bool]\n # cols: int\n latex_code: str\n\n\n@dataclass\nclass LongCalcCell:\n source: str\n calculated_results: dict\n lines: deque\n precision: Optional[int]\n scientific_notation: Optional[bool]\n latex_code: str\n\n\ndef is_number(s: str) -> bool:\n \"\"\"\n A basic helper function because Python str methods do not\n have this ability...\n \"\"\"\n try:\n float(s)\n return True\n except:\n return False\n\n\ndef dict_get(d: dict, item: Any) -> Any:\n \"\"\"\n Return the item from the dict, 'd'.\n \"\"\"\n try:\n return d.get(item, item)\n except TypeError:\n return item\n\n\n# The renderer class (\"output\" class)\nclass LatexRenderer:\n # dec_sep = \".\"\n\n def __init__(self, python_code_str: str, results: dict, line_args: dict):\n self.source = python_code_str\n self.results = results\n self.override_precision = line_args[\"precision\"]\n self.override_scientific_notation = line_args[\"sci_not\"]\n self.override_commands = line_args[\"override\"]\n\n def render(self, config_options: dict = global_config._config):\n return latex(\n raw_python_source=self.source,\n calculated_results=self.results,\n override_commands=self.override_commands,\n config_options=config_options,\n cell_precision=self.override_precision,\n cell_notation=self.override_scientific_notation,\n )\n\n\n# Pure functions that do all the work\ndef latex(\n raw_python_source: str,\n calculated_results: dict,\n override_commands: str,\n config_options: dict,\n cell_precision: Optional[int] = None,\n cell_notation: Optional[bool] = None,\n) -> str:\n \"\"\"\n Returns the Python source as a string that has been 
converted into latex code.\n \"\"\"\n # decimal_separator = config_options.get(\"decimal_separator\")\n # latex_block_start = config_options.get(\"latex_block_start\")\n # latex_block_end = config_options.get(\"latex_block_end\")\n # latex_math_environment = config_options.get(\"latex_math_environment\")\n # use_sci_notation = config_options.get(\"use_sci_notation\")\n # display_precision = config_options.get(\"display_precision\")\n # underscore_subscripts = config_options.get(\"underscore_subscripts\")\n # greek_exclusions = config_options.get(\"greek_exclusions\")\n # param_columns = config_options.get(\"param_columns\")\n\n source = raw_python_source\n\n cell = categorize_raw_cell(\n source,\n calculated_results,\n override_commands,\n cell_precision,\n cell_notation,\n )\n cell = categorize_lines(cell)\n cell = convert_cell(\n cell,\n **config_options,\n )\n cell = format_cell(\n cell,\n **config_options,\n # dec_sep\n )\n return cell.latex_code\n\n\ndef categorize_raw_cell(\n raw_source: str,\n calculated_results: dict,\n override_commands: str,\n cell_precision: Optional[int] = None,\n cell_notation: Optional[bool] = None,\n) -> Union[ParameterCell, CalcCell]:\n \"\"\"\n Return a \"Cell\" type depending on the source code of the cell.\n \"\"\"\n if override_commands:\n if override_commands == \"params\":\n return create_param_cell(\n raw_source, calculated_results, cell_precision, cell_notation\n )\n elif override_commands == \"long\":\n return create_long_cell(\n raw_source, calculated_results, cell_precision, cell_notation\n )\n elif override_commands == \"short\":\n return create_short_cell(\n raw_source, calculated_results, cell_precision, cell_notation\n )\n elif override_commands == \"symbolic\":\n return create_symbolic_cell(\n raw_source, calculated_results, cell_precision, cell_notation\n )\n\n if test_for_parameter_cell(raw_source):\n return create_param_cell(\n raw_source, calculated_results, cell_precision, cell_notation\n )\n elif test_for_long_cell(raw_source):\n return create_long_cell(\n raw_source, calculated_results, cell_precision, cell_notation\n )\n elif test_for_short_cell(raw_source):\n return create_short_cell(\n raw_source, calculated_results, cell_precision, cell_notation\n )\n elif test_for_symbolic_cell(raw_source):\n return create_symbolic_cell(\n raw_source, calculated_results, cell_precision, cell_notation\n )\n else:\n return create_calc_cell(\n raw_source, calculated_results, cell_precision, cell_notation\n )\n\n\ndef strip_cell_code(raw_source: str) -> str:\n \"\"\"\n Return 'raw_source' with the \"cell code\" removed.\n A \"cell code\" is a first-line comment in the cell for the\n purpose of categorizing an IPython cell as something other\n than a CalcCell.\n \"\"\"\n split_lines = deque(raw_source.split(\"\\n\"))\n first_line = split_lines[0]\n if first_line.startswith(\"#\") and not first_line.startswith(\n \"##\"\n ): ## for intertext line\n split_lines.popleft()\n return \"\\n\".join(split_lines)\n return raw_source\n\n\ndef categorize_lines(\n cell: Union[CalcCell, ParameterCell]\n) -> Union[CalcCell, ParameterCell]:\n \"\"\"\n Return 'cell' with the line data contained in cell_object.source categorized\n into one of four types:\n * CalcLine\n * ParameterLine\n * ConditionalLine\n\n categorize_lines(calc_cell) is considered the default behaviour for the\n singledispatch categorize_lines function.\n \"\"\"\n incoming = cell.source.rstrip().split(\"\\n\")\n outgoing = deque([])\n calculated_results = cell.calculated_results\n cell_override = 
\"\"\n for line in incoming:\n if isinstance(cell, ParameterCell):\n cell_override = \"parameter\"\n elif isinstance(cell, LongCalcCell):\n cell_override = \"long\"\n elif isinstance(cell, SymbolicCell):\n cell_override = \"symbolic\"\n categorized = categorize_line(line, calculated_results, cell_override)\n categorized_w_result_appended = add_result_values_to_line(\n categorized, calculated_results\n )\n outgoing.append(categorized_w_result_appended)\n cell.lines = outgoing\n return cell\n\n\ndef categorize_line(\n line: str, calculated_results: dict, cell_override: str = \"\"\n) -> Union[CalcLine, ParameterLine, ConditionalLine]:\n \"\"\"\n Return 'line' as either a CalcLine, ParameterLine, or ConditionalLine if 'line'\n fits the appropriate criteria. Raise ValueError, otherwise.\n\n 'override' is a str used to short-cut the tests in categorize_line(). e.g.\n if the cell that the lines belong to is a ParameterCell,\n we do not need to run the test_for_parameter_line() function on the line\n because, in a ParameterCell, all lines will default to a ParameterLine\n because of the cell it's in and how that cell is supposed to behave.\n\n 'override' is passed from the categorize_lines() function because that\n function has the information of the cell type and can pass along any\n desired behavior to categorize_line().\n \"\"\"\n if test_for_blank_line(line):\n return BlankLine(line, \"\", \"\")\n\n if test_for_intertext_line(line):\n return IntertextLine(line, \"\", \"\")\n\n if line.startswith(\"#\"):\n return BlankLine(line, \"\", \"\")\n\n try:\n line, comment = line.split(\"#\", 1)\n except ValueError:\n comment = \"\"\n\n # Override behaviour\n categorized_line = None\n if cell_override == \"parameter\":\n if test_for_conditional_line(line):\n categorized_line = create_conditional_line(\n line, calculated_results, cell_override, comment\n )\n else:\n categorized_line = ParameterLine(\n split_parameter_line(line, calculated_results), comment, \"\"\n )\n return categorized_line\n\n elif cell_override == \"long\":\n if test_for_parameter_line(line): # A parameter can exist in a long cell, too\n categorized_line = ParameterLine(\n split_parameter_line(line, calculated_results), comment, \"\"\n )\n elif test_for_conditional_line(\n line\n ): # A conditional line can exist in a long cell, too\n categorized_line = create_conditional_line(\n line, calculated_results, cell_override, comment\n )\n elif test_for_numeric_line(\n deque(\n list(expr_parser(line))[1:]\n ) # Leave off the declared variable, e.g. 
_x_ = ...\n        ):\n            categorized_line = NumericCalcLine(expr_parser(line), comment, \"\")\n\n        else:\n            categorized_line = LongCalcLine(\n                expr_parser(line), comment, \"\"\n            )  # code_reader\n        return categorized_line\n\n    elif cell_override == \"symbolic\":\n        if test_for_conditional_line(\n            line\n        ):  # A conditional line can exist in a symbolic cell, too\n            categorized_line = create_conditional_line(\n                line, calculated_results, cell_override, comment\n            )\n        else:\n            categorized_line = SymbolicLine(\n                expr_parser(line), comment, \"\"\n            )  # code_reader\n        return categorized_line\n\n    elif cell_override == \"short\":\n        if test_for_numeric_line(\n            deque(list(expr_parser(line))[1:])  # Leave off the declared variable\n        ):\n            categorized_line = NumericCalcLine(expr_parser(line), comment, \"\")\n        else:\n            categorized_line = CalcLine(expr_parser(line), comment, \"\")  # code_reader\n\n        return categorized_line\n    elif True:\n        pass  # Future override conditions to match new cell types can be put here\n\n    # Standard behaviour\n    if line == \"\\n\" or line == \"\":\n        categorized_line = BlankLine(line, \"\", \"\")\n\n    elif test_for_parameter_line(line):\n        categorized_line = ParameterLine(\n            split_parameter_line(line, calculated_results), comment, \"\"\n        )\n\n    elif test_for_conditional_line(line):\n        categorized_line = create_conditional_line(\n            line, calculated_results, cell_override, comment\n        )\n\n    elif test_for_numeric_line(\n        deque(list(expr_parser(line))[1:])  # Leave off the declared variable\n    ):\n        categorized_line = NumericCalcLine(expr_parser(line), comment, \"\")\n\n    elif \"=\" in line:\n        categorized_line = CalcLine(expr_parser(line), comment, \"\")  # code_reader\n\n    elif len(expr_parser(line)) == 1:\n        categorized_line = ParameterLine(\n            split_parameter_line(line, calculated_results), comment, \"\"\n        )\n\n    else:\n        # TODO: Raise this error in a test\n        raise ValueError(\n            f\"Line: {line} is not recognized for rendering.\\n\"\n            \"Lines must either:\\n\"\n            \"\\t * Be the name of a previously assigned single variable\\n\"\n            \"\\t * Be an arithmetic variable assignment (i.e. calculation that uses '=' in the line)\\n\"\n            \"\\t * Be a conditional arithmetic assignment (i.e. 
uses 'if', 'elif', or 'else', each on a single line)\"\n )\n return categorized_line\n\n\ndef create_param_cell(\n raw_source: str,\n calculated_result: dict,\n cell_precision: Optional[int] = None,\n cell_notation: Optional[bool] = None,\n) -> ParameterCell:\n \"\"\"\n Returns a ParameterCell.\n \"\"\"\n comment_tag_removed = strip_cell_code(raw_source)\n cell = ParameterCell(\n source=comment_tag_removed,\n calculated_results=calculated_result,\n precision=cell_precision,\n scientific_notation=cell_notation,\n lines=deque([]),\n latex_code=\"\",\n )\n return cell\n\n\ndef create_long_cell(\n raw_source: str,\n calculated_result: dict,\n cell_precision: Optional[int] = None,\n cell_notation: Optional[bool] = None,\n) -> LongCalcCell:\n \"\"\"\n Returns a LongCalcCell.\n \"\"\"\n comment_tag_removed = strip_cell_code(raw_source)\n cell = LongCalcCell(\n source=comment_tag_removed,\n calculated_results=calculated_result,\n precision=cell_precision,\n scientific_notation=cell_notation,\n lines=deque([]),\n latex_code=\"\",\n )\n return cell\n\n\ndef create_short_cell(\n raw_source: str,\n calculated_result: dict,\n cell_precision: Optional[int] = None,\n cell_notation: Optional[bool] = None,\n) -> ShortCalcCell:\n \"\"\"\n Returns a ShortCell\n \"\"\"\n comment_tag_removed = strip_cell_code(raw_source)\n cell = ShortCalcCell(\n source=comment_tag_removed,\n calculated_results=calculated_result,\n precision=cell_precision,\n scientific_notation=cell_notation,\n lines=deque([]),\n latex_code=\"\",\n )\n return cell\n\n\ndef create_symbolic_cell(\n raw_source: str,\n calculated_result: dict,\n cell_precision: Optional[int] = None,\n cell_notation: Optional[bool] = None,\n) -> SymbolicCell:\n \"\"\"\n Returns a SymbolicCell\n \"\"\"\n comment_tag_removed = strip_cell_code(raw_source)\n cell = SymbolicCell(\n source=comment_tag_removed,\n calculated_results=calculated_result,\n precision=cell_precision,\n scientific_notation=cell_notation,\n lines=deque([]),\n latex_code=\"\",\n )\n return cell\n\n\ndef create_calc_cell(\n raw_source: str,\n calculated_result: dict,\n cell_precision: Optional[int] = None,\n cell_notation: Optional[bool] = None,\n) -> CalcCell:\n \"\"\"\n Returns a CalcCell\n \"\"\"\n cell = CalcCell(\n source=raw_source,\n calculated_results=calculated_result,\n precision=cell_precision,\n scientific_notation=cell_notation,\n lines=deque([]),\n latex_code=\"\",\n )\n return cell\n\n\ndef create_conditional_line(\n line: str, calculated_results: dict, override: str, comment: str\n):\n (\n condition,\n condition_type,\n expression,\n raw_condition,\n raw_expression,\n ) = split_conditional(line, calculated_results, override)\n categorized_line = ConditionalLine(\n condition=condition,\n condition_type=condition_type,\n expressions=expression,\n raw_condition=raw_condition,\n raw_expression=raw_expression.strip(),\n true_condition=deque([]),\n true_expressions=deque([]),\n comment=comment,\n latex_condition=\"\",\n latex_expressions=\"\",\n latex=\"\",\n )\n return categorized_line\n\n\n@singledispatch\ndef add_result_values_to_line(line_object, calculated_results: dict):\n raise TypeError(\n f\"Line object, {type(line_object)} is not recognized yet in add_result_values_to_line()\"\n )\n\n\n@add_result_values_to_line.register(CalcLine)\ndef results_for_calcline(line_object, calculated_results):\n parameter_name = line_object.line[0]\n resulting_value = dict_get(calculated_results, parameter_name)\n line_object.line.append(deque([\"=\", resulting_value]))\n return 
line_object\n\n\n@add_result_values_to_line.register(NumericCalcLine)\ndef results_for_numericcalcline(line_object, calculated_results):\n parameter_name = line_object.line[0]\n resulting_value = dict_get(calculated_results, parameter_name)\n line_object.line.append(deque([\"=\", resulting_value]))\n return line_object\n\n\n@add_result_values_to_line.register(LongCalcLine)\ndef results_for_longcalcline(line_object, calculated_results):\n parameter_name = line_object.line[0]\n resulting_value = dict_get(calculated_results, parameter_name)\n line_object.line.append(deque([\"=\", resulting_value]))\n return line_object\n\n\n@add_result_values_to_line.register(ParameterLine)\ndef results_for_paramline(line_object, calculated_results):\n return line_object\n\n\n@add_result_values_to_line.register(ConditionalLine)\ndef results_for_conditionline(line_object, calculated_results: dict):\n expressions = line_object.expressions\n for expr in expressions:\n add_result_values_to_line(expr, calculated_results)\n return line_object\n\n\n@add_result_values_to_line.register(SymbolicLine)\ndef results_for_symbolicline(line_object, calculated_results):\n return line_object\n\n\n@add_result_values_to_line.register(BlankLine)\ndef results_for_blank(line_object, calculated_results):\n return line_object\n\n\n@add_result_values_to_line.register(IntertextLine)\ndef results_for_intertext(line_object, calculated_results):\n return line_object\n\n\n@singledispatch\ndef convert_cell(\n cell_object,\n **config_options,\n):\n \"\"\"\n Return the cell_object with all of its lines run through the function,\n 'convert_lines()', effectively converting each python element in the parsed\n deque in the equivalent element in latex.\n\n The result remains stored in cell.lines\n \"\"\"\n raise TypeError(\n f\"Cell object {type(cell_object)} is not yet recognized in convert_cell()\"\n )\n\n\n@convert_cell.register(CalcCell)\ndef convert_calc_cell(\n cell: CalcCell,\n **config_options,\n) -> CalcCell:\n outgoing = cell.lines\n calculated_results = cell.calculated_results\n incoming = deque([])\n for line in outgoing:\n incoming.append(\n convert_line(\n line,\n calculated_results,\n **config_options,\n )\n )\n cell.lines = incoming\n return cell\n\n\n@convert_cell.register(ShortCalcCell)\ndef convert_calc_cell(cell: ShortCalcCell, **config_options) -> ShortCalcCell:\n outgoing = cell.lines\n calculated_results = cell.calculated_results\n incoming = deque([])\n for line in outgoing:\n incoming.append(convert_line(line, calculated_results, **config_options))\n cell.lines = incoming\n return cell\n\n\n@convert_cell.register(LongCalcCell)\ndef convert_longcalc_cell(cell: LongCalcCell, **config_options) -> LongCalcCell:\n outgoing = cell.lines\n calculated_results = cell.calculated_results\n incoming = deque([])\n for line in outgoing:\n incoming.append(convert_line(line, calculated_results, **config_options))\n cell.lines = incoming\n return cell\n\n\n@convert_cell.register(ParameterCell)\ndef convert_parameter_cell(cell: ParameterCell, **config_options) -> ParameterCell:\n outgoing = cell.lines\n calculated_results = cell.calculated_results\n incoming = deque([])\n for line in outgoing:\n incoming.append(convert_line(line, calculated_results, **config_options))\n cell.lines = incoming\n return cell\n\n\n@convert_cell.register(SymbolicCell)\ndef convert_symbolic_cell(cell: SymbolicCell, **config_options) -> SymbolicCell:\n outgoing = cell.lines\n calculated_results = cell.calculated_results\n incoming = deque([])\n for line in 
outgoing:\n incoming.append(convert_line(line, calculated_results, **config_options))\n cell.lines = incoming\n return cell\n\n\n@singledispatch\ndef convert_line(\n line_object,\n calculated_results: dict,\n **config_options,\n):\n \"\"\"\n Returns 'line_object' with its .line attribute converted into a\n deque with elements that have been converted to their appropriate\n Latex counterparts.\n\n convert_line() runs the deque through all of the conversion functions\n as organized in `swap_calculation()`.\n \"\"\"\n raise TypeError(\n f\"Cell object {type(line_object)} is not yet recognized in convert_line()\"\n )\n\n\n@convert_line.register(CalcLine)\ndef convert_calc(line, calculated_results, **config_options):\n (\n *line_deque,\n result,\n ) = line.line # Unpack deque of form [[calc_line, ...], ['=', 'result']]\n symbolic_portion, numeric_portion = swap_calculation(\n line_deque, calculated_results, **config_options\n )\n line.line = symbolic_portion + numeric_portion + result\n return line\n\n\n@convert_line.register(NumericCalcLine)\ndef convert_numericcalc(line, calculated_results, **config_options):\n (\n *line_deque,\n result,\n ) = line.line # Unpack deque of form [[calc_line, ...], ['=', 'result']]\n symbolic_portion, _ = swap_calculation(\n line_deque, calculated_results, **config_options\n )\n line.line = symbolic_portion + result\n return line\n\n\n@convert_line.register(LongCalcLine)\ndef convert_longcalc(line, calculated_results, **config_options):\n (\n *line_deque,\n result,\n ) = line.line # Unpack deque of form [[calc_line, ...], ['=', 'result']]\n symbolic_portion, numeric_portion = swap_calculation(\n line_deque, calculated_results, **config_options\n )\n line.line = symbolic_portion + numeric_portion + result\n return line\n\n\n@convert_line.register(ConditionalLine)\ndef convert_conditional(line, calculated_results, **config_options):\n condition, condition_type, expressions, raw_condition = (\n line.condition,\n line.condition_type,\n line.expressions,\n line.raw_condition,\n )\n true_condition_deque = swap_conditional(\n condition, condition_type, raw_condition, calculated_results, **config_options\n )\n if true_condition_deque:\n line.true_condition = true_condition_deque\n for expression in expressions:\n line.true_expressions.append(\n convert_line(expression, calculated_results, **config_options)\n )\n return line\n\n\n@convert_line.register(ParameterLine)\ndef convert_parameter(line, calculated_results, **config_options):\n line.line = swap_symbolic_calcs(line.line, calculated_results, **config_options)\n return line\n\n\n@convert_line.register(SymbolicLine)\ndef convert_symbolic_line(line, calculated_results, **config_options):\n line.line = swap_symbolic_calcs(line.line, calculated_results, **config_options)\n return line\n\n\n@convert_line.register(IntertextLine)\ndef convert_intertext(line, calculated_results, **config_options):\n return line\n\n\n@convert_line.register(BlankLine)\ndef convert_blank(line, calculated_results, **config_options):\n return line\n\n\n@singledispatch\ndef format_cell(cell_object, **config_options):\n raise TypeError(\n f\"Cell type {type(cell_object)} has not yet been implemented in format_cell().\"\n )\n\n\n@format_cell.register(ParameterCell)\ndef format_parameters_cell(cell: ParameterCell, **config_options):\n \"\"\"\n Returns the input parameters as an \\\\align environment with 'cols'\n number of columns.\n \"\"\"\n cols = config_options[\"param_columns\"]\n if cell.precision is None:\n precision = 
config_options[\"display_precision\"]\n else:\n precision = cell.precision\n cell_notation = toggle_scientific_notation(\n config_options[\"use_scientific_notation\"], cell.scientific_notation\n )\n opener = config_options[\"latex_block_start\"]\n begin = f\"\\\\begin{{{config_options['math_environment_start']}}}\"\n end = f\"\\\\end{{{config_options['math_environment_end']}}}\"\n closer = config_options[\"latex_block_end\"]\n line_break = f\"{config_options['line_break']}\\n\"\n cycle_cols = itertools.cycle(range(1, cols + 1))\n for line in cell.lines:\n line = round_and_render_line_objects_to_latex(\n line, precision, cell_notation, **config_options\n )\n line = format_lines(line, **config_options)\n if isinstance(line, BlankLine):\n continue\n if isinstance(line, ConditionalLine):\n outgoing = deque([])\n for expr in line.true_expressions:\n current_col = next(cycle_cols)\n if current_col % cols == 0:\n outgoing.append(\"&\" + expr + line_break)\n elif current_col % cols != 1:\n outgoing.append(\"&\" + expr)\n else:\n outgoing.append(expr)\n line.latex_expressions = \" \".join(outgoing)\n line.latex = line.latex_condition + line.latex_expressions\n else:\n latex_param = line.latex\n\n current_col = next(cycle_cols)\n if current_col % cols == 0:\n line.latex = \"&\" + latex_param + line_break\n elif current_col % cols != 1:\n line.latex = \"&\" + latex_param\n else:\n line.latex = latex_param\n\n latex_block = \" \".join(\n [line.latex for line in cell.lines if not isinstance(line, BlankLine)]\n ).rstrip() # .rstrip(): Hack to solve another problem of empty lines in {aligned} environment\n cell.latex_code = \"\\n\".join([opener, begin, latex_block, end, closer])\n return cell\n\n\n@format_cell.register(CalcCell)\ndef format_calc_cell(cell: CalcCell, **config_options) -> str:\n line_break = f\"{config_options['line_break']}\\n\"\n if cell.precision is None:\n precision = config_options[\"display_precision\"]\n else:\n precision = cell.precision\n cell_notation = toggle_scientific_notation(\n config_options[\"use_scientific_notation\"], cell.scientific_notation\n )\n incoming = deque([])\n for line in cell.lines:\n line = round_and_render_line_objects_to_latex(\n line, precision, cell_notation, **config_options\n )\n line = convert_applicable_long_lines(line)\n line = format_lines(line, **config_options)\n incoming.append(line)\n cell.lines = incoming\n\n latex_block = line_break.join([line.latex for line in cell.lines if line.latex])\n opener = config_options[\"latex_block_start\"]\n begin = f\"\\\\begin{{{config_options['math_environment_start']}}}\"\n end = f\"\\\\end{{{config_options['math_environment_end']}}}\"\n closer = config_options[\"latex_block_end\"]\n cell.latex_code = \"\\n\".join([opener, begin, latex_block, end, closer]).replace(\n \"\\n\" + end, end\n )\n return cell\n\n\n@format_cell.register(ShortCalcCell)\ndef format_shortcalc_cell(cell: ShortCalcCell, **config_options) -> str:\n incoming = deque([])\n line_break = f\"{config_options['line_break']}\\n\"\n if cell.precision is None:\n precision = config_options[\"display_precision\"]\n else:\n precision = cell.precision\n cell_notation = toggle_scientific_notation(\n config_options[\"use_scientific_notation\"], cell.scientific_notation\n )\n for line in cell.lines:\n line = round_and_render_line_objects_to_latex(\n line, precision, cell_notation, **config_options\n )\n line = format_lines(line, **config_options)\n incoming.append(line)\n cell.lines = incoming\n\n latex_block = line_break.join([line.latex for line in 
cell.lines if line.latex])\n opener = config_options[\"latex_block_start\"]\n begin = f\"\\\\begin{{{config_options['math_environment_start']}}}\"\n end = f\"\\\\end{{{config_options['math_environment_end']}}}\"\n closer = config_options[\"latex_block_end\"]\n cell.latex_code = \"\\n\".join([opener, begin, latex_block, end, closer]).replace(\n \"\\n\" + end, end\n )\n return cell\n\n\n@format_cell.register(LongCalcCell)\ndef format_longcalc_cell(cell: LongCalcCell, **config_options) -> str:\n line_break = f\"{config_options['line_break']}\\n\"\n if cell.precision is None:\n precision = config_options[\"display_precision\"]\n else:\n precision = cell.precision\n cell_notation = toggle_scientific_notation(\n config_options[\"use_scientific_notation\"], cell.scientific_notation\n )\n incoming = deque([])\n for line in cell.lines:\n line = round_and_render_line_objects_to_latex(\n line, precision, cell_notation, **config_options\n )\n line = convert_applicable_long_lines(line)\n line = format_lines(line, **config_options)\n incoming.append(line)\n cell.lines = incoming\n\n latex_block = line_break.join([line.latex for line in cell.lines if line.latex])\n opener = config_options[\"latex_block_start\"]\n begin = f\"\\\\begin{{{config_options['math_environment_start']}}}\"\n end = f\"\\\\end{{{config_options['math_environment_end']}}}\"\n closer = config_options[\"latex_block_end\"]\n cell.latex_code = \"\\n\".join([opener, begin, latex_block, end, closer]).replace(\n \"\\n\" + end, end\n )\n return cell\n\n\n@format_cell.register(SymbolicCell)\ndef format_symbolic_cell(cell: SymbolicCell, **config_options) -> str:\n line_break = f\"{config_options['line_break']}\\n\"\n if cell.precision is None:\n precision = config_options[\"display_precision\"]\n else:\n precision = cell.precision\n cell_notation = toggle_scientific_notation(\n config_options[\"use_scientific_notation\"], cell.scientific_notation\n )\n incoming = deque([])\n for line in cell.lines:\n line = round_and_render_line_objects_to_latex(\n line, precision, cell_notation, **config_options\n )\n line = format_lines(line, **config_options)\n incoming.append(line)\n cell.lines = incoming\n\n latex_block = line_break.join([line.latex for line in cell.lines if line.latex])\n opener = config_options[\"latex_block_start\"]\n begin = f\"\\\\begin{{{config_options['math_environment_start']}}}\"\n end = f\"\\\\end{{{config_options['math_environment_end']}}}\"\n closer = config_options[\"latex_block_end\"]\n cell.latex_code = \"\\n\".join([opener, begin, latex_block, end, closer]).replace(\n \"\\n\" + end, end\n )\n return cell\n\n\n@singledispatch\ndef round_and_render_line_objects_to_latex(\n line: Union[CalcLine, ConditionalLine, ParameterLine],\n cell_precision: int,\n cell_notation: bool,\n **config_options,\n): # Not called for symbolic lines; see format_symbolic_cell()\n \"\"\"\n Returns 'line' with the elements of the deque in its .line attribute\n converted into their final string form for rendering (thereby preserving\n its intermediate step) and populates the\n .latex attribute with the joined string from .line.\n\n 'precision' is the number of decimal places that each object should\n be rounded to for display.\n \"\"\"\n raise TypeError(\n f\"Line type {type(line)} not recognized yet in round_and_render_line_objects_to_latex()\"\n )\n\n\n@round_and_render_line_objects_to_latex.register(CalcLine)\ndef round_and_render_calc(\n line: CalcLine, cell_precision: int, cell_notation: bool, **config_options\n) -> CalcLine:\n idx_line = 
line.line\n precision = cell_precision\n use_scientific_notation = toggle_scientific_notation(\n config_options[\"use_scientific_notation\"], cell_notation\n )\n preferred_formatter = config_options[\"preferred_string_formatter\"]\n rendered_line = render_latex_str(\n idx_line, use_scientific_notation, precision, preferred_formatter\n )\n rendered_line = swap_dec_sep(rendered_line, config_options[\"decimal_separator\"])\n line.line = rendered_line\n line.latex = \" \".join(rendered_line)\n return line\n\n\n@round_and_render_line_objects_to_latex.register(NumericCalcLine)\ndef round_and_render_numericcalc(\n line: NumericCalcLine, cell_precision: int, cell_notation: bool, **config_options\n) -> NumericCalcLine:\n idx_line = line.line\n precision = cell_precision\n use_scientific_notation = toggle_scientific_notation(\n config_options[\"use_scientific_notation\"], cell_notation\n )\n preferred_formatter = config_options[\"preferred_string_formatter\"]\n rendered_line = render_latex_str(\n idx_line, use_scientific_notation, precision, preferred_formatter\n )\n rendered_line = swap_dec_sep(rendered_line, config_options[\"decimal_separator\"])\n line.line = rendered_line\n line.latex = \" \".join(rendered_line)\n return line\n\n\n@round_and_render_line_objects_to_latex.register(LongCalcLine)\ndef round_and_render_longcalc(\n line: LongCalcLine, cell_precision: int, cell_notation: bool, **config_options\n) -> LongCalcLine:\n idx_line = line.line\n precision = cell_precision\n use_scientific_notation = toggle_scientific_notation(\n config_options[\"use_scientific_notation\"], cell_notation\n )\n preferred_formatter = config_options[\"preferred_string_formatter\"]\n rendered_line = render_latex_str(\n idx_line, use_scientific_notation, precision, preferred_formatter\n )\n rendered_line = swap_dec_sep(rendered_line, config_options[\"decimal_separator\"])\n line.line = rendered_line\n line.latex = \" \".join(rendered_line)\n return line\n\n\n@round_and_render_line_objects_to_latex.register(ParameterLine)\ndef round_and_render_parameter(\n line: ParameterLine, cell_precision: int, cell_notation: bool, **config_options\n) -> ParameterLine:\n idx_line = line.line\n precision = cell_precision\n use_scientific_notation = toggle_scientific_notation(\n config_options[\"use_scientific_notation\"], cell_notation\n )\n preferred_formatter = config_options[\"preferred_string_formatter\"]\n rendered_line = render_latex_str(\n idx_line, use_scientific_notation, precision, preferred_formatter\n )\n rendered_line = swap_dec_sep(rendered_line, config_options[\"decimal_separator\"])\n line.line = rendered_line\n line.latex = \" \".join(rendered_line)\n return line\n\n\n@round_and_render_line_objects_to_latex.register(ConditionalLine)\ndef round_and_render_conditional(\n line: ConditionalLine, cell_precision: int, cell_notation: bool, **config_options\n) -> ConditionalLine:\n conditional_line_break = f\"{config_options['line_break']}\\n\"\n outgoing = deque([])\n idx_line = line.true_condition\n precision = cell_precision\n use_scientific_notation = toggle_scientific_notation(\n config_options[\"use_scientific_notation\"], cell_notation\n )\n preferred_formatter = config_options[\"preferred_string_formatter\"]\n rendered_line = render_latex_str(\n idx_line, use_scientific_notation, precision, preferred_formatter\n )\n rendered_line = swap_dec_sep(rendered_line, config_options[\"decimal_separator\"])\n line.line = rendered_line\n line.latex = \" \".join(rendered_line)\n # return line\n line.true_condition = 
rendered_line\n for (\n expr\n ) in line.true_expressions: # Each 'expr' item is a CalcLine or other line type\n outgoing.append(\n round_and_render_line_objects_to_latex(\n expr, cell_precision, cell_notation, **config_options\n )\n )\n line.true_expressions = outgoing\n line.latex = conditional_line_break.join(\n [calc_line.latex for calc_line in outgoing]\n )\n return line\n\n\n@round_and_render_line_objects_to_latex.register(SymbolicLine)\ndef round_and_render_symbolic(\n line: SymbolicLine, cell_precision: int, cell_notation: bool, **config_options\n) -> SymbolicLine:\n idx_line = line.line\n precision = cell_precision\n use_scientific_notation = toggle_scientific_notation(\n config_options[\"use_scientific_notation\"], cell_notation\n )\n preferred_formatter = config_options[\"preferred_string_formatter\"]\n rendered_line = render_latex_str(\n idx_line, use_scientific_notation, precision, preferred_formatter\n )\n rendered_line = swap_dec_sep(rendered_line, config_options[\"decimal_separator\"])\n line.line = rendered_line\n line.latex = \" \".join(rendered_line)\n return line\n\n\n@round_and_render_line_objects_to_latex.register(BlankLine)\ndef round_and_render_blank(\n line, cell_precision: int, cell_notation: bool, **config_options\n):\n return line\n\n\n@round_and_render_line_objects_to_latex.register(IntertextLine)\ndef round_and_render_intertext(\n line, cell_precision: int, cell_notation: bool, **config_options\n):\n return line\n\n\ndef render_latex_str(\n line_of_code: deque,\n use_scientific_notation: bool,\n precision: int,\n preferred_formatter: str,\n) -> deque:\n \"\"\"\n Returns a rounded str based on the latex_repr of an object in\n 'line_of_code'\n \"\"\"\n outgoing = deque([])\n for item in line_of_code:\n rendered_str = latex_repr(\n item, use_scientific_notation, precision, preferred_formatter\n )\n outgoing.append(rendered_str)\n return outgoing\n\n\ndef latex_repr(\n item: Any, use_scientific_notation: bool, precision: int, preferred_formatter: str\n) -> str:\n \"\"\"\n Return a str if the object, 'item', has a special repr method\n for rendering itself in latex. 
If not, returns str(result).\n \"\"\"\n # Check for arrays\n if hasattr(item, \"__len__\") and not isinstance(item, (str, dict)):\n comma_space = \",\\\\ \"\n try:\n array = (\n \"[\"\n + comma_space.join(\n [\n latex_repr(\n v, use_scientific_notation, precision, preferred_formatter\n )\n for v in item\n ]\n )\n + \"]\"\n )\n rendered_string = array\n return rendered_string\n except TypeError:\n pass\n\n # Check for sympy objects\n if hasattr(item, \"__sympy__\"):\n return render_sympy(round_sympy(item, precision, use_scientific_notation))\n\n # Check for scientific notation strings\n if isinstance(item, str) and test_for_scientific_float(item):\n if \"e-\" in item:\n rendered_string = swap_scientific_notation_str(item)\n elif \"e+\" in item:\n rendered_string = swap_scientific_notation_str(item)\n elif \"e\" in item:\n rendered_string = swap_scientific_notation_str(item.replace(\"e\", \"e+\"))\n return rendered_string\n\n # Procedure for atomic data items\n try:\n if use_scientific_notation:\n rendered_string = f\"{item:.{precision}e{preferred_formatter}}\"\n else:\n rendered_string = f\"{item:.{precision}f{preferred_formatter}}\"\n except (ValueError, TypeError):\n try:\n if use_scientific_notation and isinstance(item, complex):\n rendered_real = f\"{item.real:.{precision}e}\"\n rendered_real = swap_scientific_notation_str(rendered_real)\n\n rendered_imag = f\"{item.imag:.{precision}e}\"\n rendered_imag = swap_scientific_notation_str(rendered_imag)\n\n rendered_string = (\n f\"\\\\left( {rendered_real} + {rendered_imag} j \\\\right)\"\n )\n elif use_scientific_notation and not isinstance(item, int):\n rendered_string = f\"{item:.{precision}e}\"\n rendered_string = swap_scientific_notation_str(rendered_string)\n elif not isinstance(item, int):\n rendered_string = f\"{item:.{precision}f}\"\n else:\n rendered_string = str(item)\n except (ValueError, TypeError):\n try:\n rendered_string = item._repr_latex_()\n except AttributeError:\n rendered_string = str(item)\n\n return rendered_string.replace(\"$\", \"\")\n\n\ndef round_sympy(elem: Any, precision: int, use_scientific_notation: bool) -> Any:\n \"\"\"\n Returns the Sympy expression 'elem' rounded to 'precision'\n \"\"\"\n from sympy import Float\n\n rule = {}\n for n in elem.atoms(Float):\n if use_scientific_notation:\n rule[n] = round_for_scientific_notation(n, precision)\n else:\n rule[n] = round(n, precision)\n rounded = elem.xreplace(rule)\n if hasattr(elem, \"units\") and not hasattr(rounded, \"units\"):\n # Add back pint units lost during rounding.\n rounded = rounded * elem.units\n return rounded\n\n\ndef render_sympy(elem: Any) -> str:\n \"\"\"\n Returns a string of the Latex representation of the sympy object, 'elem'.\n \"\"\"\n from sympy import latex\n\n return latex(elem)\n\n\ndef round_for_scientific_notation(elem, precision):\n \"\"\"\n Returns a float rounded so that the decimals behind the coefficient are rounded to 'precision'.\n \"\"\"\n adjusted_precision = calculate_adjusted_precision(elem, precision)\n rounded = round(elem, adjusted_precision)\n return rounded\n\n\ndef calculate_adjusted_precision(elem, precision):\n \"\"\"\n Returns the number of decimal places 'elem' should be rounded to\n to achieve a final 'precision' in scientific notation.\n \"\"\"\n try:\n power_of_ten = int(math.log10(abs(elem)))\n except (DimensionalityError, TypeError):\n elem_float = float(str(elem).split(\" \")[0])\n power_of_ten = int(math.log10(abs(elem_float)))\n if power_of_ten < 1:\n return precision - power_of_ten + 1\n else:\n 
return precision - power_of_ten\n\n\n# def round_elements(line_of_code: deque, cell_precision: Optional[int] = None, cell_notation: bool = False) -> deque:\n# \"\"\"\n# Returns a rounded float\n# \"\"\"\n# outgoing = deque([])\n# for item in line_of_code:\n# rounded = round_(item, precision=cell_precision, use_scientific_notation=cell_notation)\n# outgoing.append(rounded)\n# return outgoing\n\n\n# def round_(item: Any, precision: int, depth: int = 0, use_scientific_notation: bool = False) -> Any:\n# \"\"\"\n# Recursively round an object and its elements to a given precision.\n# \"\"\"\n# round_notation = use_scientific_notation\n# if depth > 3:\n# # Limit maximum recursion depth.\n# return item\n\n# if hasattr(item, \"__sympy__\"):\n# return round_sympy(item, precision, use_scientific_notation)\n\n# if hasattr(item, \"__len__\") and not isinstance(item, (str, dict, tuple)):\n# try: # For catching arrays\n# return [round_(v, precision=precision, depth=depth + 1, use_scientific_notation=use_scientific_notation) for v in item]\n# except (ValueError, TypeError):\n# # Objects like Quantity (from pint) have a __len__ wrapper\n# # even if the wrapped magnitude object is not iterable.\n# return round_float(item, precision, use_scientific_notation)\n\n# if isinstance(item, complex):\n# return round_complex(item, precision, use_scientific_notation)\n# if not isinstance(item, (str, int)):\n# try:\n# return round_float(item, precision, use_scientific_notation)\n# except (ValueError, TypeError):\n# pass\n# return item\n\n\n# def round_float(elem: Any, precision: int, use_scientific_notation: bool) -> Any:\n# \"\"\"\n# Returns 'elem', presumed to be float-like, to 'precision', where 'precision' varies\n# depending on whether 'use_scientific_notation' is True or not.\n# \"\"\"\n# if use_scientific_notation:\n# return round_for_scientific_notation(elem, precision)\n# else:\n# return round(elem, precision)\n\n\n# def round_complex(elem: complex, precision: int, use_scientific_notation: bool) -> complex:\n# \"\"\"\n# Returns the complex 'elem' rounded to 'precision'\n# \"\"\"\n# return complex(\n# round_float(elem.real, precision, use_scientific_notation),\n# round_float(elem.imag, precision, use_scientific_notation)\n# )\n\n\n@singledispatch\ndef convert_applicable_long_lines(\n line: Union[ConditionalLine, CalcLine]\n): # Not called for symbolic lines; see format_symbolic_cell()\n raise TypeError(\n f\"Line type {type(line)} not yet implemented in convert_applicable_long_lines().\"\n )\n\n\n@convert_applicable_long_lines.register(CalcLine)\ndef convert_calc_to_long(line: CalcLine):\n if test_for_long_lines(line):\n return convert_calc_line_to_long(line)\n return line\n\n\n@convert_applicable_long_lines.register(NumericCalcLine)\ndef convert_calc_to_long(line: NumericCalcLine):\n if test_for_long_lines(line):\n return convert_calc_line_to_long(line)\n return line\n\n\n@convert_applicable_long_lines.register(LongCalcLine)\ndef convert_longcalc_to_long(line: LongCalcLine):\n return line\n\n\n@convert_applicable_long_lines.register(ConditionalLine)\ndef convert_expressions_to_long(line: ConditionalLine):\n for idx, expr in enumerate(line.true_expressions):\n if test_for_long_lines(expr):\n line.true_expressions[idx] = convert_calc_line_to_long(expr)\n return line\n\n\n@convert_applicable_long_lines.register(ParameterLine)\ndef convert_param_to_long(line: ParameterLine):\n return line\n\n\n@convert_applicable_long_lines.register(IntertextLine)\ndef convert_intertext_to_long(line: IntertextLine):\n return 
line\n\n\n@convert_applicable_long_lines.register(BlankLine)\ndef convert_blank_to_long(line: BlankLine):\n return line\n\n\n@singledispatch\ndef test_for_long_lines(line: Union[CalcLine, ConditionalLine]) -> bool:\n raise TypeError(\n f\"Line type of {type(line)} not yet implemented in test_for_long_lines().\"\n )\n\n\n@test_for_long_lines.register(ParameterLine)\ndef test_for_long_param_lines(line: ParameterLine) -> bool:\n return False\n\n\n@test_for_long_lines.register(BlankLine)\ndef test_for_long_blank(line: BlankLine) -> bool:\n return False\n\n\n@test_for_long_lines.register(IntertextLine)\ndef test_for_long_intertext(line: IntertextLine) -> bool:\n return False\n\n\n@test_for_long_lines.register(LongCalcLine)\ndef test_for_long_longcalcline(line: LongCalcLine) -> bool:\n return True\n\n\n@test_for_long_lines.register(NumericCalcLine)\ndef test_for_long_numericcalcline(line: NumericCalcLine) -> bool:\n return False\n\n\n@test_for_long_lines.register(CalcLine)\ndef test_for_long_calc_lines(line: CalcLine) -> bool:\n \"\"\"\n Return True if 'calc_line' passes the criteria to be considered,\n as a \"LongCalcLine\". False otherwise.\n\n Function goes through all of the code in the CalcLine and maintains\n several (imperfect) tallies of characters to determine if the\n calculation is too long to exist on a single line.\n\n This is attempted by counting actual characters that will appear\n in the resulting equation and that are not part of\n the actual latex code (e.g. anything with a \"\\\\\" in front of it, etc.),\n and by also \"discounting\" characters that are in a fraction, since\n the overall length of the fraction (on the page) is determined by\n whichever is longer, the numerator or denominator. As such, characters\n in a fraction (single level of fraction, only) are counted and\n discounted from the total tally.\n\n This is a (very) imperfect work-in-progress.\n \"\"\"\n threshold = 130 # This is an arbitrary value that can be adjusted manually, if reqd\n item_length = 0\n fraction_discount = 0\n stack = 0\n stack_location = 0\n fraction_flag = False\n fraction_count = 0\n total_length = 0\n for item in line.line:\n if \"_\" in item or \"^\" in item: # Check for subscripts and superscripts first\n item = (\n item.replace(\"_\", \"\").replace(\"^\", \"\").replace(\"{\", \"\").replace(\"}\", \"\")\n )\n item_length = len(item)\n\n elif \"\\\\\" not in item or \"{\" not in item:\n item_length = len(item)\n\n elif \"{\" in item: # Check for other latex operators that use { }\n stack += 1\n\n else: # Assume the latex command adds at least one character, e.g. 
\\left( or \\cdot\n            total_length += 1\n            continue\n\n        if item == \"\\\\frac{\" or item == \"}{\":  # If entering into a fraction\n            fraction_discount = (\n                fraction_count\n                if fraction_count > fraction_discount\n                else fraction_discount\n            )\n            fraction_count = 0\n            fraction_flag = True\n            if item == \"\\\\frac{\":\n                stack_location = stack  # Mark where the fraction is in relation to the other \"{\" operators\n                stack += 1\n\n        elif (  # Check for closing of misc latex operators, which may include a fraction\n            item == \"}\"\n        ):\n            stack -= 1\n            if stack == stack_location:\n                fraction_flag = False\n                fraction_discount = (\n                    fraction_count\n                    if fraction_count > fraction_discount\n                    else fraction_discount\n                )\n\n        if fraction_flag == True:\n            fraction_count += item_length\n\n        total_length += item_length\n\n    stat = total_length - fraction_discount\n    return stat >= threshold\n\n\ndef convert_calc_line_to_long(calc_line: CalcLine) -> LongCalcLine:\n    \"\"\"\n    Return a LongCalcLine based on a calc_line\n    \"\"\"\n    return LongCalcLine(\n        line=calc_line.line, comment=calc_line.comment, latex=calc_line.latex\n    )\n\n\n@singledispatch\ndef format_lines(line_object, **config_options):\n    \"\"\"\n    format_lines adds small, context-dependent pieces of latex code in\n    amongst the latex string in the line_object.latex attribute. This involves\n    things like inserting \"&\" or linebreak characters for equation alignment,\n    formatting comments stored in the .comment attribute and putting them at\n    the end of the calculation, or the distinctive \"Since, ...\"\n    text that occurs when a conditional calculation is rendered.\n    \"\"\"\n    raise TypeError(\n        f\"Line type {type(line_object)} is not yet implemented in format_lines().\"\n    )\n\n\n@format_lines.register(CalcLine)\ndef format_calc_line(line: CalcLine, **config_options) -> CalcLine:\n    latex_code = line.latex\n\n    equals_signs = [idx for idx, char in enumerate(latex_code) if char == \"=\"]\n    second_equals = equals_signs[1]  # Change to 1 for second equals\n    latex_code = latex_code.replace(\"=\", \"&=\")  # Align with ampersands for '\\align'\n    comment_space = \"\"\n    comment = \"\"\n    if line.comment:\n        comment_space = \"\\\\;\"\n        comment = format_strings(line.comment, comment=True)\n    line.latex = f\"{latex_code[0:second_equals + 1]} {latex_code[second_equals + 2:]} {comment_space} {comment}\\n\"\n    return line\n\n\n@format_lines.register(NumericCalcLine)\ndef format_calc_line(line: NumericCalcLine, **config_options) -> NumericCalcLine:\n    latex_code = line.latex\n    latex_code = latex_code.replace(\"=\", \"&=\")  # Align with ampersands for '\\align'\n    comment_space = \"\"\n    comment = \"\"\n    if line.comment:\n        comment_space = \"\\\\;\"\n        comment = format_strings(line.comment, comment=True)\n    line.latex = f\"{latex_code} {comment_space} {comment}\\n\"\n    return line\n\n\n@format_lines.register(ConditionalLine)\ndef format_conditional_line(line: ConditionalLine, **config_options) -> ConditionalLine:\n    \"\"\"\n    Returns the conditional line as a string of latex_code\n    \"\"\"\n    if line.true_condition:\n        latex_condition = \" \".join(line.true_condition)\n        a = \"{\"\n        b = \"}\"\n        comment_space = \"\"\n        comment = \"\"\n        if line.comment:\n            comment_space = \"\\\\;\"\n            comment = format_strings(line.comment, comment=True)\n\n        line_break = f\"{config_options['line_break']}\\n\"\n        first_line = f\"&\\\\text{a}Since, {b} {latex_condition} : {comment_space} {comment} {line_break}\"\n        if line.condition_type == \"else\":\n            first_line = \"\"\n        line.latex_condition = first_line\n\n        outgoing = deque([])\n
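        # Illustrative note: each item in 'line.true_expressions' is itself a line\n        # object (e.g. a CalcLine), so format_lines() is applied recursively and the\n        # resulting latex strings are joined beneath the rendered condition.\n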
        for calc_line in line.true_expressions:\n            outgoing.append((format_lines(calc_line, **config_options)).latex)\n        line.true_expressions = outgoing\n        line.latex_expressions = line_break.join(line.true_expressions)\n        line.latex = line.latex_condition + line.latex_expressions\n        return line\n    else:\n        line.latex_condition = \"\"\n        line.true_expressions = deque([])\n        return line\n\n\n@format_lines.register(LongCalcLine)\ndef format_long_calc_line(line: LongCalcLine, **config_options) -> LongCalcLine:\n    \"\"\"\n    Return line with .latex attribute formatted with line breaks suitable\n    for positioning within the \"\\aligned\" latex environment.\n    \"\"\"\n    latex_code = line.latex\n    long_latex = latex_code.replace(\"=\", \"\\\\\\\\&=\")  # Change all...\n    long_latex = long_latex.replace(\"\\\\\\\\&=\", \"&=\", 1)  # ...except the first one\n    line_break = f\"{config_options['line_break']}\\n\"\n    comment_space = \"\"\n    comment = \"\"\n    if line.comment:\n        comment_space = \"\\\\;\"\n        comment = format_strings(line.comment, comment=True)\n    line.latex = f\"{long_latex} {comment_space} {comment}{line_break}\"\n    return line\n\n\n@format_lines.register(ParameterLine)\ndef format_param_line(line: ParameterLine, **config_options) -> ParameterLine:\n    comment_space = \"\\\\;\"\n    line_break = \"\\n\"\n    if \"=\" in line.latex:\n        replaced = line.latex.replace(\"=\", \"&=\")\n        comment = format_strings(line.comment, comment=True)\n        line.latex = f\"{replaced} {comment_space} {comment}{line_break}\"\n    else:  # To handle sympy symbols displayed alone\n        replaced = line.latex.replace(\" \", comment_space)\n        comment = format_strings(line.comment, comment=True)\n        line.latex = f\"{replaced} {comment_space} {comment}{line_break}\"\n    return line\n\n\n@format_lines.register(SymbolicLine)\ndef format_symbolic_line(line: SymbolicLine, **config_options) -> SymbolicLine:\n    replaced = line.latex.replace(\"=\", \"&=\")\n    comment_space = \"\\\\;\"\n    comment = format_strings(line.comment, comment=True)\n    line.latex = f\"{replaced} {comment_space} {comment}\\n\"\n    return line\n\n\n@format_lines.register(IntertextLine)\ndef format_intertext_line(line: IntertextLine, **config_options) -> IntertextLine:\n    cleaned_line = line.line.replace(\"##\", \"\")\n    line.latex = f\"& \\\\textrm{{{cleaned_line}}}\"\n    return line\n\n\n@format_lines.register(BlankLine)\ndef format_blank_line(line: BlankLine, **config_options) -> BlankLine:\n    line.latex = \"\"\n    return line\n\n\ndef split_conditional(line: str, calculated_results: dict, cell_override: str):\n    raw_conditional, raw_expressions = line.split(\":\")\n    expr_deque = deque(raw_expressions.split(\";\"))  # handle multiple lines in cond\n    try:\n        cond_type, condition = raw_conditional.strip().split(\" \", 1)\n    except:\n        cond_type = \"else\"\n        condition = \"\"\n    cond_type = cond_type.strip().lstrip()\n    condition = condition.strip().lstrip()\n    try:\n        cond = expr_parser(condition)\n    except pp.ParseException:\n        cond = deque([condition])\n\n    expr_acc = deque([])\n    for line in expr_deque:\n        categorized = categorize_line(\n            line, calculated_results, cell_override=cell_override\n        )\n        expr_acc.append(categorized)\n\n    return (\n        cond,\n        cond_type,\n        expr_acc,\n        condition,\n        raw_expressions,\n    )\n\n\ndef test_for_parameter_line(line: str) -> bool:\n    \"\"\"\n    Returns True if `line` appears to be a line to simply declare a\n    parameter (e.g. \"a = 34\") instead of an actual calculation.\n    \"\"\"\n
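    # Illustrative examples of this heuristic: \"a = 34\" -> True; \"a\" -> True\n    # (display a previously assigned variable); \"a = (3 + 4)\" -> True (right side\n    # blocked by outer parentheses); \"b = 5 * 2\" -> False (an actual calculation).\n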
\"a = 34\") instead of an actual calculation.\n \"\"\"\n # Fast Tests\n if not line.strip(): # Blank lines\n return False\n elif len(line.strip().split()) == 1: # Outputing variable names\n return True\n elif \"=\" not in line or \"if \" in line or \":\" in line: # conditional lines\n return False\n\n # Exploratory Tests\n _, right_side = line.split(\"=\", 1)\n right_side = right_side.replace(\" \", \"\")\n\n if (right_side.find(\"(\") == 0) and (\n right_side.find(\")\") == len(right_side) - 1\n ): # Blocked by parentheses\n return True\n\n try:\n right_side_deque = expr_parser(right_side)\n except pp.ParseException:\n right_side_deque = deque([right_side])\n\n if len(right_side_deque) == 1:\n return True\n elif test_for_unary(right_side_deque):\n return True\n else:\n return False\n\n\ndef test_for_parameter_cell(raw_python_source: str) -> bool:\n \"\"\"\n Returns True if the text, \"# Parameters\" or \"#Parameters\" is the line\n of 'row_python_source'. False, otherwise.\n \"\"\"\n first_element = raw_python_source.split(\"\\n\")[0]\n if \"#\" in first_element and \"parameter\" in first_element.lower():\n return True\n return False\n\n\ndef test_for_long_cell(raw_python_source: str) -> bool:\n \"\"\"\n Returns True if the text \"# Long\" is in the first line of\n `raw_python_source`. False otherwise.\n \"\"\"\n first_element = raw_python_source.split(\"\\n\")[0]\n if \"#\" in first_element and \"long\" in first_element.lower():\n return True\n return False\n\n\ndef test_for_short_cell(raw_python_source: str) -> bool:\n \"\"\"\n Returns True if the text \"# Long\" is in the first line of\n `raw_python_source`. False otherwise.\n \"\"\"\n first_element = raw_python_source.split(\"\\n\")[0]\n if \"#\" in first_element and \"short\" in first_element.lower():\n return True\n return False\n\n\ndef test_for_symbolic_cell(raw_python_source: str) -> bool:\n \"\"\"\n Returns True if the text \"# Long\" is in the first line of\n `raw_python_source`. 
def test_for_symbolic_cell(raw_python_source: str) -> bool:\n    \"\"\"\n    Returns True if the text \"# Symbolic\" is in the first line of\n    `raw_python_source`. False otherwise.\n    \"\"\"\n    first_element = raw_python_source.split(\"\\n\")[0]\n    if \"#\" in first_element and \"symbolic\" in first_element.lower():\n        return True\n    return False\n\n\ndef test_for_blank_line(source: str) -> bool:\n    \"\"\"\n    Returns True if 'source' is effectively a blank line,\n    either \"\\n\", \" \", or \"\", or any combination thereof.\n    Returns False, otherwise.\n    \"\"\"\n    return not bool(source.strip())\n\n\ndef test_for_conditional_line(source: str) -> bool:\n    \"\"\"\n    Returns True if 'source' appears to be a conditional expression.\n    \"\"\"\n    return \":\" in source and (\"if\" in source or \"else\" in source)\n\n\ndef test_for_intertext_line(source: str) -> bool:\n    \"\"\"\n    Returns True if 'source' appears to be an intertext line\n    \"\"\"\n    return source.startswith(\"##\")\n\n\ndef test_for_numeric_line(\n    d: deque,\n    # func_deque: bool = False\n) -> bool:\n    \"\"\"\n    Returns True if 'd' appears to be a calculation\n    consisting entirely of numerals, operators, and functions.\n    In other words, the calculation has no \"variables\" in it,\n    whatsoever.\n    \"\"\"\n    bool_acc = []\n    func_flag = False\n    if get_function_name(d):\n        func_flag = True\n        # bool_acc.append((item, True))\n    for item in d:\n        # if func_deque:\n        if func_flag:\n            func_flag = False\n            bool_acc.append(True)\n            continue\n        if is_number(item):\n            bool_acc.append(True)\n        elif test_for_py_operator(item):\n            bool_acc.append(True)\n        elif (\n            item == \"/\" or item == \"//\"\n        ):  # Not tested in test_for_py_operator, for reasons\n            bool_acc.append(True)\n        elif item == \",\":  # Numbers separated with commas: ok\n            bool_acc.append(True)\n        elif isinstance(item, deque):\n            if get_function_name(item):\n                bool_acc.append(True)\n                bool_acc.append(\n                    test_for_numeric_line(\n                        d=item,\n                        # func_deque=True\n                    )\n                )\n            else:\n                bool_acc.append(test_for_numeric_line(d=item))\n        else:\n            bool_acc.append(False)\n    return all(bool_acc)\n\n\ndef toggle_scientific_notation(\n    use_scientific_notation: bool, cell_notation: Optional[bool]\n) -> bool:\n    \"\"\"\n    Returns a bool representing whether or not scientific notation should be used,\n    based on whether it has been turned on in global_config and whether it has been\n    toggled in the cell overrides.\n\n    In general, the cell override toggles the reverse of the global_config.\n    \"\"\"\n    if not cell_notation:\n        return use_scientific_notation\n    else:\n        return not use_scientific_notation\n\n\ndef test_for_single_dict(source: str, calc_results: dict) -> bool:\n    \"\"\"\n    Returns True if 'source' is a str representing a variable name\n    within 'calc_results' whose value itself is a single-level\n    dictionary of keyword values.\n    \"\"\"\n    gotten = calc_results.get(source, \"\")\n    return isinstance(gotten, dict)\n\n\ndef test_for_scientific_float(elem: str) -> bool:\n    \"\"\"\n    Returns True if 'elem' is a str representation of a float\n    in scientific notation\n    \"\"\"\n    if isinstance(elem, str) and \"e\" in elem.lower():\n        left, right = elem.lower().split(\"e\", 1)\n        if (\n            left.replace(\"-\", \"\").replace(\"+\", \"\").replace(\".\", \"\").isnumeric()\n            and right.replace(\"-\", \"\").replace(\"+\", \"\").replace(\".\", \"\").isnumeric()\n        ):\n            return True\n    return False\n\n\ndef split_parameter_line(line: str, calculated_results: dict) -> deque:\n    \"\"\"\n    Return 'line' as a deque that represents the line as:\n    deque([param_name, \"=\", param_value])\n    \"\"\"\n    param = line.replace(\" \", \"\").split(\"=\", 1)[0]\n    param_line = deque([param, \"=\", calculated_results[param]])\n    return param_line\n\n\n
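# Illustrative note: given the line \"a = 34\" with calculated_results {\"a\": 34},\n# split_parameter_line() above returns deque([\"a\", \"=\", 34]).\n\n\n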
def format_strings(string: str, comment: bool, **config_options) -> str:\n    \"\"\"\n    Returns 'string' appropriately formatted to display in a latex\n    math environment.\n    \"\"\"\n    if not string:\n        return \"\"\n    text_env = \"\"\n    end_env = \"\"\n    l_par = \"\"\n    r_par = \"\"\n    if comment:\n        l_par = \"(\"\n        r_par = \")\"\n        text_env = \"\\\\;\\\\textrm{\"\n        end_env = \"}\"\n    else:\n        l_par = \"\"\n        r_par = \"\"\n        text_env = \"\\\\textrm{\"\n        end_env = \"}\"\n\n    return \"\".join([text_env, l_par, string.strip().rstrip(), r_par, end_env])\n\n\nclass ConditionalEvaluator:\n    def __init__(self):\n        self.prev_cond_type = \"\"\n        self.prev_result = False\n\n    def __call__(\n        self,\n        conditional: deque,\n        conditional_type: str,\n        raw_conditional: str,\n        calc_results: dict,\n        **config_options,\n    ) -> deque:\n        if conditional_type == \"if\":  # Reset\n            self.prev_cond_type = \"\"\n            self.prev_result = False\n        if conditional_type != \"else\":\n            result = eval_conditional(raw_conditional, **calc_results)\n        else:\n            result = True\n        if (\n            result == True\n            and self.check_prev_cond_type(conditional_type)\n            and not self.prev_result\n        ):\n            l_par = \"\\\\left(\"\n            r_par = \"\\\\right)\"\n            if conditional_type != \"else\":\n                symbolic_portion = swap_symbolic_calcs(\n                    conditional, calc_results, **config_options\n                )\n                numeric_portion = swap_numeric_calcs(\n                    conditional, calc_results, **config_options\n                )\n                resulting_latex = (\n                    symbolic_portion\n                    + deque([\"\\\\rightarrow\"])\n                    + deque([l_par])\n                    + numeric_portion\n                    + deque([r_par])\n                )\n            else:\n                numeric_portion = swap_numeric_calcs(\n                    conditional, calc_results, **config_options\n                )\n                resulting_latex = numeric_portion\n            self.prev_cond_type = conditional_type\n            self.prev_result = result\n            return resulting_latex\n        else:\n            self.prev_cond_type = conditional_type\n            self.prev_result = result\n            return deque([])\n\n    def check_prev_cond_type(self, cond_type: str) -> bool:\n        \"\"\"\n        Returns True if cond_type is a legal conditional type to\n        follow self.prev_cond_type. Returns False otherwise.\n        e.g. cond_type = \"elif\", self.prev_cond_type = \"if\" -> True\n        e.g. 
cond_type = \"if\", self.prev_cond_type = \"elif\" -> False\n \"\"\"\n prev = self.prev_cond_type\n current = cond_type\n if prev == \"else\":\n return False\n elif prev == \"elif\" and current == \"if\":\n return False\n return True\n\n\nswap_conditional = (\n ConditionalEvaluator()\n) # Instantiate the callable helper class at \"Cell\" level scope\n\n\ndef swap_calculation(calculation: deque, calc_results: dict, **config_options) -> tuple:\n \"\"\"Returns the python code elements in the deque converted into\n latex code elements in the deque\"\"\"\n symbolic_portion = swap_symbolic_calcs(calculation, calc_results, **config_options)\n calc_drop_decl = deque(list(calculation)[1:]) # Drop the variable declaration\n numeric_portion = swap_numeric_calcs(calc_drop_decl, calc_results, **config_options)\n return (symbolic_portion, numeric_portion)\n\n\ndef swap_symbolic_calcs(\n calculation: deque, calc_results: dict, **config_options\n) -> deque:\n # remove calc_results function parameter\n symbolic_expression = copy.copy(calculation)\n functions_on_symbolic_expressions = [\n insert_parentheses,\n swap_math_funcs,\n swap_superscripts,\n swap_chained_fracs,\n swap_frac_divs,\n swap_py_operators,\n swap_comparison_ops,\n swap_for_greek,\n swap_prime_notation,\n swap_long_var_strs,\n extend_subscripts,\n swap_superscripts,\n flatten_deque,\n ]\n for function in functions_on_symbolic_expressions:\n # breakpoint()\n if function is swap_math_funcs:\n symbolic_expression = function(symbolic_expression, calc_results)\n elif (\n function is extend_subscripts\n and not config_options[\"underscore_subscripts\"]\n ):\n symbolic_expression = replace_underscores(\n symbolic_expression, **config_options\n )\n else:\n symbolic_expression = function(symbolic_expression, **config_options)\n return symbolic_expression\n\n\ndef swap_numeric_calcs(\n calculation: deque, calc_results: dict, **config_options\n) -> deque:\n numeric_expression = copy.copy(calculation)\n functions_on_numeric_expressions = [\n insert_parentheses,\n swap_math_funcs,\n swap_chained_fracs,\n swap_frac_divs,\n swap_py_operators,\n swap_comparison_ops,\n swap_values,\n swap_for_greek,\n swap_prime_notation,\n swap_superscripts,\n extend_subscripts,\n flatten_deque,\n ]\n for function in functions_on_numeric_expressions:\n if function is swap_values or function is swap_math_funcs:\n numeric_expression = function(\n numeric_expression, calc_results, **config_options\n )\n elif (\n function is extend_subscripts\n and not config_options[\"underscore_subscripts\"]\n ):\n numeric_expression = replace_underscores(\n numeric_expression, **config_options\n )\n else:\n numeric_expression = function(numeric_expression, **config_options)\n return numeric_expression\n\n\ndef swap_integrals(d: deque, calc_results: dict, **config_options) -> deque:\n \"\"\"\n Returns 'calculation' with any function named \"quad\" or \"integrate\"\n rendered as an integral.\n \"\"\"\n swapped_deque = deque([])\n if \"integrate\" == d[0] or \"quad\" == d[0]:\n args_deque = d[1]\n function_name = args_deque[0]\n function = dict_get(calc_results, function_name)\n function_source = (\n inspect.getsource(function).split(\"\\n\")[1].replace(\"return\", \"\")\n )\n d_var = (\n str(inspect.signature(function))\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n .replace(\" \", \"\")\n .split(\":\")[0]\n )\n source_deque = expr_parser(function_source)\n a = args_deque[2]\n b = args_deque[4]\n swapped_deque += deque([\"\\\\int_{\", a, \"}\", \"^\", \"{\", b, \"}\"])\n 
swapped_deque.append(source_deque)\n swapped_deque.append(f\"\\\\; d{d_var}\")\n return swapped_deque\n else:\n return d\n\n\ndef swap_log_func(d: deque, calc_results: dict, **config_options) -> deque:\n \"\"\"\n Returns a new deque representing 'd' but with any log functions swapped\n out for the appropriate Latex equivalent.\n \"\"\"\n # Checks to figure out where things are and where they go\n swapped_deque = deque([])\n base = \"\"\n has_deque = isinstance(d[1], deque)\n has_nested_deque = len(d) > 2 and isinstance(d[2], deque) and d[0] == \"\\\\left(\"\n log_func = d[0] if d[0] != \"\\\\left(\" else d[1]\n base = \"\"\n has_nested_lpar = d[0] == \"\\\\left(\"\n has_rpar = d[-1] == \"\\\\right)\"\n has_single_lpar = d[1] == \"\\\\left(\"\n\n # For specialized functions\n if log_func in [\"log10\", \"log2\"]:\n base = log_func.replace(\"log\", \"\")\n\n if has_deque: # Arithmetic expression as argument in sub-deque\n sub_deque = d[1]\n elif has_nested_deque: # Nested function in sub-deque\n sub_deque = d[2]\n\n if has_deque or has_nested_deque:\n if \",\" in sub_deque: # Log base argument provided\n base = sub_deque[-2] # Last arg in d before \"\\\\right)\"\n operand = swap_math_funcs(\n deque(list(sub_deque)[:-3] + [\"\\\\right)\"]), calc_results\n ) # Operand is everything before the base argument\n else:\n # No Log base argument, recurse everything in the sub-deque\n operand = swap_math_funcs(deque([sub_deque]), calc_results)\n else:\n operand = d[2] # swap_math_funcs(d, calc_results)\n\n if base == \"e\":\n base = \"\"\n if isinstance(base, deque):\n raise ValueError(\n \"Cannot use an expression as the log base in handcalcs.\"\n \" Try assigning the base to a variable first.\"\n )\n base = dict_get(calc_results, base)\n if base:\n log_func = \"\\\\log_\"\n else:\n log_func = \"\\\\ln\"\n\n swapped_deque.append(log_func + str(base))\n if has_single_lpar:\n swapped_deque.append(\"\\\\left(\")\n swapped_deque.append(operand)\n\n if has_nested_lpar:\n swapped_deque.appendleft(\"\\\\left(\")\n if has_rpar:\n swapped_deque.append(\"\\\\right)\")\n\n return swapped_deque\n\n\ndef swap_floor_ceil(\n d: deque, func_name: str, calc_results: dict, **config_options\n) -> deque:\n \"\"\"\n Return a deque representing 'd' but with the functions floor(...)\n and ceil(...) 
swapped out for floor and ceiling Latex brackets.\n \"\"\"\n lpar = f\"\\\\left \\\\l{func_name}\"\n rpar = f\"\\\\right \\\\r{func_name}\"\n swapped_deque = deque([])\n peekable_deque = more_itertools.peekable(d)\n for item in peekable_deque:\n next_item = peekable_deque.peek(False)\n if isinstance(item, deque):\n new_item = swap_math_funcs(item, calc_results)\n swapped_deque.append(new_item)\n elif item == func_name and isinstance(next_item, deque):\n next_item.popleft()\n next_item.appendleft(lpar)\n next_item.pop()\n next_item.append(rpar)\n else:\n swapped_deque.append(item)\n return swapped_deque\n\n\ndef flatten_deque(d: deque, **config_options) -> deque:\n new_deque = deque([])\n for item in flatten(d):\n new_deque.append(item)\n return new_deque\n\n\ndef flatten(items: Any, omit_parentheses: bool = False) -> deque:\n \"\"\"Returns elements from a deque and flattens elements from sub-deques.\n Inserts latex parentheses ( '\\\\left(' and '\\\\right)' ) where sub-deques\n used to exists, except if the reason for the sub-deque was to encapsulate\n either a fraction or an integral (then no parentheses).\n \"\"\"\n if isinstance(items, deque):\n for item in items:\n yield from flatten(item) # recursion!\n else:\n yield items\n\n\ndef eval_conditional(conditional_str: str, **kwargs) -> str:\n \"\"\"\n Evals the python code statement, 'conditional_str', based on the variables passed in\n as an unpacked dict as kwargs. The first line allows the dict values to be added to\n locals that can be drawn upon to evaluate the conditional_str. Returns bool.\n \"\"\"\n # From Thomas Holder on SO:\n # https://stackoverflow.com/questions/1897623/\n # unpacking-a-passed-dictionary-into-the-functions-name-space-in-python\n exec(\",\".join(kwargs) + \", = kwargs.values()\")\n try:\n # It would be good to sanitize the code coming in on 'conditional_str'\n # Should this code be forced into using only boolean operators?\n # Do not need to cross this bridge, yet.\n return eval(conditional_str)\n except SyntaxError:\n return conditional_str\n\n\ndef expr_parser(line: str) -> list:\n import sys\n\n sys.setrecursionlimit(3000)\n pp.ParserElement.enablePackrat()\n\n variable = pp.Word(pp.alphanums + \"_.\")\n numbers = pp.pyparsing_common.fnumber.setParseAction(\"\".join)\n imag = pp.Literal(\"j\")\n plusminus = pp.oneOf(\"+ -\")\n imag_num = pp.Combine(numbers + imag)\n comp_num = pp.Combine(numbers + plusminus + numbers + imag)\n complex_number = comp_num | imag_num\n all_nums = complex_number | numbers\n\n lpar = pp.Literal(\"(\").suppress()\n rpar = pp.Literal(\")\").suppress()\n functor = variable + pp.ZeroOrMore(\".\")\n\n expr = pp.Forward()\n func = pp.Group(functor + lpar + pp.Optional(pp.delimitedList(expr)) + rpar)\n # operand = func | numbers | variable .\n operand = func | all_nums | variable\n\n expop = pp.Literal(\"**\")\n signop = pp.oneOf(\"+ - ~\")\n arithop = pp.oneOf(\"= + - * / // % , < > >= <= == !=\")\n\n expr <<= pp.infixNotation(\n operand,\n [\n (expop, 2, pp.opAssoc.RIGHT),\n (signop, 1, pp.opAssoc.RIGHT),\n (arithop, 2, pp.opAssoc.LEFT),\n ],\n )\n\n parsed = list_to_deque(\n more_itertools.collapse(expr.parseString(line).asList(), levels=1)\n )\n return parsed\n\n\n# def convert_to_number(x: str):\n# x = \"\".join(x)\n# try:\n# return int(x)\n# except ValueError:\n# try:\n# return float(x)\n# except ValueError:\n# return x\n\n\ndef list_to_deque(los: List[str]) -> deque:\n \"\"\"\n Return `los` converted into a deque.\n \"\"\"\n acc = deque([])\n for s in los:\n if isinstance(s, 
list):\n acc.append(list_to_deque(s))\n else:\n acc.append(s)\n return acc\n\n\ndef extend_subscripts(pycode_as_deque: deque, **config_options) -> deque:\n \"\"\"\n For variables named with a subscript, e.g. V_c, this function ensures that any\n more than one subscript, e.g. s_ze, is included in the latex subscript notation.\n For any item in 'pycode_as_deque' that has more than one character in the subscript,\n e.g. s_ze, then it will be converted to s_{ze}. Also handles nested subscripts.\n \"\"\"\n swapped_deque = deque([])\n for item in pycode_as_deque:\n discount = 0 # hack to prevent excess braces from swap_long_var_str\n if isinstance(item, deque):\n new_item = extend_subscripts(item) # recursion!\n swapped_deque.append(new_item)\n elif isinstance(item, str) and \"_\" in item and not \"\\\\int\" in item:\n if \"\\\\mathrm{\" in item:\n discount = 1\n new_item = \"\"\n for char in item:\n if char == \"_\":\n new_item += char\n new_item += \"{\"\n else:\n new_item += char\n num_braces = new_item.count(\"{\") - discount\n\n new_item += \"}\" * num_braces\n swapped_deque.append(new_item)\n else:\n swapped_deque.append(item)\n return swapped_deque\n\n\ndef replace_underscores(pycode_as_deque: deque, **config_options) -> deque:\n \"\"\"\n Returns 'pycode_as_deque' with underscores replaced with spaces.\n Used when global_config['underscore_subscripts'] == False\n \"\"\"\n swapped_deque = deque([])\n for item in pycode_as_deque:\n if isinstance(item, deque):\n new_item = replace_underscores(item)\n swapped_deque.append(new_item)\n elif isinstance(item, str):\n new_item = item.replace(\"_\", \"\\\\ \")\n swapped_deque.append(new_item)\n else:\n swapped_deque.append(item)\n return swapped_deque\n\n\ndef swap_chained_fracs(d: deque, **config_options) -> deque:\n \"\"\"\n Swaps out the division symbol, \"/\", with a Latex fraction.\n The numerator is the symbol before the \"/\" and the denominator follows.\n If either is a string, then that item alone is in the fraction.\n If either is a deque, then all the items in the deque are in that part of the fraction.\n\n If a \"chained division\" is encountered, e.g. 4 / 2 / 2, these are rendered as\n fractions that retain the original order of operations meaning.\n\n Returns a deque.\n \"\"\"\n a = \"{\"\n b = \"}\"\n swapped_deque = deque([])\n ops = \"\\\\frac{1}\"\n cdot = \"\\\\cdot\"\n past_first_frac = False\n close_bracket_token = False\n for item in d:\n if isinstance(item, deque):\n swapped_deque.append(swap_chained_fracs(item)) # recursion!\n\n elif item == \"/\" and not past_first_frac:\n past_first_frac = True\n swapped_deque.append(item)\n continue\n\n elif item == \"/\" and past_first_frac:\n swapped_deque.append(cdot)\n swapped_deque.append(ops)\n swapped_deque.append(a)\n close_bracket_token = True\n continue\n\n elif test_for_py_operator(item) and past_first_frac:\n past_first_frac = False\n swapped_deque.append(item)\n\n else:\n swapped_deque.append(item)\n\n if close_bracket_token:\n swapped_deque.append(b)\n close_bracket_token = False\n\n return swapped_deque\n\n\ndef test_for_py_operator(item: str):\n \"\"\"\n Returns True if `item` represents a str that can be used as\n a Python arithmetic or binary operator. 
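e.g. \"**\" -> True, \"sin\" -> False. 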
Return False otherwise.\n\n Python arithmetic operators:\n +, -, *, %, **\n (note `/`, and `//` is not considered b/c they will be\n swapped out as fractions)\n\n Python binary operators:\n >, <, =\n \"\"\"\n py_ops = [\"+\", \"-\", \"*\", \"%\", \"//\", \"**\"]\n for op in py_ops:\n if op == str(item):\n return True\n\n bin_ops = \"<>=\"\n for op in bin_ops:\n if op in str(item):\n return True\n\n return False\n\n\ndef swap_frac_divs(code: deque, **config_options) -> deque:\n \"\"\"\n Swaps out the division symbol, \"/\", with a Latex fraction.\n The numerator is the symbol before the \"/\" and the denominator follows.\n If either is a string, then that item alone is in the fraction.\n If either is a deque, then all the items in the deque are in that part of the fraction.\n Returns a deque.\n \"\"\"\n swapped_deque = deque([])\n length = len(code)\n a = \"{\"\n b = \"}\"\n ops = \"\\\\frac\"\n close_bracket_token = 0\n for index, item in enumerate(code):\n next_idx = min(index + 1, length - 1)\n if code[next_idx] == \"/\" and isinstance(item, deque):\n new_item = f\"{ops}{a}\"\n swapped_deque.append(new_item)\n swapped_deque.append(swap_frac_divs(item, **config_options)) # recursion!\n elif code[next_idx] == \"/\" and not isinstance(item, deque):\n new_item = f\"{ops}{a}\"\n swapped_deque.append(new_item)\n swapped_deque.append(item)\n elif item == \"/\":\n swapped_deque.append(f\"{b}{a}\")\n close_bracket_token += 1\n elif close_bracket_token:\n if isinstance(item, deque):\n swapped_deque.append(\n swap_frac_divs(item, **config_options)\n ) # recursion!\n else:\n swapped_deque.append(item)\n new_item = f\"{b}\" * close_bracket_token\n close_bracket_token = 0\n swapped_deque.append(new_item)\n elif isinstance(item, deque):\n new_item = swap_frac_divs(item, **config_options) # recursion!\n swapped_deque.append(new_item)\n else:\n swapped_deque.append(item)\n return swapped_deque\n\n\ndef swap_math_funcs(\n pycode_as_deque: deque, calc_results: dict, **config_options\n) -> deque:\n \"\"\"\n Returns a deque representing 'pycode_as_deque' but with appropriate\n parentheses inserted.\n \"\"\"\n a = \"{\"\n b = \"}\"\n swapped_deque = deque([])\n for item in pycode_as_deque:\n if isinstance(item, deque):\n possible_func = not test_for_typ_arithmetic(item)\n poss_func_name = get_function_name(item)\n func_name_match = get_func_latex(poss_func_name)\n if poss_func_name != func_name_match:\n item = swap_func_name(item, poss_func_name)\n if poss_func_name == \"sqrt\":\n item = insert_func_braces(item)\n new_item = swap_math_funcs(item, calc_results)\n swapped_deque.append(new_item)\n elif poss_func_name == func_name_match:\n # Begin checking for specialized function names\n if poss_func_name == \"quad\":\n new_item = swap_integrals(item, calc_results)\n swapped_deque.append(new_item)\n elif \"log\" in poss_func_name:\n new_item = swap_log_func(item, calc_results)\n swapped_deque.append(new_item)\n elif poss_func_name == \"ceil\" or poss_func_name == \"floor\":\n new_item = swap_floor_ceil(item, poss_func_name, calc_results)\n swapped_deque.append(new_item)\n #\n # elif possible_func and poss_func_name:\n # elif possible_func:\n elif possible_func:\n ops = \"\\\\operatorname\"\n new_func = f\"{ops}{a}{poss_func_name}{b}\"\n item = swap_func_name(item, poss_func_name, new_func)\n if possible_func:\n item = insert_func_braces(item)\n new_item = swap_math_funcs(item, calc_results)\n swapped_deque.append(new_item)\n\n else:\n swapped_deque.append(swap_math_funcs(item, calc_results))\n else:\n 
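# non-deque items are plain tokens (operators, variable names); pass them through unchanged\n            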
swapped_deque.append(item)\n return swapped_deque\n\n\ndef get_func_latex(func: str, **config_options) -> str:\n \"\"\"\n Returns the Latex equivalent of the function name, 'func'.\n If a match is not found then 'func' is returned.\n \"\"\"\n latex_math_funcs = {\n \"sin\": \"\\\\sin\",\n \"cos\": \"\\\\cos\",\n \"tan\": \"\\\\tan\",\n \"sqrt\": \"\\\\sqrt\",\n \"exp\": \"\\\\exp\",\n \"sinh\": \"\\\\sinh\",\n \"tanh\": \"\\\\tanh\",\n \"cosh\": \"\\\\cosh\",\n \"asin\": \"\\\\arcsin\",\n \"acos\": \"\\\\arccos\",\n \"atan\": \"\\\\arctan\",\n \"atan2\": \"\\\\arctan\",\n \"asinh\": \"\\\\arcsinh\",\n \"acosh\": \"\\\\arccosh\",\n \"atanh\": \"\\\\arctanh\",\n \"sum\": \"\\\\Sigma\",\n }\n return dict_get(latex_math_funcs, func)\n\n\ndef insert_func_braces(d: deque, **config_options) -> deque:\n \"\"\"\n Returns a deque representing 'd' with appropriate latex function\n braces inserted.\n 'd' represents a deque representing a function and its parameters\n having already been tested by 'get_function_name(...)'\n \"\"\"\n a = \"{\"\n b = \"}\"\n swapped_deque = deque([])\n d_len = len(d)\n last_idx = d_len - 1\n if last_idx == 1: # Special case, func is sqrt or other non-parenth func\n swapped_deque.append(d[0])\n swapped_deque.append(a)\n swapped_deque.append(d[1])\n swapped_deque.append(b)\n elif (\n last_idx == 3 and d[0] == \"\\\\left(\" and d[last_idx] == \"\\\\right)\"\n ): # Special case, func is inside another func with parenth\n swapped_deque.append(a)\n swapped_deque += d\n swapped_deque.append(b)\n else:\n for idx, elem in enumerate(d):\n if idx == 1: # func name is 0, brace at 1\n swapped_deque.append(a)\n swapped_deque.append(elem)\n elif idx == last_idx: # brace at end\n swapped_deque.append(elem)\n swapped_deque.append(b)\n else:\n swapped_deque.append(elem)\n return swapped_deque\n\n\ndef swap_func_name(d: deque, old: str, new: str = \"\", **config_options) -> deque:\n \"\"\"\n Returns 'd' with the function name swapped out\n \"\"\"\n swapped_deque = deque([])\n for elem in d:\n if elem == old:\n if new:\n swapped_deque.append(new)\n else:\n swapped_func = get_func_latex(elem)\n swapped_deque.append(swapped_func)\n else:\n swapped_deque.append(elem)\n return swapped_deque\n\n\ndef swap_py_operators(pycode_as_deque: deque, **config_options) -> deque:\n \"\"\"\n Swaps out Python mathematical operators that do not exist in Latex.\n Specifically, swaps \"*\", \"**\", and \"%\" for \"\\\\cdot\", \"^\", and \"\\\\bmod\",\n respectively.\n \"\"\"\n swapped_deque = deque([])\n for item in pycode_as_deque:\n if type(item) is deque:\n new_item = swap_py_operators(item) # recursion!\n swapped_deque.append(new_item)\n else:\n if item == \"*\":\n swapped_deque.append(\"\\\\cdot\")\n elif item == \"%\":\n swapped_deque.append(\"\\\\bmod\")\n elif item == \",\":\n swapped_deque.append(\",\\\\ \")\n else:\n swapped_deque.append(item)\n return swapped_deque\n\n\ndef swap_scientific_notation_str(item: str) -> str:\n \"\"\"\n Returns a deque representing 'line' with any python\n float elements in the deque\n that are in scientific notation \"e\" format converted into a Latex\n scientific notation.\n \"\"\"\n b = \"}\"\n components = []\n for component in item.split(\" \"):\n if \"e+\" in component:\n new_component = component.replace(\"e+0\", \"e+\").replace(\n \"e+\", \" \\\\times 10 ^ {\"\n )\n components.append(new_component + b)\n elif \"e-\" in component:\n new_component = component.replace(\"e-0\", \"e-\").replace(\n \"e-\", \" \\\\times 10 ^ {-\"\n )\n components.append(new_component + 
b)\n else:\n components.append(component)\n new_item = \"\\\\ \".join(components)\n return new_item\n\n\ndef swap_scientific_notation_float(\n line: deque, precision: int, **config_options\n) -> deque:\n \"\"\"\n Returns a deque representing 'pycode_as_deque' with any python floats that\n will get \"cut-off\" by the 'precision' arg when they are rounded as being\n rendered as strings in python's \"e format\" scientific notation.\n\n A float is \"cut-off\" by 'precision' when it's number of significant digits will\n be less than those required by precision.\n\n e.g. elem = 0.001353 with precision=3 will round to 0.001, with only one\n significant digit (1 < 3). Therefore this float is \"cut off\" and will be\n formatted instead as \"1.353e-3\"\n\n elem = 0.1353 with precision=3 will round to 0.135 with three significant digits\n (3 == 3). Therefore this float will not be formatted.\n \"\"\"\n swapped_deque = deque([])\n for item in line:\n if test_for_float(item, precision):\n new_item = (\n \"{:.{precision}e}\".format(item, precision=precision)\n .replace(\"e-0\", \"e-\")\n .replace(\"e+0\", \"e+\")\n )\n swapped_deque.append(new_item)\n else:\n swapped_deque.append(item)\n\n return swapped_deque\n\n\n# def swap_scientific_notation_complex(line: deque, precision: int, **config_options) -> deque:\n# swapped_deque = deque([])\n# for item in line:\n# if isinstance(item, complex) and test_for_small_complex(item, precision):\n# real = swap_scientific_notation_float([item.real], precision)\n# imag = swap_scientific_notation_float([item.imag], precision)\n# swapped_real = list(swap_scientific_notation_str(real, precision=precision))\n# swapped_imag = list(swap_scientific_notation_str(imag, precision=precision))\n\n# ops = \"\" if item.imag < 0 else \"+\"\n# real_str = (\n# f\"{swapped_real[0]}\"\n# if len(swapped_real) == 1\n# else \" \".join(swapped_real)\n# )\n# imag_str = (\n# f\"{swapped_imag[0]}\"\n# if len(swapped_imag) == 1\n# else \" \".join(swapped_imag)\n# )\n# new_complex_str = f\"( {real_str} {ops} {imag_str}j )\"\n# swapped_deque.append(new_complex_str)\n# else:\n# swapped_deque.append(item)\n# return swapped_deque\n\n\ndef swap_comparison_ops(pycode_as_deque: deque, **config_options) -> deque:\n \"\"\"\n Returns a deque representing 'pycode_as_deque' with any python\n comparison operators, eg. 
\">\", \">=\", \"!=\", \"==\" swapped with\n their latex equivalent.\n \"\"\"\n py_ops = {\n \"<\": \"\\\\lt\",\n \">\": \"\\\\gt\",\n \"<=\": \"\\\\leq\",\n \">=\": \"\\\\geq\",\n \"==\": \"=\",\n \"!=\": \"\\\\neq\",\n }\n swapped_deque = deque([])\n for item in pycode_as_deque:\n if type(item) is deque:\n new_item = swap_comparison_ops(item)\n swapped_deque.append(new_item)\n else:\n new_item = dict_get(py_ops, item)\n swapped_deque.append(new_item)\n return swapped_deque\n\n\ndef swap_superscripts(pycode_as_deque: deque, **config_options) -> deque:\n \"\"\"\n Returns the python code deque with any exponentials swapped\n out for latex superscripts.\n \"\"\"\n pycode_with_supers = deque([])\n close_bracket_token = False\n ops = \"^\"\n a = \"{\"\n b = \"}\"\n l_par = \"\\\\left(\"\n r_par = \"\\\\right)\"\n for idx, item in enumerate(pycode_as_deque):\n next_idx = min(idx + 1, len(pycode_as_deque) - 1)\n next_item = pycode_as_deque[next_idx]\n if isinstance(item, deque): # and not close_bracket_token:\n if \"**\" == str(next_item):\n pycode_with_supers.append(l_par)\n new_item = swap_superscripts(item)\n pycode_with_supers.append(new_item)\n pycode_with_supers.append(r_par)\n else:\n new_item = swap_superscripts(item) # recursion!\n pycode_with_supers.append(new_item)\n if close_bracket_token:\n pycode_with_supers.append(b)\n close_bracket_token = False\n\n else:\n if \"**\" == str(next_item):\n pycode_with_supers.append(l_par)\n pycode_with_supers.append(item)\n pycode_with_supers.append(r_par)\n elif str(item) == \"**\":\n new_item = f\"{ops}{a}\"\n pycode_with_supers.append(new_item)\n close_bracket_token = True\n elif close_bracket_token:\n pycode_with_supers.append(item)\n pycode_with_supers.append(b)\n close_bracket_token = False\n else:\n pycode_with_supers.append(item)\n prev_item = item\n\n return pycode_with_supers\n\n\ndef swap_for_greek(pycode_as_deque: deque, **config_options) -> deque:\n \"\"\"\n Returns full line of code as deque with any Greek terms swapped in for words describing\n Greek terms, e.g. 'beta' -> 'β'\n \"\"\"\n greeks_to_exclude = config_options[\"greek_exclusions\"]\n swapped_deque = deque([])\n greek_chainmap = ChainMap(GREEK_LOWER, GREEK_UPPER)\n for item in pycode_as_deque:\n if isinstance(item, deque):\n new_item = swap_for_greek(item, **config_options)\n swapped_deque.append(new_item)\n elif \"_\" in str(item):\n components = str(item).split(\"_\")\n swapped_components = [\n dict_get(greek_chainmap, component)\n if component not in greeks_to_exclude\n else component\n for component in components\n ]\n new_item = \"_\".join(swapped_components)\n swapped_deque.append(new_item)\n elif item not in greeks_to_exclude:\n new_item = dict_get(greek_chainmap, item)\n swapped_deque.append(new_item)\n else:\n swapped_deque.append(item)\n return swapped_deque\n\n\ndef test_for_long_var_strs(elem: Any, **config_options) -> bool:\n \"\"\"\n Returns True if 'elem' is a variable string that has more than one character\n in it's \"top-level\" name (as opposed to it's subscript).\n False, otherwise.\n\n e.g. 
elem = \"Rate_annual\" -> True\n elem = \"x_rake_red\" -> False\n elem = \"AB_x_y\" -> True\n elem = \"category_x\" -> True\n elem = \"x\" -> False\n elem = \"xy\" -> True\n \"\"\"\n if not isinstance(elem, str):\n return False\n if \"\\\\\" in elem or \"{\" in elem or \"}\" in elem:\n return False\n components = elem.replace(\"'\", \"\").split(\"_\")\n if len(components) != 1:\n top_level, *_remainders = components\n if not config_options[\"underscore_subscripts\"]:\n if len(top_level) + len(_remainders) == 1:\n return False\n else:\n return True\n else:\n if len(top_level) > 1:\n return True\n else:\n return False\n if len(components[0]) == 1:\n return False\n return True\n\n\ndef swap_long_var_strs(pycode_as_deque: deque, **config_options) -> deque:\n \"\"\"\n Returns a new deque that represents 'pycode_as_deque' but\n with all long variable names \"escaped\" so that they do not\n render as italic variables but rather upright text.\n\n ***Must be just before swap_subscripts in stack.***\n \"\"\"\n swapped_deque = deque([])\n begin = \"\\\\mathrm{\"\n end = \"}\"\n for item in pycode_as_deque:\n if isinstance(item, deque):\n new_item = swap_long_var_strs(item, **config_options)\n swapped_deque.append(new_item)\n elif test_for_long_var_strs(item, **config_options) and not is_number(\n str(item)\n ):\n try:\n top_level, remainder = str(item).split(\"_\", 1)\n if config_options[\"underscore_subscripts\"]:\n new_item = begin + top_level + end + \"_\" + remainder\n else:\n new_item = begin + top_level + \"_\" + remainder + end\n swapped_deque.append(new_item)\n except:\n new_item = begin + item + end\n swapped_deque.append(new_item)\n else:\n swapped_deque.append(item)\n return swapped_deque\n\n\ndef swap_prime_notation(d: deque, **config_options) -> deque:\n \"\"\"\n Returns a deque representing 'd' with all elements\n with \"_prime\" substrings replaced with \"'\".\n \"\"\"\n swapped_deque = deque([])\n for item in d:\n if isinstance(item, deque):\n new_item = swap_prime_notation(item)\n swapped_deque.append(new_item)\n elif isinstance(item, str):\n new_item = item.replace(\"_prime\", \"'\")\n swapped_deque.append(new_item)\n else:\n swapped_deque.append(item)\n return swapped_deque\n\n\ndef swap_values(pycode_as_deque: deque, tex_results: dict, **config_options) -> deque:\n \"\"\"\n Returns a the 'pycode_as_deque' with any symbolic terms swapped out for their corresponding\n values.\n \"\"\"\n outgoing = deque([])\n for item in pycode_as_deque:\n swapped_value = \"\"\n if isinstance(item, deque):\n outgoing.append(\n swap_values(item, tex_results, **config_options)\n ) # recursion!\n else:\n swapped_value = dict_get(tex_results, item)\n if isinstance(swapped_value, str) and swapped_value != item:\n swapped_value = format_strings(\n swapped_value, comment=False, **config_options\n )\n outgoing.append(swapped_value)\n return outgoing\n\n\ndef test_for_unary(d: deque) -> bool:\n \"\"\"\n Returns True if 'd' represents a unary expression, e.g. -1.\n False otherwise.\n \"\"\"\n ops = \"+ -\".split()\n if len(d) == 2 and d[0] in ops:\n return True\n return False\n\n\ndef test_for_typ_arithmetic(d: deque) -> bool:\n \"\"\"\n Returns True if 'd' represents a deque created to store lower-precedent\n arithmetic. 
Returns False otherwise.\n \"\"\"\n operators = \"+ - * ** / // % , < > >= <= == !=\".split()\n any_op = any(elem for elem in d if elem in operators)\n return any_op and not test_for_unary(d)\n\n\ndef get_function_name(d: deque) -> str:\n \"\"\"\n Returns the function name if 'd' represents a deque containing a function\n name (both typical case and special case).\n \"\"\"\n dummy_deque = copy.deepcopy(d)\n dummy_deque.popleft()\n if test_for_function_name(d):\n return d[0]\n elif test_for_function_name(dummy_deque):\n return dummy_deque[0]\n # elif (isinstance(d[0], str) and re.match(r\"^[A-Za-z0-9_]+$\", d[0])\n # and isinstance(d[1], deque)# and d[1][0] == \"\\\\left(\"\n # ):\n # return d[0]\n # elif (\n # d[0] == \"\\\\left(\"\n # and (isinstance(d[1], str) and re.match(r\"^[A-Za-z0-9_]+$\", d[1])\n # )\n # ):\n # return d[1]\n else:\n return \"\"\n\n\ndef test_for_function_name(d: deque) -> bool:\n \"\"\"\n Returns True if 'd' qualifies for a typical function that should have\n some form of function brackets around it.\n \"\"\"\n if (\n (len(d) == 2 or len(d) == 4 or len(d) == 3)\n and (isinstance(d[0], str) and re.match(r\"^[A-Za-z0-9_]+$\", d[0]))\n and (\n isinstance(d[1], str)\n and (re.match(r\"^[A-Za-z0-9_]+$\", d[1]) or is_number(d[1]))\n or d[1] == \"\\\\left(\"\n or d[-1] == \"\\\\right)\"\n )\n ):\n return True\n elif (\n len(d) > 1\n and isinstance(d[0], str)\n and re.match(r\"^[A-Za-z0-9_]+$\", d[0])\n and isinstance(d[1], deque)\n ):\n return True\n else:\n return False\n\n\ndef insert_unary_parentheses(d: deque) -> deque:\n \"\"\"\n Returns a deque representing 'd' with parentheses inserted\n appropriately for unary brackets\n \"\"\"\n lpar = \"\\\\left(\"\n rpar = \"\\\\right)\"\n swapped_deque = deque([])\n swapped_deque.append(lpar)\n for elem in d:\n swapped_deque.append(elem)\n swapped_deque.append(rpar)\n return swapped_deque\n\n\ndef test_for_fraction_exception(item: Any, next_item: Any) -> bool:\n \"\"\"\n Returns True if a combination 'item' and 'next_item' appear to indicate\n a fraction in the symbolic deque. False otherwise.\n\n e.g. 
item=deque([...]), next_item=\"/\" -> True\n item=\"/\", next_item=deque -> True\n False otherwise\n \"\"\"\n if isinstance(item, deque) and next_item == \"/\":\n return True\n elif item == \"/\" and isinstance(next_item, deque):\n return True\n return False\n\n\ndef insert_function_parentheses(d: deque) -> deque:\n \"\"\"\n Returns a deque representing 'd' with parentheses inserted\n appropriately for functions.\n \"\"\"\n lpar = \"\\\\left(\"\n rpar = \"\\\\right)\"\n swapped_deque = deque([])\n last = len(d) - 1\n for idx, item in enumerate(d):\n if idx == last == 1 and not isinstance(item, deque):\n swapped_deque.append(lpar)\n swapped_deque.append(item)\n swapped_deque.append(rpar)\n elif idx == 1 and isinstance(item, deque):\n new_item = copy.deepcopy(item)\n new_item.appendleft(lpar)\n new_item.append(rpar)\n swapped_deque.append(new_item)\n elif idx == 2 and isinstance(item, deque) and d[0] == \"\\\\left(\":\n new_item = copy.deepcopy(item)\n new_item.appendleft(lpar)\n new_item.append(rpar)\n swapped_deque.append(new_item)\n else:\n swapped_deque.append(item)\n return swapped_deque\n\n\ndef insert_arithmetic_parentheses(d: deque) -> deque:\n \"\"\"\n Returns a deque representing 'd' with parentheses inserted\n appropriately for arithmetical brackets.\n \"\"\"\n lpar = \"\\\\left(\"\n rpar = \"\\\\right)\"\n swapped_deque = deque([])\n last = len(d) - 1\n exp_check = False\n if last > 1:\n exp_check = d[1] == \"**\" # Don't double up parenth on exponents\n for idx, item in enumerate(d):\n if idx == 0 and not exp_check and d[idx] != lpar:\n swapped_deque.append(lpar)\n swapped_deque.append(item)\n elif idx == last and not exp_check and d[idx] != rpar:\n swapped_deque.append(item)\n swapped_deque.append(rpar)\n else:\n swapped_deque.append(item)\n return swapped_deque\n\n\ndef insert_parentheses(pycode_as_deque: deque, **config_options) -> deque:\n \"\"\"\n Returns a deque representing 'pycode_as_deque' but with appropriate\n parentheses inserted.\n \"\"\"\n swapped_deque = deque([])\n peekable_deque = more_itertools.peekable(pycode_as_deque)\n lpar = \"\\\\left(\"\n prev_item = None\n func_exclude = [\"sqrt\", \"quad\", \"integrate\"]\n skip_fraction_token = False\n for item in peekable_deque:\n next_item = peekable_deque.peek(False)\n if isinstance(item, deque):\n poss_func_name = get_function_name(item)\n typ_arithmetic = test_for_typ_arithmetic(item)\n if poss_func_name:\n if test_for_fraction_exception(item, next_item):\n skip_fraction_token = True\n if poss_func_name not in func_exclude:\n item = insert_function_parentheses(item)\n new_item = insert_parentheses(item)\n swapped_deque.append(new_item)\n\n elif (\n typ_arithmetic\n # and not prev_item == lpar\n and not skip_fraction_token\n ):\n\n if test_for_fraction_exception(item, next_item):\n\n skip_fraction_token = True\n new_item = insert_parentheses(item)\n swapped_deque.append(new_item)\n else:\n if (\n prev_item not in func_exclude\n # and not test_for_nested_deque(item)\n and next_item != \"**\"\n ): # Allow swap_superscript to handle its parenths\n item = insert_arithmetic_parentheses(item)\n\n new_item = insert_parentheses(item)\n swapped_deque.append(new_item)\n\n elif test_for_unary(item):\n item = insert_unary_parentheses(item)\n new_item = insert_parentheses(item)\n swapped_deque.append(new_item)\n else:\n if skip_fraction_token and prev_item == \"/\":\n skip_fraction_token = False\n new_item = insert_parentheses(item)\n swapped_deque.append(new_item)\n else:\n if item == \"/\":\n skip_fraction_token = True\n elif 
skip_fraction_token and prev_item == \"/\":\n skip_fraction_token = False\n swapped_deque.append(item)\n prev_item = item\n return swapped_deque\n\n\ndef test_for_nested_deque(d: deque) -> bool:\n \"\"\"\n Returns true if 'd' has a deque as its first item.\n False otherwise\n \"\"\"\n nested_deque_bool = next(isinstance(i, deque) for i in d)\n try:\n not_exponent = (\n d[0][1] != \"**\"\n ) # Nested deques are permitted if first item is raised to power\n except IndexError:\n not_exponent = True\n return nested_deque_bool and not_exponent\n\n\ndef swap_dec_sep(d: deque, dec_sep: str) -> deque:\n \"\"\"\n Returns 'd' with numerical elements with the \".\" decimal separator,\n replaced with 'dec_sep'.\n \"\"\"\n swapped_deque = deque([])\n a = \"{\"\n b = \"}\"\n if dec_sep == \".\":\n return d\n for item in d:\n if is_number(item):\n item = item.replace(\".\", f\"{a}{dec_sep}{b}\")\n swapped_deque.append(item)\n elif is_number(item.replace(\"\\\\\", \"\")):\n item = item.replace(\".\", f\"{a}{dec_sep}{b}\")\n swapped_deque.append(item)\n elif \" \" in item:\n components = deque(item.split())\n swapped_components = swap_dec_sep(components, dec_sep)\n swapped_deque.append(\" \".join(swapped_components))\n else:\n swapped_deque.append(item)\n return swapped_deque\n","repo_name":"connorferster/handcalcs","sub_path":"handcalcs/handcalcs.py","file_name":"handcalcs.py","file_ext":"py","file_size_in_byte":105592,"program_lang":"python","lang":"en","doc_type":"code","stars":5272,"dataset":"github-code","pt":"77"} +{"seq_id":"18563140591","text":"'''\n\nDescription\n\nGiven an array of N distinct elementsA[ ], find the minimum number of swaps required to sort the array.Your are required to complete the function which returns an integer denoting the minimum number of swaps, required to sort the array.\n\nInput\n\nThe first line of input contains an integer T denoting the no of test cases . Then T test cases follow . Each test case contains an integer N denoting the no of element of the array A[ ]. 
In the next line are N space separated values of the array A[ ]. (1<=T<=100; 1<=N<=100; 1<=A[]<=1000)\n\nOutput\n\nFor each test case, output on a new line an integer denoting the minimum number of swaps required to sort the array.\n\nSample Input 1\n\n2\n4\n4 3 2 1\n5\n1 5 4 3 2\n\nSample Output 1\n\n2\n2\n\n'''\n\nimport sys\n\ndef count_min_swaps():\n    n = int(sys.stdin.readline().strip())\n    a = [int(x) for x in sys.stdin.readline().strip().split(\" \")]\n\n    # selection sort: each swap places one remaining minimum, which is optimal for distinct elements\n    swaps = 0\n    for i in range(n):\n        smallest = a[i]\n        index = i\n        for j in range(i, n):\n            if a[j] < smallest:\n                smallest = a[j]\n                index = j\n        if index != i:\n            swaps += 1\n            a[index], a[i] = a[i], a[index]\n    print(swaps)\n\nif __name__ == '__main__':\n    t = int(sys.stdin.readline().strip())\n    for s in range(t):\n        count_min_swaps()","repo_name":"know-no/algorithm-homework","sub_path":"k1/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"40254125102","text":"import hashlib\nfrom Container import Container\n\n\nclass Util:\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def printContainers(containers):\n        for i in range(len(containers)):\n            print(str(i) + \"| \" + str(containers[i]))\n\n    @staticmethod\n    def takeSnapshot(containers):\n        snapshot = []\n        for i in range(len(containers)):\n            snapshot.append(containers[i].takeSnapshot())\n        return snapshot\n\n    @staticmethod\n    def loadSnapshot(snapshot):\n        containers = []\n        for i in range(len(snapshot)):\n            container = Container([])\n            container.loadSnapshot(snapshot[i])\n            containers.append(container)\n        return containers\n\n    @staticmethod\n    def takeSnapshotFingerprint(containers):\n        # hashlib requires bytes in Python 3, so encode the snapshot string first\n        return hashlib.md5(str(Util.takeSnapshot(containers)).encode()).hexdigest()\n\n    @staticmethod\n    def vectorFullArrange(endpoint):\n        arranges = []\n\n        for i in range(len(endpoint)):\n            for j in range(len(endpoint)):\n                if i == j:\n                    continue\n                arranges.append({\"from\": endpoint[i], \"to\": endpoint[j]})\n        return arranges\n\n    @staticmethod\n    def getVectorScore(vector):\n        return vector['score']\n","repo_name":"TimeBather/watersort-puzzle-resolver","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"41159202456","text":"from pathlib import Path\nfrom typing import AnyStr\n\nfrom bs4 import BeautifulSoup\nfrom parameterized import parameterized\nfrom sphinx_testing import TestApp, with_app\n\n\ndef gen_app_conf(**kwargs: dict) -> dict:\n    \"\"\"Create TestApp configuration.\"\"\"\n    kwargs[\"buildername\"] = \"html\"\n    kwargs[\"srcdir\"] = str(Path(__file__).parent / \"testdoc\")\n    kwargs[\"copy_srcdir_to_tmpdir\"] = True\n    return kwargs\n\n\ndef soup_html(app: TestApp, path: str) -> BeautifulSoup:\n    \"\"\"Build application and parse content.\"\"\"\n    app.build()\n    html: AnyStr = (app.outdir / path).read_text()\n    return BeautifulSoup(html, \"html.parser\")\n\n\n@with_app(**gen_app_conf(confoverrides={\"googlefonts_families\": [\"Roboto\"]}))\ndef test_script_tags(app: TestApp, status, warning): # noqa\n    soup = soup_html(app, \"index.html\")\n    link = [\n        e\n        for e in soup.find_all(\"link\", rel=\"stylesheet\")\n        if e[\"href\"].startswith(\"https://fonts.googleapis.com/css2\")\n    ][0][\"href\"]\n    assert link == \"https://fonts.googleapis.com/css2?family=Roboto\"\n\n\n@parameterized(\n    [\n        ([\"Roboto\"], [(\"family\", \"Roboto\")]),\n        ([\"Noto Sans JP\"], [(\"family\", 
\"Noto+Sans+JP\")]),\n ]\n)\ndef test_build_family_query(families, query):\n from sphinxcontrib.googlefonts import build_family_query\n\n assert build_family_query(families) == query\n","repo_name":"attakei/sphinxcontrib-googlefonts","sub_path":"tests/test_build.py","file_name":"test_build.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8078696055","text":"import sys\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport lib.PulseFinder as pu\n\ndef OscopePrintToCSV(csv_file):\n waveform_dict = {}\n line0_key = ''\n line1_key = ''\n with open(csv_file,\"r\") as f:\n for j,line in enumerate(csv.reader(f)):\n if j == 0: continue\n if j == 1:\n line0_key = line[0]\n line1_key = line[1]\n waveform_dict[line[0]] = []\n waveform_dict[line[1]] = []\n else:\n waveform_dict[line0_key].append(float(line[0]))\n waveform_dict[line1_key].append(float(line[1]))\n for entry in waveform_dict:\n waveform_dict[entry] = np.array(waveform_dict[entry])\n return waveform_dict\n\ndef EstimateSimpleBaseline(waveform, bl_range):\n bl_samples = waveform[bl_range[0]:bl_range[1]]\n bl_mean = np.average(bl_samples)\n bl_sigma = np.std(bl_samples)\n return bl_mean, bl_sigma\n\nif __name__ == '__main__':\n myPulseFinder = pu.PulseFinder()\n myPulseFinder.SetPulseThreshold(5) #nsigma outside baseline to define a pulse\n myPulseFinder.SetEdgeSamples(8)\n BL_RANGE_NSAMP = [0, 70]\n print(\"Let's analyze a waveform\")\n print(\"usage: main.py [waveform_filename]\")\n wavefile = sys.argv[1]\n fileNum = wavefile[:-4]\n waveform = OscopePrintToCSV(wavefile)\n mu, sigma = EstimateSimpleBaseline(waveform['Volt'],BL_RANGE_NSAMP)\n pulses = myPulseFinder.FindPulses_SimpleBaseline(waveform['second'],waveform['Volt'],mu,sigma)\n have_pulse = False\n for pulse in pulses:\n if not have_pulse:\n plt.vlines(waveform['second'][pulse['min_time_sample']]*1E9,ymin=0, ymax =pulse['peak_amplitude'], color='purple', linewidth=2,label='Pulses')\n else:\n plt.vlines(waveform['second'][pulse['min_time_sample']]*1E9,ymin=0, ymax =pulse['peak_amplitude'], color='purple', linewidth=2)\n plt.vlines(waveform['second'][pulse['max_time_sample']]*1E9,ymin=0, ymax =pulse['peak_amplitude'], color='purple', linewidth=2)\n plt.hlines(pulse['peak_amplitude'], xmin=waveform['second'][pulse['min_time_sample']]*1E9,xmax=waveform['second'][pulse['max_time_sample']]*1E9, color='purple', linewidth=2)\n plt.hlines(0, xmin=waveform['second'][pulse['min_time_sample']]*1E9,xmax=waveform['second'][pulse['max_time_sample']]*1E9, color='purple', linewidth=2)\n plt.plot(waveform['second']*1E9,waveform['Volt']- mu,label='Data (BL-Subtracted)')\n bl_min = waveform['second'][BL_RANGE_NSAMP[0]]*1E9\n bl_max = waveform['second'][BL_RANGE_NSAMP[1]]*1E9\n #plt.hlines(mu,xmin=bl_min, xmax = bl_max,color='black', label = 'Baseline mean', linewidth=2)\n plt.hlines(sigma, xmin=bl_min, xmax = bl_max,color='red',alpha=0.4, label = 'Baseline sigma',linewidth=2)\n plt.hlines(-sigma, xmin=bl_min, xmax = bl_max,color='red',alpha=0.4,linewidth=2)\n plt.legend()\n plt.xlabel(\"Time (ns)\")\n plt.ylabel(\"Voltage (V)\")\n plt.title(\"Waveform from OD PMT 902 \")#\\n (Signal to oscilloscope with 1 MOhm impedance)\")\n plt.savefig(fileNum,papertype='a0')\n #plt.show()\n for j,pulse in enumerate(pulses):\n print(\"PULSE NUMBER: \" + str(j))\n print(\"PULSE PEAK AMPLITUDE: %f\"%(pulse['peak_amplitude']))\n print(\"PULSE PEAK AMPLITUDE TIME: %f 
ns\"%(waveform[\"second\"][pulse['peak_amplitude_sample']]*1E9))\n print(\"PULSE INTEGRAL : %f V\"%(pulse['integral']))","repo_name":"pershint/ODPMTWaveformAnalysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2201554460","text":"from skbuild import setup\nimport subprocess\nimport shutil\nimport os\n\n\ndef get_version():\n\n this_dir = os.path.dirname(os.path.realpath(__file__))\n\n git_describe = subprocess.check_output(\n [\"git\", \"describe\", \"--tags\"], cwd=this_dir\n ).decode(\"utf-8\")\n\n sections = git_describe.split(\"-\")\n version = sections[0]\n\n return version\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nwith open(\"requirements.txt\") as fh:\n requirements = fh.readlines()\n\nbuild_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"_skbuild\")\nif os.path.isdir(build_dir):\n print(f\"Clean {build_dir}\")\n shutil.rmtree(build_dir)\n\nsetup(\n name=\"vpunn_cost_model\",\n version=get_version(),\n author=\"Alessandro Palla\",\n author_email=\"alessandro.palla@intel.com\",\n description=\"VPUNN cost model\",\n license=\"Apache License 2.0\",\n cmake_install_target=\"vpunn-install-bindings\",\n cmake_args=[\n \"-DVPUNN_BUILD_EXAMPLES=OFF\",\n \"-DVPUNN_BUILD_TESTS=OFF\",\n \"-DVPUNN_BUILD_SHARED_LIB=OFF\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/intel-innersource/libraries.performance.modeling.vpu.nn_cost_model\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/intel-innersource/libraries.performance.modeling.vpu.nn_cost_model/issues\",\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n package_dir={\"vpunn\": \"python\"},\n packages=[\"vpunn\"],\n entry_points={\n \"console_scripts\": [\n \"vpunn_to_json=vpunn.to_json:main\",\n \"vpunn_builder=vpunn.builder:main\",\n \"vpu_cost_model=vpunn.cost:main\",\n \"vpu_layer_cost_model=vpunn.layer:main\",\n ],\n },\n python_requires=\">=3.6\",\n install_requires=requirements,\n)\n","repo_name":"intel/npu-nn-cost-model","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"77"} +{"seq_id":"29967035132","text":"import requests\r\nimport json\r\n\r\ndef inputData(name):\r\n print('Введи %s:'%name)\r\n return input()\r\n\r\nmyemail = inputData('имя почтового ящика')\r\n\r\nlink = 'https://account.mail.ru/api/v1/user/signup'\r\ndatas = {'name':'{\"first\":\"NAME\",\"last\":\"FAMILIE\"}', 'from':'main', 'sex':'male', 'birthday':'{\"day\":24,\"month\":8,\"year\":1990}',\r\n 'context':'signup',\r\n 'browser':'{\"screen\":{\"availWidth\":\"1600\",\"availHeight\":\"860\",\"width\":\"1600\",\"height\":\"900\",\"colorDepth\":\"24\",\"pixelDepth\":\"24\",\"availLeft\":\"0\",\"availTop\":\"0\"},\"navigator\":{\"vendorSub\":\"\",\"productSub\":\"20030107\",\"vendor\":\"Google Inc.\",\"maxTouchPoints\":\"0\",\"hardwareConcurrency\":\"4\",\"cookieEnabled\":\"true\",\"appCodeName\":\"Mozilla\",\"appName\":\"Netscape\",\"appVersion\":\"5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 
Safari/537.36\",\"platform\":\"Win32\",\"product\":\"Gecko\",\"userAgent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36\",\"language\":\"ru\",\"onLine\":\"true\",\"doNotTrack\":\"inaccessible\",\"deviceMemory\":\"4\"},\"flash\":{\"version\":\"inaccessible\"}}',\r\n 'device':'{\"os\":\"\",\"os_version\":\"\",\"dtid\":\"\",\"viewType\":\"0\"}',\r\n 'login':myemail,\r\n 'domain':'mail.ru',\r\n 'password':inputData('пароль'),\r\n 'htmlencoded':'false'}\r\n\r\nmyreq = requests.Session()\r\n\r\ntext = json.loads(myreq.post(link, data = datas).text)['body']\r\n\r\nprint(myreq)\r\n\r\nurlcapcha = 'https://c.mail.ru/6?r=0.71848591092699836'\r\nmycapcha = myreq.get(urlcapcha)\r\nwith open(\"img.jpg\", 'wb') as f:\r\n f.write(mycapcha.content)\r\n\r\n\r\nnext = myreq.post('https://account.mail.ru/api/v1/user/signup/confirm', data= {'email':'%s@mail.ru'%myemail,\r\n 'from':'main',\r\n 'reg_anketa':('{\"id\":\"%s\",\"capcha\":\"%s\"}' %(text, inputData('капчу'))),\r\n 'redirect_uri':'https://e.mail.ru/messages/inbox?newreg=1&signup_b=1&sms_reg=1&features=1',\r\n 'htmlencoded':'false'})\r\n\r\nprint(next.text)\r\n#print('usaly body id :' + text)","repo_name":"Dangeres/AutoRegerMailRu","sub_path":"reger.py","file_name":"reger.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"34724483098","text":"n, m = map(int, input().split())\nboard = []\ncheck = []\nfor _ in range(n):\n board.append(input())\n\nfor i in range(n - 7):\n for j in range(m - 7):\n sub_sum = 0\n for x in range(i, i + 8):\n for y in range(j, j + 8):\n if (x + y) % 2 == 0 and board[x][y] == \"B\":\n sub_sum += 1\n if (x + y) % 2 == 1 and board[x][y] == \"W\":\n sub_sum += 1\n if sub_sum > 32:\n sub_sum = 64 - sub_sum\n check.append(sub_sum)\n\nprint(min(check))","repo_name":"hjh3229/algorithm","sub_path":"src/baekjoon/case/bj_1018.py","file_name":"bj_1018.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4968289515","text":"\"\"\"\nlevel 3:\n案例:爬取内涵吧爬虫(re)\nhttps://www.neihan-8.com/wenzi//\n正则表达式提取段子标题,url,点赞数,踩数,内容\n\n\"\"\"\nimport random\nimport time\n\nimport requests\n\nimport re\nimport redis\nimport json\n\n\ndef down(url):\n '''\n 下载制定url的页面内容\n :param url:\n :return:\n '''\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36'\n }\n response = requests.get(url, headers=headers)\n # html = response.text\n html = response.content.decode(response.apparent_encoding, 'ignore')\n return html\n\n\ndef get_text_list(url):\n html = down(url)\n # print(html)\n # 数据提取\n ls = pat1.findall(html)\n print('len:', len(ls))\n for item in ls:\n # print('item:',item)\n # 标题\n match_obj = pat2.search(item)\n if match_obj != None:\n title = match_obj.group(1)\n else:\n title = '空'\n print('title:', title)\n # url\n match_obj = pat3.search(item)\n if match_obj != None:\n url = \"https://www.neihan-8.com\" + match_obj.group(1)\n else:\n url = '空'\n print('url:', url)\n # 点赞数\n match_obj = pat4.search(item)\n if match_obj != None:\n good_nums = match_obj.group(1)\n else:\n good_nums = '空'\n print('good_nums:', good_nums)\n # 踩数\n match_obj = pat5.search(item)\n if match_obj != None:\n bad_nums = match_obj.group(1)\n else:\n bad_nums = '空'\n print('bad_nums:', bad_nums)\n # 进入详情连接\n 
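# i.e. fetch this entry's detail page and extract the joke text\n        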
get_text_detail(url)\n\n    # Next page\n    # Add a random delay first\n    time.sleep(random.random())\n    match_obj = pat7.search(html)\n    if match_obj != None:\n        next_page = match_obj.group(1)\n    else:\n        next_page = '空'\n    print('next_page:', next_page)\n    print('*:' * 66)\n    get_text_list(\"https://www.neihan-8.com\" + next_page)\n\n\ndef get_text_detail(detail_url):\n    print(\"Entering detail page\", detail_url)\n\n    # Article content\n    # Request the detail page\n    detail_html = down(detail_url)\n    # print(detail_html)\n    # Data extraction\n    match_obj = pat6.search(detail_html)\n    if match_obj != None:\n        joke_text = match_obj.group(1)\n    else:\n        joke_text = '空'\n    print('joke_text:', joke_text)\n    print('=' * 200)\n\n\n\nif __name__ == '__main__':\n    try:\n        r = redis.StrictRedis(host='localhost', port=6379)\n    except Exception as e:\n        print(e)\n    # Sample list entry,
\n #

超级灵药

\n #
  \n # 汤姆早上老是睡过头,他的老板威胁说:如果再这样就要炒他鱿鱼。  汤姆很着急,就去看医生,医生给了他一个药丸让他睡觉之前吃。  这个晚上汤姆睡得很好,一大早就醒了,悠闲
\n #
\n #
\n # \n # 属于:冷笑话\n #
\n #
0
\n #
0
\n #
54
\n #
\n #
\n # 可以在分组外描述匹配细节\n # pat1 = re.compile(r'
(.*?)
',\n # re.S | re.M)\n # 可以在分组中继续描述匹配细节\n pat1 = re.compile(r'()', re.S | re.M)\n # 标题,

超级灵药

\n # 通过标签内容获取标题\n # pat2 = re.compile(r'.*?(.*?)', re.S | re.M)\n # url,
\n #
\n # \n # 属于:冷笑话\n #
\n #
0
\n #
1
\n #
49
\n #
\n pat3 = re.compile(r'', re.S | re.M)\n # 踩数\n pat5 = re.compile(r'
', re.S | re.M)\n\n # 内容(详情页获取)\n # 详情连接=url\n # 笑话内容\n pat6 = re.compile(r'
(.*?)
下一页\n # 注意此时尽可能贪婪,拿到最后一个就是下一页\n pat7 = re.compile(r'
', re.S | re.M)\n\n get_text_list(\"https://www.neihan-8.com/wenzi//\")\n","repo_name":"1987617587/lsh_py","sub_path":"pachong/PCdemo1/day05/刘士豪_20200327/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":5394,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"26845916834","text":"import re\nimport requests\n\n# List the input files\ninput_files = ['ins1.txt', 'ins2.txt', 'ins3.txt']\n\n# Iterate through the input files\nfor input_file in input_files:\n # Open the current input file and read its contents into a list\n with open(input_file, 'r') as f:\n lines = f.readlines()\n\n # Iterate through the list of URLs\n for line in lines:\n # Download the JavaScript file from the URL\n response = requests.get(line)\n contents = response.text\n\n # Use a regular expression to find all URLs\n urls = re.findall(r'https?://(?:[-\\w.]|(?:%[\\da-fA-F]{2}))+', contents)\n\n # Print the URLs\n for url in urls:\n print(url)\n","repo_name":"zhirobyte/Python-Repo","sub_path":"filterjs.py","file_name":"filterjs.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"44563814341","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 14/3/2023\n@author: ZhizhuoYin\n\"\"\"\n\nimport numpy as np\nimport torch\nimport pandas as pd\nfrom torch.nn.functional import softmax\nimport torch.nn.functional as F\nfrom torch.distributions.categorical import Categorical\nfrom scipy.stats import wasserstein_distance\n\n\ndef forward(model, loader, device, writer, epoch, user_edge_dict = None,is_user = False, is_item=True, optimizer=None, train_flag=True, max_item_id=0, last_update=0):\n if train_flag:\n model.train()\n else:\n model.eval()\n hit20, mrr20, hit10, mrr10, hit5, mrr5, hit1, mrr1 = [], [], [], [], [], [], [], []\n\n mean_loss = 0.0\n itemlist = []\n edgerepeat = []\n item_edges = [[], []]\n edgelist = [[] for i in range(max_item_id+1)] # elements [receiver,times,index]\n globalItem2IndMapper = {}\n itemindex = 0\n\n for i, batch in enumerate(loader):\n if train_flag:\n optimizer.zero_grad()\n x = [it[0] for it in batch.x.tolist()]\n edge_index = batch.edge_index.tolist()\n edge_count = batch.edge_count.tolist()\n itemlist += list(filter(lambda d: d not in globalItem2IndMapper,x))\n\n item = itemlist\n if is_item:\n for it in x:\n if it not in globalItem2IndMapper:\n globalItem2IndMapper[it] = itemindex\n itemindex += 1\n\n for k in range(len(edge_index[0])):\n isexist = 0\n for receiver in edgelist[x[edge_index[0][k]]]:\n if receiver[0] == x[edge_index[1][k]]:\n receiver[1] += 1\n isexist = 1\n break\n if not isexist:\n item_edges[0] += [x[edge_index[0][k]]-1]\n item_edges[1] += [x[edge_index[1][k]]-1]\n edgelist[x[edge_index[0][k]]].append([x[edge_index[1][k]],1,len(edgerepeat)])\n edgerepeat += [edge_count[k]]\n\n usredgelist = [[], []]\n userid = batch.userid.tolist()\n if is_user == True:\n for u in userid:\n for v in userid:\n v = int(v)\n u = int(u)\n if (u in user_edge_dict) and (v in user_edge_dict):\n if v in user_edge_dict[u]['in']:\n usredgelist[0].append(v)\n usredgelist[1].append(u)\n if v in user_edge_dict[u]['out']:\n usredgelist[0].append(u)\n usredgelist[1].append(v)\n if is_item:\n usredgelist = torch.tensor(usredgelist, dtype=torch.long)\n item_edge_index = torch.tensor(item_edges,dtype=torch.long)\n item = torch.tensor(item,dtype=torch.long)\n scores = model(batch.to(device),train_flag=train_flag, is_user=is_user, 
is_item=is_item, user_edge_list=usredgelist.to(device) ,item=item.to(device),item_edge_index=item_edge_index.to(device), max_item_id=max_item_id)\n else:\n scores = model(batch.to(device), train_flag=train_flag, is_user=is_user, is_item=is_item, max_item_id=max_item_id)\n targets = batch.y - 1\n loss = model.loss_function(scores, targets)\n\n if train_flag:\n loss.backward()\n optimizer.step()\n writer.add_scalar('loss/train_batch_loss', loss.item(), last_update + i)\n else:\n sub_scores = scores.topk(20)[1] # batch * top_k indices\n for score, target in zip(sub_scores.detach().cpu().numpy(), targets.detach().cpu().numpy()):\n hit20.append(np.isin(target, score))\n if len(np.where(score == target)[0]) == 0:\n mrr20.append(0)\n else:\n mrr20.append(1 / (np.where(score == target)[0][0] + 1))\n\n sub_scores = scores.topk(10)[1] # batch * top_k indices\n for score, target in zip(sub_scores.detach().cpu().numpy(), targets.detach().cpu().numpy()):\n hit10.append(np.isin(target, score))\n if len(np.where(score == target)[0]) == 0:\n mrr10.append(0)\n else:\n mrr10.append(1 / (np.where(score == target)[0][0] + 1))\n\n sub_scores = scores.topk(5)[1] # batch * top_k indices\n for score, target in zip(sub_scores.detach().cpu().numpy(), targets.detach().cpu().numpy()):\n hit5.append(np.isin(target, score))\n if len(np.where(score == target)[0]) == 0:\n mrr5.append(0)\n else:\n mrr5.append(1 / (np.where(score == target)[0][0] + 1))\n\n sub_scores = scores.topk(1)[1] # batch * top_k indices\n for score, target in zip(sub_scores.detach().cpu().numpy(), targets.detach().cpu().numpy()):\n hit1.append(np.isin(target, score))\n if len(np.where(score == target)[0]) == 0:\n mrr1.append(0)\n else:\n mrr1.append(1 / (np.where(score == target)[0][0] + 1))\n\n mean_loss += loss / batch.num_graphs\n\n if train_flag:\n writer.add_scalar('loss/train_loss', mean_loss.item(), epoch)\n else:\n writer.add_scalar('loss/test_loss', mean_loss.item(), epoch)\n hit20 = np.mean(hit20) * 100\n mrr20 = np.mean(mrr20) * 100\n print(str(hit20)+'\\t'+str(mrr20))\n writer.add_scalar('index/hit20', hit20, epoch)\n writer.add_scalar('index/mrr20', mrr20, epoch)\n hit10 = np.mean(hit10) * 100\n mrr10 = np.mean(mrr10) * 100\n print(str(hit10)+'\\t'+str(mrr10))\n writer.add_scalar('index/hit10', hit10, epoch)\n writer.add_scalar('index/mrr10', mrr10, epoch)\n hit5 = np.mean(hit5) * 100\n mrr5 = np.mean(mrr5) * 100\n print(str(hit5)+'\\t'+str(mrr5))\n writer.add_scalar('index/hit5', hit5, epoch)\n writer.add_scalar('index/mrr5', mrr5, epoch)\n hit1 = np.mean(hit1) * 100\n mrr1 = np.mean(mrr1) * 100\n print(str(hit1)+'\\t'+str(mrr1))\n writer.add_scalar('index/hit1', hit1, epoch)\n writer.add_scalar('index/mrr1', mrr1, epoch)\n return [[hit20,hit10,hit5,hit1],[mrr20,mrr10,mrr5,mrr1],epoch]\n return []\n\ndef forward_entropy(model, loader, device, max_item_id=0):\n for i, batch in enumerate(loader):\n scores = softmax(model(batch.to(device), train_flag=False, max_item_id=max_item_id), dim=1)\n dis_score = Categorical(scores)\n if i == 0:\n entropy = dis_score.entropy()\n else:\n entropy = torch.cat((entropy, dis_score.entropy()))\n \n pro = entropy.cpu().detach().numpy()\n weights = np.exp((pd.Series(pro).rank() / len(pro)).values)\n return weights / np.sum(weights)\n\n\ndef forward_cross_entropy(model, loader, device, max_item_id=0):\n for i, batch in enumerate(loader):\n scores = softmax(model(batch.to(device),train_flag=False, max_item_id= max_item_id), dim=1)\n targets = batch.y - 1\n if i == 0:\n cross_entropy = 
torch.nn.functional.cross_entropy(scores, targets, reduction='none')\n else:\n cross_entropy = torch.cat((cross_entropy, torch.nn.functional.cross_entropy(scores, targets, reduction='none')))\n\n pro = cross_entropy.cpu().detach().numpy()\n return pro / pro.sum()\n\n\ndef forward_wass(model, loader, device, max_item_id=0):\n distance = []\n for i, batch in enumerate(loader):\n\n scores = softmax(model(batch.to(device), train_flag=False, max_item_id = max_item_id), dim=1)\n targets = batch.y - 1\n\n targets_1hot = torch.zeros_like(scores).scatter_(1, targets.view(-1, 1), 1).cpu().numpy()\n distance += list(wasserstein_distance(score, target) for score, target in zip(scores.cpu().numpy(), targets_1hot))\n\n weights = np.exp((pd.Series(distance).rank() / len(distance)).values)\n return weights / np.sum(weights)\n","repo_name":"Williamy946/GIUA-GNN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74287833527","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import datasets\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\n\ndef plot_embedding(data, label, title):\n x_min, x_max = np.min(data, 0), np.max(data, 0)\n data = (data - x_min) / (x_max - x_min)\n\n fig = plt.figure()\n ax = plt.subplot(111)\n for i in range(data.shape[0]):\n plt.text(data[i, 0], data[i, 1], str(label[i]),\n color=plt.cm.Set1(label[i] / 10.),\n fontdict={'weight': 'bold', 'size': 9})\n plt.xticks([])\n plt.yticks([])\n plt.title(title)\n return fig\n\ninputn = np.load(\"input.npy\") # (500, 929, 2)\natt_out = np.load(\"att_out.npy\") # (500, 929, 2)\natt_out2 = np.load(\"att_out2.npy\") # (500, 929, 2)\natt_out3 = np.load(\"att_out3.npy\") # (500, 929, 2)\nlabel = np.load(\"label_test500.npy\") # (500,)\nprint(label.shape)\n\nselect_f = inputn\n#select_f = att_out\n#select_f = att_out2\n#select_f = att_out3\n\nfig = plt.figure()\ntsne = TSNE(n_components=2, init='pca', random_state=0)\nstack = np.concatenate((select_f[:,:,0], select_f[:,:,1]), axis=1)\nprint(stack.shape)\nresult = tsne.fit_transform(stack)\nprint(result.shape)\n#fig = plot_embedding(result, label,'t-SNE embedding of the digits')\nx_min, x_max = np.min(result, 0), np.max(result, 0)\nresult = (result - x_min) / (x_max - x_min)\n\ncolor = [\"#B0E0E6\",\"#EE6363\"]\n#color = [\"#B0E0E6\",\"#EE00EE\"]\n\nax = plt.subplot(111)\nfor i in range(result.shape[0]):\n if(label[i] == 0):\n s1 = plt.scatter(result[i, 0], result[i, 1],s=20,color=color[label[i]])\nfor i in range(result.shape[0]):\n if(label[i] == 1):\n s2 = plt.scatter(result[i, 0], result[i, 1],s=20,color=color[label[i]])\nplt.xlabel('Dimension 1')\nplt.ylabel('Dimension 2')\nplt.title('t-SNE embedding of the input layer')\n#plt.title('t-SNE embedding of the global attention layer')\n#plt.title('t-SNE embedding of the 1st MHA layer')\n#plt.title('t-SNE embedding of the 2nd MHA layer')\nplt.legend((s1,s2),('0','1') ,loc = 'best')\nplt.show()","repo_name":"Liuzhe30/AttADR","sub_path":"visulization/vis-tsne-representation.py","file_name":"vis-tsne-representation.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"19436182783","text":"import collections\nimport re\n\n\ndef read_stopwords():\n with open('./stopwords.txt', \"r\") as file:\n stopwords = file.read().split(',')\n # Remove newline characters 
\n\ndef remove_stopwords_bigrams(bigrams):\n    stopwords = read_stopwords()\n    filtered_bigrams = []\n    for b1, b2 in bigrams:\n        if b1.lower() not in stopwords and b2.lower() not in stopwords:\n            filtered_bigrams.append((b1, b2))\n    return filtered_bigrams\n\n\ndef get_bigram_frequencies(in_file, out_file):\n    # Open the input file and read in the contents\n    print('Reading in file...')\n    with open(in_file, 'r') as infile:\n        text = infile.read()\n\n    # Tokenize the words in the text\n    print('Tokenizing words...')\n    words = re.findall(r'\\b[^\\W\\d_]{2,}\\b', text)\n\n    # Normalize the case of the words\n    print('Normalizing case...')\n    words = [word.lower() for word in words]\n\n    # Generate the bigrams\n    print('Generating bigrams...')\n    bigrams = [(words[i], words[i + 1]) for i in range(len(words) - 1)]\n\n    print('Removing stopwords...')\n    bigrams = remove_stopwords_bigrams(bigrams)\n\n    # Count the frequency of each bigram\n    print('Counting bigram frequency...')\n    bigram_counts = collections.Counter(bigrams)\n\n    # Sort the bigrams by frequency\n    print('Sorting bigrams by frequency...')\n    sorted_bigrams = sorted(bigram_counts.items(), key=lambda x: x[1], reverse=True)\n\n    # Open the output file and write the bigram frequencies to it\n    print('Writing to output file...')\n    with open(out_file, 'w') as outfile:\n        for bigram, count in sorted_bigrams:\n            # skip rare bigrams: only counts of 1000 or more are written out\n            if count < 1000:\n                continue\n            outfile.write(f'{bigram[0]} {bigram[1]},{count}\\n')\n\n\nif __name__ == '__main__':\n    # Test the function\n    # Download oscar corpus from here https://www.kaggle.com/code/bmukhtar/starter-kazakh-oscar-corpus-05b5dbd5-d\n    get_bigram_frequencies('kk.txt', 'kk_bigrams.txt')\n","repo_name":"BMukhtar/KazakhSpellingAndSuggestion","sub_path":"generate_bigrams.py","file_name":"generate_bigrams.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"19802888956","text":"from distutils.core import setup\nimport os.path\n\nREADME = os.path.join(os.path.dirname(__file__), 'README.md')\n\nversion = '1.0'\n\nwith open(README) as fp:\n    longdesc = fp.read()\n\nsetup(name='ignore-from-github',\n    include_package_data=True,\n    version=version,\n    description='Add common sets of ignored file types to your .gitignore easily',\n    long_description=longdesc,\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'License :: OSI Approved :: MIT License',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.3',\n        'Topic :: Software Development',\n        'Intended Audience :: Developers'\n    ],\n    author='Anson Rosenthal',\n    author_email='anson.rosenthal@gmail.com',\n    license='MIT License',\n    url='https://github.com/anrosent/ignore.git',\n    scripts=['ignore']\n)\n","repo_name":"anrosent/ignore","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"16883905593","text":"from typing import Sized\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\n\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\ndef calculate(image1, image2):\n    # grayscale-histogram similarity\n    # compute a similarity score for a pair of single-channel histograms\n    hist1 = cv2.calcHist([image1], [0], None, [256], [0.0, 255.0])\n    hist2 = cv2.calcHist([image2], [0], None, [256], [0.0, 255.0])\n    # accumulate the per-bin overlap of the two histograms\n    degree = 0\n    for i in range(len(hist1)):\n        if hist1[i] != hist2[i]:\n            degree = degree + \\\n                (1 - abs(hist1[i] - hist2[i]) / max(hist1[i], hist2[i]))\n        else:\n            degree = degree + 1\n    degree = degree / len(hist1)\n    return degree\n\ndef classify_hist_with_split(image1, image2, size=(255,255)):\n    image1 = cv2.resize(image1, size)\n    image2 = cv2.resize(image2, size)\n    sub_image1 = cv2.split(image1)\n    sub_image2 = cv2.split(image2)\n    sub_data = 0\n    for im1, im2 in zip(sub_image1, sub_image2):\n        sub_data += calculate(im1, im2)\n    sub_data = sub_data / 3\n    return sub_data\n
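\n# Usage sketch for the two helpers above: classify_hist_with_split averages\n# the per-channel histogram overlap, so near-identical images score close to\n# 1.0. The file paths are placeholders, not files from this repository.\ndef _similarity_demo(path_a='a.png', path_b='b.png'):\n    img_a = cv2.imread(path_a)\n    img_b = cv2.imread(path_b)\n    return classify_hist_with_split(img_a, img_b)\n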
\ntem2018 = cv2.imread(r\"D:/Program/reefStudy/data/tem2018.png\")\ntem2019 = cv2.imread(r\"D:/Program/reefStudy/data/tem2019.png\")\ntem2020 = cv2.imread(r\"D:/Program/reefStudy/data/tem2020.png\")\ntem2021 = cv2.imread(r\"D:/Program/reefStudy/data/tem2021.png\")\n\npicList = [tem2018,tem2019,tem2020,tem2021]\n\ntem2018 = cv2.resize(tem2018, (657,398))\ntem2019 = cv2.resize(tem2019, (657,398))\ntem2020 = cv2.resize(tem2020, (657,398))\ntem2021 = cv2.resize(tem2021, (657,398))\n\n# tmp1 = cv2.addWeighted(tem2018,0.5,tem2019,0.5,0)\n# tmp2 = cv2.addWeighted(tem2020,0.5,tem2021,0.5,0)\n# tmp3 = cv2.addWeighted(tmp1,0.5,tmp2,0.5,0)\n\n# globalreef = cv2.imread(r\"D:/Program/reefStudy/data/gr2020.png\")\n# globalreef = cv2.resize(globalreef, (657,398))\n\n# tmp3 = cv2.subtract(tem2021,tem2020)\n# print(classify_hist_with_split(tmp3,tem2021))\n# tmp3 = cv2.addWeighted(globalreef,0.8,tmp3,0.2,0)\n\ngr2018 = cv2.imread(r\"D:/Program/reefStudy/data/gr2018.png\")\ngr2019 = cv2.imread(r\"D:/Program/reefStudy/data/gr2019.png\")\ngr2020 = cv2.imread(r\"D:/Program/reefStudy/data/gr2020.png\")\n\nplt.plot([0.24361189,0.2487901])\nplt.plot([0.22682571411132812,0.266563355922699])\nplt.legend([\"rate of water-temperature change\",\"rate of coral change\"])\nplt.title(\"Comparison of water-temperature and coral change rates\")\nplt.show()\n\n\n\n# tmp3 = cv2.cvtColor(tmp3, cv2.COLOR_BGR2GRAY)\n\n\n# cv2.imshow('tmp3',tmp3)\n# cv2.waitKey()","repo_name":"MicosLiang/reefStudy","sub_path":"temAndReef.py","file_name":"temAndReef.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"25975129049","text":"import subprocess\nimport os\nimport time\n\n\ndef getProjectName():\n    projectName = input('Project name: ')\n    return projectName\n\n\ndef getFlutterPath():\n    userProfile = os.environ.get('USERPROFILE')\n    flutterPath = f'{userProfile}\\\\Downloads\\\\flutter_windows_3.0.5-stable\\\\flutter\\\\bin\\\\flutter.bat'\n\n    return flutterPath\n\n\ndef askForTypeOfProject():\n    print('Select the type of project:')\n    typeOfProject = int(input(\n        '1. Basic Riverpod structure project\\n2. Responsive riverpod structure project\\n'))\n\n    while typeOfProject not in range(1, 3):\n        typeOfProject = askForTypeOfProject()\n\n    return typeOfProject\n\n\ndef askForFeaturesInProject():\n    featuresString = input(\n        'This project will be using a feature-first approach.\\nEnter the features you want in your app:\\nExample: auth, chat, call, products, home or type skip to skip this step\\n')\n\n    if featuresString.lower() == 'skip':\n        return []\n    else:\n        featuresList = featuresString.split(',')\n        features = []\n\n        for feature in featuresList:\n            features.append(feature.strip())\n\n        if 'home' in features:\n            features.remove('home')\n        return features\n\n\ndef createFlutterProject(projectName):\n    flutterPath = getFlutterPath()\n    runTerminalCommand(f'{flutterPath} create {projectName}')\n\n\ndef flutterPubGet(projectName):\n    flutterPath = getFlutterPath()\n    runTerminalCommand(f'{flutterPath} pub get',\n                       directoryName=f'.\\\\{projectName}')\n\n\ndef addFlutterPackage(packageName, directoryName):\n    flutterPath = getFlutterPath()\n    runTerminalCommand(f'{flutterPath} pub add {packageName}',\n                       directoryName=directoryName)\n\n\ndef runTerminalCommand(command, directoryName=''):\n    try:\n        if len(directoryName) > 0:\n            process = subprocess.Popen(command, cwd=directoryName)\n            process.wait()\n        else:\n            process = subprocess.Popen(command)\n            process.wait()\n    except Exception:\n        print('some error occurred while executing a command')\n
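\n\n# Usage sketch: a typical scaffolding run chains the helpers above; the\n# project and package names are placeholders, and nothing here runs on import.\ndef _scaffold_demo(projectName='my_app'):\n    createFlutterProject(projectName)\n    flutterPubGet(projectName)\n    addFlutterPackage('flutter_riverpod', f'.\\\\{projectName}')\n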
\n\ndef createFile(filePath, content):\n\n    with open(filePath, 'w') as f:\n        f.write(content)\n        print(f\"File {filePath} created successfully.\")\n\n\ndef createFolders(folders):\n    for i in range(len(folders)):\n        if i > 0:\n            if doesFolderExists(folders[i - 1]):\n                os.mkdir(folders[i])\n        else:\n            os.mkdir(folders[i])\n\n\ndef createFiles(files):\n    for filePath in files:\n        createFile(filePath, files[filePath])\n\n\ndef doesFolderExists(filePath):\n    while not os.path.exists(filePath):\n        print(f'Creating {filePath} ...')\n        time.sleep(0.1)\n\n    files = filePath.split('\\\\')\n    return True\n","repo_name":"Nitin-Poojary/startup-code-generator-flutter","sub_path":"helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"27347211972","text":"import boto3\nfrom constants import (\n    TABLE_NAME,\n    TABLE_READ_CAPACITY_UNITS,\n    TABLE_WRITE_CAPACITY_UNITS,\n    AWS_REGION,\n)\nfrom init import logger, statistics\n\n\ndef create_table(\n    table_name: str = TABLE_NAME,\n    ReadCapacityUnits: int = TABLE_READ_CAPACITY_UNITS,\n    WriteCapacityUnits: int = TABLE_WRITE_CAPACITY_UNITS,\n    aws_region: str = AWS_REGION,\n) -> bool:\n    \"\"\" Creates DynamoDB table \"\"\"\n\n    try:\n        client = boto3.client(\"dynamodb\", region_name=aws_region)\n        response = client.list_tables()\n        tables = [\n            table for table in response[\"TableNames\"] if table == table_name\n        ]\n\n        if len(tables) > 0:\n            logger.warning(\n                f'Table \"{table_name}\" already exists. Skipping table creation.'\n            )\n            return False\n        else:\n            logger.info(\n                f'Table \"{table_name}\" does not exist. Starting creation process...'\n            )\n    except Exception as e:\n        logger.error(e)\n        raise\n\n    logger.info(\"Creating DB table...\")\n    logger.debug(\n        f\"Context Parameters: {create_table.__name__} => {create_table.__code__.co_varnames}\"\n    )\n    try:\n        dynamodb = boto3.resource(\"dynamodb\", region_name=aws_region)\n        table = dynamodb.create_table(\n            TableName=table_name,\n            AttributeDefinitions=[\n                {\"AttributeName\": \"ts\", \"AttributeType\": \"S\"}\n            ],\n            KeySchema=[{\"AttributeName\": \"ts\", \"KeyType\": \"HASH\"}],\n            ProvisionedThroughput={\n                \"ReadCapacityUnits\": int(ReadCapacityUnits),\n                \"WriteCapacityUnits\": int(WriteCapacityUnits),\n            },\n        )\n        logger.info(\"Table created successfully.\")\n        logger.debug(table)\n    except dynamodb.meta.client.exceptions.ResourceInUseException as e:\n        logger.warning(\n            f'Table \"{table_name}\" already exists. Skipping table creation.'\n        )\n        logger.debug(e)\n        return False\n\n    return True\n
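\n\n# Usage sketch: create the table, then seed it via seed_db_table below. The\n# item shape is a guess that only satisfies the \"ts\" hash key defined above.\ndef _bootstrap_demo():\n    if create_table():\n        seed_db_table(db_objects=[{\"ts\": \"2021-01-01T00:00:00Z\"}])\n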
\n\ndef seed_db_table(\n    db_objects: list = None,\n    table_name: str = TABLE_NAME,\n    aws_region: str = AWS_REGION,\n) -> bool:\n    \"\"\" Insert DB objects into table \"\"\"\n\n    logger.info(\"Inserting data into DB...\")\n    logger.debug(\n        f\"Context Parameters: {seed_db_table.__name__} => {seed_db_table.__code__.co_varnames}\"\n    )\n\n    try:\n        dynamodb = boto3.resource(\"dynamodb\", region_name=aws_region)\n        table = dynamodb.Table(table_name)\n\n        with table.batch_writer() as batch:\n            for item in db_objects:\n                batch.put_item(Item=item)\n\n        statistics.append([\"seed_db_table\", len(db_objects)])\n\n        logger.info(f\"{len(db_objects)} item(s) were inserted in DB.\")\n    except Exception as e:\n        logger.error(e)\n        raise\n\n    return True\n","repo_name":"will666/wasabi-cli","sub_path":"manage/src/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"27657430835","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm  # tqdm displays progress bars for loops\n\n\nclass CliffWalkingEnv:\n    def __init__(self, ncol, nrow):\n        self.ncol = ncol  # number of columns\n        self.nrow = nrow  # number of rows\n        self.x = 0  # x-coordinate of the agent's current position\n        self.y = self.nrow - 1  # y-coordinate of the agent's current position\n\n    def step(self, action):  # called externally to move the agent\n        # Unlike the model-based version, no transition matrix P is defined here.\n        # 4 actions: change[0] = up, change[1] = down, change[2] = left,\n        # change[3] = right; the origin (0, 0) is the top-left corner.\n        change = [[0, -1], [0, 1], [-1, 0], [1, 0]]\n        self.x = min(self.ncol - 1, max(0, self.x + change[action][0]))\n        self.y = min(self.nrow - 1, max(0, self.y + change[action][1]))\n        next_state = self.y * self.ncol + self.x\n        reward = -1\n        done = False\n        # bottom row\n        if self.y == self.nrow - 1 and self.x > 0:  # the next position is the cliff or the goal\n            done = True\n            if self.x != self.ncol - 1:  # not in the last (goal) column\n                reward = -100\n        return next_state, reward, done\n\n    def reset(self):  # return to the initial state; the origin is the top-left corner\n        self.x = 0  # column 0\n        self.y = self.nrow - 1  # bottom row\n        return self.y * self.ncol + self.x","repo_name":"MengxueTao/testGIT-HandsOnRL","sub_path":"C5_TD_CliffWalkingEnv.py","file_name":"C5_TD_CliffWalkingEnv.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"533426544","text":"import os\nfrom teradataml.common.exceptions import TeradataMlException\nfrom teradataml.common.messages import Messages\nfrom teradataml.common.messagecodes import MessageCodes\n\n\nclass _ConfigureSuper(object):\n\n    def __init__(self):\n        pass\n\n    def _SetKeyValue(self, name, value):\n        super().__setattr__(name, value)\n\n    def _GetValue(self, name):\n        return 
super().__getattribute__(name)\n\n\ndef _create_property(name):\n storage_name = '_' + name\n\n @property\n def prop(self):\n return self._GetValue(storage_name)\n\n @prop.setter\n def prop(self, value):\n self._SetKeyValue(storage_name, value)\n\n return prop\n\n\nclass _Configure(_ConfigureSuper):\n \"\"\"\n Options to configure database related values.\n \"\"\"\n\n default_varchar_size = _create_property('default_varchar_size')\n column_casesensitive_handler = _create_property('column_casesensitive_handler')\n vantage_version = _create_property('vantage_version')\n val_install_location = _create_property('VAL_install_location')\n byom_install_location = _create_property('BYOM_install_location')\n sandbox_container_id = _create_property('sandbox_container_id')\n temp_table_database = _create_property('temp_table_database')\n temp_view_database = _create_property('temp_view_database')\n read_nos_function_mapping = _create_property('read_nos_function_mapping')\n write_nos_function_mapping = _create_property('write_nos_function_mapping')\n\n\n def __init__(self, default_varchar_size=1024, column_casesensitive_handler = False,\n vantage_version=\"vantage1.1\", val_install_location=None,\n byom_install_location=None, sandbox_container_id=None,\n temp_table_database=None, temp_view_database=None, database_version=None,\n read_nos_function_mapping=\"read_nos\", write_nos_function_mapping=\"write_nos\"):\n \"\"\"\n PARAMETERS:\n default_varchar_size:\n Specifies the size of varchar datatype in Teradata Vantage, the default\n size is 1024.\n User can configure this parameter using options.\n Types: int\n Example:\n teradataml.options.configure.default_varchar_size = 512\n\n column_casesensitive_handler:\n Specifies a boolean value that sets the value of this option to True or\n False.\n One should set this to True, when ML Engine connector property is\n CASE-SENSITIVE, else set to False, which is CASE-INSENSITIVE.\n Types: bool\n Example:\n # When ML Engine connector property is CASE-SENSITIVE, set this\n # parameter to True.\n teradataml.options.configure.column_casesensitive_handler = True\n\n vantage_version:\n Specifies the Vantage version of the system teradataml is connected to.\n Types: string\n Example:\n # Set the Vantage Version\n teradataml.options.configure.vantage_version = \"vantage1.1\"\n\n val_install_location:\n Specifies the name of the database where Vantage Analytic Library functions\n are installed.\n Types: string\n Example:\n # Set the Vantage Analytic Library install location to 'SYSLIB'\n # when VAL functions are installed in 'SYSLIB'.\n teradataml.options.configure.val_install_location = \"SYSLIB\"\n\n byom_install_location:\n Specifies the name of the database where Bring Your Own Model functions\n are installed.\n Types: string\n Example:\n # Set the BYOM install location to 'SYSLIB'\n # when BYOM functions are installed in 'SYSLIB'.\n teradataml.options.configure.byom_install_location = \"SYSLIB\"\n\n sandbox_container_id:\n Specifies the id of sandbox container that will be used by test_script method.\n Types: string\n Example:\n # Set the sandbox_container_id.\n teradataml.options.configure.sandbox_container_id = '734rfjsls3'\n\n database_version:\n Specifies the actual database version of the system teradataml is connected to.\n Types: string\n Example:\n # Set the Vantage Version\n teradataml.options.configure.database_version = \"17.05a.00.147\"\n \n read_nos_function_mapping:\n Specifies the function mapping name for the read_nos table operator function.\n 
Types: string\n Example:\n # Set the read nos function mapping name\n teradataml.options.configure.read_nos_function_mapping = \"read_nos_fm\"\n \n write_nos_function_mapping:\n Specifies the function mapping name for the write_nos table operator function.\n Types: string\n Example:\n # Set the write nos function mapping name\n teradataml.options.configure.write_nos_function_mapping = \"write_nos_fm\"\n\n \"\"\"\n super().__init__()\n super().__setattr__('default_varchar_size', default_varchar_size)\n super().__setattr__('column_casesensitive_handler', column_casesensitive_handler)\n super().__setattr__('vantage_version', vantage_version)\n super().__setattr__('val_install_location', val_install_location)\n super().__setattr__('byom_install_location', byom_install_location)\n super().__setattr__('sandbox_container_id', sandbox_container_id)\n super().__setattr__('temp_table_database', temp_table_database)\n super().__setattr__('temp_view_database', temp_view_database)\n super().__setattr__('database_version', database_version)\n super().__setattr__('read_nos_function_mapping', read_nos_function_mapping)\n super().__setattr__('write_nos_function_mapping', write_nos_function_mapping)\n\n \n # internal configurations\n # These configurations are internal and should not be\n # exported to the user's namespace.\n super().__setattr__('_validate_metaexpression', False)\n # Internal parameter, that should be used while testing to validate whether\n # Garbage collection is being done or not.\n super().__setattr__('_validate_gc', False)\n # Internal parameter, that is used for checking if sto sandbox image exists on user's system\n super().__setattr__('_latest_sandbox_exists', False)\n # Internal parameter, that is used for checking whether a container was started by\n # teradataml.\n super().__setattr__('_container_started_by_teradataml', None)\n # Internal parameter, that is used for specifying the global model cataloging schema name which\n # will be used by the byom APIs.\n super().__setattr__('_byom_model_catalog_database', None)\n # Internal parameter, that is used for specifying the global model cataloging table name which\n # will be used by the byom APIs.\n super().__setattr__('_byom_model_catalog_table', None)\n # Internal parameter, that is used for specifying the license information as a string, file\n # path or column name which will be used by the byom APIs.\n super().__setattr__('_byom_model_catalog_license', None)\n # Internal parameter, that is used for specifying the source where the license came from\n # which will be used by the byom APIs.\n super().__setattr__('_byom_model_catalog_license_source', None)\n # Internal parameter, that is used for specifying the license table name\n # where the license is stored\n super().__setattr__('_byom_model_catalog_license_table', None)\n # Internal parameter, that is used for specifying the schema name where\n # the license table is stored\n super().__setattr__('_byom_model_catalog_license_database', None)\n # Internal parameter, that is used for specifying the URL to be used as\n # base URL in UES REST calls\n super().__setattr__('ues_url', None)\n # Internal parameter, that is used for specifying the Authentication token to be used\n # in UES REST calls\n super().__setattr__('auth_token', None)\n # Internal parameter, that is used to specify the certificate file in a secured HTTP request.\n super().__setattr__('certificate_file', False)\n # Internal parameter, that is used for specify the maximum size of the file\n # allowed by UES to 
upload it.\n super().__setattr__('_ues_max_file_upload_size', 10)\n # Internal parameter, that is used to specify the default environment,\n super().__setattr__('_default_user_env', None)\n\n # Internal parameter, that is used to post the Code verifier in OAuth work flow.\n super().__setattr__('_oauth_end_point', None)\n\n # Internal parameter, that is used for specifying the client id in OAuth work flow.\n super().__setattr__('_oauth_client_id', None)\n\n # Internal parameter, that is used for specifying the ID of Authentication token.\n super().__setattr__('_id_auth_token', None)\n\n # Internal parameter, that is used for specifying the Authentication token expiry time.\n super().__setattr__('_auth_token_expiry_time', None)\n\n # Internal parameter, that is used for specifying the refresh token to be used\n # in UES REST calls\n super().__setattr__('_refresh_token', None)\n\n # Internal parameter, that is used for specifying the refresh token to be used\n # in UES REST calls\n super().__setattr__('_pf_token_username_label', \"pf.username\")\n\n # Internal parameter, that is used for specifying the refresh token to be used\n # in UES REST calls\n super().__setattr__('_pf_token_password_label', \"pf.pass\")\n\n def __setattr__(self, name, value):\n if hasattr(self, name):\n if name == 'default_varchar_size':\n if not isinstance(value, int):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'int'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n if value <= 0:\n raise TeradataMlException(Messages.get_message(MessageCodes.TDMLDF_POSITIVE_INT, name,\n \"greater than\"),\n MessageCodes.TDMLDF_POSITIVE_INT)\n elif name == '_ues_max_file_upload_size':\n if type(value) != int:\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'int'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n if value < 0:\n raise TeradataMlException(Messages.get_message(MessageCodes.TDMLDF_POSITIVE_INT, name,\n \"greater than or equal to\"),\n MessageCodes.TDMLDF_POSITIVE_INT)\n elif name in ['column_casesensitive_handler', '_validate_metaexpression',\n '_validate_gc', '_latest_sandbox_exists']:\n\n if not isinstance(value, bool):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'bool'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n elif name == 'certificate_file':\n if not isinstance(value, str):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'str'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n\n if not os.path.exists(value):\n msg_code = MessageCodes.EXECUTION_FAILED\n raise TeradataMlException(Messages.get_message(msg_code,\n \"read contents of file '{}'\".format(value),\n 'File does not exist.'),\n msg_code)\n\n if not os.path.isfile(value):\n msg_code = MessageCodes.EXECUTION_FAILED\n raise TeradataMlException(Messages.get_message(msg_code,\n \"read contents of file '{}'\".format(value),\n 'Not a file.'),\n msg_code)\n\n elif name == 'vantage_version':\n if not isinstance(value, str):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'str'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n valid_versions = ['vantage1.0', 'vantage1.1', 'vantage1.3', 'vantage2.0']\n value = value.lower()\n if value not in valid_versions:\n raise TeradataMlException(Messages.get_message(MessageCodes.INVALID_ARG_VALUE,\n value,\n name,\n \"a value in {}\".format(valid_versions)),\n MessageCodes.INVALID_ARG_VALUE)\n\n elif name in ['val_install_location', 
'byom_install_location', 'database_version',\n 'read_nos_function_mapping', 'write_nos_function_mapping',\n '_byom_model_catalog_database', '_byom_model_catalog_table',\n '_byom_model_catalog_license', '_byom_model_catalog_license_source']:\n if not isinstance(value, str):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'str'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n\n elif name in {'ues_url', 'auth_token', '_oauth_end_point', '_oauth_client_id',\n '_id_auth_token', '_refresh_token', '_pf_token_username_label',\n '_pf_token_password_label'}:\n\n if not isinstance(value, str):\n raise TypeError(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name, 'str'))\n\n if len(value) == 0:\n raise ValueError(Messages.get_message(MessageCodes.ARG_EMPTY, name))\n\n if name == 'ues_url':\n value = value[: -1] if value.endswith(\"/\") else value\n\n elif name in ['sandbox_container_id', '_container_started_by_teradataml',\n 'temp_table_database', 'temp_view_database',\n \"_byom_model_catalog_license_table\", \"_byom_model_catalog_license_database\"]:\n if not isinstance(value, str) and not isinstance(value, type(None)):\n raise TeradataMlException(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name,\n 'str or None'),\n MessageCodes.UNSUPPORTED_DATATYPE)\n\n elif name in {'_auth_token_expiry_time'}:\n\n if not isinstance(value, float):\n raise TypeError(Messages.get_message(MessageCodes.UNSUPPORTED_DATATYPE, name, 'float'))\n\n super().__setattr__(name, value)\n else:\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(self.__class__.__name__, name))\n\n\nconfigure = _Configure()","repo_name":"Teradata/teradata-dataiku-plugin","sub_path":"python-lib/teradataml/options/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":16556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3720328649","text":"import nltk\nimport logging as log\nfrom nltk import pos_tag, ne_chunk\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tree import Tree\nfrom nltk.corpus import state_union\nfrom nltk.tokenize import PunktSentenceTokenizer\nimport recEntities\nfrom fuzzywuzzy import fuzz, process\nimport parse_tree\nimport db_handler\nimport util\nimport retry\nimport info\n\n\ndef recColoumns_temp(query_text):\n query_text_words = query_text.split()\n\n stem_columns = recEntities.init_datababse()\n # print(stem_columns)\n\n verb_to_col = recEntities.wrap_convert(stem_columns)\n\n print(verb_to_col)\n print(\"\\n\\n\")\n matched_words_col = {}\n # for col, col_var in verb_to_col.items():\n # for word in col_var:\n # res = process.extractOne(word, query_text_words)\n # if(res[1] > 70):\n # print(\"Column is \" + col)\n # print(\"Matched word is \" + res[0])\n # print(\"With accuracy \" + str(res[1]))\n # matched_words_col[res[0]] = col\n # print(\"\\n\\n\")\n\n for word in query_text_words:\n max_acc = 0\n col_mat = ''\n for col, col_var in verb_to_col.items():\n res = process.extractOne(word, col_var)\n if(res[1] > 70):\n print(\"word is \"+word)\n print(\"Column is \" + col)\n print(\"Matched word is \" + res[0])\n print(\"With accuracy \" + str(res[1]))\n if(res[1] > max_acc):\n print(res[1], max_acc, col, col_mat)\n max_acc = res[1]\n col_mat = col\n print(\"\\n\\n\")\n if(col_mat == ''):\n continue\n matched_words_col[word] = col_mat\n return matched_words_col\n\ndef 
recColoumns(query_text):\n if 'movie' in query_text:\n query_text.replace('movie', 'title')\n if 'movies' in query_text:\n query_text.replace('movies', 'title')\n\n query_text_words = query_text.split()\n\n if 'I' in query_text_words:\n query_text_words.remove('I')\n if 'i' in query_text_words:\n query_text_words.remove('i')\n \n stem_columns = recEntities.init_datababse()\n # print(stem_columns)\n\n verb_to_col = recEntities.wrap_convert(stem_columns)\n\n print(verb_to_col)\n print(\"\\n\\n\")\n matched_words_col = {}\n for col, col_var in verb_to_col.items():\n for word in col_var:\n res = process.extractOne(word, query_text_words)\n if(res[1] > 70):\n print(\"Column is \" + col)\n print(\"Matched word is \" + res[0])\n print(\"With accuracy \" + str(res[1]))\n matched_words_col[res[0]] = col\n print(\"\\n\\n\")\n return matched_words_col\n\ndef get_relationship(query_text, intent_info):\n str_parse_tree = parse_tree.get_parse_tree(query_text)\n matched_words_col = recColoumns_temp(query_text)\n\n if not matched_words_col:\n rows = retry.no_col_match(query_text)\n if rows:\n rows = [row.tolist() for row in rows]\n print(rows)\n return [rows]\n\n \n print(str_parse_tree)\n print(matched_words_col)\n db_inp_dic = {}\n col_type, col_pos = util.get_col_pos()\n print(col_pos)\n adj_dic = util.get_adj(query_text)\n rows = []\n for key, value in matched_words_col.items():\n pos_tag = col_pos[value]\n\n node, val = parse_tree.get_relation(str_parse_tree, key, pos_tag)\n\n print(\"\\n\\n\\n\\n\")\n print(node)\n print(\"\\n\\n\\n\\n\")\n print(val)\n\n if(val != False and val is not None):\n db_inp_dic[value.lower()] = val\n \n print(db_inp_dic)\n\n rows.append(db_handler.db_select(db_inp_dic, intent_info, col_type, adj_dic))\n print(rows)\n else:\n matched_rows = retry.retry(value, query_text)\n \n rows.append( [row.tolist() for row in matched_rows])\n print(rows)\n return rows\n\ndef get_intent_col(text):\n matched_words_col = recColoumns_temp(text)\n print('matched_words_col')\n \n print(matched_words_col)\n if not matched_words_col:\n return ['Title']\n elif 'movie' in text or 'movies' in text:\n return ['Title']\n cols = [val for key, val in matched_words_col.items()]\n return cols\n\ndef get_intent_info(query_text):\n intent = util.get_intent(query_text)\n \n for key, val in intent.items():\n if val:\n query_text = query_text.replace(key, '')\n cols = get_intent_col(query_text)\n\n number = util.get_number(query_text)\n\n \n intent_info = {'cols':cols, 'number':number, 'intent':intent}\n\n return intent_info\n\ndef chunking(tag_words):\n # grammar = r\"\"\"inter : {????}\n # intent : {????+??}\"\"\"\n\n grammar = r\"\"\"inter : {????}\"\"\"\n\n parser = nltk.RegexpParser(grammar)\n chunked = parser.parse(tag_words)\n\n # print(chunked)\n # for subtree in chunked.subtrees(filter=lambda t: t.label() == 'intent'):\n # print(subtree.label())\n intent_text = ''\n inter_text = ''\n # for subtree in chunked.subtrees(filter=lambda t: t.label() == 'intent'):\n # intent_text = \" \".join([text for text, pos in subtree.leaves()])\n for subtree in chunked.subtrees(filter=lambda t: t.label() == 'inter'):\n inter_text = \" \".join([text for text, pos in subtree.leaves()])\n \n q = []\n i = []\n f = True\n for chunk in chunked:\n if type(chunk) != Tree:\n if f:\n i.append(chunk[0])\n else:\n q.append(chunk[0])\n else:\n f = False\n \n query_text = \" \".join(q)\n intent_text = \" \".join(i)\n # log.info(intent_text)\n print(\"Intent text is ---\" + intent_text)\n print(\"Intermediate text is ---\" + 
inter_text) \n print(\"query is ---\" + query_text)\n print(\"\\n\\n\\n\\n\\n\")\n return intent_text, inter_text, query_text\n\n\ndef chunkIntent(tag_words):\n\n grammar = r\"\"\"intent : {????+??}\"\"\"\n parser = nltk.RegexpParser(grammar)\n chunked = parser.parse(tag_words)\n\n # print(chunked)\n for subtree in chunked.subtrees(filter=lambda t: t.label() == 'Chunk'):\n print(subtree)\n\ndef groupNounVerb(tag_words):\n proper_nouns = []\n verbs = []\n nouns = []\n\n proper_nouns = get_continuous_chunks(tag_words)\n\n is_noun = lambda pos : pos[:2] == 'NN'\n \n\n for word, pos in tag_words:\n if pos.startswith('V'):\n verbs.append(word)\n if is_noun(pos):\n nouns.append(word)\n\n split_proper_nouns = []\n for proper_noun in proper_nouns:\n split_proper_nouns += proper_noun.split()\n \n temp_nouns = [noun for noun in nouns if noun not in split_proper_nouns]\n nouns = temp_nouns\n return nouns, proper_nouns, verbs\n\n\ndef filter(sentence):\n words = word_tokenize(sentence)\n\n # filtered_words = remove_stopwords(words)\n tag_words = tagging(words)\n # print(tag_words)\n nouns, proper_nouns, verbs = groupNounVerb(tag_words)\n split_input = []\n split_input = chunking(tag_words)\n # print(Intent_classification_final.predict(split_input[0]))\n print(\"\\n\\n\\n\")\n print(\"nouns \" + str(nouns))\n print(\"proper nouns \" + str(proper_nouns))\n print(\"verbs \" + str(verbs))\n print(\"\\n\\n\\n\")\n intent_info = get_intent_info(split_input[0])\n rows = get_relationship(split_input[2], intent_info)\n final_rows, intent_info = info.filter_info(rows, intent_info)\n return final_rows, intent_info\n\ndef remove_stopwords(words):\n stop_words = list(stopwords.words('english'))\n filtered_words = [word for word in words if word not in stop_words] \n return filtered_words\n\ndef tagging(words):\n return pos_tag(words)\n\ndef get_continuous_chunks(tagged_words):\n chunked = ne_chunk(tagged_words)\n # print(chunked)\n continuous_chunk = []\n current_chunk = []\n\n for i in chunked:\n if type(i) == Tree:\n current_chunk.append(\" \".join([token for token, pos in i.leaves()]))\n elif current_chunk:\n named_entity = \" \".join(current_chunk)\n if named_entity not in continuous_chunk:\n continuous_chunk.append(named_entity)\n current_chunk = []\n else:\n continue\n\n if continuous_chunk:\n named_entity = \" \".join(current_chunk)\n if named_entity not in continuous_chunk:\n continuous_chunk.append(named_entity)\n\n return continuous_chunk\n \n\nif __name__ == \"__main__\":\n filter(\"get movies of 2016\" )\n","repo_name":"karthikbhat13/databot","sub_path":"nlu_module/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":8719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36389964295","text":"from application1 import app\nfrom flask import render_template\n\n@app.route('/')\n@app.route('/index')\ndef index():\n\tsome1 = {'username': 'mike'}\n\tpostser = [\n\t{\n\t\t'author': {'username': 'John'},\n\t\t'body' : 'Beuatiful day in Portland!'\n\t},\n\t{\n\t\t'author' : {'username': 'Susan'},\n\t\t'body' : 'The Avengers is a cool movie'\n\t}\n\t]\n\n\treturn render_template('index.html', title='Home', user=some1, posts=postser)\n\n''' \n\n@app.route('/test1/')\ndef index1(name):\n\tsome1 = {'username': name+\"\\'s\"}\n\treturn render_template('index.html', title='Home', 
user=some1)\n'''\n\n","repo_name":"muthu-kr/blognew","sub_path":"application1/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73904313855","text":"from collections import Counter\nfrom itertools import groupby\n\nFONTSIZE = 15\n\nimport matplotlib\nmatplotlib.use('Agg')\nmatplotlib.rc('font', size=FONTSIZE)\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nfrom numba import njit\nimport numpy as np\nfrom scipy.stats import spearmanr, pearsonr, norm, uniform\nimport tqdm\n\nimport crisper\n\nfrom bananas.pipelines import mean_warszycki_logki\nfrom bananas.worlds import (\n BalancedAgglomerativeClustering,\n CrossValidation,\n KernelTSNE,\n Morgan,\n MurckoScaffoldSplit,\n PaperSplit,\n SMILESToMol,\n SpectralClustering,\n StoredCopy,\n TanimotoMinMaxRepresentationMaker,\n TwoClassLogisticRegression,\n)\nfrom elderberries.benchmarks2018.problem import (\n Benchmarks2018StructuralSimilarity,\n Benchmarks2018ProblemClassificationSummary,\n)\nfrom elderberries.benchmarks2018.solutions import (\n fingerprinter_by_name,\n)\n\ndef target_name(target_uid):\n return {\n \"CHEMBL214\": \"5-HT1A\",\n \"CHEMBL224\": \"5-HT2A\",\n \"CHEMBL225\": \"5-HT2C\",\n \"CHEMBL3371\": \"5-HT6\",\n \"CHEMBL3155\": \"5-HT7\",\n \"CHEMBL226\": \"A1\",\n \"CHEMBL251\": \"A2A\",\n \"CHEMBL217\": \"D2\",\n \"CHEMBL264\": \"H3\",\n \"CHEMBL216\": \"M1\",\n }[target_uid]\n\nweighted_accuracy = Benchmarks2018ProblemClassificationSummary.metrics[\"Weighted_Accuracy\"][0]\naccuracy = Benchmarks2018ProblemClassificationSummary.metrics[\"Accuracy\"][0]\n\nspearman = lambda x, y: spearmanr(x,y)[0]\n\nto_pki = lambda logki: 9. 
- logki\n\ndef _table(rows, cols, content, delimiter='\\t'):\n result = [delimiter.join([''] + list(cols)) + '\\n']\n for row_name, row in zip(rows, content):\n result.append(delimiter.join([row_name] + list(row)) + '\\n')\n return ''.join(result)\n\ndef _arr_header_to_html(arr, header):\n from herbivores._html import (\n to_arr_header,\n columns_width,\n to_html,\n sanitize_html,\n doc_template,\n style_template,\n table_style_1,\n div_style_1,\n href,\n tablesorter,\n )\n href_chembl_compound = lambda uid: href(\n \"https://www.ebi.ac.uk/chembl/compound/inspect/{}\".format(uid),\n uid,\n )\n href_chembl_document = lambda uid: href(\n \"https://www.ebi.ac.uk/chembl/doc/inspect/{}\".format(uid),\n uid,\n )\n width = columns_width(arr, header, 30)\n arr, header = sanitize_html(arr), sanitize_html(header)\n for i, key in enumerate(header):\n if \"uid\" in key and not \"doc\" in key:\n arr[:,i] = np.vectorize(href_chembl_compound, otypes=(np.str,))(arr[:,i])\n if \"uid\" in key and \"doc\" in key:\n arr[:,i] = np.vectorize(href_chembl_document, otypes=(np.str,))(arr[:,i])\n return doc_template(\n style_template(\n table_style_1(\"data_table\"),\n div_style_1(None),\n ) + '\\n' + tablesorter(),\n to_html(arr, header, width, \"data_table\"),\n )\n\ndef jj_thresholded_ki(N=10, N_SPLITS=5, target_uid=\"CHEMBL214\", split_name=\"cv\", C=10., class_weight=\"balanced\", weighted_score=True):\n\n from elderberries.benchmarks2018.problem import Benchmarks2018StructuralSimilarity\n\n preds = {}\n scores = np.zeros((N_SPLITS,N,N), dtype=np.float)\n lspace = np.linspace(0.,3.,10)\n for i in range(N):\n for j in range(N):\n if i <= j:\n thresholds = tuple((lspace[x] for x in (i,j)))\n dataset = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=thresholds,\n )[\"final\"]\n\n if split_name == \"cv\":\n split_ = CrossValidation(\n source=dataset,\n n_groups=N_SPLITS,\n seed=43,\n )\n elif split_name == \"bac\":\n split_ = BalancedAgglomerativeClustering(\n source=Benchmarks2018StructuralSimilarity(source=dataset),\n kernel=\"kernel\",\n n_groups=N_SPLITS,\n )\n else:\n raise ValueError(\"split_name: {}\".format(split_name))\n\n for n_split, split in enumerate(split_.get_splits()):\n tr, te = split.get_train(), split.get_test()\n fpr = Morgan(\n radius=4,\n use_chirality=True,\n use_bond_types=True,\n use_features=False,\n converter=SMILESToMol(),\n )\n fp_tr = fpr(source=tr)\n fp_te = fpr(source=te)\n repr_maker = TanimotoMinMaxRepresentationMaker(\n fingerprint=fp_tr)\n repr_tr = repr_maker(fingerprint=fp_tr)\n repr_te = repr_maker(fingerprint=fp_te)\n model = TwoClassLogisticRegression(\n source=repr_tr,\n C=C,\n class_weight=class_weight,\n )\n pred = StoredCopy(source=model.predict(source=repr_te))\n preds[(n_split, i, j)] = (te, pred)\n\n crisper.evaluate(\n *[k for tup in preds.values() for k in tup],\n label=\"J&J\"\n )\n\n for (n_split, i, j), (te, pred) in tqdm.tqdm(preds.items()):\n if weighted_score:\n scores[n_split, i, j] = weighted_accuracy(None, te, pred)\n else:\n scores[n_split, i, j] = accuracy(None, te, pred)\n scores_ = scores.mean(axis=0)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n im = ax.imshow(scores_, origin=\"lower\", vmin=sorted(set(scores_.ravel()))[1], vmax=sorted(scores_.ravel())[-1])\n lspace_ = np.array([\"{:.2f}\".format(to_pki(x)) for x in lspace])\n idx = np.arange(0,N,2)\n ax.set_xticks(idx)\n ax.set_xticklabels(lspace_[idx])\n ax.set_yticks(idx)\n ax.set_yticklabels(lspace_[idx])\n ax.set_xlabel(\"Inactivity threshold 
(pKi)\")\n ax.set_ylabel(\"Activity threshold (pKi)\")\n if weighted_score:\n ax.set_title(\"Weighted Accuracy\")\n else:\n ax.set_title(\"Accuracy\")\n fig.colorbar(im, ax=ax)\n fig.tight_layout()\n return fig\n\ndef fingercheats(\n target_uids, fpr_names, include_earliest_year=None,\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=False):\n cor = np.zeros((len(target_uids), len(fpr_names)), dtype=np.float)\n cor2 = np.zeros((len(target_uids), len(fpr_names)), dtype=np.float)\n for i, target_uid in enumerate(target_uids):\n ds = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=include_earliest_year,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )[\"final\"]\n for j, fpr_name in enumerate(fpr_names):\n fpr = fingerprinter_by_name[fpr_name]\n a = np.array(fpr(source=ds).data[(\"fingerprint\", \"data\")].sum(axis=1)).ravel()\n b = ds.data[\"value\"]\n cor[i,j] = spearmanr(a,b)[0]\n cor2[i,j] = pearsonr(a,b)[0]\n\n fig = plt.figure(figsize=(16,6))\n\n ax = fig.add_subplot(121)\n fig.colorbar(ax.imshow(cor), ax=ax, orientation=\"horizontal\")\n ax.set_title(\"Spearman rank-order correlation coefficient\")\n ax.set_yticks(np.arange(len(target_uids)))\n ax.set_yticklabels([target_name(u) for u in target_uids])\n ax.set_xticks(range(0, len(fpr_names), 2))\n ax.set_xticklabels([\"FP{}\".format(i+1) for i in range(0, len(fpr_names), 2)])\n\n ax = fig.add_subplot(122)\n fig.colorbar(ax.imshow(cor2), ax=ax, orientation=\"horizontal\")\n ax.set_title(\"Pearson correlation coefficient\")\n ax.set_yticks(np.arange(len(target_uids)))\n ax.set_yticklabels([target_name(u) for u in target_uids])\n ax.set_xticks(range(0, len(fpr_names), 2))\n ax.set_xticklabels([\"FP{}\".format(i+1) for i in range(0, len(fpr_names), 2)])\n\n return (\n fig,\n ''.join([\"FP{}: {}\\n\".format(i+1, fpr_name) \\\n for i, fpr_name in enumerate(fpr_names)]),\n )\n\ndef fingercheats_thr(\n target_uids, fpr_names, threshold=2., include_earliest_year=None,\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=False):\n from sklearn.linear_model import LogisticRegression\n from sklearn.metrics import balanced_accuracy_score\n result = '\\t'.join([''] + [\"FP{}\".format(i+1) for i in range(len(fpr_names))]) + '\\n'\n acc = np.zeros((len(target_uids), len(fpr_names)), dtype=np.float)\n for i, target_uid in enumerate(target_uids):\n row = \"{}\\t\".format(target_name(target_uid))\n ds = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=threshold,\n include_earliest_year=include_earliest_year,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )[\"final\"]\n for j, fpr_name in enumerate(fpr_names):\n fpr = fingerprinter_by_name[fpr_name]\n X = np.array(fpr(source=ds).data[(\"fingerprint\", \"data\")].sum(axis=1)).reshape(-1,1)\n y = ds.data[\"value\"].ravel()\n assert set(y) == set([0., 1.])\n lr = LogisticRegression(class_weight=\"balanced\")\n lr.fit(X, y)\n acc[i,j] = balanced_accuracy_score(y, lr.predict(X))\n row += \"{:.3f}\\t\".format(acc[i,j])\n result += row + '\\n'\n fig = plt.figure(figsize=(8,6))\n ax = fig.add_subplot(111)\n fig.colorbar(ax.imshow(acc), ax=ax, orientation=\"horizontal\")\n ax.set_title(\"Weighted accuracy\")\n ax.set_yticks(np.arange(len(target_uids)))\n ax.set_yticklabels([target_name(u) for u in target_uids])\n ax.set_xticks(range(0, len(fpr_names), 2))\n ax.set_xticklabels([\"FP{}\".format(i+1) for i in 
range(0, len(fpr_names), 2)])\n return (\n fig,\n result,\n ''.join([\"FP{}: {}\\n\".format(i+1, fpr_name) \\\n for i, fpr_name in enumerate(fpr_names)]),\n )\n\ndef min_max_mean_per_paper(\n target_uids,\n include_earliest_year,\n ic50_conversion_strategy,\n fit_ic50,\n min_paper_size):\n fig = plt.figure(figsize=(12.3, len(target_uids)*4))\n counter = 0\n axes = []\n results = []\n for target_uid in target_uids:\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=include_earliest_year,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )[\"final\"]\n result = []\n doc_uid = d.data[\"doc_uid\"]\n value = to_pki(d.data[\"value\"])\n key = lambda x: x[0]\n for k, g in groupby(sorted(zip(doc_uid, value), key=key), key):\n gu, gv = zip(*g)\n if len(gv) >= min_paper_size:\n tup = (np.min(gv), np.max(gv), np.mean(gv))\n result.append(tup)\n results.append(tup)\n for h in zip(*result):\n counter += 1\n ax = fig.add_subplot(len(target_uids), 3, counter)\n axes.append(ax)\n ax.hist(h, bins=43, range=(value.min(), value.max()))\n if counter % 3 == 1:\n ax.set_ylabel(target_name(target_uid) + '\\n')\n ax.set_xlabel({\n 1: \"Min pKi per paper (earliest)\",\n 2: \"Max pKi per paper (earliest)\",\n 0: \"Mean pKi per paper (earliest)\",\n }[counter % 3])\n xlim = (np.array(results).min()-.1, np.array(results).max()+.1)\n [ax.set_xlim(xlim) for ax in axes] \n fig.tight_layout()\n return fig\n\ndef how_many_records_per_paper(\n target_uids,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True):\n fig = plt.figure(figsize=(4.3, len(target_uids)*4))\n for i, target_uid in enumerate(target_uids):\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=include_earliest_year,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )[\"final\"]\n ax = fig.add_subplot(len(target_uids), 1, i+1)\n v = list(Counter(d.data[\"doc_uid\"]).values())\n ax.hist(v, bins=int(np.max(v)))\n ax.set_xlabel(\"Earliest records per paper\")\n ax.set_ylabel(target_name(target_uid) + \"\\n\")\n ax.set_yscale(\"log\", nonposy='clip')\n fig.tight_layout()\n return fig\n\ndef earliest_year_variants(target_uids):\n def compare_year(*ds):\n uids = np.array(sorted(set.union(*[set(d.data[\"uid\"]) for d in ds])))\n years = np.empty((len(uids),len(ds)),dtype=np.float)\n years.fill(np.nan)\n for i, d in enumerate(ds):\n idx = np.searchsorted(uids, d.data[\"uid\"])\n years[idx,i] = d.data[\"year\"]\n return years\n result = [\n \"Reference method: 'all_bioactivity_records'\\n\",\n \"Other:\\n\",\n \" 'Ki_IC50_records'\\n\",\n \" 'Ki_records'\\n\",\n \"target: differing/total\\n\",\n ]\n for target_uid in target_uids:\n ds = []\n for include_earliest_year in [\"all_bioactivity_records\", \"Ki_IC50_records\", \"Ki_records\"]:\n ds.append(mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=include_earliest_year,\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n )[\"final\"])\n y = compare_year(*ds)\n a = np.all(\n np.logical_or(\n y == np.nanmax(y, axis=1).reshape(-1,1),\n np.isnan(y)\n ),\n axis=1,\n )\n result.append(\"{}: {}/{}\\n\".format(target_name(target_uid), len(a)-sum(a), len(a)))\n return ''.join(result)\n\ndef activity_variants(target_uids, conversion_strategies, reference_idx):\n def 
compare_Ki(*ds):\n uids = np.array(sorted(set.union(*[set(d.data[\"uid\"]) for d in ds])))\n value = np.empty((len(uids),len(ds)),dtype=np.float)\n value.fill(np.nan)\n for i, d in enumerate(ds):\n idx = np.searchsorted(uids, d.data[\"uid\"])\n assert np.all(uids[idx] == d.data[\"uid\"])\n value[idx,i] = d.data[\"value\"]\n return value\n\n fig = plt.figure(figsize=(4*len(conversion_strategies),4*len(target_uids)))\n fig2 = plt.figure(figsize=(4*len(conversion_strategies),4*len(target_uids)))\n ax_counter = 0\n for target_uid in target_uids:\n ds = []\n corrections = []\n for ic50_conversion_strategy, fit_ic50, _ in conversion_strategies:\n dct = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=None,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )\n ds.append(dct[\"final\"])\n correction = None\n if fit_ic50:\n for n in reversed(dct[\"data_nodes\"]):\n try:\n correction = n.data[\"IC50_correction\"]\n break\n except KeyError:\n pass\n assert correction is not None\n else:\n correction = 0.\n corrections.append(correction)\n value = compare_Ki(*ds)\n ref_label = conversion_strategies[reference_idx][2]\n for i, (_, fit_ic50, label) in enumerate(conversion_strategies):\n ax_counter += 1\n ax = fig.add_subplot(len(target_uids),len(conversion_strategies),ax_counter)\n ax.scatter(to_pki(value[:,reference_idx]), to_pki(value[:,i]), s=8)\n if fit_ic50:\n ax.set_title(\"(coefficient: {:.3f})\".format(2*10**(-corrections[i])))\n ax.set_xlabel(\"{} (reference)\".format(ref_label))\n ax.set_ylabel(\n target_name(target_uid) + '\\n\\n' + label if i == 0 else label\n )\n ax = fig2.add_subplot(len(target_uids),len(conversion_strategies),ax_counter)\n ax.hist(to_pki(ds[i].data[\"value\"]), bins=43)\n ax.set_xlabel(label)\n if i == 0:\n ax.set_ylabel(target_name(target_uid) + '\\n')\n fig.tight_layout()\n fig2.tight_layout()\n return fig, fig2\n\ndef median_thresholded_activity_variants(\n target_uids, conversion_strategies):\n medians = np.zeros(\n (len(target_uids), len(conversion_strategies)),\n dtype=np.float,\n )\n for i, target_uid in enumerate(target_uids):\n for j, (ic50_conversion_strategy, fit_ic50, _) in enumerate(conversion_strategies):\n medians[i,j] = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=\"median\",\n include_earliest_year=None,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )[\"final\"].data[\"value_threshold\"]\n medians = to_pki(medians)\n labels = [l for _, _, l in conversion_strategies]\n fig = plt.figure(figsize=(10,7))\n ax = fig.add_subplot(111)\n im = ax.imshow(medians.T)\n\n ax.set_xticks(range(len(target_uids)))\n ax.set_xticklabels([target_name(u) for u in target_uids])\n# ax.set_xlabel(\"Target\")\n plt.setp(ax.xaxis.get_majorticklabels(), rotation=90)\n\n ax.set_yticks(range(len(conversion_strategies)))\n ax.set_yticklabels(labels)\n# ax.set_ylabel(\"log Ki variant\")\n\n fig.colorbar(im, ax=ax, orientation='horizontal')\n ax.set_title(\"Median pKi\", fontsize=int(FONTSIZE*1.5))\n\n fig.tight_layout()\n txt = _table(\n rows=np.array([target_name(u) for u in target_uids]),\n cols=np.array(labels),\n content=np.vectorize(lambda f: \"{:.3f}\".format(f))(medians),\n delimiter='\\t'\n )\n return fig, txt\n\ndef density_bias(target_uids):\n def _distance_to_nth_neighbour(kernel, value):\n result = []\n for row in reversed(np.sort(kernel, axis=0)):\n result.append(spearman(row, value))\n return 
np.array(result, dtype=np.float)\n def _n_neighbours_in_radius(kernel, value):\n result = []\n lsp = np.linspace(0,1,201)\n for thr in lsp:\n x = np.sum(kernel>=thr, axis=1)\n result.append(spearman(x, value))\n return lsp, np.array(result, dtype=np.float)\n def _stationary(kernel, value, n=None):\n if n is not None:\n mask = np.zeros(kernel.shape, dtype=np.bool)\n for i, row in enumerate(kernel):\n mask[i,np.argsort(row)[-n:]] = True\n kernel = 0.001 * np.ones(kernel.shape, dtype=np.float)\n kernel[mask] = 1.\n _a = kernel/kernel.sum(axis=0).reshape(1,-1)\n a = _a - np.eye(len(value))\n b = np.zeros(len(value)+1)\n a = np.concatenate((a, np.ones(len(value)).reshape(1,-1)), axis=0)\n b[-1] = 1.\n x = np.linalg.lstsq(a,b)[0]\n return spearman(x, value)\n fig = plt.figure(figsize=(8, 4*len(target_uids)))\n for i, target_uid in enumerate(target_uids):\n ds = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=False,\n )[\"final\"]\n c_doc_uid = Counter(ds.data[\"doc_uid\"])\n x = np.vectorize(lambda uid: c_doc_uid[uid])(ds.data[\"doc_uid\"])\n y = value = to_pki(ds.data[\"value\"])\n kernel = Benchmarks2018StructuralSimilarity(source=ds).data[\"kernel\"]\n result1 = _distance_to_nth_neighbour(kernel, value)\n lsp2, result2 = _n_neighbours_in_radius(kernel, value)\n _min, _max = min(np.nanmin(result1), np.nanmin(result2)), max(np.nanmax(result1), np.nanmax(result2))\n\n ax = fig.add_subplot(len(target_uids),2,2*i+1)\n x = np.arange(len(result1))\n mask = np.logical_not(np.isnan(result1))\n ax.plot(x[mask], result1[mask])\n ax.set_xlabel(\"Distance-sorted neighbours\")\n ax.set_ylabel(target_name(target_uid) + \"\\n\\nSpearman's Rho\")\n ax.set_ylim((_min-.05, _max+.05))\n\n ax = fig.add_subplot(len(target_uids),2,2*i+2)\n mask = np.logical_not(np.isnan(result2))\n ax.plot(lsp2[mask], result2[mask])\n ax.set_xlabel(\"Similarity threshold\")\n ax.set_ylabel(\"Spearman's Rho\")\n ax.set_ylim((_min-.05, _max+.05))\n\n fig.tight_layout()\n return fig\n\ndef similar_compounds(target_uid, n_top, n_bottom, n_random, seed=43):\n ds = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=False,\n )[\"final\"]\n uid = ds.data[\"uid\"]\n kernel = Benchmarks2018StructuralSimilarity(source=ds).data[\"kernel\"]\n ix, iy = np.tril_indices(kernel.shape[0], -1)\n idx = np.argsort(kernel[ix, iy])\n l = len(idx)\n idx = idx[np.sort(np.concatenate((\n np.arange(n_bottom),\n np.arange(l-n_top, l),\n n_bottom + np.random.RandomState(seed=seed).choice(\n l - n_top - n_bottom,\n size=n_random,\n replace=False,\n )\n )))]\n ix, iy = ix[idx], iy[idx]\n uid1, uid2 = uid[ix], uid[iy]\n sim = np.vectorize(lambda f: \"~{:.4f}\".format(f))(kernel[ix, iy])\n arr = np.stack((uid1, uid2, sim), axis=1)\n header = np.array([\"uid\", \"uid\", \"similarity\"])\n return _arr_header_to_html(arr, header)\n\ndef same_paper_cross_paper(target_uids):\n fig = plt.figure(figsize=(len(target_uids)*4, 4))\n for i, target_uid in enumerate(target_uids):\n d = Benchmarks2018StructuralSimilarity(source=mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n 
fit_ic50=True,\n )[\"final\"])\n kernel = d.data[\"kernel\"]\n same_paper = d.data[\"doc_uid\"].reshape(1,-1) == d.data[\"doc_uid\"].reshape(-1,1)\n cross_paper = np.logical_not(same_paper)\n same_paper[range(len(same_paper)),range(len(same_paper))] = False\n ax = fig.add_subplot(1,len(target_uids),i+1)\n ax.hist(kernel.ravel()[same_paper.ravel()], bins=43, label=\"same paper\", alpha=.5, density=True)\n ax.hist(kernel.ravel()[cross_paper.ravel()], bins=43, label=\"cross paper\", alpha=.5, density=True)\n ax.legend()\n ax.set_xlabel(\"Structural similarity\")\n ax.set_title(target_name(target_uid))\n fig.tight_layout()\n return fig\n\ndef year_structural_pareto(target_uids):\n @njit\n def _first(arr, x):\n for i in range(len(arr)):\n if arr[i] == x:\n return i\n raise ValueError()\n result = []\n for i, target_uid in enumerate(target_uids):\n result.append(\"TARGET: {}\".format(target_name(target_uid)))\n result.append(\"\")\n d = Benchmarks2018StructuralSimilarity(source=mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True,\n )[\"final\"])\n kernel = d.data[\"kernel\"]\n year = d.data[\"year\"]\n idx = np.flip(np.argsort(kernel.ravel()))\n delta_year = np.abs(year.reshape(-1,1) - year.reshape(1,-1)).ravel()[idx]\n for dy in sorted(set(delta_year.ravel())-set([0,0.])):\n _idx = idx[_first(delta_year, dy)]\n i, j = _idx // kernel.shape[0], _idx % kernel.shape[0]\n result.append(\"SIMILARITY: {:.3f}, DELTA YEAR: {}\".format(\n kernel[i,j],\n int(dy)\n ))\n for m in (i,j):\n result.append(\"UID: {}, SMILES: {}, VALUE: {}, YEAR: {}, DOC_UID: {}\".format(\n d.data[\"uid\"][m],\n d.data[\"smiles\"][m],\n d.data[\"value\"][m],\n int(d.data[\"year\"][m]),\n d.data[\"doc_uid\"][m],\n ))\n result.append(\"\")\n return '\\n'.join(result) + '\\n'\n\ndef aaaiiaii(value, groups, kernel, time_split):\n from numba import jit, njit\n result_all = np.zeros((kernel.size, 4), dtype=np.float)\n result_all_groups = np.zeros((kernel.size, 4), dtype=np.float)\n result_all_counter = np.zeros(4, dtype=np.int)\n result_nearest = np.empty((kernel.shape[0],2), dtype=np.float)\n result_nearest.fill(np.nan)\n @njit\n def f(value, groups, kernel, result_all, result_all_groups, result_all_counter, result_nearest):\n for i in range(kernel.shape[0]):\n for j in range(kernel.shape[1]):\n if groups[i] > groups[j] or (groups[i] < groups[j] and not time_split): # test to train\n idx = 3-(2*int(value[i])+int(value[j])) # aa ai ia ii\n result_all[result_all_counter[idx], idx] = kernel[i,j]\n result_all_groups[result_all_counter[idx], idx] = groups[i]\n result_all_counter[idx] += 1\n if np.isnan(result_nearest[i, value[j]]) or kernel[i,j] > result_nearest[i, value[j]]:\n result_nearest[i, value[j]] = kernel[i,j]\n f(value, groups, kernel, result_all, result_all_groups, result_all_counter, result_nearest)\n return {\n \"aa\": (result_all[:result_all_counter[0],0], result_all_groups[:result_all_counter[0],0]),\n \"ai\": (result_all[:result_all_counter[1],1], result_all_groups[:result_all_counter[1],1]),\n \"ia\": (result_all[:result_all_counter[2],2], result_all_groups[:result_all_counter[2],2]),\n \"ii\": (result_all[:result_all_counter[3],3], result_all_groups[:result_all_counter[3],3]),\n \"nearest_i\": result_nearest[:,0],\n \"nearest_a\": result_nearest[:,1],\n }\n\ndef splits_analysis(target_uids):\n def plot(value, groups, kernel, axes, split_label, 
time_split=False):\n dct = aaaiiaii(value, groups, kernel, time_split)\n\n not_nan_mask = np.logical_not(np.logical_or(\n np.isnan(dct[\"nearest_a\"]),\n np.isnan(dct[\"nearest_i\"]),\n ))\n aa, ai, ia, ii = (\n dct[\"nearest_a\"][not_nan_mask][value[not_nan_mask]==1],\n dct[\"nearest_i\"][not_nan_mask][value[not_nan_mask]==1],\n dct[\"nearest_a\"][not_nan_mask][value[not_nan_mask]==0],\n dct[\"nearest_i\"][not_nan_mask][value[not_nan_mask]==0],\n )\n\n histtype, linewidth = \"step\", 3\n axes[0].hist(\n aa, bins=43, label=\"AA\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[0].hist(\n ai, bins=43, label=\"AI\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[0].hist(\n ia, bins=43, label=\"IA\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[0].hist(\n ii, bins=43, label=\"II\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[0].set_xlim((0.,1.))\n axes[0].set_xlabel(\"Nearest neighbour similarity\")\n axes[0].set_ylabel(split_label + '\\n')\n axes[0].legend()\n\n S = 8\n axes[1].scatter(ia, ii, label=\"inactive\", c=\"green\", s=S, alpha=.3)\n axes[1].scatter(aa, ai, label=\"active\", c=\"xkcd:sky blue\", s=S, alpha=.3)\n axes[1].scatter(ia.mean(), ii.mean(), facecolors=\"none\", edgecolors='red', s=150)\n axes[1].scatter(ia.mean(), ii.mean(), c=\"green\", marker=\"x\", s=43)\n axes[1].scatter(aa.mean(), ai.mean(), facecolors=\"none\", edgecolors=\"red\", s=150)\n axes[1].scatter(aa.mean(), ai.mean(), c=\"blue\", marker=\"x\", s=43)\n axes[1].plot([0.2, 0.9], [0.2, 0.9])\n axes[1].set_aspect(\"equal\")\n axes[1].legend()\n axes[1].set_xlabel(\"Nearest active similarity\")\n axes[1].set_ylabel(\"Nearest inactive similarity\")\n\n return [np.mean(x) for x in (aa, ai, ia, ii)]\n\n figs = []\n muv_result = []\n for target_uid in target_uids:\n muv_result.append(target_name(target_uid))\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=2.,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True,\n )[\"final\"]\n value = d.data[\"value\"]\n kd = Benchmarks2018StructuralSimilarity(source=d)\n kernel = kd.data[\"kernel\"]\n bac_groups = BalancedAgglomerativeClustering(\n source=kd,\n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n cv_groups = CrossValidation(\n source=d,\n n_groups=5,\n seed=43,\n ).data[\"groups\"]\n spectral_groups = SpectralClustering(\n source=kd, \n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n scaffold_groups = MurckoScaffoldSplit(\n source=d,\n generic=True,\n isomeric=False,\n ).data[\"groups\"]\n paper_groups = PaperSplit(source=d).data[\"groups\"]\n year_groups = d.data[\"year\"]\n fig = plt.figure(figsize=(8,24))\n fig.axes_counter = 0\n def _axes():\n axes = []\n for _ in range(2):\n fig.axes_counter += 1\n axes.append(fig.add_subplot(6,2,fig.axes_counter))\n return axes\n for groups, split_label in (\n (paper_groups, \"paper split\"),\n (bac_groups, \"balanced agglomerative clustering\"),\n (spectral_groups, \"spectral clustering\"),\n (cv_groups, \"cross validation\"),\n (scaffold_groups, \"scaffold split\"),\n ):\n aa, ai, ia, ii = plot(value, groups, kernel, _axes(), split_label)\n muv = aa - ai + ii - ia\n muv_result.append(\"{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}\".format(aa, ai, ii, ia, muv))\n aa, ai, ia, ii = plot(value, year_groups, kernel, _axes(), split_label=\"time split\", time_split=True)\n muv = aa - ai + ii - ia\n 
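# MUV-style bias score built from mean nearest-neighbour similarities: (aa - ai) + (ii - ia)\n        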
muv_result.append(\"{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}\".format(aa, ai, ii, ia, muv))\n fig.tight_layout()\n figs.append(fig)\n\n return tuple(['\\n'.join(muv_result)+'\\n'] + figs)\n\ndef splits_analysis_3_columns(target_uids):\n def plot(value, groups, kernel, axes, split_label, time_split=False):\n dct = aaaiiaii(value, groups, kernel, time_split)\n for k in [\"aa\", \"ai\", \"ia\", \"ii\"]:\n axes[0].hist(\n dct[k][0], bins=43, label=k.upper(),\n density=True, histtype=\"step\", linewidth=3,\n )\n axes[0].set_xlim((0.,1.))\n axes[0].set_xlabel(\"All pairs similarity\")\n axes[0].set_ylabel(split_label + '\\n')\n axes[0].legend()\n\n not_nan_mask = np.logical_not(np.logical_or(\n np.isnan(dct[\"nearest_a\"]),\n np.isnan(dct[\"nearest_i\"]),\n ))\n aa, ai, ia, ii = (\n dct[\"nearest_a\"][not_nan_mask][value[not_nan_mask]==1],\n dct[\"nearest_i\"][not_nan_mask][value[not_nan_mask]==1],\n dct[\"nearest_a\"][not_nan_mask][value[not_nan_mask]==0],\n dct[\"nearest_i\"][not_nan_mask][value[not_nan_mask]==0],\n )\n\n histtype, linewidth = \"step\", 3\n axes[1].hist(\n aa, bins=43, label=\"AA\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[1].hist(\n ai, bins=43, label=\"AI\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[1].hist(\n ia, bins=43, label=\"IA\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[1].hist(\n ii, bins=43, label=\"II\",\n density=True, histtype=histtype, linewidth=linewidth,\n )\n axes[1].set_xlim((0.,1.))\n axes[1].set_xlabel(\"Nearest neighbour similarity\")\n axes[1].legend()\n\n S = 8\n axes[2].scatter(ia, ii, label=\"inactive\", c=\"green\", s=S, alpha=.3)\n axes[2].scatter(aa, ai, label=\"active\", c=\"xkcd:sky blue\", s=S, alpha=.3)\n axes[2].scatter(ia.mean(), ii.mean(), facecolors=\"none\", edgecolors='red', s=150)\n axes[2].scatter(ia.mean(), ii.mean(), c=\"green\", marker=\"x\", s=43)\n axes[2].scatter(aa.mean(), ai.mean(), facecolors=\"none\", edgecolors=\"red\", s=150)\n axes[2].scatter(aa.mean(), ai.mean(), c=\"blue\", marker=\"x\", s=43)\n axes[2].plot([0.2, 0.9], [0.2, 0.9])\n axes[2].set_aspect(\"equal\")\n axes[2].legend()\n axes[2].set_xlabel(\"Nearest active similarity\")\n axes[2].set_ylabel(\"Nearest inactive similarity\")\n\n return [np.mean(x) for x in (aa, ai, ia, ii)]\n\n figs = []\n muv_result = []\n for target_uid in target_uids:\n muv_result.append(target_name(target_uid))\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=2.,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True,\n )[\"final\"]\n value = d.data[\"value\"]\n kd = Benchmarks2018StructuralSimilarity(source=d)\n kernel = kd.data[\"kernel\"]\n bac_groups = BalancedAgglomerativeClustering(\n source=kd,\n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n cv_groups = CrossValidation(\n source=d,\n n_groups=5,\n seed=43,\n ).data[\"groups\"]\n spectral_groups = SpectralClustering(\n source=kd, \n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n scaffold_groups = MurckoScaffoldSplit(\n source=d,\n generic=True,\n isomeric=False,\n ).data[\"groups\"]\n paper_groups = PaperSplit(source=d).data[\"groups\"]\n year_groups = d.data[\"year\"]\n fig = plt.figure(figsize=(12,24))\n fig.axes_counter = 0\n def _axes():\n axes = []\n for _ in range(3):\n fig.axes_counter += 1\n axes.append(fig.add_subplot(6,3,fig.axes_counter))\n return axes\n for groups, split_label in (\n (paper_groups, \"paper 
split\"),\n (bac_groups, \"balanced agglomerative clustering\"),\n (spectral_groups, \"spectral clustering\"),\n (cv_groups, \"cross validation\"),\n (scaffold_groups, \"scaffold split\"),\n ):\n aa, ai, ia, ii = plot(value, groups, kernel, _axes(), split_label)\n muv = aa - ai + ii - ia\n muv_result.append(\"{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}\".format(aa, ai, ii, ia, muv))\n aa, ai, ia, ii = plot(value, year_groups, kernel, _axes(), split_label=\"time split\", time_split=True)\n muv = aa - ai + ii - ia\n muv_result.append(\"{:.3f} - {:.3f} + {:.3f} - {:.3f} = {:.3f}\".format(aa, ai, ii, ia, muv))\n fig.tight_layout()\n figs.append(fig)\n\n return tuple(['\\n'.join(muv_result)+'\\n'] + figs)\n\ndef splits_analysis_2(target_uids):\n fig = plt.figure(figsize=(4*(len(target_uids)+1),4))\n for i, target_uid in enumerate(target_uids):\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=2.,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True,\n )[\"final\"]\n value = d.data[\"value\"]\n kd = Benchmarks2018StructuralSimilarity(source=d)\n kernel = kd.data[\"kernel\"]\n bac_groups = BalancedAgglomerativeClustering(\n source=kd,\n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n cv_groups = CrossValidation(\n source=d,\n n_groups=5,\n seed=43,\n ).data[\"groups\"]\n spectral_groups = SpectralClustering(\n source=kd, \n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n scaffold_groups = MurckoScaffoldSplit(\n source=d,\n generic=True,\n isomeric=False,\n ).data[\"groups\"]\n paper_groups = PaperSplit(source=d).data[\"groups\"]\n year_groups = d.data[\"year\"]\n ax = fig.add_subplot(1,len(target_uids),i+1)\n for groups, label in (\n (paper_groups, \"paper split\"),\n (bac_groups, \"balanced agglomerative clustering\"),\n (spectral_groups, \"spectral clustering\"),\n (cv_groups, \"cross validation\"),\n (scaffold_groups, \"scaffold split\"),\n ):\n dct = aaaiiaii(value, groups, kernel, time_split=False)\n x = np.maximum(dct[\"nearest_a\"], dct[\"nearest_i\"])\n ax.hist(\n x, bins=43, label=label,\n density=True, histtype=\"step\", linewidth=1,\n )\n dct = aaaiiaii(value, year_groups, kernel, time_split=True)\n x = np.maximum(dct[\"nearest_a\"], dct[\"nearest_i\"])\n x = x[np.logical_not(np.isnan(x))]\n ax.hist(\n x, bins=43, label=\"time split\",\n density=True, histtype=\"step\", linewidth=3,\n )\n ax.set_xlabel(\"Nearest neighbour similarity\")\n ax.set_title(target_name(target_uid))\n if i == len(target_uids) - 1:\n ax.legend(fontsize=\"small\", bbox_to_anchor=(1.04,1))\n fig.tight_layout()\n return fig\n\ndef simplest_dataset_hist(mus):\n fig = plt.figure(figsize=(4*len(mus), 8))\n alpha = .6\n for i, mu in enumerate(mus):\n\n ax = fig.add_subplot(2,len(mus),i+1)\n xs = np.linspace(-4.3,4.3,437)\n ax.fill_between(\n xs, norm(loc=mu).pdf(xs),\n label='\"inactive\"', alpha=alpha,\n )\n ax.fill_between(\n xs, norm(loc=-mu).pdf(xs),\n label='\"active\"', alpha=alpha,\n )\n ax.set_xlabel(\"mean: {:.1f}\".format(mu))\n ax.set_ylim((0.,0.6))\n ax.legend()\n if i == 0:\n ax.set_ylabel(\"Normal\\n\")\n\n ax = fig.add_subplot(2,len(mus),len(mus)+i+1)\n xs = np.linspace(-2.1,2.1,437)\n ax.fill_between(\n xs, uniform(loc=mu-1, scale=2.).pdf(xs),\n label='\"inactive\"', alpha=alpha,\n )\n ax.fill_between(\n xs, uniform(loc=-mu-1, scale=2.).pdf(xs),\n label='\"active\"', alpha=alpha,\n )\n ax.set_xlabel(\"mean: {:.1f}\".format(mu))\n ax.set_ylim((0.,0.7))\n ax.legend()\n 
if i == 0:\n ax.set_ylabel(\"Uniform\\n\")\n\n fig.tight_layout()\n return fig\n\ndef muv_on_simplest_dataset(mus, ns):\n def dataset(mu, n_train, n_test, distr, seed=43):\n if isinstance(n_train, int):\n rng = np.random.RandomState(seed=43)\n if distr == \"normal\":\n distr = rng.normal\n acc = norm.cdf(mu)\n elif distr == \"uniform\":\n distr = lambda size: rng.uniform(size=size) * 2. - 1.\n acc = min(1., .5 + .5*abs(mu))\n else:\n raise ValueError(distr)\n tr0 = distr(size=n_train) + mu\n tr1 = distr(size=n_train) - mu\n te0 = distr(size=n_test) + mu\n te1 = distr(size=n_test) - mu\n def _dist(x,y):\n return np.abs(x.reshape(-1,1)-y.reshape(1,-1))\n aa = np.min(_dist(te1, tr1), axis=1).mean()\n ai = np.min(_dist(te1, tr0), axis=1).mean()\n ia = np.min(_dist(te0, tr1), axis=1).mean()\n ii = np.min(_dist(te0, tr0), axis=1).mean()\n return {\n \"acc\": acc,\n \"muv\": aa - ai,\n \"atomwise\": aa - ai + ii - ia,\n }\n elif n_train == \"infty\" and n_test == \"infty\":\n if distr == \"normal\":\n aa, ai, ia, ii = 0., 0., 0., 0.\n return {\n \"acc\": norm.cdf(mu),\n \"muv\": aa - ai,\n \"atomwise\": aa - ai + ii - ia,\n }\n elif distr == \"uniform\":\n aa, ai, ia, ii = 0., min(mu**2,1.), min(mu**2,1.), 0.\n return {\n \"acc\": min(1., .5 + .5*abs(mu)),\n \"muv\": aa - ai,\n \"atomwise\": aa - ai + ii - ia,\n }\n else:\n raise ValueError()\n else:\n raise ValueError()\n result_normal = np.zeros((3, len(ns), len(mus)), dtype=np.float)\n result_uniform = np.zeros((3, len(ns), len(mus)), dtype=np.float)\n for i, n in enumerate(ns):\n for j, mu in enumerate(mus):\n d = dataset(mu, n, n, \"normal\")\n result_normal[0,i,j] = d[\"acc\"]\n result_normal[1,i,j] = d[\"muv\"]\n result_normal[2,i,j] = d[\"atomwise\"]\n\n d = dataset(mu, n, n, \"uniform\")\n result_uniform[0,i,j] = d[\"acc\"]\n result_uniform[1,i,j] = d[\"muv\"]\n result_uniform[2,i,j] = d[\"atomwise\"]\n fig = plt.figure(figsize=(24,6))\n axes = [fig.add_subplot(1,4,i+1) for i in range(4)]\n alpha = .6\n s = 43\n for i, n in enumerate(ns):\n label = \"4 x {}\".format(n) if n != \"infty\" else '∞'\n\n ax = axes[0]\n ax.scatter(\n result_normal[1,i,:], result_normal[0,i,:], label=label, alpha=alpha, s=s)\n ax.set_ylabel(\"Accuracy\")\n ax.set_xlabel(\"Bias measure (MUV part)\")\n ax.set_title(\"Normal\")\n\n ax = axes[1]\n ax.scatter(\n result_normal[2,i,:], result_normal[0,i,:], label=label, alpha=alpha, s=s)\n ax.set_ylabel(\"Accuracy\")\n ax.set_xlabel(\"Bias measure\")\n ax.set_title(\"Normal\")\n\n ax = axes[2]\n ax.scatter(\n result_uniform[1,i,:], result_uniform[0,i,:], label=label, alpha=alpha, s=s)\n ax.set_ylabel(\"Accuracy\")\n ax.set_xlabel(\"Bias measure (MUV part)\")\n ax.set_title(\"Uniform\")\n\n ax = axes[3]\n ax.scatter(\n result_uniform[2,i,:], result_uniform[0,i,:], label=label, alpha=alpha, s=s)\n ax.set_ylabel(\"Accuracy\")\n ax.set_xlabel(\"Bias measure\")\n ax.set_title(\"Uniform\")\n\n [ax.legend(loc=\"lower left\", title=\"Benchmark size\", fontsize=\"small\") for ax in axes]\n fig.tight_layout()\n return fig\n\ndef splits_tsne(target_uids):\n S = 8\n fig = plt.figure(figsize=(30,4*len(target_uids)))\n counter = 0\n for target_uid in target_uids:\n d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=\"all_bioactivity_records\",\n ic50_conversion_strategy=\"all_relations_half_ic50\",\n fit_ic50=True,\n )[\"final\"]\n kd = Benchmarks2018StructuralSimilarity(source=d)\n tsne = KernelTSNE(\n source=kd,\n kernel=\"kernel\",\n n_components=2,\n 
perplexity=43.,\n early_exaggeration=43.,\n learning_rate=4343.,\n ).data[\"tsne\"]\n bac_groups = BalancedAgglomerativeClustering(\n source=kd,\n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n cv_groups = CrossValidation(\n source=d,\n n_groups=5,\n seed=43,\n ).data[\"groups\"]\n spectral_groups = SpectralClustering(\n source=kd,\n kernel=\"kernel\",\n n_groups=5,\n ).data[\"groups\"]\n scaffold_groups = MurckoScaffoldSplit(\n source=d,\n generic=True,\n isomeric=False,\n ).data[\"groups\"]\n paper_groups = PaperSplit(source=d).data[\"groups\"]\n\n for c, split_label in (\n (paper_groups, \"paper split\"),\n (bac_groups, \"balanced agglomerative clustering\"),\n (spectral_groups, \"spectral clustering\"),\n (cv_groups, \"cross validation\"),\n (scaffold_groups, \"scaffold split\"),\n (d.data[\"year\"], \"time split\")):\n counter += 1\n ax = fig.add_subplot(len(target_uids),6,counter)\n a = ax.scatter(tsne.T[0], tsne.T[1], s=S, c=c)\n ax.set_xlim((-105,105))\n ax.set_ylim((-105,105))\n ax.set_aspect(\"equal\")\n ax.set_xlabel(split_label)\n if split_label == \"paper split\":\n ax.set_ylabel(target_name(target_uid) + '\\n')\n bar = fig.colorbar(a)\n bar.locator = MaxNLocator(integer=True)\n bar.update_ticks()\n fig.tight_layout()\n return fig\n\ndef noise_analysis(\n target_uids,\n delta_measurement_threshold,\n delta_measurement_upper_threshold,\n ic50_conversion_strategy,\n fit_ic50):\n fig = plt.figure(figsize=(16,len(target_uids)*4))\n N_PLOTS = 4\n counter = 0\n t1 = []\n t2 = []\n for target_uid in target_uids:\n _d = mean_warszycki_logki(\n target_uid=target_uid,\n chembl_filename=\"chembl_24.db\",\n threshold=None,\n include_earliest_year=None,\n ic50_conversion_strategy=ic50_conversion_strategy,\n fit_ic50=fit_ic50,\n )\n all_values = _d[\"data_nodes\"][-2] # threshold: None -> -2, not None -> -3\n mean_values = _d[\"final\"]\n\n count_uid = Counter(all_values.data[\"uid\"])\n how_many_samples = np.vectorize(lambda uid: count_uid[uid])(all_values.data[\"uid\"])\n\n uid_to_mean_value = dict(zip(mean_values.data[\"uid\"], mean_values.data[\"value\"]))\n a = np.vectorize(lambda uid: uid_to_mean_value[uid])(all_values.data[\"uid\"])\n b = all_values.data[\"value\"]\n\n two_measurements_same_paper = []\n two_measurements_different_paper = []\n key = lambda x: x[0]\n for k, g in groupby(sorted(zip(all_values.data[\"uid\"], all_values.data[\"smiles\"], all_values.data[\"value\"], all_values.data[\"doc_uid\"]), key=key), key):\n gu, gs, gv, gdu = zip(*g)\n if len(gu) == 2:\n if np.abs(gv[0]-gv[1]) > delta_measurement_threshold:\n if np.abs(gv[0]-gv[1]) <= delta_measurement_upper_threshold:\n if gdu[0] == gdu[1]:\n two_measurements_same_paper.append(gv)\n else:\n two_measurements_different_paper.append(gv)\n else:\n t1.append(\"TARGET: {}, UID: {}, SMILES: {}, DOC1: {}, VALUE1: {}, DOC2: {}, VALUE2: {}\".format(\n target_name(target_uid),\n gu[0],\n gs[0],\n gdu[0],\n gv[0],\n gdu[1],\n gv[1],\n ))\n\n _a = np.array(two_measurements_same_paper)\n _b = np.array(two_measurements_different_paper)\n _a = np.abs(_a[:,0]-_a[:,1])\n _b = np.abs(_b[:,0]-_b[:,1])\n\n counter += 1\n ax = fig.add_subplot(len(target_uids),N_PLOTS,counter)\n ax.hist(_a, bins=43)\n ax.set_xlabel(\"pKi abs. difference, same paper\")\n ax.set_ylabel(target_name(target_uid) + \"\\n\")\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n\n counter += 1\n ax = fig.add_subplot(len(target_uids),N_PLOTS,counter)\n ax.hist(_b, bins=43)\n ax.set_xlabel(\"pKi abs. 
difference, two papers\")\n        t2.append(\"TARGET UID: {}, SAME: {:.3f} [{} SAMPLES], DIFFERENT: {:.3f} [{} SAMPLES]\".format(\n            target_name(target_uid),\n            np.mean(np.square(_a))/2,\n            len(_a),\n            np.mean(np.square(_b))/2,\n            len(_b),\n        ))\n        ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n\n        result = []\n        for j in range(1,np.max(list(count_uid.values()))):\n            mask = how_many_samples > j\n            result.append(np.square(b[mask]-a[mask]).mean())\n\n        counter += 1\n        ax = fig.add_subplot(len(target_uids),N_PLOTS,counter)\n        ax.scatter(to_pki(a), to_pki(b), s=8)\n        ax.set_xlabel(\"Mean pKi\")\n        ax.set_ylabel(\"Reported pKi\")\n\n        counter += 1\n        count_count_uid = Counter(count_uid.values())\n        x = np.array([\n            count_count_uid[1],\n            count_count_uid[2],\n            len(mean_values.data[\"uid\"])-count_count_uid[1]-count_count_uid[2],\n        ])\n        assert sum(x) == len(mean_values.data[\"uid\"])\n        ax = fig.add_subplot(len(target_uids),N_PLOTS,counter)\n        ax.bar(x=[0,1,2], height=x)\n        ax.set_xticks(np.arange(3))\n        ax.set_xticklabels([\"1\", \"2\", \">2\"])\n        ax.set_xlabel(\"Records per SMILES\")\n        ax.set_yscale(\"log\", nonposy='clip')\n\n        for rect, label in zip(ax.patches, x):\n            ax.text(\n                rect.get_x() + rect.get_width() / 2,\n                rect.get_height() + 5,\n                label,\n                ha='center',\n                va='bottom',\n                bbox=dict(\n                    boxstyle=\"square\",\n                    ec=(1., 0.5, 0.5),\n                    fc=(1., 0.8, 0.8),\n                ),\n            )\n\n    fig.tight_layout()\n    return fig, '\\n'.join(t1)+'\\n', '\\n'.join(t2)+'\\n'\n\ndef how_many_active_inactive(target_uids, conversion_strategies, threshold):\n    result = np.empty((len(target_uids), len(conversion_strategies)), dtype=np.object)\n    result.fill(\"\")\n    for i, target_uid in enumerate(target_uids):\n        for j, (ic50_conversion_strategy, fit_ic50, _) in enumerate(conversion_strategies):\n            dct = mean_warszycki_logki(\n                target_uid=target_uid,\n                chembl_filename=\"chembl_24.db\",\n                threshold=threshold,\n                include_earliest_year=None,\n                ic50_conversion_strategy=ic50_conversion_strategy,\n                fit_ic50=fit_ic50,\n            )\n            result[i,j] = \"a:{} ia:{} p:{}\".format(\n                (dct[\"final\"].data[\"value\"] == 1.).sum(),\n                (dct[\"final\"].data[\"value\"] == 0.).sum(),\n                len(set(dct[\"data_nodes\"][-3].data[\"doc_uid\"])),\n            )\n    rows = [target_name(u) for u in target_uids]\n    cols = list(list(zip(*conversion_strategies))[2])\n    return _table(rows, cols, result, '\\t')\n\ndef ic50_delta(target_uids, conversion_strategies):\n    result = np.empty((len(target_uids), len(conversion_strategies)), dtype=np.object)\n    result.fill(\"\")\n    for i, target_uid in enumerate(target_uids):\n        for j, (ic50_conversion_strategy, name) in enumerate(conversion_strategies):\n            n = mean_warszycki_logki(\n                target_uid=target_uid,\n                chembl_filename=\"chembl_24.db\",\n                threshold=None,\n                include_earliest_year=None,\n                ic50_conversion_strategy=ic50_conversion_strategy,\n                fit_ic50=True,\n            )[\"data_nodes\"][-2]\n            assert n.__class__.__name__ == \"FitOriginalIC50ToKi\"\n            result[i,j] = \"{:.3f} / {}\".format(\n                2*10**(-n.data[\"IC50_correction\"]),\n                n.data[\"how_many_uids_to_estimate_correction\"],\n            )\n    rows = [target_name(u) for u in target_uids]\n    cols = list(list(zip(*conversion_strategies))[1])\n    return _table(rows, cols, result, '\\t')\n","repo_name":"lesniak43/ananas","sub_path":"fruits/elderberries/benchmarks2018/figures.py","file_name":"figures.py","file_ext":"py","file_size_in_byte":52363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"4898997950","text":"import pdb\n\ndef print_func(n):\n    if n == 0: # base case that ends the function.\n
        pdb.set_trace()\n        return 0\n    elif n > 0:\n        print(n)\n        return print_func(n - 1) # recursive call\n\n\nif __name__ == \"__main__\":\n    pdb.set_trace()\n    print_func(4)\n","repo_name":"AzePUG/Data_Structures_Algo_Python","sub_path":"Source_Code/python_kodlar/fesil2/fesil2_2.5_pdb.py","file_name":"fesil2_2.5_pdb.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"az","doc_type":"code","stars":55,"dataset":"github-code","pt":"79"} +{"seq_id":"1753048167","text":"\"\"\"expertreview URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n    https://docs.djangoproject.com/en/3.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n    1. Add an import: from my_app import views\r\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n    1. Add an import: from other_app.views import Home\r\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n    1. Import the include() function: from django.urls import include, path\r\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom expertreviewapp import views\r\nurlpatterns = [\r\n    path('admin/', admin.site.urls),\r\n    path('',views.mainhome,name='mainhome'),\r\n    path('login/',views.login,name='login'),\r\n    path('cusreg',views.cusreg,name='cusreg'),\r\n    path('expertreg',views.expertreg,name='expertreg'),\r\n    path('addvehicle',views.addvehicle,name='addvehicle'),\r\n    path('company',views.company,name='company'),\r\n    path('adminviewcus',views.adminviewcus,name='adminviewcus'),\r\n    path('adminviewexpert',views.adminviewexpert,name='adminviewexpert'),\r\n    path('adminviewvehicle',views.adminviewvehicle,name='adminviewvehicle'),\r\n    path('adminhome',views.adminhome,name='adminhome'),\r\n    path('deletevehicle',views.deletevehicle,name='deletevehicle'),\r\n    path('experthome',views.experthome,name='experthome'),\r\n    path('companyhome',views.companyhome,name='companyhome'),\r\n    path('expertviewvehicle',views.expertviewvehicle,name='expertviewvehicle'),\r\n    path('comvvehicle',views.comviewvehicle,name='comvvehicle'),\r\n    path('expertreview',views.expertreview,name='expertreview'),\r\n    path('expertviewreviews',views.expertviewreviews,name='expertviewreviews'),\r\n    path('cushome',views.cushome,name='cushome'),\r\n    path('cusviewreviews',views.cusviewreviews,name='cusviewreviews'),\r\n    path('custviewvehicle',views.custviewvehicle,name='expertreview'),\r\n\r\n    path('expcardetails',views.expcardetails,name='expcardetails'),\r\n    path('custcardetails',views.custcardetails,name='custcardetails'),\r\n    path('adminreview',views.adminreview,name='adminreview'),\r\n    path('adminreviewmore',views.adminreviewmore,name='adminreviewmore'),\r\n    path('adminupdatereview',views.adminupdatereview,name='adminupdatereview'),\r\n\r\n    path('expertprofile',views.expertprofile,name='expertprofile'),\r\n    path('cusprofile',views.cusprofile,name='cusprofile'),\r\n    path('req',views.req),\r\n    path('expapp',views.expapp),\r\n    path('exprem',views.exprem),\r\n    path('cusvreq',views.cusvreq),\r\n    path('expertvreq',views.expertvreq),\r\n    path('comviewvehicle',views.comviewvehicle),\r\n    path('inchat',views.inchat,name=\"inchat\"),\r\n    path('sfChatPer',views.sfChatPer,name=\"sfChatPer\"),\r\n\r\n\r\n\r\n\r\n
\r\n]","repo_name":"Rithw/Main-Project","sub_path":"expertreview/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24242308963","text":"from collections import namedtuple\n\nimport gdb\n\ndef _load_pwndbg():\n    try:\n        import pwndbg\n    except:\n        return None\n\n    from gdb_comments.integrations.pwndbg_patch import load\n    load()\n\n    from gdb_comments.integrations import pwndbg_utils\n    return pwndbg_utils\n\ndef _load_peda():\n    # PEDA was never designed to be imported. Instead of writing an overt rant\n    # here, I will simply list a series of facts and let the astute reader draw\n    # their own conclusions (and my apologies to the keen grammarian for my use\n    # of the gender-neutral, singular \"they\").\n    #\n    # Typically in Python, when you want to import something, you type\n    # `import something` at the top of your file and it Just Works. Sadly, peda\n    # cannot be imported this way. The main file (that contains the majority of\n    # code in peda) is a 6,000+ line script that contains at least two classes\n    # and 50 lines of initialization code that is not guarded inside of a\n    # standard `if __name__ == '__main__'` construct.\n    #\n    # In the event that you could convince Python to load this file, peda would\n    # generate a second instance of the PEDA class which would be in direct\n    # violation of the comment above the instance stating\n    #\n    #     # global instances of PEDA() and PEDACmd()\n    #     peda = PEDA()\n    #\n    # Typically, a project implicitly demonstrates how to import itself via its\n    # test suite. However, peda has no tests and therefore cannot serve as a\n    # reference on importing itself.\n    #\n    # With that said, I know that the peda object exists in memory. I can (and\n    # do) `import gdb` and potentially the global namespace accessible through\n    # the GDB interpreter is available through that import although I could\n    # never find it. I wouldn't be surprised if a knowledgeable someone came\n    # across this comment and just so happened to know how to access the\n    # interpreter environment through `import gdb`. However, I was unable to\n    # find it.\n    #\n    # And that finally brings us to the third and current solution. Given that\n    # the peda object is sitting somewhere in memory and this code is getting\n    # executed under the same Python process, this code should be able to find\n    # the peda object. 
A quick search on SO yielded a simple, yet horrific,\n    # answer: just get a list of every object known to the garbage collector.\n    # From there, find one with the correct class name (although I need to\n    # compare strings because I don't actually have a reference to the PEDA\n    # class).\n    #\n    # If you have had the patience to read this rather lengthy wall of text, my\n    # hope is that you will understand why the next few lines of code exist and\n    # why I am not a terrible person for writing them.\n    import gc\n\n    peda = None\n    for obj in gc.get_objects():\n        if str(obj.__class__) == \"<class 'PEDA'>\":\n            peda = obj\n            break\n    if peda is None:\n        return None\n\n    from gdb_comments.integrations.peda_patch import load\n    load(peda)\n\n    from gdb_comments.integrations import peda_utils\n    return peda_utils\n\ndef _make_utils():\n    _utils = None\n    if _utils is None:\n        _utils = _load_pwndbg()\n\n    # Loading PEDA is very inefficient so make sure it's the last thing we try.\n    if _utils is None:\n        _utils = _load_peda()\n\n    if _utils is None:\n        raise EnvironmentError('Could not find a supported environment to load comments.')\n    return _utils.info, _utils.error\n\ninfo, error = _make_utils()\nutils = _make_utils()\n","repo_name":"supersam654/gdb-comments","sub_path":"gdb_comments/integrations/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10208468839","text":"# input\nuur_vertrek_thuis = int(input('Geef uur vertrek thuis: '))\nminuten_vertrek_thuis = int(input('Geef minuten vertrek thuis: '))\nuur_aankomst_bij_vriendin = int(input('Geef uur aankomst bij vriendin: '))\nminuten_aankomst_bij_vriendin = int(input('Geef minuten aankomst bij vriendin: '))\nuur_vertrek_van_vriendin = int(input('Geef uur vertrek van vriendin: '))\nminuten_vertrek_van_vriendin = int(input('Geef minuten vertrek van vriendin: '))\nuur_aankomst_thuis = int(input('Geef uur aankomst thuis: '))\nminuten_aankomst_thuis = int(input('Geef minuten aankomst thuis: '))\n\n# compute the travel time (there or back)\nresultaat = ((1440 - (uur_vertrek_thuis * 60 + minuten_vertrek_thuis)) + (uur_aankomst_thuis * 60 + minuten_aankomst_thuis)) % 1440\nresultaat -= ((1440 - (uur_aankomst_bij_vriendin * 60 + minuten_aankomst_bij_vriendin)) + (uur_vertrek_van_vriendin * 60 + minuten_vertrek_van_vriendin)) % 1440\nresultaat /= 2\n\n\n# compute the arrival time\ncorrecte_minuten_aankomst_thuis = int((minuten_vertrek_van_vriendin + (resultaat % 60)) % 60)\ncorrecte_uur_aankomst_thuis = int(((uur_vertrek_van_vriendin + (resultaat // 60)) + ((minuten_vertrek_van_vriendin +resultaat % 60)) // 60) % 24)\nprint(correcte_uur_aankomst_thuis)\nprint(correcte_minuten_aankomst_thuis)\n\n#15:45 945 18:05 1085 140\n# 16:30 990 17:14 1024 34\n# 53\n# use the python console as a calculator\n#21 14 11 45 22 58 14 59 2 14\n#15 1 17 5 18 1 18 23 19 14\n#557213823281659284\n\n\n\n\n\n","repo_name":"astilleman/Informatica5","sub_path":"04 - Variabelen/De gestopte klok.py","file_name":"De gestopte klok.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11387493080","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nATM module.\n\"\"\"\n\n__author__ = 'Ziang Lu'\n\nfrom atm.dispatcher import (\n    FiveDispatcher, HundredDispatcher, OneDispatcher, TenDispatcher,\n    TwentyDispatcher\n)\n\n\nclass AtmMachine:\n    __slots__ = ['_first_dispatcher']\n\n    def 
__init__(self):\n \"\"\"\n Default constructor.\n \"\"\"\n self._first_dispatcher = HundredDispatcher.get_instance(\n TwentyDispatcher.get_instance(\n TenDispatcher.get_instance(\n FiveDispatcher.get_instance(OneDispatcher.get_instance())\n )\n )\n )\n\n def withdraw(self, requested_amount: int) -> None:\n \"\"\"\n Withdraws the given amount of money from this ATM.\n :param requested_amount: int\n :return: None\n \"\"\"\n # Delegate to the dispatchers to handle this withdraw request\n self._first_dispatcher.dispatch(requested_amount)\n","repo_name":"Ziang-Lu/Design-Patterns","sub_path":"4-Behavioral Patterns/8-Chain of Responsibility Pattern/Usage 2-One or More Receivers Handle Request/Python/atm/atm_machine.py","file_name":"atm_machine.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"15130103600","text":"class Solution:\n def minCost(self, nums: List[int], cost: List[int]) -> int:\n res=0\n nums = sorted(zip(nums,cost))\n total = sum(cost)//2\n for num,cost in nums:\n res+=cost\n if res>total:\n mid = num\n break\n return sum(abs(mid-n)*c for n,c in nums)","repo_name":"iamcvarma/DSA-leetcode","sub_path":"2448-minimum-cost-to-make-array-equal/2448-minimum-cost-to-make-array-equal.py","file_name":"2448-minimum-cost-to-make-array-equal.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"26914915515","text":"import ipaddress as IP\nfrom os import system as linux\nlinux(\"clear\")\n\nip = '192.168.0.100'\n\nendereco = IP.ip_address(ip)\nrede = IP.ip_network(ip)\n\nprint(f\"rede: {rede}\")\n","repo_name":"Lucas20santos/BancoCarregourDataEngineer","sub_path":"FundamentosArquiteturaSistema/codigos/ips.py","file_name":"ips.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"5107642347","text":"import collections as co\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom matplotlib.ticker import NullLocator\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nimport numpy as np\nimport os.path as op\nimport io\n# ---------------------------------------------------------------------------\n\nFORMAT = 'png'\n\n# ---------------------------------------------------------------------------\n\nScatterplotData = co.namedtuple('ScatterplotData', 'label shape level x y')\nPixel = co.namedtuple('Pixel', 'x y')\nScatterplotMetaData = co.namedtuple('ScatterplotMetaData',\n 'readout ligand concentration time')\nPointSpec = co.namedtuple('PointSpec', 'label shape level')\nResponseData = co.namedtuple('ResponseData', 'metadata data')\nMarkerSpec = co.namedtuple('MarkerSpec', 'marker color')\n\nmarker_map = {\n 'triangle': MarkerSpec('^', 'orange'),\n 'circle': MarkerSpec('o', 'mediumpurple'),\n 'square': MarkerSpec('s', 'mediumseagreen'),\n }\n\ndpi = 72.0\n\ncmap_bwr = LinearSegmentedColormap.from_list('bwr', ['blue', 'white', 'red'])\n\ndef scatterplot(points, metadata, lims=None, outpath='/dev/null',\n display=False):\n f = Figure(figsize=(300 / dpi, 300 / dpi), dpi=dpi)\n ax = f.gca()\n for p in points:\n if p.level is None:\n # overrides cmap\n color = marker_map[p.shape].color\n else:\n color = p.level\n ax.scatter(p.x, p.y, c=color, vmin=0, vmax=1, linewidth=0.5,\n marker=marker_map[p.shape].marker, s=100, 
cmap=cmap_bwr)\n if lims is None:\n all_data = sum(([p.x, p.y] for p in points), [])\n dmin = min(all_data)\n dmax = max(all_data)\n drange = dmax - dmin\n lims = dmin - drange * 0.1, dmax + drange * 0.1\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n ax.set_aspect('equal')\n ax.set_xlabel(build_label(metadata[0]))\n ax.set_ylabel(build_label(metadata[1]))\n for loc in 'top', 'right':\n ax.spines[loc].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n f.subplots_adjust(left=0.2, bottom=0.15, right=1, top=1, wspace=0, hspace=0)\n plt.setp(f, 'facecolor', 'none')\n\n canvas = FigureCanvasAgg(f)\n f.set_canvas(canvas)\n\n # must always be called, even if outpath is '/dev/null', so that the\n # returned figure object yields consistent pixel coordinates\n canvas.print_png(outpath)\n\n if display:\n plt.show()\n\n return f\n\ndef pixels(points, figure):\n transform = figure.gca().transData.transform\n # see http://matplotlib.org/devel/transformations.html#matplotlib.transforms.Transform.transform\n height = figure.canvas.get_width_height()[1]\n return tuple(Pixel(int(round(q[0])), int(round(height - q[1])))\n for q in transform(np.array([(p.x, p.y) for p in points])))\n\n\ndef build_label(metadata):\n readout, ligand, concentration, time = metadata\n if readout is not None and all(x is None for x in (ligand, concentration, time)):\n # basal\n label = 'basal %s (a.u.)' % readout\n elif all(x is not None for x in metadata):\n # ligand response\n label = '%s [%s]\\n(fold change over basal)' % (readout, ligand)\n else:\n raise ValueError(\"unknown combination of metadata values\")\n return label\n\n\ndef legend_categorical(target_dir):\n # this just generates pieces, still need to manually assemble them\n # into the final result\n f = Figure(figsize=(300/dpi, 300/dpi), dpi=dpi)\n ax = f.gca()\n for subtype, shape in (('HER2amp', 'triangle'),\n ('TN', 'circle'),\n ('HR+', 'square')):\n ax.plot(0, 0, marker=marker_map[shape].marker, mfc=marker_map[shape].color,\n label=subtype, ls='none')\n ax.legend(prop={'size': 12})\n filename = op.join(target_dir, 'legend-categorical.png')\n canvas = FigureCanvasAgg(f)\n canvas.print_png(filename)\n\n\ndef legend_graded(target_dir):\n # this just generates pieces, still need to manually assemble them\n # into the final result\n f = Figure(figsize=(300/dpi, 300/dpi), dpi=dpi)\n ax = f.gca()\n for subtype, shape in (('HER2amp', 'triangle'),\n ('TN', 'circle'),\n ('HR+', 'square')):\n ax.plot(0, 0, marker=marker_map[shape].marker, label=subtype, mfc='none', ls='none')\n ax.set_xlabel('Subtype')\n cax = ax.imshow([[0,1]], cmap=cmap_bwr)\n cbar = f.colorbar(cax, ticks=[0, 0.5, 1], orientation='horizontal')\n cbar.ax.set_xticklabels(['Weak', 'Medium', 'Strong'])\n cbar.ax.set_xlabel('Lapatinib response')\n plt.setp(cbar.ax.get_xticklines(), alpha=0)\n ax.legend(prop={'size': 12})\n f.set_facecolor('none')\n filename = op.join(target_dir, 'legend-graded.png')\n canvas = FigureCanvasAgg(f)\n canvas.print_png(filename)\n\n\nif __name__ == '__main__':\n points = (ScatterplotData('AU-565', 'triangle', 0.554, 4.308, 4.311),\n ScatterplotData('BT-20', 'circle', 0.043, 3.843, 3.877),\n ScatterplotData('BT-474', 'triangle', 0.496, 3.455, 3.535),\n ScatterplotData('BT-483', 'square', 1.000, 3.805, 3.685),\n ScatterplotData('BT-549', 'circle', 0.873, 3.333, 3.197),\n ScatterplotData('CAMA-1', 'square', 1.000, 3.343, 3.230),\n ScatterplotData('HCC1187', 'circle', 0.403, 3.818, 3.723),\n ScatterplotData('HCC1395', 'circle', 0.859, 3.682, 
3.720),\n ScatterplotData('HCC1419', 'triangle', 0.501, 4.068, 4.051),\n ScatterplotData('HCC1428', 'square', 0.640, 3.590, 3.376),\n ScatterplotData('HCC1806', 'circle', 0.246, 3.877, 3.843),\n ScatterplotData('HCC1937', 'circle', 0.854, 3.862, 3.727),\n ScatterplotData('HCC1954', 'triangle', 0.162, 4.032, 3.996),\n ScatterplotData('HCC202', 'triangle', 0.838, 4.199, 4.197),\n ScatterplotData('HCC38', 'circle', 1.000, 3.919, 3.838),\n ScatterplotData('HCC70', 'circle', 0.000, 4.263, 4.307),\n ScatterplotData('MCF7__b', 'square', 1.000, 3.148, 2.951),\n ScatterplotData('MDA-MB-134-VI', 'square', 1.000, 3.442, 3.475),\n ScatterplotData('MDA-MB-157', 'circle', 0.921, 3.294, 2.611),\n ScatterplotData('MDA-MB-175-VII', 'square', 0.163, 4.052, 3.831),\n ScatterplotData('MDA-MB-231__a', 'circle', 0.860, 3.903, 3.524),\n ScatterplotData('MDA-MB-361', 'triangle', 0.994, 3.092, 2.991),\n ScatterplotData('MDA-MB-436', 'circle', 0.950, 3.781, 3.635),\n ScatterplotData('MDA-MB-453', 'circle', 0.889, 3.290, 3.424),\n ScatterplotData('SK-BR-3__a', 'triangle', 0.608, 3.986, 3.999),\n ScatterplotData('T47D', 'square', 0.921, 3.804, 3.835),\n ScatterplotData('UACC-812', 'triangle', 0.537, 3.908, 3.907),\n ScatterplotData('UACC-893', 'triangle', 0.539, 3.677, 3.709),\n ScatterplotData('ZR-75-1', 'square', 1.000, 3.884, 3.569))\n metadata = (ScatterplotMetaData(readout='pErk', ligand='EGF', concentration='100', time=None),\n ScatterplotMetaData(readout='pErk', ligand='EPR', concentration='100', time=None))\n lims = (1.518, 4.395)\n\n scatterplot(points, metadata, lims, display=True)\n","repo_name":"hmslincs/hmslincs","sub_path":"src/scatterplot.py","file_name":"scatterplot.py","file_ext":"py","file_size_in_byte":7457,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"79"} +{"seq_id":"74305061694","text":"import tweepy\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\n\nfrom twitter_auth import authenticate_twitter_app\nfrom db_stuff import UserDB\n\nclass MyStreamListener(tweepy.StreamListener):\n \"\"\"\n Twitter listener, collects streaming tweets and output to a file\n \"\"\"\n\n def __init__(self, output_file=\"alc_tweets.db\", max_tweets=1000):\n super(MyStreamListener, self).__init__()\n self.max_tweets = max_tweets\n self.num_tweets = 0\n self.good_tweets = 0\n self.db = UserDB(output_file)\n\n def on_status(self, status):\n #print(status.text)\n tweet = status._json\n self.num_tweets=self.num_tweets+1\n \n text = \"\"\n \n # catching extended tweets (only way i found to do this with streaming API)\n try:\n text = status.extended_tweet['full_text']\n except:\n text = status.text\n\n if status.place != None:\n print(\"Inserting tweet of length: \" + str(len(text)))\n print(\"Text: \" + text)\n print(\"Country code: \" + status.place.country_code)\n self.good_tweets=self.good_tweets+1\n self.db.insert_tweet(int(status.user.id_str), text, status.place.country_code)\n self.db.save_changes()\n\n # Stops streaming when it reaches the limit\n if self.num_tweets <= self.max_tweets:\n if self.num_tweets % 100 == 0: # just to see some progress...\n print(str(self.num_tweets) + \" collected -> \" + str(self.good_tweets) + \" are applicable\")\n return True\n else:\n return False\n\n def on_error(self, status):\n print(status)\n return False\n \n def __del__(self):\n pass\n\n\nif __name__ == '__main__':\n\n print(\"Run Listener for crawling twitter data\")\n\n #Define search content\n key_words =[\"alcohol,beer,wine,drunk,drinking alcohol,party 
alcohol\"]\n\n\n l = MyStreamListener(max_tweets=100000)\n\n # Create you Stream object with authentication\n auth = authenticate_twitter_app()\n stream = tweepy.Stream(auth=auth, listener=l)\n\n # Filter Twitter Streams to capture data by the keywords:\n stream.filter(track=key_words,languages=['en'])\n\n# try out db stuff\n \n","repo_name":"DanielSudy/SMTAlcoholConsumption","sub_path":"sentiment_analysis/alcohol_streamer.py","file_name":"alcohol_streamer.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"39507001799","text":"# 309. Best Time to Buy and Sell Stock with Cooldown\n# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/description/\n\nfrom functools import lru_cache\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n \n\n # Solution 1 - Dfs with memoization \n\n # cache = {}\n\n # @lru_cache\n # def dfs(i, canBuy):\n # # if (i, canBuy) in cache:\n # # return cache[(i, canBuy)]\n\n # if i >= len(prices):\n # return 0\n\n \n # res = dfs(i+1, canBuy)\n\n # if canBuy:\n # res = max(dfs(i+1, not canBuy) - prices[i], res)\n # else:\n # res = max(dfs(i+2, not canBuy) + prices[i], res)\n\n # # cache[(i, canBuy)] = res\n\n # return res\n\n # return dfs(0, True)\n\n\n # Solution 2 - Dynamic programming (Bottom up) approach with tabulation \n n = len(prices)\n\n stock = [0] * (n)\n no_stock = [0] * (n)\n sold = [0] * (n)\n\n stock[0] = -prices[0]\n\n\n for i in range(1, n):\n stock[i] = max(stock[i-1], no_stock[i-1] - prices[i])\n no_stock[i] = max(no_stock[i-1], sold[i-1])\n sold[i] = stock[i-1] + prices[i]\n\n return max(sold[n-1], no_stock[n-1])\n\n\n # Solution 3 - Space optimisation. You would only need three variables to hold previous state and nothing else hence space can be optimised to be constant. 
\n\n # n = len(prices)\n\n # stock = -prices[0]\n # no_stock = 0\n # sold = 0\n\n # for i in range(1, n):\n # prev_stock = stock\n # stock = max(stock, no_stock - prices[i])\n # no_stock = max(no_stock, sold)\n # sold = prev_stock + prices[i]\n\n\n # return max(sold, no_stock)\n\n\n\n \n\n \n# Example 1:\n\n# Input: prices = [1,2,3,0,2]\n# Output: 3\n# Explanation: transactions = [buy, sell, cooldown, buy, sell]\n \n# Example 2:\n\n# Input: prices = [1]\n# Output: 0\n\n\n\n\n\n\n\n\n","repo_name":"anoopanni/leetcode","sub_path":"BuySellStock.py","file_name":"BuySellStock.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71553703615","text":"import os\nimport argparse\n\nimport paddle\n\nfrom arch_unet import UNet\nfrom utils import load_pretrained_model\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Model export.')\n parser.add_argument(\n '--save_dir',\n dest='save_dir',\n help='The directory for saving the exported model',\n type=str,\n default='./output')\n parser.add_argument(\n '--model_path',\n dest='model_path',\n help='The path of model for export',\n type=str,\n default=None)\n\n return parser.parse_args()\n\n\ndef main(args):\n\n net = UNet(in_nc=3,\n out_nc=3,\n n_feature=48)\n\n if args.model_path:\n para_state_dict = paddle.load(args.model_path)\n net.set_dict(para_state_dict)\n print('Loaded trained params of model successfully.')\n\n\n shape = [-1, 3, 256, 256]\n\n new_net = net\n\n new_net.eval()\n new_net = paddle.jit.to_static(\n new_net,\n input_spec=[paddle.static.InputSpec(shape=shape, dtype='float32')])\n save_path = os.path.join(args.save_dir, 'model')\n paddle.jit.save(new_net, save_path)\n\n # yml_file = os.path.join(args.save_dir, 'deploy.yaml')\n # with open(yml_file, 'w') as file:\n # transforms = cfg.export_config.get('transforms', [{\n # 'type': 'Normalize'\n # }])\n # data = {\n # 'Deploy': {\n # 'transforms': transforms,\n # 'model': 'model.pdmodel',\n # 'params': 'model.pdiparams'\n # }\n # }\n # yaml.dump(data, file)\n\n print(f'Model is saved in {args.save_dir}.')\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)","repo_name":"txyugood/Neighbor2Neighbor_Paddle","sub_path":"export_model.py","file_name":"export_model.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"74035403456","text":"## Augmentation ##\r\n#Image shifts via the width_shift_range and height_shift_range arguments.\r\n#Image flips via the horizontal_flip and vertical_flip arguments.\r\n#Image rotations via the rotation_range argument\r\n#Image brightness via the brightness_range argument.\r\n#Image zoom via the zoom_range argument.\r\n\r\n\r\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\r\n\r\n# Construct an instance of the ImageDataGenerator class\r\n# Pass the augmentation parameters through the constructor. 
\r\n\r\ndatagen = ImageDataGenerator(\r\n rotation_range=40, # Random rotation between 0 and 40\r\n width_shift_range=0.2, # % shift\r\n height_shift_range=0.2,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True,\r\n fill_mode='nearest') # can also try nearest, constant, reflect, wrap\r\n\r\n\r\n\r\n############## Loading a single image and do the augmentation ##############\r\n\r\n#Using flow method to augment the image\r\n# Loading a sample image \r\n#Can use any library to read images but they need to be in an array form\r\n#If using keras load_img convert it to an array first\r\n\r\nimg = load_img('F:/AI assignment/AI assignment/Convolutional Neural Network_Assign_module_9/Augment_images/images/000001.jpg') # this is a PIL image\r\nx = img_to_array(img) # this is a Numpy array with shape (500, 353, 3)\r\n\r\n# Reshape the input image because ...\r\n#x: Input data to datagen.flow must be Numpy array of rank 4 or a tuple.\r\n#First element represents the number of images\r\nx = x.reshape((1,) + x.shape) # this is a Numpy array with shape (1, 500, 353, 3)\r\n\r\n# the .flow() command below generates batches of randomly transformed images\r\n# and saves the results to the `augmented_output/` directory\r\ni = 0\r\nfor batch in datagen.flow(x, batch_size=1,\r\n save_to_dir='F:/AI assignment/AI assignment/Convolutional Neural Network_Assign_module_9/Augment_images/augmented_output', save_prefix='man_with_puppy', save_format='jpeg'):\r\n i += 1\r\n if i > 4:\r\n break # otherwise the generator would loop indefinitely\r\n \r\n \r\n\r\n####################### Multiple images ######################\r\n\r\n#Manually read each image and create an array to be supplied to datagen via flow method\r\ndataset = []\r\n\r\nimport numpy as np\r\nfrom skimage import io\r\nimport os\r\nfrom PIL import Image\r\n\r\nimage_directory = 'F:/AI assignment/AI assignment/Convolutional Neural Network_Assign_module_9/Augment_images/images/'\r\nSIZE = 400\r\ndataset = []\r\n\r\nmy_images = os.listdir(image_directory)\r\nfor i, image_name in enumerate(my_images):\r\n if (image_name.split('.')[1] == 'jpg'):\r\n image = io.imread(image_directory + image_name)\r\n image = Image.fromarray(image, 'RGB')\r\n image = image.resize((SIZE,SIZE))\r\n dataset.append(np.array(image))\r\n\r\nx = np.array(dataset) # this is a Numpy array with shape (7, 400, 400, 3)\r\n\r\ni = 0\r\nfor batch in datagen.flow(x, batch_size=1,\r\n save_to_dir='F:/AI assignment/AI assignment/Convolutional Neural Network_Assign_module_9/Augment_images/augmented_output', save_prefix='augments', save_format='jpeg'):\r\n i += 1\r\n if i > 27:\r\n break # otherwise the generator would loop indefinitely\r\n \r\n\r\n###################### accessing image in Multiclass problem #####################\r\n# Read directly from the folder structure using flow_from_directory\r\n\r\ni = 0\r\nfor batch in datagen.flow_from_directory(directory='F:/AI assignment/AI assignment/Convolutional Neural Network_Assign_module_9/Augment_images/', \r\n batch_size=16, \r\n target_size=(400, 400),\r\n color_mode=\"rgb\",\r\n save_to_dir='F:/AI assignment/AI assignment/Convolutional Neural Network_Assign_module_9/Augment_images/augmented_output', \r\n save_prefix='augments', \r\n save_format='png'):\r\n i += 1\r\n if i > 4:\r\n break \r\n\r\n#Creates 32 images for each class. 
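\r\n\r\n# A minimal, hypothetical training sketch: the generator above can feed a model\r\n# directly. Assumes a compiled Keras model named 'model' and a 'data/' folder;\r\n# neither is defined in this script.\r\n#train_gen = datagen.flow_from_directory(directory='data/', target_size=(400, 400), batch_size=16, class_mode='categorical')\r\n#model.fit_generator(train_gen, steps_per_epoch=100, epochs=5)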
\r\n \r\n#Once data is augmented, you can use it to fit a model via: fit.generator\r\n#instead of fit()\r\n#model = \r\n#fit model on augmented data\r\n#model.fit_generator(datagen.flow(x))","repo_name":"anandkvvlr/AI_assignment-works","sub_path":"CNN/module_9.py","file_name":"module_9.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6471816742","text":"import re\nimport json\nimport time\nimport datetime\nimport pandas as pd\n\nwith open(\"./AtomicCards.json\") as card_data:\n j_data = json.load(card_data)\n meta_data = j_data[\"meta\"]\n data = j_data[\"data\"]\n keys = list(data.keys())\n\nwith open(\"./SetList.json\") as set_data:\n set_j_data = json.load(set_data)\n set_meta_data = set_j_data[\"meta\"]\n set_data = set_j_data[\"data\"]\n dated = { time.mktime(datetime.datetime.strptime(s[\"releaseDate\"], \"%Y-%m-%d\").timetuple()) \\\n : (s[\"code\"] if \"parentCode\" not in s.keys() else s[\"parentCode\"], s[\"releaseDate\"]) for s in set_data}\n date_list = list(dated.keys())\n date_list.sort()\n sorted_dated = [dated[x] for x in date_list]\n\nINVALID_SETS = [\"PCEL\", \"PRM\"]\nFORBIDDEN_SETS = [\"UST\", \"UNH\", \"UGL\", \"UND\", \"AFR\", \"PCEL\", \"HHO\"]\n\ndef has_forb_set(c):\n ps = c[\"printings\"]\n if len(list(filter(lambda x : x not in FORBIDDEN_SETS, ps))) == 0:\n return True\n return False\n\nFORBIDDEN_TYPES = [\"Dungeon\"]\n\ndef has_forb_type(c):\n if c[\"type\"] in FORBIDDEN_TYPES:\n return True\n return False\n\ndouble_face_re = re.compile(r\"(.+) // (.+)\")\n\ndef get_print(c):\n valid_printings = list(filter(lambda x : len(x) <= 3 and x not in INVALID_SETS, c[\"printings\"]))\n for i in range(len(sorted_dated)):\n c_set = sorted_dated[i]\n if c_set[0] in valid_printings:\n return(c_set)\n\n# remove UN-sets\n# remove Dungeon type\n# check two-face\n\nrows = []\nignore_count = 0\n\nfor key in keys:\n for d in data[key]:\n if not(has_forb_type(d)) and not(has_forb_set(d)):\n if \"side\" in d.keys():\n m = double_face_re.match(d[\"name\"])\n if m:\n name = m.group(1) if d[\"side\"] == \"a\" else m.group(2)\n else:\n name = d[\"name\"]\n else:\n name = d[\"name\"]\n \n printing = get_print(d)\n if printing:\n colours = d[\"colors\"]\n red = \"R\" in colours\n green = \"G\" in colours\n black = \"B\" in colours\n white = \"W\" in colours\n blue = \"U\" in colours\n text = \"{EMPTY}\" if \"text\" not in d.keys() else d[\"text\"]\n text = re.sub(r\" \\({Q} is the untap symbol.\\)\", \"\", text).lower()\n if \"{q}\" in text:\n print(text)\n \n rows.append({\n \"name\" : name.lower(),\n \"printed\" : printing[1],\n \"r\" : int(red),\n \"g\" : int(green),\n \"b\" : int(black),\n \"w\" : int(white),\n \"u\" : int(blue),\n \"text\" : text,\n \"subtypes\" : d[\"subtypes\"],\n \"types\" : d[\"types\"]\n })\n else:\n print(\"Row ignored\")\n\n\ndataframe = pd.DataFrame(rows, columns=[\"name\", \"printed\", \"r\", \"g\", \"b\", \"w\", \"u\", \"text\", \"types\",\"subtypes\"])\ndataframe.to_csv(\"sanitized_cards.csv\", sep=\"|\")\n\n","repo_name":"Pickersgill/cardclassifier","sub_path":"datamine/acrew.py","file_name":"acrew.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15790003641","text":"from .life import *\nfrom .impact import *\n\nclass Enemy(Life):\n def __init__(self, game, animations, attack_timming, attack_end, attack_cooltime, direction = 1, location = vec(0, 
0), speed = 0, attack_point = 0, hp = 0, area = 50, aggro_area= 50, die_mp = 15):\n self.groups = game.all_sprites, game.visibles, game.lifes, game.enemies\n Life.__init__(self, game, self.groups, animations, direction=direction, location=location, speed = speed, attack_point= attack_point, hp = hp)\n self.area = area\n self.aggro_area = aggro_area\n self.attack_timming = attack_timming\n self.attack_end = attack_end\n self.attack_cooltime = attack_cooltime\n self.start_time = 0\n self.allow = True\n self.none_operate = ['공격', '바닥충돌']\n self.die_mp = die_mp\n self.fly_states = ['부유', '추락']\n\n def movestate_update(self):\n if self.state != '죽음':\n if self.floor_contact():\n if not self.operation:\n if self.velocity.x == 0:\n if not self.state in self.none_operate:\n self.state_set('통상')\n self.operation = True\n if self.walk_control_l:\n self.walk_l()\n if self.walk_control_r:\n self.walk_r()\n if not (self.walk_control_l or self.walk_control_r):\n self.state_set('통상')\n self.friction_switch = False\n else:\n if self.friction_switch:\n self.velocity.x -= ((self.velocity.x > 0) * 2 - 1) * friction * TIME\n if -20 < (self.velocity.x) < 20:\n self.velocity.x = 0\n else:\n self.friction_switch = False\n if not self.state == '부유':\n if self.state == '추락':\n self.state_set('바닥충돌')\n else:\n if not self.state in self.none_operate:\n if self.walk_control_l or self.walk_control_r:\n self.state_set('걷기')\n else:\n self.state_set('통상')\n self.move()\n\n\n if self.velocity.y > 0 and self.state != '부유':\n self.velocity.y = 0\n\n else:\n self.velocity += self.acceleration * TIME\n if not self.state in self.fly_states:\n if self.velocity.y > 0:\n self.state_set('추락')\n else:\n self.state_set('부유')\n\n if self.ceiling_contact() and self.velocity.y < 0:\n self.velocity.y = 0\n \n self.rect.center += self.velocity * TIME\n\n self.physics_update()\n\n if self.state == '공격' or self.state == '넉백':\n if self.game.player.rect.centerx < self.rect.centerx:\n self.walk_control_l = True\n self.walk_control_r = False\n else:\n self.walk_control_r = True\n self.walk_control_l = False\n\n if self.state == '공격' and self.attack_end >= self.animation.p_frame >= self.attack_timming:\n self.make_attack()\n \n if not self.allow and ((time.time() - self.start_time) > self.attack_cooltime):\n self.allow = True\n \n if self.die_check():\n self.state_set('죽음')\n\n\n def move(self):\n if self.operation:\n if abs(self.game.player.rect.centerx - self.rect.centerx) < self.aggro_area:\n if self.game.player.rect.centerx < self.rect.centerx:\n if not self.walk_control_l:\n self.walk_r_cancel()\n self.walk_l()\n else:\n if not self.walk_control_r:\n self.walk_l_cancel()\n self.walk_r()\n else: \n self.walk_r_cancel()\n self.walk_l_cancel()\n\n if abs(self.game.player.rect.centerx - self.rect.centerx) < self.area:\n self.attack()\n\n def update(self):\n self.animation_end()\n self.animation_update()\n self.movestate_update()\n self.end_damaged()\n \n def animation_end(self):\n if self.animation.end_check():\n if self.state in self.none_operate:\n self.operation = True\n\n if self.state == '죽음':\n self.delete()\n self.game.player.heal_mp(self.die_mp)\n \n elif self.state == '바닥충돌':\n if self.walk_control_l or self.walk_control_r:\n self.state_set('걷기')\n else:\n self.state_set('통상')\n \n elif self.state == '공격':\n self.state_set('통상')\n if self.walk_control_l:\n self.walk_l()\n if self.walk_control_r:\n self.walk_r()\n if not (self.walk_control_l or self.walk_control_r):\n self.state_set('통상')\n\n \n def attack(self):\n if self.operation 
and self.allow:\n self.operation_cancel()\n self.state_set('공격')\n self.allow = False\n self.start_time = time.time()\n \n def floated(self, v):\n self.state_set('부유')\n self.operation_cancel()\n self.velocity += v\n \n def make_attack(self):\n pass","repo_name":"jwcho2005/Hihi","sub_path":"class_data/classes/enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":5934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3306262237","text":"import numpy as np\nfrom tqdm import tqdm\n\nfrom maths import deg2rad, norm\nfrom rays import Ray\n\nRAYS_PER_PIXEL = 10\n\n\nclass Camera:\n def __init__(self, pos, dir, fov, resX, resY, clip_dst=0.1):\n \"\"\"\n camera coord space:\n --------> X\n |\n |\n |\n Y V z into screen\n\n\n \"\"\"\n self.pos = np.array(pos)\n self.dir = norm(np.array(dir))\n self.fov = deg2rad(fov)\n self.resX = resX\n self.resY = resY\n self.fovX = self.fov\n self.fovY = 2 * np.arctan2(np.tan(self.fovX / 2), self.resX / self.resY)\n self.clip_dst = clip_dst\n\n def set_direction(self, direction):\n self.dir = norm(direction)\n\n def get_ray_dir(self, px, py):\n # Dimensions of near clip plane\n clip_plane_X = 2 * np.tan(self.fovX / 2) * self.clip_dst\n clip_plane_Y = 2 * np.tan(self.fovY / 2) * self.clip_dst\n\n # Center camera view\n px_offset = px - self.resX // 2\n py_offset = py - self.resY // 2\n\n pixel_pos_cam_space = np.array(\n [\n clip_plane_X * px_offset / self.resX,\n clip_plane_Y * py_offset / self.resY,\n self.clip_dst,\n ]\n )\n pixel_pos_world_space = norm(\n np.matmul(self.cam_to_world_matrix(), pixel_pos_cam_space)\n )\n\n return pixel_pos_world_space\n\n def heading(self):\n dir_x = self.dir[0]\n dir_y = self.dir[1]\n # +x = 'north' = 0 rad\n heading = np.arctan2(dir_y, dir_x)\n\n return heading\n\n def elevation(self):\n dir_z = self.dir[2]\n # vertical up = pi/2, horizontal = 0, etc.\n return np.arcsin(dir_z)\n\n def cam_to_world_matrix(self):\n cam_x_in_world = np.array(\n [-np.sin(self.heading()), np.cos(self.heading()), 0.0]\n )\n cam_y_in_world = norm(\n np.array(\n [\n -self.dir[0] * np.sin(self.elevation()),\n -self.dir[1] * np.sin(self.elevation()),\n np.cos(self.elevation()),\n ]\n )\n )\n cam_z_in_world = self.dir\n\n matrix = np.column_stack([cam_x_in_world, cam_y_in_world, cam_z_in_world])\n\n return matrix\n\n def world_to_cam_matrix(self):\n return np.linalg.inv(self.cam_to_world_matrix())\n\n def draw(self, scene):\n pixel_data = np.zeros((self.resX, self.resY, 3))\n for n in tqdm(range(RAYS_PER_PIXEL)):\n for px in range(self.resX):\n for py in range(self.resY):\n ray = Ray(self.pos, self.get_ray_dir(px, py))\n pixel_data[px, py, :] += ray.trace(scene)\n\n return pixel_data / pixel_data.max()\n\n\nif __name__ == \"__main__\":\n cam = Camera([0, 0, 0], [1, 0, 0], 90, 800, 600)\n print(cam.elevation())\n print(cam.heading())\n print(cam.cam_to_world_matrix())\n print(cam.world_to_cam_matrix())\n print()\n print(cam.get_ray_dir(401, 301))\n print(cam.dir)\n","repo_name":"franklinscudder/RayTracer","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"22937180238","text":"import pyautogui as pg\nimport time\nimport webbrowser\n# while True:\n# time.sleep(4)\n# pyautogui.typewrite('Hello! 
Motherfu**ing :D')\n# time.sleep(2)\n# pyautogui.press('enter')\n\n# time.sleep(2)\n# print(pg.position())\n# pg.moveTo(562, 755, 2)\n# pg.leftClick()\n\nurl = \"https://www.facebook.com/messages/t/100017290625742\"\nwebbrowser.get().open(url)\nprint(pg.position())\n# pg.moveTo(970, 1079, 2)\n# pg.moveTo(1026, 1052, 2)\n# pg.leftClick(1026, 1052, 1)\n# pg.keyDown('ctrl')\n# pg.press('t')\n# pg.keyUp('ctrl')\n# pg.moveTo(661, 479)\n# pg.leftClick()\n# pg.typewrite(\"hello bạn\")\n# pg.press('enter')\ntime.sleep(10)\n\nfor i in range(6):\n pg.keyDown('alt')\n for j in range(i):\n pg.press('tab')\n pg.press('enter')\n pg.keyUp('alt')\n pg.moveTo(959, 1026)\n pg.leftClick()\n pg.typewrite(\"hello bạn\")\n pg.press('enter')\n #pg.hotkey('alt', 'tab', 'enter')\n","repo_name":"nxhawk/AI-helper","sub_path":"function/auto.py","file_name":"auto.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"33363039108","text":"\"\"\"\nA general instrument class that returns a status for each command sent\nor recieved from its instrument. This allows it to be used with the \"com\"\nfunction in the main algorithm, when visa fails it does not halt the\nwhole program but reports the failure instead.\n\"\"\"\nimport visa\nimport time\nclass INSTRUMENT(object):\n \n \"\"\" This instrument class really is only a function to read and write to\n some instrument, using pyvisa. It has a general 'dictionary' to which\n more key word arguments can be added, and more sub functions can be\n used to make instruments specific. It can be used very generally, as just a semd\n and recieve class which also wraps each communication with a check to see if the\n communication was sucesful. \"\"\"\n\n def __init__(self,inst_bus,letter, **kwargs):\n self.com = {'label':'','address':'', 'Ranges':[], 'measure_seperation':'0', 'NoError':'',\\\n 'reset':'','status':'','init':'','Make_Safe':'', 'error':'', \\\n 'SettleTime':'0', 'DCVRange':'', 'SetVoltage':'', 'operate':'', \\\n 'standby':'','MeasureSetup':'','SingleMsmntSetup':''} #command dictionary\n self.com.update(kwargs) #update dictionary to include all sent commands.\n self.label = self.com[\"label\"]\n self.com.update(label=str(letter)+str(kwargs['label']) )\n self.range = eval(self.com['Ranges']) #Use eval here or string operations? Like split multiple times.\n self.address = self.com['address']\n #ensure values are ints\n try:\n self.com_settle_time = float(self.com['SettleTime'])\n except:\n print(\"settle time made into 1 on \"+str(self.com['label']+\", from unreadable: \"+str(self.com['SettleTime'])))\n self.com_settle_time = 1\n try:\n self.measure_seperation = float(self.com['measure_seperation'])\n except:\n print(\"measure seperation made into 0 on \"+str(self.com['label']+\", from unreadable: \"+str(self.com['measure_seperation'])))\n self.measure_seperation = 0\n \n self.inst_bus = inst_bus #save the instrument bus, either visa or the simulated visa\n\n def create_instrument(self):\n\n \"\"\"\n Needs to be called prior to any commands being sent or recieved.\n Creates the visa instrument object, to which commands will be sent\n and recieved. 
\n \"\"\"\n\n success = False\n string = str(time.strftime(\"%Y.%m.%d.%H.%M.%S, \", time.localtime()))+' Creating '+self.label+': '\n try:\n self.rm = self.inst_bus.ResourceManager()\n self.inst = self.rm.open_resource(self.address)\n string = string+\"success\"\n success = True\n except: #There are a number of issues visa might raise?\n string = string+\"visa failed at address \"+str(self.address)\n return [success,None,string]\n \n def send(self,command):\n \"\"\"\n From here a command is sent to the instrument, surrounded by the try block.\n If the command fails, it does not halt the problem but sends back a failed status.\n \"\"\"\n success = False #did we read successfully\n #string to be printed and saved in log file\n string = str(time.strftime(\"%Y.%m.%d.%H.%M.%S, \", time.localtime()))+' '+self.label+': ' \n\n try:\n self.inst.write(command)\n print(command)\n time.sleep(self.com_settle_time)\n \n string = string+str(command)\n success = True\n except self.inst_bus.VisaIOError:\n string = string+\"visa failed\"\n return [success,None,string]\n \n def read_instrument(self):\n \"\"\"\n Similar to the send function, but reads and expects a return value too.\n \"\"\"\n val = '0' #value to be returned, string-type like instruments\n success = False #did we read successfully\n #string to be printed and saved in log file\n string = str(time.strftime(\"%Y.%m.%d.%H.%M.%S, \", time.localtime()))+' reading '+self.label+': ' \n try:\n time.sleep(self.measure_seperation)\n val = self.inst.read()\n string = string+str(val)\n success = True\n except self.inst_bus.VisaIOError:\n string = string+\"visa failed\"\n return [success,val,string]\n\n def initialise_instrument(self):\n \"\"\"A specific instrument command to the ref-step algorithm,\ninitialises instruments with a set of commands\"\"\"\n success,nothing,string = self.send(self.com['init'])\n \n \n \n return [success,nothing,string]\n\n def make_safe(self):\n \"\"\"specific to the ref-step algorithm, should turn instruments off\"\"\"\n success,nothing,string = self.send(self.com['Make_Safe'])\n\n return [success,nothing,string]\n \n def inst_status(self):\n \"\"\"specific to the ref-step algorithm, used for reading status\"\"\"\n success,nothing,string = self.send(self.com['status'])\n\n return [success,nothing,string]\n\n def reset_instrument(self):\n \"\"\"specific to the ref-step algorithm, reset routine\"\"\"\n success,nothing,string = self.send(self.com['reset'])\n\n return [success,nothing,string]\n \n def set_DCrange(self, value):\n \"\"\"specific to the ref-step algorithm, setting a DC voltage\"\"\"\n \n \n \n line = str(self.com['DCVRange'])\n line = line.replace(\"$\",str(value))\n out = self.send(line)\n \n return out\n \n def query_error(self):\n \"\"\"specific to the ref-step algorithm, reading the instruments error\"\"\"\n success,nothing,string = self.send(self.com['error'])\n\n return [success,nothing,string]\n \n def set_DCvalue(self, value):\n \"\"\"specific to the ref-step algorithm, set a DC value for sources\"\"\"\n line = str(self.com['SetVoltage'])\n line = line.replace('$V',str(value)+'V')\n out = self.send(line)\n return out\n\n def Operate(self):\n \"\"\"specific to the ref-step algorithm, operates sources\"\"\"\n success,nothing,string = self.send(self.com['operate'])\n\n return [success,nothing,string]\n \n def Standby(self):\n \"\"\"specific to the ref-step algorithm, puts sources on standby\"\"\"\n success,nothing,string = self.send(self.com['standby'])\n\n return [success,nothing,string]\n\n def 
MeasureSetup(self):\n \"\"\"specific to the ref-step algorithm, pre measurement sequence set up\"\"\"\n success,nothing,string = self.send(self.com['MeasureSetup'])\n\n return [success,nothing,string]\n\n def SingleMsmntSetup(self):\n \"\"\"specific to the ref-step algorithm, should any commands be sent prior to an individual measurement\"\"\"\n success,nothing,string = self.send(self.com['SingleMsmntSetup'])\n\n return [success,nothing,string]\n\n","repo_name":"AtillaTheFun/RefStep","sub_path":"Sphinx_documentation_attempt/modules/gpib_inst.py","file_name":"gpib_inst.py","file_ext":"py","file_size_in_byte":6926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"26041130959","text":"# coding: utf-8\n\nimport pickle\nimport h5py\nimport torch\nimport torch.utils.data as data\nfrom args import train_caption_pkl_path\nfrom args import feature_h5_path, feature_h5_feats\n\n\nclass V2TDataset(data.Dataset):\n '''\n Video to Text数据集的描述类,用来加载和提供数据\n 支持MSR-VTT和MSVD数据集\n 构造的时候需要以下输入:\n 1. 提供文本特征的pkl文件\n 2. 包含视频帧信息的h5文件\n 提供文本和视频h5特征,以及根据caption的id来返回数据\n '''\n\n def __init__(self, cap_pkl, feature_h5):\n with open(cap_pkl, 'rb') as f:\n self.captions, self.lengths, self.video_ids = pickle.load(f)\n h5_file = h5py.File(feature_h5, 'r')\n self.video_feats = h5_file[feature_h5_feats]\n\n def __getitem__(self, index):\n '''\n 返回一个训练样本对(包含视频frame特征和对应的caption)\n 根据caption来找对应的video,所以要求video存储的时候是按照id升序排列的\n '''\n caption = self.captions[index]\n length = self.lengths[index]\n video_id = self.video_ids[index]\n video_feat = torch.from_numpy(self.video_feats[video_id])\n return video_feat, caption, length, video_id\n\n def __len__(self):\n return len(self.captions)\n\n\nclass VideoDataset(data.Dataset):\n '''\n 仅提供视频特征以及相应ID的数据加载类,\n 之所以单独提供这个类是希望加速评价指标的计算\n '''\n def __init__(self, eval_range, feature_h5):\n self.eval_list = tuple(range(*eval_range))\n h5_file = h5py.File(feature_h5, 'r')\n self.video_feats = h5_file[feature_h5_feats]\n\n def __getitem__(self, index):\n '''\n 返回一个训练样本对(包含视频特征和对应的ID)\n '''\n video_id = self.eval_list[index]\n video_feat = torch.from_numpy(self.video_feats[video_id])\n return video_feat, video_id\n\n def __len__(self):\n return len(self.eval_list)\n\n\ndef train_collate_fn(data):\n '''\n 用来把多个数据样本合并成一个minibatch的函数\n '''\n # 根据video的长度对数据进行排序\n data.sort(key=lambda x: x[-1], reverse=True)\n\n videos, captions, lengths, video_ids = zip(*data)\n\n # 把视频合并在一起(把2D Tensor的序列变成3D Tensor)\n videos = torch.stack(videos, 0)\n\n # 把caption合并在一起(把1D Tensor的序列变成一个2D Tensor)\n captions = torch.stack(captions, 0)\n return videos, captions, lengths, video_ids\n\n\ndef eval_collate_fn(data):\n '''\n 用来把多个数据样本合并成一个minibatch的函数\n '''\n data.sort(key=lambda x: x[-1], reverse=True)\n\n videos, video_ids = zip(*data)\n\n # 把视频合并在一起(把2D Tensor的���列变成3D Tensor)\n videos = torch.stack(videos, 0)\n\n return videos, video_ids\n\n\ndef get_train_loader(cap_pkl, feature_h5, batch_size=10, shuffle=True, num_workers=3, pin_memory=True):\n v2t = V2TDataset(cap_pkl, feature_h5)\n data_loader = torch.utils.data.DataLoader(dataset=v2t,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=train_collate_fn,\n pin_memory=pin_memory)\n return data_loader\n\n\ndef get_eval_loader(cap_pkl, feature_h5, batch_size=200, shuffle=False, num_workers=1, pin_memory=False):\n vd = VideoDataset(cap_pkl, feature_h5)\n data_loader = torch.utils.data.DataLoader(dataset=vd,\n batch_size=batch_size,\n shuffle=shuffle,\n 
num_workers=num_workers,\n collate_fn=eval_collate_fn,\n pin_memory=pin_memory)\n return data_loader\n\n\nif __name__ == '__main__':\n train_loader = get_train_loader(train_caption_pkl_path, feature_h5_path)\n print(len(train_loader))\n d = next(iter(train_loader))\n print(d[0].size())\n print(d[1].size())\n print(len(d[2]))\n","repo_name":"arieshx/ssta_video_caption","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"74588075774","text":"import pymongo\n\n\nclass LocalData:\n\n def __init__(self, host, port, dbname):\n # self.client = pymongo.MongoClient('mongodb://%s:%s@%s:%d/%s' % (settings.\n # MONGO_USER, settings.MONGO_PWD,\n # host, port,\n # settings.\n # MONGO_AUTHDB))[dbname]\n self.client = pymongo.MongoClient(host, port,\n socketTimeoutMS=20000)[dbname]\n self.collection = self.client['test_data']\n\n\nif __name__ == '__main__':\n LocalData('47.100.39.147', 9017, 'lilytest').collection.insert({'time': '2018', 'time2': '2019'})\n","repo_name":"BockeyE/pyprac1","sub_path":"Functions/DBConnector/pmongo_test.py","file_name":"pmongo_test.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6470805302","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"This module implements SqueezeNet models.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"SqueezeNet1_0\", \"SqueezeNet1_1\",\n]\n\nfrom abc import ABC\n\nimport torch\n\nfrom mon.coreml import layer as mlayer, model as mmodel\nfrom mon.foundation import pathlib\nfrom mon.globals import MODELS\nfrom mon.vision.classify import base\n\n_current_dir = pathlib.Path(__file__).absolute().parent\n\n\n# region Model\n\nclass SqueezeNet(base.ImageClassificationModel, ABC):\n \"\"\"SqueezeNet.\n \n See Also: :class:`mon.vision.enhance.base.ImageEnhancementModel`\n \"\"\"\n \n configs = {}\n zoo = {}\n map_weights = {}\n \n def load_weights(self):\n \"\"\"Load weights. 
It only loads the intersection layers of matching keys\n and shapes between the current model and weights.\n \"\"\"\n if isinstance(self.weights, dict) \\\n and self.weights[\"name\"] in [\"imagenet\"]:\n state_dict = mmodel.load_state_dict_from_path(\n model_dir=self.zoo_dir, **self.weights\n )\n model_state_dict = self.model.state_dict()\n \"\"\"\n for k in self.model.state_dict().keys():\n print(f\"\\\"{k}\\\": \")\n for k in state_dict.keys():\n print(f\"\\\"{k}\\\"\")\n \"\"\"\n for k, v in state_dict.items():\n if \"features.\" in k:\n k = k.replace(\"features.\", \"\")\n else:\n continue\n model_state_dict[k] = v\n if self.weights[\"num_classes\"] == self.num_classes:\n model_state_dict[\"13.conv.bias\"] = state_dict[\"classifier.1.bias\"]\n model_state_dict[\"13.conv.weight\"] = state_dict[\"classifier.1.weight\"]\n self.model.load_state_dict(model_state_dict)\n else:\n super().load_weights()\n\n\n@MODELS.register(name=\"squeezenet-1.0\")\nclass SqueezeNet1_0(SqueezeNet):\n \"\"\"SqueezeNet-1.0.\n \n See Also: :class:`mon.vision.enhance.base.ImageEnhancementModel`\n \"\"\"\n \n configs = {}\n zoo = {\n \"imagenet\": {\n \"name\" : \"imagenet\",\n \"path\" : \"https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth\",\n \"file_name\" : \"squeezenet-1.0-imagenet.pth\",\n \"num_classes\": 1000,\n },\n }\n map_weights = {}\n \n def __init__(self, *args, **kwargs):\n kwargs |= {\n \"config\" : \"squeezenet-1.0.yaml\",\n \"name\" : \"squeezenet\",\n \"variant\": \"squeezenet-1.0\"\n }\n super().__init__(*args, **kwargs)\n\n\n@MODELS.register(name=\"squeezenet-1.1\")\nclass SqueezeNet1_1(SqueezeNet):\n \"\"\"SqueezeNet-1.1.\n \n See Also: :class:`mon.vision.enhance.base.ImageEnhancementModel`\n \"\"\"\n \n configs = {}\n zoo = {\n \"imagenet\": {\n \"name\" : \"imagenet\",\n \"path\" : \"https://download.pytorch.org/models/squeezenet1_1-b8a52dc0.pth\",\n \"file_name\" : \"squeezenet-1.1-imagenet.pth\",\n \"num_classes\": 1000,\n },\n }\n map_weights = {}\n \n def __init__(self, *args, **kwargs):\n kwargs |= {\n \"config\" : \"squeezenet-1.1.yaml\",\n \"name\" : \"squeezenet\",\n \"variant\": \"squeezenet-1.1\"\n }\n super().__init__(*args, **kwargs)\n# endregion\n","repo_name":"phlong3105/deepacov2","sub_path":"src/mon/vision/classify/squeezenet.py","file_name":"squeezenet.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"69823912255","text":"\"\"\"\nGiven a value N, if we want to make change for N cents, \nand we have infinite supply of each of S = { S1, S2, .. , Sm} valued coins,\nhow many ways can we make the change? The order of coins doesn’t matter.\n\nFor example, \nFor N = 4 and S = {1,2,3}, there are four solutions: {1,1,1,1},{1,1,2},{2,2},{1,3}. \nSo output should be 4. \nFor N = 10 and S = {2, 5, 3, 6}, there are five solutions: {2,2,2,2,2}, {2,2,3,3}, {2,2,6}, {2,3,5} and {5,5}. 
\nSo output should be 5.\n\nTo count the total number of solutions, we can divide all set solutions into two sets.\n1) Solutions that do not contain mth coin (or Sm).\n2) Solutions that contain at least one Sm.\n\nLet count(S[], m, n) be the function to count the number of solutions, \nthen it can be written as sum of count(S[], m-1, n) and count(S[], m, n-Sm).\n\nwhere m is the size of coin set.\n\"\"\"\n\n\ndef coin_change(coin_set, m, sum):\n # We need n+1 rows as the table is constructed\n # in bottom up manner using the base case 0 value\n # case (n = 0)\n table = [[0 for x in range(m)] for y in range(sum + 1)]\n\n # Fill the entries for 0 value case (n = 0)\n for i in range(m):\n table[0][i] = 1\n\n # Fill rest of the table entries in bottom up manner\n for i in range(1, sum + 1):\n for j in range(m):\n # Count of solutions including S[j]\n x = table[i - coin_set[j]][j] if i - coin_set[j] >= 0 else 0\n # Count of solutions excluding S[j]\n y = table[i][j - 1] if j >= 1 else 0\n # total count\n table[i][j] = x + y\n\n return table[sum][m - 1]\n\n\ndef coin_change_recursive(coin_set, m, sum):\n\n # If n is 0 then there is 1\n # solution (do not include any coin)\n if sum == 0:\n return 1\n\n # If n is less than 0 then no\n # solution exists\n if sum < 0:\n return 0\n\n # If there are no coins and n\n # is greater than 0, then no\n # solution exist\n if m <= 0 and sum > 0:\n return 0\n\n # count is sum of solutions (i)\n # including S[m-1] (ii) excluding S[m-1]\n return coin_change_recursive(coin_set, m - 1, sum) + coin_change_recursive(coin_set, m, sum - coin_set[m - 1])\n\n\nif __name__ == '__main__':\n coin_list = [1, 2, 3]\n m = len(coin_list)\n sum = 4\n print(coin_change(coin_list, m, sum))\n","repo_name":"liquidpie/algorithms-py","sub_path":"dynamic_programming/coin_change_permutations.py","file_name":"coin_change_permutations.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71587430016","text":"from torch import nn\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom deep_shuffling.dataset import create_playlist_dataset, PlaylistDataset\nfrom deep_shuffling.neuralsort import NeuralSort\nfrom deep_shuffling.softsort import SoftSort\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nmatplotlib.use('qtagg')\nn_batch_size = 1\nepochs = 32\nmaximum_playlist_length = 1024\nn_embed = 16\nn_heads = 8\ndevice = torch.device('cuda')\ntorch.cuda.manual_seed(1337)\ntorch.random.manual_seed(1337)\n\n\ndef project_p(P_hat):\n dim = 512\n P = torch.zeros_like(P_hat, device='cuda')\n b_idx = torch.arange(1).repeat([1, dim]).view(dim, 1).transpose(\n dim0=1, dim1=0).flatten().type(torch.cuda.LongTensor)\n r_idx = torch.arange(dim).repeat(\n [1, 1]).flatten().type(torch.cuda.LongTensor)\n c_idx = torch.argmax(P_hat, dim=-1).flatten() # this is on cuda\n brc_idx = torch.stack((b_idx, r_idx, c_idx))\n\n P[brc_idx[0], brc_idx[1], brc_idx[2]] = 1\n P_hat = (P - P_hat).detach() + P_hat\n return P_hat\n\n\nclass MultiheadAttentionBlock(nn.Module):\n def __init__(self, in_features: int, n_embed: int, n_heads: int):\n super().__init__()\n self.query = nn.Linear(in_features=in_features, out_features=n_embed, device=device)\n self.key = nn.Linear(in_features=in_features, out_features=n_embed, device=device)\n self.value = nn.Linear(in_features=in_features, out_features=n_embed, device=device)\n self.multiheadattention = nn.MultiheadAttention(embed_dim=n_embed,\n 
num_heads=n_heads,\n dropout=0,\n batch_first=True,\n device=device)\n\n def forward(self, x, mask):\n q = self.query(x)\n k = self.key(x)\n v = self.value(x)\n x = self.multiheadattention(query=q,\n key=k,\n value=v,\n key_padding_mask=mask,\n need_weights=False,\n attn_mask=None,\n average_attn_weights=True)\n return x\n\n\nclass ShuffleModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.b1 = MultiheadAttentionBlock(in_features=2, n_embed=n_embed, n_heads=n_embed // 2)\n self.relu1 = nn.ReLU()\n self.b2 = MultiheadAttentionBlock(in_features=n_embed, n_embed=n_embed, n_heads=n_embed // 2)\n self.relu2 = nn.ReLU()\n self.b3 = MultiheadAttentionBlock(in_features=n_embed, n_embed=1, n_heads=1)\n self.sort = SoftSort()#NeuralSort(tau=1)\n self.l1 = nn.Linear(in_features=2, out_features=n_embed, bias=False, device=device)\n self.l2 = nn.Linear(in_features=n_embed, out_features=1, bias=False, device=device)\n\n\n def forward(self, inp: dict[str, torch.tensor]):\n # x: {\"constant\", \"must_vary\"}\n xc = inp[\"constant\"]\n mask: torch.Tensor = inp[\"mask\"]\n #x, _ = self.b1(xc, mask)\n x = self.l1(xc)\n x = self.relu1(x)\n x = self.l2(x)\n #x, _ = self.b2(x, mask)\n #x = self.relu2(x)\n #x, _ = self.b3(x, mask)\n B, N, _ = x.shape\n x = torch.reshape(x, shape=(B, N))\n x = torch.masked_fill(x, mask=mask, value=-torch.inf)\n x = self.sort(x)\n return x\n\n\nclass PermutationMatrixLoss(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, M: torch.Tensor):\n B, N, N = M.shape\n M2 = torch.square(M)\n M_abs = torch.abs(M)\n column_loss = torch.sum(torch.sum(M_abs, dim=2, keepdim=True) - torch.pow(torch.sum(M2, dim=2, keepdim=True), exponent=0.5), dim=1, keepdim=True)\n row_loss = torch.sum(torch.sum(M_abs, dim=1, keepdim=True) - torch.pow(torch.sum(M2, dim=1, keepdim=True), exponent=0.5), dim=2, keepdim=True)\n loss = torch.squeeze(column_loss + row_loss)/N\n return loss\n\n\nclass ShuffleLoss(nn.Module):\n def __init__(self, lambd: float):\n super(ShuffleLoss, self).__init__()\n self.avg_pooling = torch.nn.AvgPool2d(kernel_size=(3, 1))\n self.permutation_matrix_loss = PermutationMatrixLoss()\n self.lambd = lambd\n\n def forward(self, permutation_matrix, features):\n features_sorted = torch.bmm(permutation_matrix, features[:, :])\n avg_feats = torch.sum(features_sorted, dim=-2)\n shifted_features = torch.roll(features_sorted, -1, -2)\n pooling = self.avg_pooling(features_sorted)\n noise_squared_diff = (features_sorted[:, :-1, :] - shifted_features[:, :-1, :]) ** 2\n noise_loss = torch.sum(noise_squared_diff)**0.5\n pooling_squared_diff = (pooling - avg_feats) ** 2\n global_level_loss = torch.sum(pooling_squared_diff)\n #permutation_matrix_loss = self.permutation_matrix_loss(permutation_matrix)\n loss = noise_loss + global_level_loss# + self.lambd*permutation_matrix_loss\n return loss\n\n\ndef train(model: nn.Module, dataset: PlaylistDataset):\n data_loader = DataLoader(dataset=dataset,\n batch_size=1)\n criterion = ShuffleLoss(lambd=1)\n optimizer = torch.optim.AdamW(model.parameters(),\n lr=0.01, )\n torch.autograd.set_detect_anomaly(True)\n for i in range(1000):\n for playlist in data_loader:\n criterion.zero_grad()\n out: torch.Tensor = model(playlist)\n # print(torch.argmax(out[0, 0, :]))\n loss = criterion(out, playlist[\"constant\"])\n print(loss.item())\n loss.backward()\n optimizer.step()\n return model\n\n\ndef apply_model(playlist, model):\n n = playlist[\"n\"]\n B, N, D = playlist[\"constant\"].shape\n p = model(playlist)\n p_star = 
project_p(p)[0, :, :]\n print(p_star)\n d_star = p_star @ playlist[\"constant\"][0, :n, :]\n\n d_line = (d_star[:, di] for di in range(D))\n for line in d_line:\n l = line.tolist()\n plt.scatter(list(range(n)), l)\n plt.show()\n\n\nif __name__ == \"__main__\":\n model = ShuffleModel()\n dataset = create_playlist_dataset()\n model = train(model=model, dataset=dataset)\n data_loader = DataLoader(dataset=dataset,\n batch_size=1)\n for playlist in data_loader:\n apply_model(playlist, model=model)\n","repo_name":"AdamSkarboJonsson/deep-playlist-shuffling","sub_path":"supervised_learning/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2360555174","text":"from helpers import cmd\nimport os.path\n\n# Have to go one folder up\ncmd.setBase(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\ncommands = [\n\t# Django\n\tcmd.relative(\"\"),\n\t[\"git\", \"submodule\", \"init\"],\n\t[\"git\", \"submodule\", \"update\"],\n\tcmd.relative(\"server/dobby\"),\n\t[\"git\", \"submodule\", \"init\"],\n\t[\"git\", \"submodule\", \"update\"],\n\tcmd.relative(\"server/djangoserver\"),\n\t[\"python\", \"manage.py\", \"syncdb\"],\n\t[\"python\", \"load_default_data.py\"]\n]\n\ndef run():\n\tcmd.run(commands)\n\n# And run.\nif __name__ == \"main\":\n\trun()","repo_name":"ialexi/Contacts","sub_path":"commands/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"79"} +{"seq_id":"72207716736","text":"\"\"\"day12\"\"\"\n\nfrom collections import deque\nimport numpy as np\n\nDEBUG, TEST = False, False\nDAY = \"12\"\n\n\nclass Graph:\n def __init__(self, graph: np.ndarray):\n self.graph = graph\n self.rows = graph[:, 0].size\n self.cols = graph[0].size\n self.edges = np.array([[0 for c in range(self.cols)] for r in range(self.rows)])\n\n def addEdge(self, r: int, c: int, height: int):\n self.edges[r, c] = height\n\n def bfs(self, start: str, target: str):\n queue: deque[tuple[tuple[int, int], int]] = deque()\n visited = set()\n # using array of vectors instead of tuples for that sweet sweet vector addition\n dirs = [\n np.array([0, 1]),\n np.array([0, -1]),\n np.array([1, 0]),\n np.array([-1, 0]),\n ]\n\n for r, row in enumerate(self.graph):\n for c, col in enumerate(row):\n if col == start:\n queue.appendleft(((r, c), 0))\n\n while queue:\n node, height = queue.pop()\n\n if self.graph[node] == target:\n return height\n\n if node not in visited:\n visited.add(node)\n\n for d in dirs:\n neighbor = node + d\n if 0 <= neighbor[0] < self.rows and 0 <= neighbor[1] < self.cols:\n if self.edges[tuple(neighbor)] <= 1 + self.edges[node]:\n queue.appendleft((tuple(neighbor), height + 1))\n\n\ndef solve(graph, start):\n heightMap = {letter: i for i, letter in enumerate(\"abcdefghijklmnopqrstuvwxyz\")}\n heightMap[\"S\"] = 0\n heightMap[\"E\"] = 25\n\n g = Graph(np.array(graph))\n\n for r, row in enumerate(graph):\n for c, col in enumerate(row):\n g.addEdge(r, c, heightMap[col])\n\n print(g.bfs(start, \"E\"))\n\n\nif __name__ == \"__main__\":\n # TEST = True\n # DEBUG = True\n datasets = [f\"./day{DAY}/day{DAY}input.txt\", f\"./day{DAY}/testday{DAY}input.txt\"]\n filename = datasets[1] if TEST else datasets[0]\n with open(file=filename, mode=\"r\", encoding=\"utf8\") as file:\n lines = [list(line.strip()) for line in file.readlines()]\n solve(lines, \"S\")\n solve(lines, 
\"a\")\n","repo_name":"m-ttaylor/adventofcode2022","sub_path":"day12/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71577294334","text":"from tasmanium import logger\nfrom tasmanium.registrars import Given\n\nl = logger.getLogger(__name__)\n\n\n@Given(\"a user which exists\")\ndef create_user_that_exists(context):\n l.info(f\"hello from the step 'Given a user which exists' - his data are {context.data_table} \")\n\n if context.data_table[0]['name'] == 'First B butterfly':\n context.attach_plaintext(data=\"A failing file in a failing test.\")\n assert False, \"assertion failed intentionally\"\n\n context.attach_plaintext(filename=\"success.txt\", data=\"this step succeeded!\", description=\"Some description of this file.\")\n context.attach_plaintext(filename=\"success2.txt\", data=\"This step succeeded as well!\")\n\n with open(\"D:/cool-crab.png\", \"rb\") as f:\n context.step.attach_image(filename=\"cool-crab.png\", data=f.read(), description=\"We can do pictures as well!\")\n\n context.attach_plaintext(filename=\"success3.txt\", data=\"Hi there! I am inside the file wee!\",\n description=\"Attach files anytime inside the step!\")\n\n with open(\"D:/lipsum.txt\", \"r\") as f:\n context.attach_plaintext(filename=\"lipsum.txt\", data=f.read(), description=\"This is a long file, check it out.\")\n\n # browser = webdriver.Remote(\n # desired_capabilities=webdriver.DesiredCapabilities.FIREFOX,\n # command_executor='http://localhost:4444/wd/hub'\n # )\n # from time import sleep\n # for _ in range(10):\n # browser.get(\"https://www.seznam.cz\")\n # sleep(1)\n # browser.get(\"https://www.google.com\")\n # sleep(1)\n # browser.get(\"https://www.atlas.cz\")\n # sleep(1)\n # browser.get(\"https://www.novinky.cz\")\n # sleep(1)\n # browser.get(\"https://www.yahoo.com\")\n\n\n@Given(\"a user which {status}\")\ndef create_user_doing_something(context, status):\n l.info(f\"hello from the step 'Given a user which {{status}}' - i am '{status}' right now\")\n l.info(f\"my docstring type is {context.docstring_type}, my docstring is {context.docstring}\")\n l.info(f\"my docstring parsed as json is {context.docstring_json}\")\n context.attach_plaintext(filename=f\"success-status.txt\", data=f\"i am '{status}' right now\")\n","repo_name":"Dri0m/tasmanium","sub_path":"steps/subcategory/given.py","file_name":"given.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29813307039","text":"import logging\nimport os, time, gc, argparse, math\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config, Conv1D\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\nimport copy\nfrom util import init_para_frompretrained, num_params, prepare_dataset, linear_schedule, switch_schedule\nfrom model import VAEModel\nimport nltk\nfrom bi_training_core import train_step, Device\nfrom bi_loss import bidirectional_loss\nfrom bi_eval_step import validate_step, plot_input_distribution, generate_samples\n\nnltk.download('punkt')\nnltk.download('stopwords')\n# devices = '0'\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = devices\n\n\ndef main():\n logger = logging.getLogger(\"transformers\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument('experiment', type=str)\n\n # Default parameters are set based on single GPU 
training\n parser.add_argument('--lr', type=float, default=5e-5)\n parser.add_argument(\"--seed\", type=int, default=0)\n\n parser.add_argument('--data_type', type=str, default='t1', choices=['t' + str(i) for i in range(9)], help=\"t: type\")\n parser.add_argument('--model_type', type=str, default='cvae', choices=['cvae', 'ae_vae_fusion'])\n parser.add_argument('--iterations', type=int, default=101640 * 4) # wp 850001 wi 300001 ax 300001 yp 800001\n parser.add_argument('--dataset', type=str, default='wi', choices=['ax', 'yp', 'wp', 'wi'], help=\"Dataset to use for training\")\n parser.add_argument('--warmup', type=int, default=10000,\n help=\"Amount of iterations to warmup, then decay. (-1 for no warmup and decay)\")\n\n parser.add_argument('--switch-time', type=float, default=0,\n help=\"Percentage of iterations to spend on short sequence training.\")\n parser.add_argument('--data-dir', type=str, default='data')\n parser.add_argument('--out-dir', type=str, default='out')\n parser.add_argument('--load', type=str, help='path to load model from') # , default='out/test/'\n parser.add_argument('--workers', default=1, type=int, metavar='N',\n help='number of data loading workers')\n # use GPU\n parser.add_argument('--gpu', default=0, type=int)\n parser.add_argument('--no_gpu', action=\"store_true\")\n\n parser.add_argument('--fp16', action='store_true', help=\"Train using FP16?\")\n parser.add_argument('--fp16_opt_level', default='O0', type=str, required=False)\n\n # KL cost annealing, increase beta from beta_0 to 1 in beta_warmup steps\n parser.add_argument('--beta_0', default=1.00, type=float)\n parser.add_argument('--beta_warmup', type=int, default=50000)\n # cyc_vae parameters\n parser.add_argument('--cycle', type=int, default=101640)\n\n parser.add_argument('--add_input', action=\"store_true\")\n parser.add_argument('--add_attn', action=\"store_true\")\n parser.add_argument('--add_softmax', action=\"store_true\")\n parser.add_argument('--attn_proj_vary', action=\"store_true\")\n\n parser.add_argument('--learn_prior', action=\"store_true\")\n\n parser.add_argument('--train_batch_size', type=int, default=1)\n parser.add_argument('--val_batch_size', type=int, default=1)\n parser.add_argument('--test_batch_size', type=int, default=1)\n\n parser.add_argument('--short_seq_len', type=int, default=512)\n parser.add_argument('--long_seq_len', type=int, default=1024)\n\n # Loss weighting args\n parser.add_argument('--fwd_loss_weight', type=float, default=1, help=\"Weight multiplier for forward loss.\")\n parser.add_argument('--bkwd_loss_weight', type=float, default=1, help=\"Weight multiplier for backward loss.\")\n parser.add_argument('--all_sentence_loss_weight', type=float, default=1, help=\"Weight multiplier for all previous sentence loss (0 to A -> B).\")\n parser.add_argument('--prompt_loss_weight', type=float, default=1, help=\"Weight multiplier for backward prompt loss.\")\n \n # Reload args\n parser.add_argument('--reload_path', type=str, default='')\n parser.add_argument('--reload_iters', type=int, default=0)\n\n # NOTE: Use for changing the arguments of the program\n args = parser.parse_args()\n\n if args.model_type == 'cvae':\n args.learn_prior = True\n else:\n args.learn_prior = False\n\n devices = '0'\n\n # GPU\n if not torch.cuda.is_available():\n args.no_gpu = True\n\n gpu = not args.no_gpu\n if gpu:\n logger.info(f\"There are {torch.cuda.device_count()} available GPUs!\")\n logger.info('Using GPU devices {}'.format(devices))\n torch.cuda.set_device(args.gpu)\n logger.info('Current 
single GPU: {}'.format(torch.cuda.current_device()))\n\n Device.set_device(devices, args.gpu if gpu else \"cpu\")\n\n # randomness\n np.random.seed(args.seed)\n torch.random.manual_seed(args.seed)\n if gpu: torch.cuda.manual_seed(args.seed); torch.cuda.manual_seed_all(args.seed)\n\n logger.info('\\n*******************************************************************************\\n')\n logger.debug(\"the configuration:\")\n logger.debug(str(args).replace(',', '\\n'))\n\n logger.info('Loading models...')\n\n logger.setLevel(logging.WARNING)\n save_folder = os.path.join(args.out_dir, args.experiment)\n os.makedirs(save_folder, exist_ok=True)\n t_writer = SummaryWriter(os.path.join(save_folder, 'train'), flush_secs=5)\n # importlib.reload(logger)\n # logger.basicConfig(filename=os.path.join(save_folder, 'train.log'), level=logger.INFO, format='%(asctime)s--- %(message)s')\n cache_dir = os.path.join(args.out_dir, 'model_cache')\n os.makedirs(cache_dir, exist_ok=True)\n # Load pre-trained teacher tokenizer (vocabulary)\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2', cache_dir=cache_dir)\n # Hack to allow tokenizing longer sequences.\n tokenizer.max_len = int(1e12)\n gpt2_model = GPT2LMHeadModel.from_pretrained('gpt2', cache_dir=cache_dir)\n logger.info(f'gpt2_params: {num_params(gpt2_model)}') # gpt2: 124439808\n config = GPT2Config()\n config.n_ctx = 1024\n\n # add special tokens\n special_tokens = {\n 'sentence_fwd': '',\n 'sentence_bkwd': ''\n }\n # special_tokens_dict = {\n # 'pad_token': '<|startoftext|>',\n # 'cls_token': '<|startofcond|>',\n # 'sep_token': '<|sepofcond|>',\n # 'mask_token': '<|endofcond|>'\n # }\n # num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)\n logger.info('We have added', len(special_tokens), 'special tokens')\n # # Notice: resize_token_embeddings expect to receive the full size of the new vocab\n # gpt2_model.resize_token_embeddings(len(tokenizer))\n # assert tokenizer.pad_token == '<|startoftext|>'\n\n VAE = VAEModel(config, add_input=args.add_input, add_attn=args.add_attn, add_softmax=args.add_softmax,\n attn_proj_vary=args.attn_proj_vary, learn_prior=args.learn_prior)\n init_para_frompretrained(VAE.transformer, gpt2_model.transformer, share_para=True)\n init_para_frompretrained(VAE.encoder, gpt2_model.transformer, share_para=False)\n if args.learn_prior:\n init_para_frompretrained(VAE.encoder_prior, VAE.encoder, share_para=True)\n VAE.encoder_prior.averageSelfAttention.attention_weights = VAE.encoder.averageSelfAttention.attention_weights\n \n VAE.lm_head.weight = gpt2_model.lm_head.weight\n if VAE.add_softmax:\n VAE.lm_head_rep = Conv1D(*gpt2_model.lm_head.weight.size())\n # VAE.lm_head_rep = LM_head_rep(*gpt2_model.lm_head.weight.size()[::-1])\n logger.setLevel(logging.INFO)\n logger.info(f'VAE_params: {num_params(VAE)}') # 286694400\n args.load = args.reload_path\n if args.load:\n logger.info('Loading model weights...')\n state = torch.load(os.path.join(args.load), map_location=\"cpu\")\n if 'module' in list(state.keys())[0]: # model_path is data parallel model with attr 'module'\n state_copy = copy.copy(state)\n keys = state_copy.keys()\n for k in keys:\n state[k.replace('module.', '')] = state.pop(k)\n VAE.load_state_dict(state)\n gc.collect()\n logger.info('Done.')\n\n # fix pre-trained parameters before certain iterations\n tuning_all_after_iters = 40000\n tuning_all = False\n for name, parameter in VAE.named_parameters():\n # logger.info((name, parameter.requires_grad))\n new_pars = ['c_z', 'attention_weights', 'mean', 'logvar', 
'input_proj', 'attn_proj', 'Nu_fc1', 'Nu_fc2', 'lm_head_rep']\n\n if not any([True if n in name else False for n in new_pars]):\n parameter.requires_grad = False\n\n logger.info('Setup data...')\n curr_seq_len = args.short_seq_len\n train_loader, val_loader, test_loader = prepare_dataset(\n args.data_dir, args.dataset, tokenizer,\n args.train_batch_size, curr_seq_len,\n args.val_batch_size, curr_seq_len,\n args.test_batch_size, curr_seq_len,\n make_test=True,\n num_workers=args.workers, data_type=args.data_type\n )\n logger.info('Done.')\n\n logger.info('Wrapping models and optimizers...')\n\n # Apply linear scaling rule to increase batch size for short sequence training.\n curr_batch_size = args.train_batch_size\n curr_seq_len = args.short_seq_len\n lr_schedule = switch_schedule(linear_schedule(args), curr_batch_size / curr_seq_len,\n int(args.iterations * args.switch_time))\n VAE = VAE.to(Device.device)\n VAE.train()\n\n optimizer = torch.optim.AdamW(VAE.parameters(), lr=args.lr)\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_schedule)\n\n loss_fn = nn.CrossEntropyLoss(reduction='none')\n logger.info('Done.')\n\n logger.info(\"Begin training iterations\")\n max_val_batches = 20000 # max num. of val batches\n logger.info(\"Total iteration: %d\" % args.iterations)\n e = 0 # number of epoch\n\n num_iters = 0\n # Resume training from a checkpoint\n if args.load:\n num_iters = int(args.reload_iters)\n logger.info(\"Resume training from iteration %d\" % num_iters)\n\n optimizer.zero_grad()\n beta = args.beta_0\n\n def eval_step():\n '''Evaluates the performance of the model after a training step'''\n\n logger.info(\"Measuring Input distribution...\")\n plot_input_distribution(VAE, tokenizer, args.model_type, test_loader, args.dataset, num_iters, save_folder)\n logger.info(\"Validation Step...\")\n validate_step(VAE, tokenizer, args.model_type, val_loader, num_iters, max_val_batches, loss_fn, save_folder)\n logger.info(\"Generate output samples...\")\n generate_samples(VAE, tokenizer, args, test_loader, num_iters, save_folder)\n\n def calculate_loss(x_mask, x_tokens, y_mask, y_tokens, input_tokens, target_tokens, mask):\n '''Calculates the loss of the model forward, backward, and for the sentence combinations'''\n\n # This computes a training step going from input to output and computes the losses\n # NORMAL LOSS, Prompt -> Story\n if args.fwd_loss_weight > 0:\n loss_forward, ce_loss_forward, kl_loss_forward = train_step(VAE, optimizer, x_mask, x_tokens, y_mask, y_tokens,\n input_tokens, target_tokens, mask, loss_fn, beta, args.model_type)[-1]\n else:\n loss_forward, ce_loss_forward, kl_loss_forward = 0, 0, 0\n\n # PROMPT LEVEL LOSS, Story -> Prompt\n if args.prompt_loss_weight > 0:\n loss_prompt_backward, ce_loss_prompt_backward, kl_loss_prompt_backward = train_step(VAE, optimizer, y_mask, y_tokens, x_mask, x_tokens,\n target_tokens, input_tokens, mask, loss_fn, beta, args.model_type)[-1]\n else:\n loss_prompt_backward, ce_loss_prompt_backward, kl_loss_prompt_backward = 0, 0, 0\n\n # BIDIRECTIONAL LOSSES\n\n # This finds the total loss for the previous sentence, Sentence B -> Sentence A and Sentence A -> Sentence B\n if args.bkwd_loss_weight > 0:\n previous_sentence_loss_output = bidirectional_loss(\"previous_sentence\", VAE, optimizer, y_mask,\n y_tokens, mask, loss_fn, beta, args.model_type, tokenizer, curr_batch_size, curr_seq_len, input_tokens)\n (total_loss_sentence_b_a, total_loss_sentence_a_b, total_ce_loss_sentence_b_a,\n total_ce_loss_sentence_a_b, 
total_kl_loss_sentence_b_a, total_kl_loss_sentence_a_b) = previous_sentence_loss_output\n else:\n total_loss_sentence_b_a, total_loss_sentence_a_b, total_ce_loss_sentence_b_a, total_ce_loss_sentence_a_b, total_kl_loss_sentence_b_a, total_kl_loss_sentence_a_b = 0, 0, 0, 0, 0, 0\n \n # This finds the total loss for all previous sentences, Sentence B -> All Previous Sentences\n if args.all_sentence_loss_weight > 0:\n all_previous_sentences_loss_output = bidirectional_loss(\"all_previous_sentences\", VAE, optimizer, y_mask,\n y_tokens, mask, loss_fn, beta, args.model_type, tokenizer, curr_batch_size, curr_seq_len, input_tokens)\n (total_loss_all_previous_sentences, total_ce_loss_all_previous_sentences, total_kl_loss_all_previous_sentences) = all_previous_sentences_loss_output\n else:\n total_loss_all_previous_sentences, total_ce_loss_all_previous_sentences, total_kl_loss_all_previous_sentences = 0, 0, 0\n\n # TOTAL LOSSES\n loss = (args.fwd_loss_weight*loss_forward) + (args.prompt_loss_weight*loss_prompt_backward) + \\\n (args.bkwd_loss_weight*total_loss_sentence_b_a) + \\\n (args.bkwd_loss_weight*total_loss_sentence_a_b) + (args.all_sentence_loss_weight*total_loss_all_previous_sentences)\n\n ce_loss = (args.fwd_loss_weight*ce_loss_forward) + (args.prompt_loss_weight*ce_loss_prompt_backward) + \\\n (args.bkwd_loss_weight*total_ce_loss_sentence_b_a) + \\\n (args.bkwd_loss_weight*total_ce_loss_sentence_a_b) + (args.all_sentence_loss_weight*total_ce_loss_all_previous_sentences)\n\n kl_loss = (args.fwd_loss_weight*kl_loss_forward) + (args.prompt_loss_weight*kl_loss_prompt_backward) + \\\n (args.bkwd_loss_weight*total_kl_loss_sentence_b_a) + \\\n (args.bkwd_loss_weight*total_kl_loss_sentence_a_b) + (args.all_sentence_loss_weight*total_kl_loss_all_previous_sentences)\n\n return loss, ce_loss, kl_loss\n\n # eval_step()\n torch.save(VAE.state_dict(), os.path.join(save_folder,\n 'model_' + '{:07d}'.format(num_iters) +\n f'_bidirectional_{args.fwd_loss_weight}_{args.bkwd_loss_weight}_{args.all_sentence_loss_weight}_{args.prompt_loss_weight}' + '.pt')\n )\n\n e = 0\n while num_iters < args.iterations:\n # Run epoch\n st = time.time()\n\n # Training\n logger.info('\\n----------------------------------------------------------------------')\n logger.info(\"Training loop. Batches: %d\" % len(train_loader))\n\n with tqdm(total=len(train_loader)) as pbar:\n for i, (x_mask, x_tokens, y_mask, y_tokens, input_tokens, target_tokens, mask) in enumerate(train_loader):\n # NOTE: Swaps all the variables for the bidirectional running of the program\n # if num_iters % args.cycle >= args.cycle - args.beta_warmup:\n # beta = min(1.0, beta + (1. 
- args.beta_0) / args.beta_warmup)\n\n if not tuning_all and num_iters >= tuning_all_after_iters:\n for name, parameter in VAE.named_parameters():\n # logger.info((name, parameter.requires_grad))\n parameter.requires_grad = True\n tuning_all = True\n\n try:\n loss, ce_loss, kl_loss = calculate_loss(x_mask, x_tokens, y_mask, y_tokens, input_tokens, target_tokens, mask)\n except RuntimeError as e:\n if 'out of memory' in str(e):\n logger.info('| WARNING: ran out of memory, skipping batch')\n torch.cuda.empty_cache()\n gc.collect()\n continue\n else:\n raise e\n\n if num_iters % 100 == 0:\n logger.info(f\"CURRENT ITERATION: {num_iters}\")\n logger.info(f\"CURRENT LOSS: Loss: {loss}, CE: {ce_loss}, KL: {kl_loss}\")\n\n lr = scheduler.get_last_lr()[0]\n # Log to Tensorboard\n t_writer.add_scalar('loss', loss, num_iters)\n t_writer.add_scalar('ppl', math.exp(min(ce_loss, 10)), num_iters)\n t_writer.add_scalar('lr', lr, num_iters)\n t_writer.add_scalar('iter_time', time.time() - st, num_iters)\n t_writer.add_scalar('kl', kl_loss, num_iters)\n t_writer.add_scalar('beta', beta, num_iters)\n\n if args.model_type == 'ae_vae_fusion':\n # Output is never defined. Raise error\n raise NotImplementedError()\n loss, ce_loss, kl_loss = output[0]\n # Log to Tensorboard\n t_writer.add_scalar('ae_loss', loss, num_iters)\n t_writer.add_scalar('ae_kl', kl_loss, num_iters)\n\n st = time.time()\n\n if args.warmup != -1:\n scheduler.step()\n \n end = num_iters >= args.iterations\n if end: break\n num_iters += 1\n pbar.update(1)\n\n if num_iters % args.cycle == 0:\n beta = args.beta_0\n logger.info('KL annealing restart')\n\n if num_iters % 10000 == 0:\n eval_step()\n\n if num_iters % 5000 == 0:\n logger.info('Saving model...')\n logger.info(\"Iteration completed: %d, remained %d\" % (num_iters, args.iterations - num_iters))\n logger.info(\"Saving model...\")\n logger.info('\\n------------------------------------------------------')\n torch.save(VAE.state_dict(), os.path.join(save_folder,\n 'model_' + '{:07d}'.format(num_iters) +\n f'_bidirectional_{args.fwd_loss_weight}_{args.bkwd_loss_weight}_{args.all_sentence_loss_weight}_{args.prompt_loss_weight}' + '.pt')\n )\n\n if args.switch_time > 0 and num_iters == int(args.iterations * args.switch_time):\n logger.info(\"Switch to long sequence training\")\n curr_seq_len = args.long_seq_len\n curr_batch_size = args.train_batch_size\n train_loader, val_loader, test_loader = prepare_dataset(\n args.data_dir, args.dataset, tokenizer,\n args.train_batch_size, curr_seq_len,\n args.val_batch_size, curr_seq_len,\n args.test_batch_size, curr_seq_len,\n make_test=True,\n num_workers=args.workers, data_type=args.data_type\n )\n\n if not end:\n e += 1\n logger.info(\"Training loop. 
The ith epoch completed: %d\" % e)\n\n torch.save(VAE.state_dict(), os.path.join(save_folder,\n 'model_' + '{:07d}'.format(num_iters) +\n f'_bidirectional_{args.fwd_loss_weight}_{args.bkwd_loss_weight}_{args.all_sentence_loss_weight}_{args.prompt_loss_weight}' + '.pt'))\n logger.info(\"Training complete.\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"AIRC-ASR/AIRC-ASR-Experimental","sub_path":"bidirectional_predictions/TransformerCVAE/train_bidirectional.py","file_name":"train_bidirectional.py","file_ext":"py","file_size_in_byte":19509,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"8256244384","text":"import vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\n\nfrom vktools import Keyboard, ButtonColor, Text, Carousel, Element\n\nvk = vk_api.VkApi(token=\"token\")\n\n\ndef send_message(user_id, message, carousel=None):\n values = {\n \"user_id\": user_id,\n \"message\": message,\n \"random_id\": 0\n }\n\n if carousel is not None:\n values[\"template\"] = carousel.add_carousel()\n\n vk.method(\"messages.send\", values)\n\n\nfor event in VkLongPoll(vk).listen():\n if event.type == VkEventType.MESSAGE_NEW and event.to_me:\n text = event.text.lower()\n user_id = event.user_id\n\n if text == \"test carousel\":\n carousel = Carousel(\n [\n Element(\n \"Title 1\",\n \"Description 1\",\n \"-203980592_457239030\", # photo_id\n \"https://vk.com/fsoky\", # redirect url, if user click on element\n [Text(\"Button 1\", ButtonColor.POSITIVE)]\n ),\n Element(\n \"Title 2\",\n \"Description 2\",\n \"-203980592_457239030\", # photo_id\n \"https://vk.com/fsoky\", # redirect url, if user click on element\n [Text(\"Button 2\", ButtonColor.PRIMARY)]\n )\n ]\n )\n\n send_message(user_id, \"VkTools Carousel by Fsoky ~\", carousel=carousel)","repo_name":"Fsoky/vktools","sub_path":"examples/template_example.py","file_name":"template_example.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"9484296564","text":"#Nearest neighbour approach to classification of breast tissue after pre-processing data and removing bad fields from the dataset(NNBTClassifier+)\r\n\r\nimport datetime\r\nprint(datetime.datetime.now())\r\n\r\ndef dist(l1, l2):\r\n temp = 0\r\n for x in [2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,24,25,26,27,29]:\r\n temp += (float(l1[x]) - float(l2[x])) ** 2\r\n distance = temp ** 0.5\r\n return distance\r\n\r\nf = open(\"wbtdPP.txt\", \"r\")\r\n\r\ndata = []\r\nrecord = []\r\ndList = []\r\nnnList = []\r\ndiagnosis = []\r\n\r\nfor line in f:\r\n sTemp = str(line) \r\n record = list(sTemp.split(\",\"))\r\n data.append(record)\r\n\r\nf.close()\r\n\r\nprint(\"Total size of dataset: \", len(data), \" records found.\")\r\n\r\n#Building a prediction list\r\n\r\nfor i in range(0, len(data)):\r\n dList = []\r\n for j in range(0, len(data)):\r\n if i != j:\r\n d = dist(data[i], data[j])\r\n dList.append(d)\r\n for z in range(0, len(dList)):\r\n if dList[z] == min(dList):\r\n nnList.append(z)\r\n print(\".\", end = \"\")\r\nprint()\r\ncorrectpred = 0\r\n\r\nfor q in range(0, len(data)):\r\n if data[q][1] == data[nnList[q]][1]:\r\n correctpred += 1\r\naccuracy = (correctpred / int(len(data))) * 100\r\n\r\nprint(\"Accuracy of NNBTPP+ Classifier =\", accuracy, \"%\")\r\nprint(\"No. 
of correct predictions =\", correctpred)\r\nprint(datetime.datetime.now())\r\ninput()\r\n","repo_name":"akkivasu/Breast-Tissue-Analysis","sub_path":"NNBTClassifierPP+.py","file_name":"NNBTClassifierPP+.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14253534820","text":"import sys\nsys.stdin = open('W.txt')\n\n\ndef dfs(r, c, idx):\n global cnt\n if visited[r][c] != -1: # 이미 지난 지점이면 일단 컷!\n if visited[r][c] == idx: # 시작지점에 다시 도달했을 때만 +1\n cnt += 1\n return\n\n visited[r][c] = idx # 방문체크\n d = mat[r][c]\n new_r, new_c = r + dirs[d][0], c + dirs[d][1]\n if 0 <= new_r < N and 0 <= new_c < M:\n dfs(new_r, new_c, idx)\n\n\nN, M = map(int, input().split())\nmat = [list(input()) for _ in range(N)]\nvisited = [[-1]*M for _ in range(N)]\ndirs = {'U': (-1, 0), 'D': (1, 0), 'L': (0, -1), 'R': (0, 1)} # 방향설정 딕셔너리\ncnt = idx = 0\nfor i in range(N):\n for j in range(M):\n dfs(i, j, idx)\n idx += 1\nprint(cnt)","repo_name":"woohree/ALGO2ITHM_STUDY","sub_path":"baekjoon/07월/0725 스도쿠 피리부는사나이 순서 로봇시뮬레이션 단어덧셈/g3_16724_피리부는사나이/woohree.py","file_name":"woohree.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"28709433345","text":"import urllib.request\nimport json\n\n# Header declarations for output print.\nhostTitle = \"HOST DETAILS\\n\"\ncountryTitle = \"COUNTRY DETAILS\\n\"\n\n# Format printing constants.\ndotNumber = 70\ncountryPadding = 50\ndetailsPadding = 40\n\n'''\nAll available types of responses for IP along with their urls.\n'''\n\nipValidTypes = ['plain', 'json', 'jsonp']\nipPlain = 'https://get.geojs.io/v1/ip'\nipJson = 'https://get.geojs.io/v1/ip.json'\nipLookup = {'plain' : ipPlain, 'json' : ipJson}\n\n'''\nAll available types of responses for country along with their urls.\n'''\n\ncountryValidTypes = ['plain', 'plainfull', 'json', 'jsonp']\ncountryPlain = 'https://get.geojs.io/v1/ip/country'\ncountryFullPlain = 'https://get.geojs.io/v1/ip/country/full'\ncountryJson = 'https://get.geojs.io/v1/ip/country/{ip address}.json'\ncountryLookup = {'plain' : countryPlain, 'plainfull' : countryFullPlain, 'json' : countryJson}\n\n'''\nAll available types of responses for all geo data along with their urls.\n'''\ngeoJson = 'https://get.geojs.io/v1/ip/geo/{ip address}.json'\n\n'''\nAll available types of responses for DNS PTR records.\n'''\nptrPlain = 'https://get.geojs.io/v1/dns/ptr'\n\n# Gets the response of a url that returns plain text as response.\ndef getPlainResponse(url):\n return urllib.request.urlopen(url).read().decode().strip()\n\n# Gets the response of a url that returns json as response and replaces the default argument '{ip address}' with the IP address whose country we're looking\ndef getJsonResponse(url, ipAddress):\n response = urllib.request.urlopen(url.replace('{ip address}',ipAddress)).read().decode()\n outDict = json.loads(response)\n return outDict\n\n# Gets host's IP address, having default 'returnType' as 'plain', which can be changed accordingly.\ndef getIP(returnType = 'plain'):\n if isinstance(returnType,str):\n returnType = returnType.lower()\n if returnType in ipValidTypes:\n if returnType == 'plain':\n return getPlainResponse(ipLookup[returnType])\n else:\n return getJsonResponse(ipLookup[returnType],'')\n else:\n raise ValueError('\\'returnType\\' does not belong in valid types: ' + str(ipValidTypes))\n else:\n raise TypeError('\\'returnType\\' must be of type 
\\'str\\'(' + type(returnType).__name__ + ' was given).')\n\n# Gets the country of a specific IP address.\ndef getCountry(ipAddress, returnType = 'plain'):\n if not isinstance(ipAddress,str):\n raise TypeError('\\'ipAddress\\' is not an instance of \\'str\\'('+ type(ipAddress).__name__ + ' was given).')\n if isinstance(returnType,str):\n returnType = returnType.lower()\n if returnType in countryValidTypes:\n if returnType == 'plain':\n return getPlainResponse(countryLookup[returnType] + '/' + ipAddress)\n elif returnType == 'plainfull':\n return getPlainResponse(countryLookup[returnType] + '/' + ipAddress)\n else:\n return getJsonResponse(countryLookup[returnType], ipAddress)\n else:\n raise ValueError('\\'returnType\\' does not belong in valid types: ' + str(countryValidTypes))\n else:\n raise TypeError('\\'returnType\\' must be of type \\'str\\'(' + type(returnType).__name__ + ' was given).')\n\n# Gets all available geodata for a specific IP address. \ndef getGeoData(ipAddress):\n if isinstance(ipAddress, str):\n return getJsonResponse(geoJson, ipAddress)\n else:\n raise TypeError(\"\\'ipAddress\\' is not an instance of list.\")\n\n# Gets the DNS PTR record of an IP address, if possible.\ndef getPTR(ipAddress):\n if not isinstance(ipAddress, str):\n raise TypeError(\"\\'ipAddress\\' is not an instance of list.\")\n return getPlainResponse(ptrPlain)\n\n# Gets all country information for an IP address.\ndef showCountryDetails(ip=''):\n result = \"\"\n if ip == '':\n ip = getIP('plain')\n countryData = getCountry(ip, 'json')\n result += '-' * dotNumber + '\\n'\n result += (dotNumber//2 - len(countryTitle)//2) * ' ' + countryTitle\n result += '-' * dotNumber + '\\n'\n for key, value in countryData.items():\n cleanKey = key.replace('_',' ').capitalize() + ':'\n cleanKey = cleanKey.ljust(countryPadding, ' ')\n result += cleanKey + str(value) + '\\n'\n result += '-' * dotNumber + '\\n'\n print(result)\n\n# Get all available information provided for a specific IP address (country, location, region, etc.).\ndef showIpDetails(ip=''):\n result = \"\"\n if ip == '':\n ip = getIP('plain')\n country = getCountry(ip, 'plainFull')\n result += '-' * dotNumber + '\\n'\n result += (dotNumber//2 - len(hostTitle)//2) * ' ' + hostTitle\n result += '-' * dotNumber + '\\n'\n result += 'Country: '.ljust(countryPadding,' ') + country + '\\n'\n geoData = getGeoData(ip)\n ptrData = getPTR(ip)\n for key, value in geoData.items():\n cleanKey = key.replace('_',' ').capitalize() + ':'\n cleanKey = cleanKey.ljust(countryPadding,' ')\n result += cleanKey + str(value) + '\\n'\n result += '-' * dotNumber + '\\n'\n print(result)\n","repo_name":"VasilisG/IP-location-tracker","sub_path":"geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"79"} +{"seq_id":"42011932485","text":"#see the readme.md file for description and data from typing import Any, Union, Tuple, List\r\n\r\nimport random\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\n\r\nshots = 0 #global variable to count the total number of shots\r\n\r\ndef ship_position(ship): #returns a list of tuples giving all coordinates of a ship\r\n ship_pos = [(ship[0], ship[1])]\r\n if ship[2] == True:\r\n for i in range(1, ship[3]):\r\n ship_pos.append((ship[0], ship[1] + i))\r\n elif ship[2] == False:\r\n for i in range(1, ship[3]):\r\n ship_pos.append((ship[0] + i, ship[1]))\r\n return ship_pos\r\n\r\n\r\ndef is_sunk(ship):\r\n if ship[3] == len(ship[4]):\r\n 
\r\n\r\ndef is_sunk(ship):\r\n    if ship[3] == len(ship[4]):\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\ndef ship_type(ship):\r\n    if ship[3] == 4:\r\n        return \"battleship\"\r\n    elif ship[3] == 3:\r\n        return \"cruiser\"\r\n    elif ship[3] == 2:\r\n        return \"destroyer\"\r\n    else:\r\n        return \"submarine\"\r\n\r\n\r\ndef is_open_sea(row, column, fleet):\r\n    if (row > 9 or row < 0) or (column > 9 or column < 0):\r\n        return False\r\n    else:\r\n        for ship in fleet:\r\n            ship_pos = ship_position(ship)\r\n            for pos in ship_pos:\r\n                if row == pos[0]:\r\n                    if column == pos[1] or column == pos[1]+1 or column == pos[1]-1:\r\n                        return False\r\n                if row == pos[0]-1:\r\n                    if column == pos[1] or column == pos[1] + 1 or column == pos[1] - 1:\r\n                        return False\r\n                if row == pos[0]+1:\r\n                    if column == pos[1] or column == pos[1]+1 or column == pos[1]-1:\r\n                        return False\r\n        return True\r\n\r\n\r\ndef ok_to_place_ship_at(row, column, horizontal, length, fleet):\r\n    hits = set()\r\n    tempship = (row, column, horizontal, length, hits)\r\n    ok = True\r\n    ship_pos = ship_position(tempship)\r\n    for pos in ship_pos:\r\n        if is_open_sea(pos[0], pos[1], fleet) == False:\r\n            ok = False\r\n    if ok == True:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\ndef place_ship_at(row, column, horizontal, length, fleet):\r\n    hits = set()\r\n    new_ship = (row, column, horizontal, length, hits)\r\n    fleet.append(new_ship)\r\n\r\n\r\ndef randomly_place_all_ships():\r\n    #place 1 battleship (length 4), 2 cruisers (3), 3 destroyers (2) and 4 submarines (1)\r\n    fleet = []\r\n    for length in [4, 3, 3, 2, 2, 2, 1, 1, 1, 1]:\r\n        placed = False\r\n        while placed == False:\r\n            row = random.randint(0, 9)\r\n            col = random.randint(0, 9)\r\n            horiz = random.choice([True, False])\r\n            if ok_to_place_ship_at(row, col, horiz, length, fleet) == True:\r\n                place_ship_at(row, col, horiz, length, fleet)\r\n                placed = True\r\n    return fleet\r\n\r\n\r\ndef check_if_hits(row, column, fleet):\r\n    hit = False\r\n    check_hit = (row, column)\r\n    for ship in fleet:\r\n        ship_pos = ship_position(ship)\r\n        for pos in ship_pos:\r\n            if check_hit == pos:\r\n                hit = True\r\n    if hit == True:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\ndef hit(row, column, fleet):\r\n    check_hit = (row, column)\r\n    for ship in fleet:\r\n        ship_pos = ship_position(ship)\r\n        for pos in ship_pos:\r\n            if check_hit == pos:\r\n                ship[4].add(check_hit)\r\n                if ship[3] == len(ship[4]):\r\n                    print(\"You sank a \" + ship_type(ship))\r\n                    if are_unsunk_ships_left(fleet) == False:\r\n                        print(\"You win! 
Total shots = \" + str(shots))\r\n return (fleet, ship)\r\n\r\n\r\n\r\ndef are_unsunk_ships_left(fleet):\r\n sunk_count = 0\r\n for ship in fleet:\r\n if is_sunk(ship) == True:\r\n sunk_count +=1\r\n if len(fleet) == sunk_count:\r\n return False\r\n else:\r\n return True\r\n\r\n\r\n#the following 3 functions are all needed for createbuttons()\r\ndef labelx(t, c, r, b):\r\n ttk.Label(b, text=t).grid(column=c, row=r, sticky=W, padx=5)\r\n\r\ndef labely(t, c, r, b):\r\n ttk.Label(b, text=t).grid(column=c, row=r, sticky=W)\r\n\r\ndef shoot(r, c, fleet, b):\r\n global shots\r\n if check_if_hits(r, c, fleet) == True:\r\n print(\"You hit!\")\r\n hit(r, c, fleet) #the hit function then deals with checking if a ship has been sunk and communicating that\r\n b.configure(bg=\"green\")\r\n shots +=1\r\n else:\r\n print(\"You missed\")\r\n b.configure(bg=\"red\")\r\n shots += 1\r\n\r\n\r\n\r\ndef createbuttons(board, tempfleet): #this creates the axis and the button grid\r\n for i in range(0, 10):\r\n labelx(i, i + 2, 1, board)\r\n\r\n for j in range(0, 10):\r\n labely(j, 1, j + 2, board)\r\n\r\n for i in range(2, 12):\r\n for j in range(2, 12):\r\n btn = Button(board)\r\n btn.config(width=3, command=lambda r=i-2, c=j-2, fleet=tempfleet, b=btn: shoot(r, c, fleet, b)) #this feeds the button corrdinates into shoot() when the button is clicked\r\n btn.grid(column=i, row=j)\r\n\r\n\r\ndef quitter(): #this is needed for the quit button\r\n sys.exit()\r\n\r\ndef main():\r\n\r\n current_fleet = randomly_place_all_ships()\r\n\r\n #the following sets up the board\r\n root = Tk()\r\n title = ttk.Label(root)\r\n title.configure(text=\"Battleships\", anchor=\"center\")\r\n title.grid(column = 1, row = 1)\r\n subtitle = ttk.Label(root)\r\n subtitle.configure(text=\"Click on a square to shoot!\", anchor=\"center\")\r\n subtitle.grid(column=1, row=2)\r\n\r\n board = ttk.Frame(root, padding=\"5 5 5 5\")\r\n board.grid(column=1, row=3, sticky=(N, W, E, S))\r\n root.columnconfigure(0, weight=1)\r\n root.rowconfigure(0, weight=1)\r\n\r\n #each square is a button, created using loops\r\n createbuttons(board, current_fleet)\r\n\r\n #this creates the lower portion of the board and the quit button\r\n scores = ttk.Frame(root, padding=\"5 5 5 5\")\r\n scores.grid(column=1, row=4, sticky=(W, E))\r\n quit_button = Button(scores)\r\n quit_button.configure(text=\"Quit\", bg=\"red\", command=quitter)\r\n quit_button.grid(column=1, row=1)\r\n\r\n\r\n root.mainloop()\r\n\r\nif __name__ == '__main__': #keep this in\r\n main()\r\n","repo_name":"franc17/battleships","sub_path":"battleships.py","file_name":"battleships.py","file_ext":"py","file_size_in_byte":10712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74426490174","text":"import cv2\nimport pandas as pd\nfrom .ops import LabelRatio2Coord, clipping_coordinate\nfrom data_process.file_utils.basic import TraverseDir, PathHandler\n\n# cv2.putText(影像, 文字, 座標, 字型, 大小, 顏色, 線條寬度, 線條種類)\n\ndef PlotBox(img, bbox, info=None):\n # Color set\n color_set = {'0': (0, 255, 255), # yellow\n '1': (255, 255, 0), # blue\n '2': (0, 255, 0)} # green\n h, w, _ = img.shape\n if 'x1' in bbox and bbox['x1'] < 1:\n bbox = LabelRatio2Coord(img, bbox)\n if bbox is False:\n return False\n text_coord = clipping_coordinate(img, [bbox['x1'] - w*0.01, bbox['y1'] - h*0.01])\n if info is not None and 'label' in info:\n print('plot', text_coord, img.shape)\n cv2.putText(img, str(bbox['label']),\\\n tuple(text_coord), cv2.FONT_HERSHEY_SIMPLEX,\\\n 
\n\ndef ReadYoloLabel(label_path, bbox_format):\n    \"\"\"\n    bbox_format: 'xyxy' or 'xywh'\n\n    returns:\n        bbox_list : list of bbox dicts\n    *** ratio\n    *** clipping\n    \"\"\"\n    bbox_list = []\n    f = open(label_path, 'r')\n    for i in f:\n        i = i.split(' ')\n        bbox = dict()\n        label = int(i[0])\n        bbox['label'] = label\n        if bbox_format == 'xyxy':\n            x_center = float(i[1])\n            y_center = float(i[2])\n            w_box = float(i[3])\n            h_box = float(i[4])\n            x1 = x_center-w_box/2\n            x2 = x_center+w_box/2\n            y1 = y_center-h_box/2\n            y2 = y_center+h_box/2\n            bbox['x_center'] = x_center\n            bbox['y_center'] = y_center\n            bbox['w_box'] = w_box\n            bbox['h_box'] = h_box\n            bbox['x1'] = x1\n            bbox['x2'] = x2\n            bbox['y1'] = y1\n            bbox['y2'] = y2\n        elif bbox_format == 'xywh':\n            x_center = float(i[1])\n            y_center = float(i[2])\n            w_box = float(i[3])\n            h_box = float(i[4])\n            bbox['x_center'] = x_center\n            bbox['y_center'] = y_center\n            bbox['w_box'] = w_box\n            bbox['h_box'] = h_box\n        bbox_list.append(bbox)\n    return bbox_list\n\n\ndef WriteYoloLabel(label_path, bbox_list):\n    f = open(label_path, 'w')\n    for bbox in bbox_list:\n        f.write('%d %f %f %f %f\\n'%(bbox['label'],\\\n                                    bbox['x_center'],\\\n                                    bbox['y_center'],\\\n                                    bbox['w_box'],\\\n                                    bbox['h_box']))\n    f.close()\n    return True\n\n\ndef WriteYoloLabelListFile(label_file_list):\n    pass\n\n\ndef ReadGTFile(gt_file_path, answer_column):\n    answer_dict = dict()\n    df = pd.read_csv(gt_file_path)\n    for i, lpnumber in df.iterrows():\n        if isinstance(lpnumber[answer_column], str):\n            ans = lpnumber[answer_column].strip()\n            answer_dict[ans] = 0\n    return answer_dict\n\n\ndef ReadBBoxPredictFile(file_path):\n    \"\"\"\n    Args:\n        file path : str\n\n    File format:\n        image_name:<image name>\n        <label>,<confidence>(percentage),<x1>,<y1>,<x2>,<y2>(abs)\n        ...\n        end\n\n    example:\n        image_name:a.jpg\n        full,98%,19,30,37,50\n        ...\n        end\n\n    Returns:\n        imgs_bbox : dict\n\n        {img_name1: [bbox1, bbox2, ...],\n         img_name2: [bbox1, bbox2, ...],\n         ...\n        }\n    \"\"\"\n    f = open(file_path, 'r')\n    imgs_bbox = {}\n    img_bbox = []\n    imgs_name = []\n    for l in f:\n        if 'image_name:' in l or 'end' in l:\n            if len(img_bbox) != 0:\n                img_bbox.sort(key = lambda x: x['conf'], reverse=True)\n                imgs_bbox[imgs_name[-1]] = img_bbox.copy()\n                img_bbox = []\n            # record image name\n            img_name = l.split(':')[-1].strip()\n            imgs_name.append(img_name)\n        else:\n            # Read bboxes!\n            l = l.split(',')\n            bbox = dict()\n            bbox['label'] = l[0]\n            bbox['conf'] = float(l[1].split('%')[0])\n            bbox['x1'] = int(l[2])\n            bbox['y1'] = int(l[3])\n            bbox['x2'] = int(l[4])\n            bbox['y2'] = int(l[5])\n\n            img_bbox.append(bbox)\n    return imgs_bbox\n\n\ndef ReadBBoxYoloLabels(dir_path):\n    img_file_list = TraverseDir(dir_path, '.jpg', check_exist='txt')\n    imgs_bbox = {}\n    for img_path in img_file_list:\n        label_path = PathHandler(img_path, 'find_txt')\n        img = cv2.imread(img_path)\n        bboxes = ReadYoloLabel(label_path, 'xyxy')\n        abs_bbox_list = []\n        for bbox in bboxes:\n            bbox = LabelRatio2Coord(img, bbox)\n            abs_bbox_list.append(bbox)\n\n        imgs_bbox[img_path] = abs_bbox_list.copy()\n    return imgs_bbox\n\n\ndef ReadLandmarkFile(file_path, w, h):\n    f = open(file_path, 'r')\n    preds = []\n    for line in f:\n        l = line.split(',')\n        x = float(l[0]) * w\n        y = float(l[1]) * h\n        preds.append((x, y))\n    if len(preds) > 0:\n        return [preds]\n    return None\n\n\ndef WriteLandmarkFile(Landmarks, file_path, w, h):\n    f = open(file_path, 
'w')\n if Landmarks == None:\n f.close()\n return\n for i in range(1, 68+1):\n landmark = Landmarks[i]\n x_ratio = max(min(landmark.x/w, 1.), 0.)\n y_ratio = max(min(landmark.y/h, 1.), 0.)\n # print(x_ratio, y_ratio)\n f.write(str(x_ratio)+\",\"+str(y_ratio)+\"\\n\")\n f.close()\n","repo_name":"heathcliffYang/data_process","sub_path":"src/data_process/label_utils/label_io.py","file_name":"label_io.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71664801536","text":"# https://www.hackerrank.com/challenges/ctci-balanced-brackets/problem\n\n# Given n strings of brackets, determine whether each sequence of brackets is balanced.\n# If a string is balanced, print YES on a new line; otherwise, print NO on a new line.\n\n# Input Format\n# The first line contains a single integer n denoting the number of strings.\n# Each line i of the n subsequent lines consists of a single string s denoting a sequence of brackets.\n\n# Output Format\n# For each string, print whether or not the string of brackets is balanced on a new line.\n# If the brackets are balanced, print YES; otherwise, print NO.\n\n\n# https://codereview.stackexchange.com/questions/180567/checking-for-balanced-brackets-in-python\ndef is_matched(expression):\n opening = tuple('({[')\n closing = tuple(')}]')\n mapping = dict(zip(opening, closing))\n queue = []\n\n for letter in expression:\n if letter in opening:\n queue.append(mapping[letter])\n elif letter in closing:\n if not queue or letter != queue.pop():\n return False\n return not queue\n\nt = int(input().strip())\nfor a0 in range(t):\n expression = input().strip()\n if is_matched(expression) == True:\n print(\"YES\")\n else:\n print(\"NO\")\n","repo_name":"ck-unifr/hackerrank-cracking-the-code-interview","sub_path":"stacks-balanced-brackets.py","file_name":"stacks-balanced-brackets.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"27961438406","text":"from starlette.applications import Starlette\nfrom starlette.responses import JSONResponse\nfrom starlette.staticfiles import StaticFiles\nfrom starlette.templating import Jinja2Templates\nfrom starlette.routing import Route\nimport uvicorn\nimport os\nimport sys\nimport logging\nfrom random import uniform\nimport run_generation\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n# Needed to avoid cross-domain issues\nresponse_header = {\n 'Access-Control-Allow-Origin': '*'\n}\n\nEOG_TOKEN = '<|endofgenre|>'\nEOT_TOKEN = '<|endoftitle|>'\nEOS_TOKEN = '<|endoftext|>'\n\ndef generate_text(params):\n \"\"\"Generate text using transformers.\"\"\"\n prompt = ''\n if not params['genre'] and not params['title'] and not params['prefix']:\n prompt += EOS_TOKEN\n if params['genre']:\n prompt += params['genre'] + EOG_TOKEN\n if params['title']:\n prompt += params['title'] + EOT_TOKEN\n if params['prefix']:\n prompt += params['prefix']\n text = run_generation.main([\n '--model_type=gpt2',\n '--model_name_or_path=app/output',\n f\"--prompt={prompt}\" if prompt else '--prompt=\"\"',\n f'--temperature={float(params[\"temp\"]) if params[\"temp\"] else uniform(0.7, 1)}',\n f'--top_p={float(params[\"top_p\"]) if params[\"top_p\"] else 0}',\n '--num_samples=1',\n '--length=256',\n f'--stop_token={EOS_TOKEN}'\n ])\n return prompt+text\n\ndef parse_text(text):\n \"\"\"Parse text.\"\"\"\n logging.info(text)\n if 
len(text.split(EOS_TOKEN)[0]) > 0:\n main = text.split(EOS_TOKEN)[0]\n else:\n # eos_token can be at the beginning\n main = text.split(EOS_TOKEN)[1]\n if EOG_TOKEN in main:\n genre = main.split(EOG_TOKEN)[0]\n main = main.split(EOG_TOKEN)[1]\n else:\n genre = ''\n if EOT_TOKEN in main:\n title = main.split(EOT_TOKEN)[-2]\n main = main.split(EOT_TOKEN)[-1]\n else:\n title = ''\n plot = '.'.join(main.split('.')[:-1])+'.'\n return {\n 'genre': genre.strip(),\n 'title': title.strip(),\n 'plot': plot.strip()\n }\n\nasync def generate(request):\n \"\"\"Generate text and return the parsed result as a dict.\"\"\"\n if request.method == 'GET':\n params = request.query_params\n elif request.method == 'POST':\n params = await request.json()\n elif request.method == 'HEAD':\n return JSONResponse({'text': ''}, headers=response_header)\n logging.info(params)\n return JSONResponse(parse_text(generate_text(params)), headers=response_header)\n\nasync def homepage(request):\n \"\"\"Return HTML homepage.\"\"\"\n return templates.TemplateResponse('index.html', {'request': request})\n\nroutes = [\n Route(\"/\", endpoint=homepage),\n Route(\"/generate\", endpoint=generate, methods=[\"GET\", \"POST\"]),\n]\n\napp = Starlette(routes=routes, debug=True)\napp.mount('/static', StaticFiles(directory='app/static'))\ntemplates = Jinja2Templates(directory='app/templates')\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host='0.0.0.0', port=int(os.environ.get('PORT', 5000)), log_level=\"info\")","repo_name":"polakowo/textai","sub_path":"MoviePlots/text_generation/with-titles/app/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"26064560717","text":"import sys\nimport os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom utils import EncryptionUtility\n\ndef test_encryption_decryption():\n original_message = \"Secret Message\"\n key = EncryptionUtility.generate_key()\n encrypted_message = EncryptionUtility.encrypt_message(original_message, key)\n decrypted_message = EncryptionUtility.decrypt_message(encrypted_message, key)\n\n assert original_message == decrypted_message","repo_name":"Cdaprod/cda.CredKeeper","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1934243896","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass LeNet(nn.Module):\n\tdef __init__(self):\n\t\tsuper(LeNet, self).__init__()\n\t\tself.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n\t\tself.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n\t\tself.conv2_drop = nn.Dropout2d()\n\t\tself.fc1 = nn.Linear(320, 50)\n\t\tself.fc2 = nn.Linear(50, 10)\n\n\tdef forward(self, x):\n\t\tx = F.relu(F.max_pool2d(self.conv1(x), 2))\n\t\tx = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n\t\tx = x.view(-1, 320)\n\t\tx = F.relu(self.fc1(x))\n\t\tx = F.dropout(x, training=self.training)\n\t\tx = self.fc2(x)\n\t\treturn F.log_softmax(x)\n\t\n\tdef name(self):\n\t\treturn 'LeNet'\n\nclass MLPNet(nn.Module):\n def __init__(self):\n super(MLPNet, self).__init__()\n self.fc1 = nn.Linear(28*28, 500)\n self.fc2 = nn.Linear(500, 256)\n self.fc3 = nn.Linear(256, 10)\n self.ceriation = nn.CrossEntropyLoss()\n def forward(self, x, target):\n x = x.view(-1, 28*28)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = 
self.fc3(x)\n        loss = self.ceriation(x, target)\n        return x, loss\n    def name(self):\n        return 'MLPNet'","repo_name":"jackyko1991/MNIST-pytorch","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"5029301295","text":"''' *****************************************************************************\n *  Name: Arbaaz Khan\n *  Language: python3 \n *\n *  Description: Implementation of maximum heap datastructure.\n *\n *  Written: 8/1/2018\n *  Last updated: 8/1/2018\n *  \n *  TIME COMPLEXITIES:\n *  -----------------------------------------------------------------\n *  |  Operations  |   WorstCase   |  AverageCase  |   BestCase   |\n *  -----------------------------------------------------------------\n *  |  insertion   |  bigO(log n)  |  bigO(log n)  |   bigO(1)    |\n *  -----------------------------------------------------------------\n *  |  deletion    |  bigO(log n)  |  bigO(log n)  |   bigO(1)    |\n *  -----------------------------------------------------------------\n *  |  traversal   |   bigO(n)     |   bigO(n)     |   bigO(n)    |\n *  -----------------------------------------------------------------\n *  |  searching   |   bigO(n)     |   bigO(n)     |   bigO(1)    |\n *  -----------------------------------------------------------------\n *  |  bubbleUp    |  bigO(log n)  |  bigO(log n)  |   bigO(1)    |\n *  -----------------------------------------------------------------\n *  |  bubbleDown  |  bigO(log n)  |  bigO(log n)  |   bigO(1)    |\n *  -----------------------------------------------------------------\n *  |  findMax     |   bigO(1)     |   bigO(1)     |   bigO(1)    |\n *  -----------------------------------------------------------------\n *\n *  % python maxheap.py\n *\n***************************************************************************** '''\nclass Heap(object):\n    HEAP_SIZE = 10\n\n    def __init__(self):\n        self.heap = [0]*self.HEAP_SIZE\n        self.current_position = -1\n\n    def insert(self,item):\n        if self.isFull():\n            print(\"Heap is full!\")\n        else:\n            self.current_position += 1\n            self.heap[self.current_position] = item\n            self.bubbleUp(self.current_position)\n\n    def isFull(self):\n        if self.current_position+1 == self.HEAP_SIZE:\n            return True\n        else:\n            return False\n    def isEmpty(self):\n        if self.current_position == -1:\n            return True\n        else:\n            return False\n\n    def bubbleUp(self,pos): \n        ### Ensures that the newly inserted item maintains the rule of the max-heap. ###\n        # pos holds the index of the item whose position has to be checked to see whether it follows the rule of the max heap.\n        # The item in question is compared with its parent ((pos-1)//2), since in a max heap the parent is greater than its children.\n        # The item has to be swapped with its parent if it is found to be greater than its parent. After swapping, the item moves to\n        # its parent's place, and its position is checked again by comparing it with its new parent. For this, pos is updated as\n        # the item has moved to its parent's place, and the parent index is updated accordingly.\n        if pos < 0: #Don't perform bubbleup if index becomes negative\n            return\n        parent_index = (pos-1)//2 #floor int value is used\n        while parent_index >= 0 and self.heap[pos] >= self.heap[parent_index]:\n            temp = self.heap[parent_index]\n            self.heap[parent_index] = self.heap[pos]\n            self.heap[pos] = temp\n            pos = parent_index\n            parent_index = (pos-1)//2\n\n    def findMax(self):\n        if not self.isEmpty():\n            return self.heap[0]\n        else:\n            print(\"Heap is empty!\")\n
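    # Illustrative trace (added): with heap [5, 4], insert(10) puts 10 at index 2;\n    # its parent index is (2-1)//2 = 0, which holds 5, and 5 < 10, so they swap and the heap becomes [10, 4, 5].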
\n    def heapSort(self):\n        # It works by putting the largest item in the last node in each iteration:\n        # it swaps the root node with the last node, then bubbles the new root down\n        # over the shrunken range, so the array ends up sorted in ascending order.\n        for i in range(self.current_position+1):\n            temp = self.heap[0]\n            self.heap[0] = self.heap[self.current_position-i]\n            self.heap[self.current_position-i] = temp\n            self.bubbleDown(self.current_position-i-1)\n\n    def bubbleDown(self,pos):\n        root_index = 0\n        if pos<0:\n            return\n        while root_index < pos:\n            if((2*root_index+1 <= pos) and (2*root_index+2 <= pos)): \n                if (self.heap[root_index] < self.heap[2*root_index+1]) and (self.heap[root_index] < self.heap[2*root_index+2]):\n                    if self.heap[2*root_index+1] > self.heap[2*root_index+2]:\n                        temp = self.heap[root_index]\n                        self.heap[root_index] = self.heap[2*root_index+1]\n                        self.heap[2*root_index+1] = temp\n                        root_index = 2*root_index+1\n                    else:\n                        temp = self.heap[root_index]\n                        self.heap[root_index] = self.heap[2*root_index+2]\n                        self.heap[2*root_index+2] = temp\n                        root_index = 2*root_index+2\n                elif (self.heap[root_index] < self.heap[2*root_index+1]):\n                    temp = self.heap[root_index]\n                    self.heap[root_index] = self.heap[2*root_index+1]\n                    self.heap[2*root_index+1] = temp\n                    root_index = 2*root_index+1\n                elif (self.heap[root_index] < self.heap[2*root_index+2]):\n                    temp = self.heap[root_index]\n                    self.heap[root_index] = self.heap[2*root_index+2]\n                    self.heap[2*root_index+2] = temp\n                    root_index = 2*root_index+2\n                else:\n                    break\n            elif (2*root_index+1 <= pos):\n                if self.heap[root_index] < self.heap[2*root_index+1]:\n                    temp = self.heap[root_index]\n                    self.heap[root_index] = self.heap[2*root_index+1]\n                    self.heap[2*root_index+1] = temp\n                    root_index = 2*root_index+1\n                else:\n                    break\n            else:\n                break\n\n    def show(self):\n        for i in range(self.current_position+1):\n            print(self.heap[i])\n\nheap = Heap()\nheap.insert(5)\nheap.insert(4)\nheap.insert(10)\nheap.insert(3)\nheap.insert(2)\nheap.insert(100)\nheap.insert(12)\nheap.insert(40)\nheap.show()\nprint(\"Max value = \",heap.findMax())\nheap.heapSort()\nprint(\"After heapsort\")\nheap.show()\n","repo_name":"arzzon/PythonLearning","sub_path":"DataStructures/Heap/MaxHeap/maxheap_old_first_approach.py","file_name":"maxheap_old_first_approach.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"9113060347","text":"import numpy as np\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.metrics import f1_score as f1\nfrom keras.preprocessing.text import Tokenizer\nfrom sklearn.ensemble import RandomForestClassifier, VotingClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn import svm\nimport warnings\nfrom sklearn.model_selection import GridSearchCV\nimport lyp_preprocessing as lyp\nimport kent\nimport util\nfrom sklearn.tree import DecisionTreeClassifier\nimport collections\nfrom gensim.models import KeyedVectors\n#from xgboost import XGBClassifier\nimport mord\nimport re\n\ndef get_para(view, like, dislike, comment):\n    \"\"\"\n    :param view: number of views, NumPy array shape (n_examples, 1)\n    :param like: number of likes, NumPy array shape (n_examples, 1)\n    :param dislike: number of dislikes, NumPy array shape (n_examples, 1)\n    :param comment: number of comments, NumPy array shape (n_examples, 1)\n    :return: parameter, NumPy array shape (n_examples, 1), float\n    \"\"\"\n    return (like - 1.5 * dislike) * comment / view\n
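\n# Illustrative example (added): with 1000 views, 90 likes, 20 dislikes and 50 comments,\n# the score is (90 - 1.5 * 20) * 50 / 1000 = 3.0.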
\ndef label(view, parameter, view_bar, para_bar):\n    \"\"\"\n    Args:\n        view: number of views, NumPy array shape (n_examples, 1)\n        parameter: the emotional trend of the feedback from viewers, NumPy array shape (n_examples, 1)\n        view_bar: threshold on the number of views, int\n        para_bar: bars of parameters, a list (2,)\n\n    Returns:\n        label, NumPy array shape (n_examples, 1), int\n        0: Not hot\n        1: Negative, dislike >> like\n        2: Contradictory, dislike ~= like\n        3: Positive, like >> dislike\n    \"\"\"\n    label = np.zeros(np.shape(view))\n    n = len(view)\n    [bar1, bar2] = para_bar\n    for i in range(n):\n        if view[i] < view_bar:\n            label[i] = 0\n        elif parameter[i] < bar1:\n            label[i] = 1\n        elif parameter[i] < bar2:\n            label[i] = 2\n        else:\n            label[i] = 3\n    return label\n\n\ndef loadGolveModel(glove_file):\n    f = open(glove_file, 'r', encoding='UTF-8')\n    model = {}\n    for line in f:\n        splitline = line.split()\n        word = splitline[0].replace(\"'\", \"\")\n        embedding = np.array([float(val) for val in splitline[1: ]])\n        model[word] = embedding\n    print(\"Done.\", len(model), \"words loaded!\")\n    return model\n\n\ndef load_index_dic(glove_file):\n    f = open(glove_file, 'r', encoding='UTF-8')\n    dic = []\n    for line in f:\n        splitline = line.split()\n        dic.append(splitline[0])\n    f.close()\n    return dic\n\n\ndef glove_embedding_one_string(string, dictionary):\n    words = string.lower().split()\n    new_words = [re.sub(r'[{}!#?,.:\";@$%^&*()_+\\-=|\\[\\]<>/?~]', '', word) for word in words]\n    temp = [dictionary[i] for i in new_words if i in dictionary.keys()]\n    temp = np.array(temp)\n    return np.sum(temp, axis=0)\n\n\ndef glove_embedding(list, dictionary):\n    n, t = len(list), 0\n    l = dictionary['a'].shape[0]\n    temp = np.zeros((n, l))\n    for i in list:\n        temp[t] = glove_embedding_one_string(i, dictionary)\n        t += 1\n    return np.array(temp)\n\n\ndef get_token(string, header, k):\n    \"\"\"\n    Word embedding for token\n    Function: remove the punctuation, lowercase words, and convert the words to sequences of integers\n    :param string: A list of words, length: n\n           header: type of string\n           k: size of dictionary\n    :return: A list of integers, representing the words\n    Site: https://towardsdatascience.com/recurrent-neural-networks-by-example-in-python-ffd204f99470\n    \"\"\"\n    if header == 'tags':\n        tokenizer = Tokenizer(num_words=k, # Words with top k frequency\n                              filters='!@#$%^&*()_+-=\\|{}[]:;\">/?<,.~',\n                              lower=True, split='|')\n    else:\n        tokenizer = Tokenizer(num_words=k,\n                              filters='!@#$%^&*()_+-=\\|{}[]:;\">/?<,.~',\n                              lower=True)\n\n    tokenizer.fit_on_texts(string)\n    sequences = tokenizer.texts_to_sequences(string)\n    return sequences\n\ndef one_hot(string, k):\n    \"\"\"\n    One hot word embedding\n    :param string: A list of strings\n           k: size of dictionary\n    :return: A matrix of integers reflecting the string\n             dim: n-examples x m-size of dictionary\n             Type: np.array\n    \"\"\"\n    t = Tokenizer(num_words=k,\n                  filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n                  lower=True, split=' ')\n    t.fit_on_texts(string)\n    encoded_docs = t.texts_to_matrix(string, mode='binary')\n    return np.array(encoded_docs)\n\n\ndef one_hot_test(train, test, k):\n    t = Tokenizer(num_words=k,\n                  filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n',\n                  lower=True, split=' ')\n    t.fit_on_texts(train)\n    encoded_docs = t.texts_to_matrix(test, mode='binary')\n    return np.array(encoded_docs)\n
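\n# Illustrative example (added): one_hot(['good movie', 'bad movie'], 10) returns a\n# 2 x 10 binary matrix in which column j is set when the word with index j appears in the string.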
\n\ndef word_embedding(csv_path, dictionary):\n    \"\"\"\n    Get the structured input data\n    :param csv_path: the train, valid, or test set path, .csv file name\n    :param dictionary: word embedding dictionary\n    :return: structured title, tag, description, list type, each with a length of dictionary,\n             category as integer, publish_time as time\n             Type: np.array\n    \"\"\"\n    title, trending_date, publish_time, category, tags, description, duration = kent.get_feature(csv_path)\n    glove_title = glove_embedding(title, dictionary)\n    glove_description = glove_embedding(description, dictionary)\n    glove_tags = glove_embedding(tags, dictionary)\n    time = lyp.get_time_gap(publish_time, trending_date)\n    category = util.add_intercept_fn(np.reshape(category, (len(category), 1)))\n    time = time.reshape((len(time), 1))\n    duration = duration.reshape((len(duration), 1))\n    return glove_title, time, category, glove_tags, glove_description, duration\n\n\ndef word_embedding_test(train_path, test_path, size_of_dictionary, size_of_dictionary_description):\n    train_title, train_trending_date, train_publish_time, train_category, train_tags, train_description = kent.get_feature(train_path)\n    test_title, test_trending_date, test_publish_time, test_category, test_tags, test_description = kent.get_feature(test_path)\n    one_hot_title = util.add_intercept_fn(one_hot_test(train_title, test_title,size_of_dictionary))\n    one_hot_description = util.add_intercept_fn(one_hot_test(train_description, test_description, size_of_dictionary_description))\n    one_hot_tags = util.add_intercept_fn(one_hot_test(train_tags, test_tags, size_of_dictionary))\n    time = lyp.get_time_gap(test_publish_time, test_trending_date)\n    time = util.add_intercept_fn(np.reshape(time, (len(time), 1)))\n    category = util.add_intercept_fn(np.reshape(test_category, (len(test_category), 1)))\n    return one_hot_title, time, category, one_hot_tags, one_hot_description\n\n\ndef separa_test(csv):\n    \"\"\"\n    Separate the test data by publish date\n    :return: three sets, containing the indices of the videos in the test set\n             first set: videos trended in the train or valid set\n             third set: videos published and trended in the test set\n             second set: rest of the videos\n    \"\"\"\n    new1 = []\n    new3 = []\n    publish_time = kent.get_time(csv)\n    test_title = lyp.get_string_header(csv, 'title')\n    train_title = lyp.get_string_header(csv, 'title')\n    valid_title = lyp.get_string_header(csv, 'title')\n    title = train_title + valid_title\n    for i in range(len(publish_time)):\n        pt_year = int(publish_time[i][0:4])\n        pt_month = int(publish_time[i][5:7])\n        pt_date = int(publish_time[i][8:10])\n        if pt_year < 2018 and test_title[i] in title:\n            new1 += [i]\n        elif pt_year == 2018 and pt_month < 4 and test_title[i] in title:\n            new1 += [i]\n        elif pt_year == 2018 and pt_month == 4 and pt_date < 14 and test_title[i] in title:\n            new1 += [i]\n        elif pt_year == 2018 and pt_month > 4:\n            new3 += [i]\n        elif pt_year == 2018 and pt_month == 4 and pt_date >= 14:\n            new3 += [i]\n    return new1, new3\n\n\ndef accurancy(y_label, prediction):\n    \"\"\"\n    Calculate the accuracy\n    :param y_label: a list of true labels\n    :param prediction: a list of predicted labels\n    :return: the accuracy, float\n    \"\"\"\n    n = len(y_label)\n    result = 0\n    new = np.zeros((4, ))\n    for i in range(n):\n        if y_label[i] == prediction[i]:\n            result += 1\n            t = int(y_label[i])\n            new[t] += 1\n    print('The accuracy count in each type', new)\n    print('The count of each type:', collections.Counter(prediction))\n    return result / n\n
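\n# Illustrative example (added): accurancy([1, 2, 2, 3], [1, 2, 0, 3]) returns 0.75,\n# since three of the four predictions match the true labels.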
\n\ndef first_layer(fit_type, train_label, valid_type):\n    \"\"\"\n    :param fit_type: Description, Title, Tags etc. a list\n    :param train_label: a list of train labels\n    :param valid_type: a list of valid labels\n    :return: an array of the probability\n    \"\"\"\n    y_train = train_label\n    clf = SGDClassifier(alpha=0.2, loss=\"modified_huber\", penalty='l2', tol=1e-6, max_iter=10000, fit_intercept=False)\n    clf.fit(fit_type, y_train)\n    predict = clf.predict_proba(valid_type)\n    train_probability = clf.predict_proba(fit_type)\n    return predict, train_probability\n\n\ndef GBM_model(train, train_label, test, test_label):\n    \"\"\"\n\n    :param train: n x factor array, representing all factors in array\n    :param test: n x factor array, representing all factors in array\n    :param label_train: n x 1 array, representing the label of train\n    :param label_test: n x 1 array, representing the label of test\n    :return: the prediction result of GBM model\n    \"\"\"\n    model = GradientBoostingClassifier(max_depth=5, tol=0.0001, n_estimators=100)\n    model.fit(train, train_label)\n    print('Finish GBM fit')\n    prediction = model.predict(test)\n    print('Finish GBM prediction')\n    return prediction\n\n\ndef GBM_multi_model(train, train_label, test):\n    \"\"\"\n\n    :param train: n x factor array, representing all factors in array\n    :param test: n x factor array, representing all factors in array\n    :param label_train: n x 1 array, representing the label of train\n    :param label_test: n x 1 array, representing the label of test\n    :return: the prediction result of GBM model\n    \"\"\"\n    # w_array = np.array([0.7] * train_label.shape[0])\n    # w_array[train_label == 0] = 0.9\n    # w_array[train_label == 1] = 8\n    # w_array[train_label == 3] = 1.7\n    model = GradientBoostingClassifier(max_depth=8, tol=0.0001, n_estimators=100)\n    model.fit(train, train_label)\n    print('Finish GBM fit')\n    prediction = model.predict(test)\n    print('Finish GBM prediction')\n    return prediction\n\ndef random_forest(train, train_label, test):\n    clf = RandomForestClassifier(random_state=27 ,max_features=None, n_estimators=300,\n                                 class_weight={0:2.92, 1:65, 2:1, 3:7.4})\n    clf.fit(train, train_label)\n    prediction = clf.predict(test)\n    return prediction\n\ndef random_forest_multi(train, train_label, test):\n    clf = RandomForestClassifier(random_state=27 ,max_features=None, n_estimators=300)\n    clf.fit(train, train_label)\n    prediction = clf.predict(test)\n    return prediction\n\n\ndef neuron_network(train, label_train, test):\n    clf = MLPClassifier(solver='adam', activation='logistic', alpha=0.4, tol=1e-5,\n                        hidden_layer_sizes=(100, 20), max_iter=500)\n    clf.fit(train, label_train)\n    prediction = clf.predict(test)\n    return prediction\n\n\ndef vote(fun1, fun2, fun3, train, train_label, valid):\n    clf = VotingClassifier(estimators=[('fun1', fun1), ('fun2', fun2), ('fun3', fun3)], voting='hard')\n    clf.fit(train, train_label)\n    prediction = clf.predict(valid)\n    return prediction\n\n\ndef svm_prediction(train, train_label, test):\n    clf = svm.SVC(C=1.0, cache_size=200, coef0=1.0,\n                  decision_function_shape='ovo', degree=5, gamma='scale', kernel='poly',\n                  max_iter=-1, probability=False, random_state=None, shrinking=True,\n                  tol=0.001, verbose=True)\n    clf.fit(train, train_label)\n    prediction = clf.predict(test)\n    return prediction\n\n#\n# def mord_predict(train, train_label, test):\n#     clf = mord.MulticlassLogistic()\n#     clf.fit(train, train_label)\n#     prediction = clf.predict(test)\n#     return prediction\n#\n# def xgb_prediction(train, train_label, test):\n#     clf = 
XGBClassifier(booster = \"gbtree\") #objective = reg:squaredlogerror\n# clf.fit(train, train_label)\n# return clf.predict(test)\n\ndef tree(train, train_label, test, i):\n clf = DecisionTreeClassifier(random_state=i, class_weight={0:5, 1:5, 2:0.05, 3:1}) #, class_weight={0:1, 1:1, 2:1, 3:1}\n clf.fit(train, train_label)\n prediction = clf.predict(test)\n return prediction\n\ndef tree_multi(train, train_label, test):\n clf = DecisionTreeClassifier() #, class_weight={0:1, 1:1, 2:1, 3:1}\n clf.fit(train, train_label)\n prediction = clf.predict(test)\n return prediction\n\n\ndef relable(label, target_label):\n \"\"\"\n change the multiple class into binary class\n :param label: the array of the original label\n :param target_label:\n :return: an array of the label, 1 means label is the targeted one and 0 is other labels\n \"\"\"\n return np.array([int(i == target_label) for i in label])\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('Model Performance')\n print('Average Error: {:0.4f} degrees.'.format(np.mean(errors)))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n return accuracy\n\n\ndef sgdc(train, train_label, test, random):\n clf = SGDClassifier(random_state=random, alpha=0.2, loss=\"modified_huber\", penalty='l2', tol=1e-6, max_iter=10000, fit_intercept=False)\n clf.fit(train, train_label)\n predict = clf.predict(test)\n return predict\n\ndef sgdc_multi(train, train_label, test):\n clf = SGDClassifier(alpha=7.5, loss=\"modified_huber\", penalty='l2', tol=1e-6, fit_intercept=False)\n clf.fit(train, train_label)\n predict = clf.predict(test)\n return predict\n\n\ndef delete_feature(train, function, train_label, test, test_label, name, random):\n \"\"\"\n :param list: list of separate feature\n :param function: the training model\n :return:\n \"\"\"\n def g(train, test, name):\n # Get the f1 score\n n = len(train)\n f1_score = np.zeros((n,))\n temp_name = name\n c = []\n if n == 1:\n # print('The last class:', name[0])\n return None\n for i in range(n):\n temp_train, temp_test = train.copy(), test.copy()\n temp_train.pop(i)\n temp_test.pop(i)\n new_train = temp_train[0]\n new_test = temp_test[0]\n if n - 2 > 0:\n for j in range(n - 2):\n new_train = np.hstack((new_train, temp_train[j + 1]))\n new_test = np.hstack((new_test, temp_test[j + 1]))\n prediction = function(new_train, train_label, new_test, random)\n c += [collections.Counter(prediction)]\n warnings.filterwarnings('ignore')\n f1_score[i] = f1(test_label, prediction, average='weighted')\n # print(\"the f1 score with class\", name[i], \"excluded:\", f1_score[i])\n remain_class = np.argmax(f1_score)\n del name[remain_class]\n train.pop(remain_class)\n test.pop(remain_class)\n print('The remaining class is:', temp_name)\n print('the class predicted is:', c[remain_class])\n return delete_feature(train, function, train_label, test, test_label, name, random)\n\n return g(train, test, name)\n","repo_name":"No21-lqz/CS229AAA","sub_path":"LIQIAN.py","file_name":"LIQIAN.py","file_ext":"py","file_size_in_byte":15849,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"71397246970","text":"from django.contrib.auth import authenticate\nfrom rest_framework import viewsets, status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom api.models import Advisor, Booking, 
User\nfrom api.serializer import AdvisorSerializer, AdviserViewSerializer\n\n\nclass AdvisorView(viewsets.ModelViewSet):\n    queryset = Advisor.objects.all()\n    serializer_class = AdvisorSerializer\n\n\n@api_view(['GET'])\ndef advisor_list(request, user_id):\n    try:\n        User.objects.get(pk=user_id)\n    except User.DoesNotExist:\n        return Response(\"User doesn't exist\", status=status.HTTP_404_NOT_FOUND)\n\n    adv_serializer = AdviserViewSerializer(Advisor.objects.all(), many=True)\n    print(adv_serializer.data)\n    return Response(adv_serializer.data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\ndef book_advisor(request, user_id, advisor_id):\n    try:\n        User.objects.get(pk=user_id)\n    except User.DoesNotExist:\n        return Response(\"User doesn't exist\", status=status.HTTP_404_NOT_FOUND)\n\n    try:\n        adv = Advisor.objects.get(pk=advisor_id)\n    except Advisor.DoesNotExist:\n        return Response('Advisor not found', status=status.HTTP_404_NOT_FOUND)\n\n    booking = Booking.objects.create(user_id=user_id, advisor_id=advisor_id, date=request.POST.get('date'))\n    booking.save()\n\n    return Response(status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef get_bookings(request, user_id):\n    try:\n        user = User.objects.get(pk=user_id)\n    except User.DoesNotExist:\n        return Response(\"User doesn't exist\", status=status.HTTP_404_NOT_FOUND)\n\n    bookings = Booking.objects.filter(user=user)\n    data = []\n    for booking in bookings:\n        adv = Advisor.objects.get(id=booking.advisor_id)\n        data.append(({\n            'advisor_name': adv.name,\n            'advisor_profile_pic': adv.photo,\n            'advisor_id': adv.id,\n            'booking_time': booking.date,\n            'booking_id': booking.id\n        }))\n    return Response(data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\ndef register(request):\n    try:\n        user = User.objects.create_user(username=request.POST.get('email'), name=request.POST.get('name'),\n                                        password=request.POST.get('password'), email=request.POST.get('email'))\n    except Exception as e:\n        return Response(\"Fields missing\", status=status.HTTP_400_BAD_REQUEST)\n\n    token, id = user.save()\n    data = {\n        \"token\": token,\n        \"id\": id\n    }\n    return Response(data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\ndef login(request):\n    user = authenticate(username=request.POST.get('email'),\n                        password=request.POST.get('password'))\n    if user is None:\n        return Response(\"Invalid Login\", status=status.HTTP_400_BAD_REQUEST)\n\n    token = user.jwt_token\n    id = user.id\n    data = {\n        \"token\": token,\n        \"id\": id\n    }\n    return Response(data, status=status.HTTP_200_OK)\n","repo_name":"ayanshaikh18/AdvisoryNetwork","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27761132837","text":"# import RobotRaconteur as RR\n# RRN=RR.RobotRaconteurNode.s\n# RRN.SetLogLevel(RR.LogLevel_Trace)\n# node_setup=RR.CommandLineConfigParser(0)\n# #node_setup=RR.ClientNodeSetup(argv=[\"--robotraconteur-tcp-enable=false\"])\n# #browser_transport = RR.BrowserWebSocketTransport()\n# #RRN.RegisterTransport(browser_transport)\n# print(\"done\")\n\n\nfrom js import print_div\nfrom RobotRaconteur.Client import *\n\nprint_div(\"Begin test_transport\")\n\nc1 = None\n\ndef i32_huge_cb(i32_huge, err):\n    print_div (\"i32_huge: \" + str(i32_huge))\n    print_div (\"i32_huge error: \" + str(err))\n\ndef d1_cb(d1, err):\n    print_div (\"d1: \" + str(d1))\n    print_div (\"d1 error: \" + str(err))\n    c1.async_get_i32_huge(i32_huge_cb)\n\ndef connect_cb(c, err):\n    global c1\n    c1 = c\n    
print_div(\"connect error: \" + str(err))\n c.async_get_d1(d1_cb)\n\nRRN.SetLogLevel(RR.LogLevel_Debug)\n\nRRN.AsyncConnectService(\"rr+ws://localhost:22222?service=RobotRaconteurTestService\", None, None, None, connect_cb)\n\n\n","repo_name":"robotraconteur/robotraconteur_pyodide","sub_path":"testing/pyodide_test/test/test_transport.py","file_name":"test_transport.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26371110316","text":"#!/usr/bin/python3\nimport scripts\nimport subprocess\nimport datetime\n\nDEBUG=1\nFATAL=2\n\n# Host class to turn config host files into actual data structures\nclass _host:\n def __init__(self,hostname):\n self.hostname=hostname\n self.online=False\n\n# logging, Duh\ndef log(priority,message):\n if (priority == DEBUG):\n priority = \"[DEBUG, %s] \" % datetime.datetime.now()\n elif (priority == FATAL):\n priority = \"[FATAL, %s] \" % datetime.datetime.now()\n logfile.write(priority + message+\"\\n\")\n logfile.flush()\n\n# takes host object and does operations testing network connectivity\ndef ping(h):\n log(DEBUG, \"Pinging host: \" +h.hostname)\n cmd={ \n \"ping\" : [\"ping\",\"-c 2\",h.hostname]\n }\n try:\n subprocess.check_output(cmd[\"ping\"])\n h.online=True\n log(DEBUG,\"Ping Success!\")\n return True\n except:\n h.online=False\n log(DEBUG,\"Ping failed!\")\n return False\n\n# Parses host files and return _host objects\ndef host_parse(hostfile):\n log(DEBUG, \"Parsing Hosts\")\n lines=hostfile.read().split(\"\\n\")\n lines=lines[:len(lines)-1] # cleans excess ''\n hosts=[]\n\n #Create host objects from config files\n for host in lines:\n hostname=host.split(\",\")[0]\n hosts.append(_host(hostname))\n # Then below interact with objects\n\n # And return all hosts\n return hosts\n\n# Get all User defined functions from \"scripts\" dir and execute them\n# h is a _host object \ndef execute_functions(h):\n global log\n global logfile\n global DEBUG\n global FATAL\n for i in dir(scripts):\n if \"__\" not in i :\n\n # Get pointers to functions included in module inbound\n script=getattr(scripts,i)\n\n # Set logging pointers for scripts plugin\n setattr(script,\"logfile\",logfile) \n setattr(script,\"log\",log) \n setattr(script,\"DEBUG\",DEBUG)\n setattr(script,\"FATAL\",FATAL)\n script.execute(h)\n\ndef main():\n\n identity_file=open(\"hosts\",\"r\")\n log(DEBUG, \"Logging initialized\")\n\n hosts=host_parse(identity_file) #returns list of host objects\n \n # For every host execute all anon funcs\n # (Which hosts that functions are executed for\n # are defined in the anonymous functions themselves)\n for Object in hosts:\n if ping(Object):\n execute_functions(Object)\n log(DEBUG, \"Execution Completed\")\n\n\nlogfile=open(\"logs/log.log\",\"w+\")\nmain()\n","repo_name":"flareriderdash/TransparentSync","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10558539803","text":"from __future__ import annotations\n\nfrom collections.abc import Iterator\nfrom datetime import timedelta\nimport hashlib\nfrom typing import Any\nfrom urllib.parse import urlsplit, urlunsplit\n\nimport boto3\nfrom botocore.config import Config\nfrom botocore.exceptions import ClientError\nfrom dandischema.digests.dandietag import PartGenerator\nfrom django.conf import settings\nfrom django.core.files.storage import Storage, 
get_storage_class\nfrom minio.error import NoSuchKey\nfrom minio_storage.policy import Policy\nfrom minio_storage.storage import MinioStorage, create_minio_client_from_settings\nfrom s3_file_field._multipart_boto3 import Boto3MultipartManager\nfrom s3_file_field._multipart_minio import MinioMultipartManager\nfrom storages.backends.s3 import S3Storage\n\n\nclass ChecksumCalculatorFile:\n \"\"\"File-like object that calculates the checksum of everything written to it.\"\"\"\n\n def __init__(self):\n self.h = hashlib.sha256()\n\n def write(self, bytes):\n self.h.update(bytes)\n\n @property\n def checksum(self):\n return self.h.hexdigest()\n\n\nclass DandiMultipartMixin:\n @staticmethod\n def _iter_part_sizes(file_size: int) -> Iterator[tuple[int, int]]:\n generator = PartGenerator.for_file_size(file_size)\n for part in generator:\n yield part.number, part.size\n\n _url_expiration = timedelta(days=7)\n\n\nclass DandiBoto3MultipartManager(DandiMultipartMixin, Boto3MultipartManager):\n \"\"\"A custom multipart manager for passing ACL information.\"\"\"\n\n def _create_upload_id(self, object_key: str, content_type: str | None = None) -> str:\n kwargs = {\n 'Bucket': self._bucket_name,\n 'Key': object_key,\n 'ACL': 'bucket-owner-full-control',\n }\n\n if content_type is not None:\n kwargs['Content-Type'] = content_type\n\n resp = self._client.create_multipart_upload(**kwargs)\n return resp['UploadId']\n\n\nclass DandiMinioMultipartManager(DandiMultipartMixin, MinioMultipartManager):\n \"\"\"A custom multipart manager for passing ACL information.\"\"\"\n\n def _create_upload_id(self, object_key: str, content_type: str | None = None) -> str:\n metadata = {'x-amz-acl': 'bucket-owner-full-control'}\n\n if content_type is not None:\n metadata['Content-Type'] = content_type\n\n return self._client._new_multipart_upload(\n bucket_name=self._bucket_name,\n object_name=object_key,\n metadata=metadata,\n )\n\n\nclass DeconstructableMinioStorage(MinioStorage):\n \"\"\"\n A MinioStorage which is deconstructable by Django.\n\n This does not require a minio_client argument to the constructor.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # A minio.api.Minio instance cannot be serialized by Django. 
Since all constructor\n # arguments are serialized by the @deconstructible decorator, passing a Minio client as a\n # constructor argument causes makemigrations to fail.\n kwargs['minio_client'] = create_minio_client_from_settings()\n super().__init__(*args, **kwargs)\n\n\nclass VerbatimNameStorageMixin:\n \"\"\"A Storage mixin, storing files without transforming their original filename.\"\"\"\n\n # The basic S3Storage does not implement generate_filename or get_valid_name,\n # so upon FileField save, the following call stack normally occurs:\n # FieldFile.save\n # FileField.generate_filename\n # Storage.generate_filename\n # Storage.get_valid_name\n # Storage.generate_filename attempts to normalize the filename as a path.\n # Storage.get_valid_name uses django.utils.text.get_valid_filename,\n # which cleans spaces and other characters.\n # Since these are designed around filesystem safety, not S3 key safety, it's\n # simpler to do sanitization before saving.\n def generate_filename(self, filename: str) -> str:\n return filename\n\n\nclass TimeoutS3Storage(S3Storage):\n \"\"\"Override boto3 default timeout values.\"\"\"\n\n def __init__(self, **settings):\n super().__init__(**settings)\n\n self.config = self.config.merge(\n Config(connect_timeout=5, read_timeout=5, retries={'max_attempts': 2})\n )\n\n\nclass VerbatimNameS3Storage(VerbatimNameStorageMixin, TimeoutS3Storage):\n @property\n def multipart_manager(self):\n return DandiBoto3MultipartManager(self)\n\n def etag_from_blob_name(self, blob_name) -> str | None:\n client = self.connection.meta.client\n\n try:\n response = client.head_object(\n Bucket=self.bucket_name,\n Key=blob_name,\n )\n except ClientError:\n return None\n else:\n etag = response['ETag']\n # S3 wraps the ETag in double quotes, so we need to strip them\n if etag[0] == '\"' and etag[-1] == '\"':\n return etag[1:-1]\n return etag\n\n def generate_presigned_put_object_url(self, blob_name: str, base64md5: str) -> str:\n return self.connection.meta.client.generate_presigned_url(\n ClientMethod='put_object',\n Params={\n 'Bucket': self.bucket_name,\n 'Key': blob_name,\n 'ACL': 'bucket-owner-full-control',\n 'ContentMD5': base64md5,\n },\n ExpiresIn=600, # TODO proper expiration\n )\n\n def generate_presigned_head_object_url(self, key: str) -> str:\n return self.bucket.meta.client.generate_presigned_url(\n 'head_object',\n Params={'Bucket': self.bucket.name, 'Key': key},\n )\n\n def generate_presigned_download_url(self, key: str, path: str) -> str:\n return self.connection.meta.client.generate_presigned_url(\n 'get_object',\n Params={\n 'Bucket': self.bucket_name,\n 'Key': key,\n 'ResponseContentDisposition': f'attachment; filename=\"{path}\"',\n },\n )\n\n def generate_presigned_inline_url(self, key: str, path: str, content_type: str) -> str:\n return self.connection.meta.client.generate_presigned_url(\n 'get_object',\n Params={\n 'Bucket': self.bucket_name,\n 'Key': key,\n 'ResponseContentDisposition': f'inline; filename=\"{path}\"',\n 'ResponseContentType': content_type,\n },\n )\n\n def sha256_checksum(self, key: str) -> str:\n calculator = ChecksumCalculatorFile()\n obj = self.bucket.Object(key)\n obj.download_fileobj(calculator)\n return calculator.checksum\n\n\nclass VerbatimNameMinioStorage(VerbatimNameStorageMixin, DeconstructableMinioStorage):\n @property\n def multipart_manager(self):\n return DandiMinioMultipartManager(self)\n\n def etag_from_blob_name(self, blob_name) -> str | None:\n try:\n response = self.client.stat_object(self.bucket_name, blob_name)\n 
except NoSuchKey:\n return None\n else:\n return response.etag\n\n def generate_presigned_put_object_url(self, blob_name: str, _: str) -> str:\n # Note: minio-py doesn't support using Content-MD5 headers\n\n # storage.client will generate URLs like `http://minio:9000/...` when running in\n # docker. To avoid this, use the secondary base_url_client which is configured to\n # generate URLs like `http://localhost:9000/...`.\n return self.base_url_client.presigned_put_object(\n bucket_name=self.bucket_name,\n object_name=blob_name,\n expires=timedelta(seconds=600), # TODO proper expiration\n )\n\n def generate_presigned_head_object_url(self, key: str) -> str:\n return self.base_url_client.presigned_url('HEAD', self.bucket_name, key)\n\n def generate_presigned_download_url(self, key: str, path: str) -> str:\n return self.base_url_client.presigned_get_object(\n self.bucket_name,\n key,\n response_headers={'response-content-disposition': f'attachment; filename=\"{path}\"'},\n )\n\n def generate_presigned_inline_url(self, key: str, path: str, content_type: str) -> str:\n return self.base_url_client.presigned_get_object(\n self.bucket_name,\n key,\n response_headers={\n 'response-content-disposition': f'inline; filename=\"{path}\"',\n 'response-content-type': content_type,\n },\n )\n\n def sha256_checksum(self, key: str) -> str:\n calculator = ChecksumCalculatorFile()\n obj = self.client.get_object(self.bucket_name, key)\n for chunk in obj.stream(amt=1024 * 1024 * 16):\n calculator.write(chunk)\n return calculator.checksum\n\n\ndef create_s3_storage(bucket_name: str) -> Storage:\n \"\"\"\n Return a new Storage instance, compatible with the default Storage class.\n\n This abstracts over differences between S3Storage and MinioStorage,\n allowing either to be used as an additional non-default Storage.\n \"\"\"\n # For production, calling django.core.files.storage.get_storage_class is fine\n # to return the storage class of S3Storage.\n default_storage_class = get_storage_class()\n\n if issubclass(default_storage_class, S3Storage):\n storage = VerbatimNameS3Storage(bucket_name=bucket_name)\n # Required to upload to the sponsored bucket\n storage.default_acl = 'bucket-owner-full-control'\n elif issubclass(default_storage_class, MinioStorage):\n base_url = None\n if getattr(settings, 'MINIO_STORAGE_MEDIA_URL', None):\n # If a new base_url is set for the media storage, it's safe to assume one should be\n # set for this storage too.\n base_url_parts = urlsplit(settings.MINIO_STORAGE_MEDIA_URL)\n # Reconstruct the URL with an updated path\n base_url = urlunsplit(\n (\n base_url_parts.scheme,\n base_url_parts.netloc,\n f'/{bucket_name}',\n base_url_parts.query,\n base_url_parts.fragment,\n )\n )\n\n # The MinioMediaStorage used as the default storage is cannot be used\n # as an ad-hoc non-default storage, as it does not allow bucket_name to be\n # explicitly set.\n storage = VerbatimNameMinioStorage(\n bucket_name=bucket_name,\n base_url=base_url,\n # All S3Storage URLs are presigned, and the bucket typically is not public\n presign_urls=True,\n auto_create_bucket=True,\n auto_create_policy=True,\n policy_type=Policy.read,\n # Required to upload to the sponsored bucket\n object_metadata={'x-amz-acl': 'bucket-owner-full-control'},\n )\n # TODO: generalize policy_type?\n # TODO: filename transforming?\n # TODO: content_type\n else:\n raise Exception(f'Unknown storage: {default_storage_class}')\n\n return storage\n\n\ndef get_boto_client(storage: Storage | None = None):\n \"\"\"Return an s3 client from the 
current storage.\"\"\"\n storage = storage if storage else get_storage()\n if isinstance(storage, MinioStorage):\n return boto3.client(\n 's3',\n endpoint_url=storage.client._endpoint_url,\n aws_access_key_id=storage.client._access_key,\n aws_secret_access_key=storage.client._secret_key,\n region_name='us-east-1',\n )\n\n return storage.connection.meta.client\n\n\ndef get_storage_params(storage: Storage):\n if isinstance(storage, MinioStorage):\n return {\n 'endpoint_url': storage.client._endpoint_url,\n 'access_key': storage.client._access_key,\n 'secret_key': storage.client._secret_key,\n }\n\n return {\n 'endpoint_url': storage.endpoint_url,\n 'access_key': storage.access_key,\n 'secret_key': storage.secret_key,\n }\n\n\ndef get_storage() -> Storage:\n return create_s3_storage(settings.DANDI_DANDISETS_BUCKET_NAME)\n\n\ndef get_storage_prefix(instance: Any, filename: str) -> str:\n return f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}{filename}'\n\n\ndef get_embargo_storage() -> Storage:\n return create_s3_storage(settings.DANDI_DANDISETS_EMBARGO_BUCKET_NAME)\n\n\ndef get_embargo_storage_prefix(instance: Any, filename: str) -> str:\n return f'{settings.DANDI_DANDISETS_EMBARGO_BUCKET_PREFIX}{filename}'\n","repo_name":"dandi/dandi-archive","sub_path":"dandiapi/api/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":12444,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"77"} +{"seq_id":"34722148061","text":"#!/usr/bin/python3\n\ndef roman_to_int(roman_string):\n '''\n roman_to_int - function that convert roman string to integre\n '''\n roman_to_decimal = {\n 'I': 1,\n 'V': 5,\n 'X': 10,\n 'L': 50,\n 'C': 100,\n 'D': 500,\n 'M': 1000\n }\n number = 0\n previos_value = 0\n if type(roman_string) != str or roman_string is None:\n return (0)\n else:\n for i in roman_string:\n value = roman_to_decimal[i]\n if value > previos_value:\n number += value - (2 * previos_value)\n else:\n number += value\n previos_value = value\n return (number)\n","repo_name":"OuYa01/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/12-roman_to_int.py","file_name":"12-roman_to_int.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11097763868","text":"import os\nimport shutil\n\nfrom contextlib import contextmanager\nfrom pathlib import Path\n\n\n@contextmanager\ndef copy_work(working_dir, text_to_replace, replacement_text):\n \"\"\"\n Recursive function that iterates down through source directory until a file is reached. If file is newer than same\n file in the target directory then replaces target file with source version. 
If the source doesn't exist in the target\n    directory, the source file is copied into the target directory.\n    :param replacement_text: replacement text to put into source path i.e /a/b//file\n    :param text_to_replace: text that needs to be replaced in source path i.e /a/b//file\n    :param working_dir: the source directory that contains the newest files.\n    :return: None\n    \"\"\"\n    os.chdir(working_dir)\n    for file in Path.cwd().iterdir():\n        if file.is_file():\n            try:\n                p1, p2 = os.path.getmtime(Path(file.as_posix())), os.path.getmtime(Path(\n                    f'{os.path.split(file.as_posix())[0].replace(text_to_replace, replacement_text)}/{os.path.split(file.as_posix())[1]}').as_posix())\n                if p1 > p2:\n                    shutil.copy(Path(file).as_posix(), Path(\n                        f'{os.path.split(file.as_posix())[0].replace(text_to_replace, replacement_text)}/{os.path.split(file.as_posix())[1]}'))\n                    print(f'{Path(file).name} replaced.')\n            except FileNotFoundError:\n                shutil.copy(Path(file).as_posix(), Path(\n                    f'{os.path.split(file.as_posix())[0].replace(text_to_replace, replacement_text)}/{os.path.split(file.as_posix())[1]}'))\n                print(f'{Path(file).name} added.')\n        else:\n            copy_work(file, text_to_replace, replacement_text)\n\n","repo_name":"larymak/Python-project-Scripts","sub_path":"AUTOMATION/FileReplaceWithNewer/replace_with_newer.py","file_name":"replace_with_newer.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":929,"dataset":"github-code","pt":"77"}
{"seq_id":"38331748775","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport streamlit as st\nimport plotly.graph_objects as go\nfrom plotly import tools\nimport plotly.offline as py\nimport plotly.express as px\nimport cufflinks as cf\nfrom plotly.offline import plot\nimport chart_studio.plotly as py\n\n\n# In[2]:\n\n\ncf.go_offline()\n\n\n# In[3]:\n\n\ndf = pd.read_csv('../notebooks/summary.csv')\npc = pd.read_csv('../notebooks/pc_comp_scrap/pc.csv')\nworten = pd.read_csv('../notebooks/worten_scrap/worten.csv')\n\n\n# In[ ]:\n\n\n\n\n\n# In[4]:\n\n\nst.title(\"** :desktop_computer:** **TV Marketplace Price Evolution** **:desktop_computer:**\")\nst.header(\"This is an App created to visualize the price Evolution of Ultra HD 4K TVs in 2 Manufacturers: LG and Samsung, in 2 different marketplaces: Pc Componentes and Worten.\")\nst.subheader(\"The Dashboards will show Price evolution since October 3rd.\")\n\n\n# In[5]:\n\n\nimage = ('/Users/juandediegosuanzes/desktop/Ironhack-Final-Project/streamlit/samsung_vs_lg_')\n\n\n# In[6]:\n\n\nst.image(image, width=None)\n\n\n# In[7]:\n\n\npc_ok = pc[['PC LG', 'PC SS']]\nworten_ok = worten[['Worten LG', 'Worten SS']]\ndf_ok = df[['PC LG', 'PC SS', 'Worten LG', 'Worten SS']]\n\n\n# In[8]:\n\n\nst.markdown(\"#### \" +\"Pc Componentes & Worten Price Evolution in LG and Samsung\")\n\n\n# In[9]:\n\n\nst.line_chart(data=df_ok, width=0, height=0, use_container_width=True)\n\n\n# In[10]:\n\n\nst.markdown(\"#### \" +\"Select the manufacturer and the marketplace you would like to see the metrics in detail\")\n\nselected_metrics = st.selectbox(\n    label=\"Choose...\", options=['PC LG','PC SS','Worten LG','Worten SS']\n)\n\n\n# In[11]:\n\n\nfig = go.Figure()\nif selected_metrics == 'PC LG':\n\tfig.add_trace(go.Scatter(x=df.day, y=df['PC LG'],\n                    mode='lines+markers', name='PC LG'))\nif selected_metrics == 'PC SS':\n\tfig.add_trace(go.Scatter(x=df.day, y=df['PC SS'],\n\t                    mode='lines+markers', name='PC SS'))\nif selected_metrics == 'Worten LG':\n\tfig.add_trace(go.Scatter(x=df.day, 
y=df['Worten LG'],\n\t                    mode='lines+markers',name='Worten LG'))\nif selected_metrics == 'Worten SS':\n\tfig.add_trace(go.Scatter(x=df.day, y=df['Worten SS'],\n\t                    mode='lines+markers',name='Worten SS'))\nst.plotly_chart(fig, use_container_width=True)\n\n\n# In[12]:\n\n\nif st.checkbox('Show dataframe'):\n    st.dataframe(df.style.highlight_max(axis=0))\n\n\n# In[13]:\n\n\nst.markdown(\"#### \" +\"Pc Componentes Price Evolution by Manufacturer\")\n\n\n# In[14]:\n\n\nimage_pc = ('/Users/juandediegosuanzes/desktop/Ironhack-Final-Project/streamlit/PcComponentes.png')\n\n\n# In[15]:\n\n\nst.image(image_pc, width=None)\n\n\n# In[16]:\n\n\nst.area_chart(data=pc_ok, width=0, height=0, use_container_width=True)\n\n\n# In[17]:\n\n\nif st.checkbox('Show PC Componentes dataframe'):\n    st.dataframe(pc.style.highlight_max(axis=0))\n\n\n# In[18]:\n\n\nst.markdown(\"#### \" +\"Worten Price Evolution by Manufacturer\")\n\n\n# In[19]:\n\n\nimage_worten = ('/Users/juandediegosuanzes/desktop/Ironhack-Final-Project/streamlit/worten_im.webp')\n\n\n# In[20]:\n\n\nst.image(image_worten, width=None)\n\n\n# In[21]:\n\n\nst.area_chart(data=worten_ok, width=0, height=0, use_container_width=True)\n\n\n# In[22]:\n\n\nif st.checkbox('Show Worten dataframe'):\n    st.dataframe(worten.style.highlight_max(axis=0))\n\n\n# In[23]:\n\n\n#st.title(\"** :champagne:** **THANK YOU ALL!!: Lead Teachers, TAs, and congratulations, classmates!!** **:champagne:**\")\n\n\n# In[24]:\n\n\n#video_file = open('/Users/juandediegosuanzes/desktop/video.mp4', 'rb')\n#video_bytes = video_file.read()\n#st.video(video_bytes)\n\n\n# In[25]:\n\n\n#audio_file = open('/Users/juandediegosuanzes/desktop/champ.mp3', 'rb')\n#audio_bytes = audio_file.read()\n#st.audio(audio_bytes, format='audio/ogg', start_time=34)\n\n\n# In[26]:\n\n\n#fig = df.iplot(kind='box', \n#              histnorm='percent', \n#              xTitle='October Scraping', \n#              yTitle='Price €', \n#              title='Summary Price by Brand and Marketplace',\n#              subplots=True)\n\n#st.pyplot(fig)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"juanema74/Ironhack-Final-Project","sub_path":"streamlit/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"27620432502","text":"import os\nimport cv2\nfrom pairs import Pairs\nfrom hog import getHog\nfrom contrast import getImageContrast\nfrom classes import class_filter\nfrom filters import calculateMetricsForImages\nfrom show import showContinuously, showFromClassContinuously\nfrom Report import Report\nimport argparse\nfrom dataset_specific_api import getDatasetSpecificApi\n\n# parsing command line args\n\nparser = argparse.ArgumentParser(description='Calculate objects metrics')\nparser.add_argument('--dataset', type=str,\n                    help='dataset name', default=None)\nparser.add_argument('--api', type=str,\n                    help='dataset specific api name', default=None)\nparser.add_argument('--threads', type=str,\n                    help='threads number', default='1')\nparser.add_argument('--overwrite', type=str,\n                    help='overwrite existing pairs or not', default='0')\nparser.add_argument('--continue_calc', type=str,\n                    help='continue first calculation', default='1')\nparser.add_argument('--metrics_file', type=str,\n                    help='metrics file name (without extension! 
must be in observer\\'s folder)', default='default_metrics')\nargs = parser.parse_args()\n\ndataset_name = args.dataset\ndataset_specific_api_name = args.api or dataset_name\nthreads = int(args.threads)\noverwrite = int(args.overwrite)\ncontinue_calc = int(args.continue_calc)\nmetrics_file_path = args.metrics_file\n\n# importing metrics\nmetrics = __import__(metrics_file_path).metrics\n\n# creating report object\nreport_file_path = 'report_' + dataset_name + '.json'\nif overwrite or (not os.path.exists(report_file_path)):\n\treport = Report(report_file_path)\n\n# getting dataset specific api\ndataset_specific_api = getDatasetSpecificApi(dataset_specific_api_name)\n\n# getting pairs from directory\ndirectory = 'pairs_' + dataset_name + '_new'\nif overwrite or (not os.path.exists(directory)) or continue_calc:\n\tdirectory = directory.replace('_new', '')\npairs = Pairs(directory, get_classes_function=dataset_specific_api.getClasses)\n\n\n\n# using this function you can see and list (press q) images from a class\n# showFromClassContinuously(pairs, 'Unknown', dataset_specific_api.getClasses)\n\n\n\n# counting objects in classes\nif overwrite or (not os.path.exists(report_file_path)):\n\tobjects_number_by_class = pairs.countObjectsInClasses()\n\treport.write('objects number by class', objects_number_by_class)\n\n\n\n# counting videos in classes\n# videos_number_by_class = countVideosInClasses(pairs, dataset_specific_api.getClasses)\n# report.write('videos number by class', videos_number_by_class)\n\n# available metrics\n\n\n# calculating metrics\nnew_pairs_folder_path = 'pairs_' + dataset_name + '_new'\ncalculateMetricsForImages(pairs, metrics, new_pairs_folder_path, threads=threads, overwrite=overwrite)\n# if overwrite:\n# \tpairs.dumpClasses(os.path.normcase(new_pairs_folder_path + '/' + 'classes_list.json'))","repo_name":"MentalBlood/observer","sub_path":"get_metrics.py","file_name":"get_metrics.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"30907857249","text":"\n\"\"\"\nmake model tutorial\n\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras as keras\n\ndef make_model() -> keras.Model:\n    x0 = keras.layers.Input(shape=(28, 28, 3))\n    x = keras.layers.Conv2D(32, 3, activation='relu')(x0)\n    x = keras.layers.Conv2D(64, 3, activation='relu')(x)\n    x = keras.layers.Flatten()(x)\n    x = keras.layers.Dense(128, activation='relu')(x)\n    x = keras.layers.Dense(10, activation='softmax')(x)\n    model = keras.Model(inputs=(x0), outputs=(x))\n    return model\n\nif __name__ == \"__main__\":\n    model = make_model()\n    model.summary()\n","repo_name":"torigara603/tensorflowtips","sub_path":"tips/tutorials/N03_SaveModel/make_model.py","file_name":"make_model.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"22282641293","text":"# Python code to find the co-ordinates of\n# the contours detected in an image.\nimport cv2\n\n\ndef parse_image(image: str):\n    # file_path: str = \"./xray_file.png\"\n    # Reading image\n    font = cv2.FONT_HERSHEY_COMPLEX\n    img2 = cv2.imread(image, cv2.IMREAD_COLOR)\n\n    # Reading same image in another\n    # variable and converting to gray scale.\n    img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\n    # edged = cv2.Canny(img, 20, 300)\n\n    # Converting image to a binary image\n    # (black and white only image).\n    _, threshold = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)\n\n    # Detecting 
contours in image.\n    # contours, _ = cv2.findContours(threshold, cv2.RETR_TREE,\n    #                                cv2.CHAIN_APPROX_SIMPLE)\n    contours, hierarchy = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n    # Going through every contour found in the image.\n    for cnt in contours:\n\n        approx = cv2.approxPolyDP(cnt, 0.020 * cv2.arcLength(cnt, True), True)\n\n        # draws boundary of contours.\n        # cv2.drawContours(img2, 0, (0, 0, 255), 5)\n        # cv2.drawContours(img2, contours, -1, (10, 355, 100), 3)\n        cv2.drawContours(img2, contours, 0, (0,255, 0), 3)\n\n        # Used to flatten the array containing\n        # the co-ordinates of the vertices.\n        values = approx.ravel()\n        i = 0\n\n        for _ in values:\n            if i % 2 == 0:\n                x = values[i]\n                y = values[i + 1]\n\n                # String containing the co-ordinates.\n                string = f\"{str(x)} {str(y)}\"\n\n                if i != 0:\n                    # text on remaining co-ordinates.\n                    cv2.putText(img2, string, (x, y), font, 0.5, (0, 255, 0))\n            i = i + 1\n\n    # Saving the image\n    cv2.imwrite(\"./output_image/image.jpg\", img2)\n\n","repo_name":"Nakul21/fastapiImage","sub_path":"process_image.py","file_name":"process_image.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"815965119","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nfrom pymongo import MongoClient\n\n\nclass Spot:\n    collection_old = 'latestattractions'\n\n    collection_new = 'spot'\n\n    params_map = {}\n\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def create_spot(address_old, port_old, address_new, port_new, collection_old, collection_new,\n                    params_map):\n\n        # old database connection\n        client = MongoClient(address_old, port_old)\n        travel1 = client.travel1\n\n        # new database connection\n        client = MongoClient(address_new, port_new)\n        travel2 = client.travel2\n\n        # get old collection and create new collection\n        db_old = travel1[collection_old]\n        db_new = travel2[collection_new]\n\n        # clean former data\n        db_new.remove()\n\n        # temporary array\n        keys = list(params_map.keys())\n        temp = [''] * len(keys)\n\n        # check whether the current document contains each field; if it does, copy its value\n        # into the temporary array, otherwise it stays unset\n        for document in db_old.find():\n            for i in range(len(keys)):\n                if keys[i] in document:\n                    temp[i] = document[keys[i]]\n\n            image_url = 'http://weegotest.b0.upaiyun.com/attractions/iosimgs/'\n            post = {}\n\n            if 'spot' in document:\n                spot = document['spot']\n                if spot is not None:\n                    for i in range(len(spot)):\n                        if 'cover_image' in spot[i]:\n                            if spot[i]['cover_image'] != '':\n                                cover_image = image_url + spot[i]['cover_image']\n                        if 'title' in spot[i]:\n                            title = spot[i]['title']\n                        if 'desc' in spot[i]:\n                            desc = spot[i]['desc']\n                        if 'advice' in spot[i]:\n                            advice = spot[i]['advice']\n                        \n                        num = db_new.find({'cover_image': cover_image, 'title': title,\n                                           'desc': desc, 'advice': advice}).count() \n                        if num > 1:\n                            print('duplicate entry')\n                            print(document['_id'])\n                        else:\n                            temp_spot = {}\n                            temp_spot.update({'cover_image': cover_image, 'title': title,\n                                              'desc': desc, 'advice': advice, 'tag': ''})\n                            db_new.insert(temp_spot)\n                            print(temp_spot)\n","repo_name":"hezhensong/MongoConvertor","sub_path":"mongodb/Spot.py","file_name":"Spot.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"36978017098","text":"import argparse\nimport time\nimport csv\nimport socket\nimport os, shutil\nfrom datetime import timedelta\nfrom multiprocessing import Process, Manager, Value, Lock\nfrom subprocess import Popen, PIPE, TimeoutExpired\nfrom 
ipaddress import ip_network\nfrom datetime import datetime\n\n\nclass Counter(object):\n def __init__(self, initval=0):\n self.val = Value('i', initval)\n self.lock = Lock()\n\n def increment(self):\n with self.lock:\n self.val.value += 1\n\n def value(self):\n with self.lock:\n return self.val.value\n\n\ndef generate_args():\n \"\"\" Create main parser \"\"\"\n parser = argparse.ArgumentParser(prog='ping.py')\n # Create global arguments\n parser.add_argument('--hosts', dest='hosts', type =str, help=\"Specify network to ping using CIDR notation.\"\n \"Example: 10.0.0.0/24\",\n required=True)\n args = parser.parse_args()\n return args\n\ndef subnet_ping(ip, counter, ip_results):\n \"\"\" Run ping subprocess and keep track of ping result\n Append results to a list of dictionaries \"\"\"\n # Linux/mac\n if os.name == 'posix':\n sub_p = Popen(['ping', '-c', '4', str(ip)], stdout=PIPE, stderr=PIPE, stdin=PIPE)\n # Windows\n elif os.name == 'nt':\n sub_p = Popen(['ping', '-n', '4', str(ip)], stdout=PIPE, stderr=PIPE, stdin=PIPE)\n # grab output and errors from subprocess\n # sleep a bit (mainly for windows because ping return output is rather slow\n # FIX THIS - use more elegant way of checking if output is finished\n time.sleep(10)\n try:\n output, errors = sub_p.communicate(timeout=15)\n except TimeoutExpired:\n sub_p.kill()\n output, errors = sub_p.communicate()\n # differences in output of poxis vs nt\n if os.name == 'posix':\n # if you don't see 0 packets in the output, then you must have received packets from the host\n if not '0 packets received' in str(output):\n #print(ip, 'is up!', \"\\n\")\n log_out = \"{} is up! \\n\".format(ip)\n log_file(log_out)\n counter.increment()\n ip_results.append({'ip': ip, 'status': 'up'})\n else:\n #print(ip, \"is down or can't be pinged!\", \"\\n\")\n log_out = \"{} is down or can't be pinged! \\n\".format(ip)\n log_file(log_out)\n ip_results.append({'ip': ip, 'status': 'down'})\n elif os.name == 'nt':\n if not 'Received = 0' in str(output):\n #print(ip, 'is up!', \"\\n\")\n log_out = \"{} is up! \\n\".format(ip)\n log_file(log_out)\n counter.increment()\n ip_results.append({'ip': ip, 'status': 'up'})\n else:\n #print(ip, \"is down or can't be pinged!\", \"\\n\")\n log_out = \"{} is down or can't be pinged! \\n\".format(ip)\n log_file(log_out)\n ip_results.append({'ip': ip, 'status': 'down'})\n\ndef log_file(info):\n \"\"\" Write to a log file \"\"\"\n ## FIX - Windows seems to have a problem using the global reference log_filename ##\n with open('ping_log.txt', 'a+') as f:\n f.write(str(info))\n\ndef export_hosts_to_csv(hosts):\n with open('ping_results.csv', 'w+', newline='') as csvfile:\n fieldnames = ['ip', 'status']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for host in hosts:\n writer.writerow({'ip': host['ip'], 'status': host['status']})\n\n\n\nif __name__ == '__main__':\n start_time = time.time()\n args = generate_args()\n # use manager for sharing the list between processes\n manager = Manager()\n ip_results = manager.list()\n # Mac limits resources by default - this sets the number of open files from default 256 to 10240\n # for this parent process and all subs. 
Don't think this is a Linux problem, but this sets it\n # for all posix compliant machines \n if os.name == 'posix':\n import resource\n resource.setrlimit(resource.RLIMIT_NOFILE, (10240, 10240))\n hosts = args.hosts\n # shared counter for all processes to have access to increment\n counter = Counter(0)\n dt = datetime.now()\n log_filename = \"ping_log.txt\"\n archive_log_filename = \"ping_log_{}_{}_{}_{}_{}_{}.txt\".format(dt.month, dt.day, dt.year, dt.hour,\n dt.minute, dt.second,)\n archive_logfile_path = \"Archive/{}\".format(archive_log_filename)\n # remove old log file if it exists, create new archive folder if one doesn't exist, move old to archive\n if not os.path.exists('Archive'):\n os.mkdir('Archive')\n if os.path.exists(log_filename):\n os.rename(log_filename, archive_log_filename)\n shutil.move(archive_log_filename, archive_logfile_path)\n\n # build ips\n hosts = list(ip_network(hosts).hosts())\n hosts = [str(host) for host in hosts]\n # grab total number of hosts within the subnet to ping (length of list)\n total_hosts = len(hosts)\n # create process queue for each ip to be pinged. Prob need to look into better management of this\n processes = []\n workers = [0 for x in range(100)]\n # increment on index of ip_addr because a list is returned\n idx = 0\n # grab number of IPs - later count down to 0\n hosts_len = len(hosts)\n try:\n while hosts_len > 0:\n if 0 not in workers:\n workers = [0 for x in range(100)]\n for w in range(len(workers)):\n p = Process(target=subnet_ping, args=(hosts[idx], counter, ip_results))\n # start the process\n p.start()\n # add to list of workers available to run processes\n processes.append(p)\n workers.remove(0)\n idx += 1\n hosts_len -= 1\n # calling process blocked until process who's join method is called terminates.\n # used more or less for queuing. 
If join is not used all processes join immediately\n # you can also specify an optional timeout in case waiting is too long\n for p in processes:\n p.join()\n except IndexError:\n pass\n\n # continually check if process is still alive, when done provide results\n process_running = True\n while process_running:\n if not processes[-1].is_alive():\n print(\"--> {} of {} hosts could be pinged.\".format(counter.value(), total_hosts))\n host_result_summary = \"\\n{} of {} hosts could be pinged.\".format(counter.value(), total_hosts)\n datetime_completed = \"\\nCompleted on {}/{}/{} @ {}:{}:{}\".format(dt.month,dt.day, dt.year,dt.hour,\n dt.minute, dt.second)\n log_file(host_result_summary)\n log_file(datetime_completed)\n # sort the results from first ip to last by using socket's builtin inet_aton\n ip_results = sorted(ip_results, key=lambda host: socket.inet_aton(host['ip']))\n export_hosts_to_csv(ip_results)\n process_running = False\n else:\n continue\n end_time = time.time() - start_time\n end_time = str(timedelta(seconds=end_time))\n print(\"--> Process running time: {} (Hours:Minutes:Seconds.Microseconds)\".format(end_time))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"kirbocannon/network_tools","sub_path":"ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":7360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8736132165","text":"import logging\nimport threading\nimport time\n\nfrom peregrine_mail.data.models import Email\nfrom peregrine_mail.data.database import db\nfrom peregrine_mail.sending_emails import send_email, find_mail_to_send, find_mail_to_delete\n\nlogger = logging.getLogger('peregrine')\n\n\nclass Threading:\n \"\"\"Run emails in the background\"\"\"\n\n def __init__(self, email_queue, app, sleep_time=10):\n self.app = app\n self.email_queue = email_queue\n self.sleep_time = sleep_time\n thread = threading.Thread(target=self.send_emails)\n thread.daemon = True\n thread.start()\n\n def send_emails(self):\n db.app = self.app\n\n while True:\n # Send NEW emails\n try:\n self.sending_emails_from_queue()\n except Exception as err:\n logger.exception(f'Unexpected error while sending new mail: {err}')\n\n emails = db.session.query(Email).all()\n\n # Resend FAILED emails\n try:\n for email in find_mail_to_send(self.app, emails):\n send_email(self.app, **email)\n except Exception as err:\n logger.exception(f'Unexpected error while finding failed mail to send: {err}')\n\n # Delete old emails\n try:\n find_mail_to_delete(self.app, emails)\n except Exception as err:\n logger.exception(f'Unexpected error while executing retention policy deletion: {err}')\n\n time.sleep(self.sleep_time)\n\n def sending_emails_from_queue(self):\n while not self.email_queue.empty():\n send_email(self.app, **self.email_queue.get())\n","repo_name":"beautiousmax/peregrine_mail","sub_path":"peregrine_mail/background_thread.py","file_name":"background_thread.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25198082124","text":"import adafruit_dht\nimport board\nimport time\n\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(17, GPIO.OUT)\n\ndht_pin = board.D4\ndht_sensor = adafruit_dht.DHT11(dht_pin, use_pulseio=False)\n\ndef callback_func(*args):\n print(\"Button was pushed!\")\n while True:\n try:\n GPIO.output( 17, GPIO.HIGH )\n 
temp_c = dht_sensor.temperature\n            temp_f = temp_c * (9 / 5) + 32\n            hum = dht_sensor.humidity\n            print(\"Temperature =\", temp_c, 'C,', temp_f, 'F')\n            print(\"Humidity =\", hum, '%')\n            time.sleep( 0.5 )\n            GPIO.output( 17, GPIO.LOW )\n            break\n        except RuntimeError:\n            print('error reading, trying again...')\n            continue\n\nGPIO.add_event_detect(10, edge=GPIO.FALLING, callback=callback_func, bouncetime=200)\n\ninput(\"press enter to quit\\n\")  # block program from exiting\n\n\n\n\n\n","repo_name":"ucsd-ece196/ucsd-ece196.github.io","sub_path":"examples/pi/combo.py","file_name":"combo.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"20496960003","text":"import pickle\nfrom typing import List\nfrom fastapi import FastAPI, File, Form, UploadFile\nfrom starlette.middleware.cors import CORSMiddleware\nimport io\nimport face_recognition\nimport numpy as np\nfrom fastapi.encoders import jsonable_encoder\nfrom PIL import Image, ImageDraw\nimport cv2\nfrom Encode_face import EncodeFace\n\n# encode available images on server start\nEncodeFace().load_encoding_images(\"./images\")\n\napp = FastAPI()\napp.add_middleware(\n    CORSMiddleware, allow_origins=[\"*\"], allow_methods=[\"*\"], allow_headers=[\"*\"]\n)\n\n@app.post(\"/api/Identify\")\nasync def faces_recognition(image_upload: UploadFile = File(...)):\n    data = await image_upload.read()\n    known_face_names =[]\n    known_face_encodings=[]\n    \n    image = face_recognition.load_image_file(io.BytesIO(data))\n    #img = Image.open(io.BytesIO(data))\n    #draw = ImageDraw.Draw(img)\n\n    \n\n    with open('know_face_names.p','rb') as f:\n        while 1:\n            try:\n                known_face_names.append(pickle.load(f))\n            except EOFError:\n                break\n    with open('know_face_encodes.p','rb') as f:\n        while 1:\n            try:\n                known_face_encodings.append(pickle.load(f))\n            except EOFError:\n                break\n    #print(known_face_names)\n\n    # Detect face(s) and encode them\n    face_locations = face_recognition.face_locations(image)\n    face_encodings = face_recognition.face_encodings(image, face_locations)\n\n    \n    face_names = []\n    face_loc=[]\n\n    # Recognize face(s)\n    for face_encoding, face_location in zip(face_encodings, face_locations):\n        matches = face_recognition.compare_faces(known_face_encodings, face_encoding,tolerance=0.4)\n        face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n        #print(face_distances)\n        best_match_index = np.argmin(face_distances)\n        #print(best_match_index)\n        if matches[best_match_index]: \n            name = known_face_names[best_match_index]\n        else:\n            name = \"Unknown\"\n        #top, right, bottom, left = face_location\n        #draw.rectangle([left, top, right, bottom],width = 4)\n        #draw.text((left, top), name)\n        face_names.append(name)\n        face_loc.append(face_location)\n    #img.show()\n    return {\"Face name \": face_names,\"Face location \": face_loc}\n\n\n\n@app.post(\"/api/AddImg\")\nasync def faces_recognition(image_upload: UploadFile = File(...),name :str =Form()):\n    data = await image_upload.read()\n    img = Image.open(io.BytesIO(data))\n    img.save(\"./images/{}.png\".format(name))\n    image = face_recognition.load_image_file(io.BytesIO(data))\n    face_locations = face_recognition.face_locations(image)\n    face_encodings = face_recognition.face_encodings(image, face_locations)[0]\n    \n    with open('know_face_names.p','ab') as f:\n        pickle.dump((name), f)\n    with open('know_face_encodes.p','ab') as f:\n        pickle.dump((face_encodings), f)\n\n    return {\"message\" : \"add success\"}\n\n\n\n@app.post(\"/api/AddMultiImg\")\nasync 
def create_upload_files(files: List[UploadFile],name :str=Form()):\n    for data in files:\n        data = await data.read()\n        image = face_recognition.load_image_file(io.BytesIO(data))\n        face_locations = face_recognition.face_locations(image)\n        face_encodings = face_recognition.face_encodings(image, face_locations)[0]\n        with open('know_face_names.p','ab') as f:\n            pickle.dump((name), f)\n        with open('know_face_encodes.p','ab') as f:\n            pickle.dump((face_encodings), f)\n    \n    return {\"message\":\"add success\"}\n\n\n\n\n\n","repo_name":"numan9199/face-ocr","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"29484266259","text":"class Twitter:\n    def __init__(self):\n        self.trending_topics = []\n\n    def tweet(self, mensaje):\n        if len(mensaje) > 140:\n            print(\"The message exceeds the 140-character limit.\")\n            return\n\n        hashtags = self.obtener_hashtags(mensaje)\n        self.actualizar_trending_topics(hashtags)\n\n    def obtener_hashtags(self, mensaje):\n        palabras = mensaje.split()\n        hashtags = [palabra[1:] for palabra in palabras if palabra.startswith(\"#\")]\n        return hashtags\n\n    def actualizar_trending_topics(self, hashtags):\n        for hashtag in hashtags:\n            encontrado = False\n            for i, trending_topic in enumerate(self.trending_topics):\n                if hashtag == trending_topic[0]:\n                    self.trending_topics[i] = (hashtag, trending_topic[1] + 1)\n                    encontrado = True\n                    break\n            if not encontrado:\n                self.trending_topics.append((hashtag, 1))\n        \n        self.trending_topics.sort(key=lambda x: x[1], reverse=True)\n        self.trending_topics = self.trending_topics[:3]\n\n\n# Usage example\ntwitter = Twitter()\n\n# First tweet\ntwitter.tweet(\"Hola, estoy probando mi prototipo de Twitter. #twitter #prototipo #prueba\")\nprint(twitter.trending_topics)  # [('twitter', 1), ('prototipo', 1), ('prueba', 1)]\n\n# Second tweet\ntwitter.tweet(\"Me encanta el desarrollo web. #web #desarrollo #programación\")\nprint(twitter.trending_topics)  # [('web', 1), ('desarrollo', 1), ('programación', 1)]\n\n# Third tweet\ntwitter.tweet(\"Hoy es un día soleado. 
#clima #sol #verano\")\nprint(twitter.trending_topics) # [('sol', 2), ('web', 1), ('desarrollo', 1)]\n","repo_name":"pabloschwarzenberg/grader","sub_path":"tema9_ej1/tema9_ej1_db6ce14501fafe028282235b78618db2.py","file_name":"tema9_ej1_db6ce14501fafe028282235b78618db2.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8117935151","text":"\"\"\"Tests for helper functions.\"\"\"\n\nimport rudra.utils.helper as helper\nimport requests\nimport pytest\n\n\ndef test_get_github_repo_info():\n gh_repo1 = 'https://github.com/fabric8-analytics/f8a-hpf-insights'\n gh_repo2 = 'https://github.com/fabric8-analytics/f8a-hpf-insights.git'\n gh_repo3 = 'git+https://github.com/fabric8-analytics/f8a-hpf-insights'\n gh_repo4 = 'fabric8-analytics/f8a-hpf-insights'\n user, repo = helper.get_github_repo_info(gh_repo1)\n assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'\n user, repo = helper.get_github_repo_info(gh_repo2)\n assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'\n user, repo = helper.get_github_repo_info(gh_repo3)\n assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'\n user, repo = helper.get_github_repo_info(gh_repo4)\n assert user == 'fabric8-analytics' and repo == 'f8a-hpf-insights'\n\n\ndef test_get_training_file_url():\n user = 'fabric8-analytics'\n repo = 'f8a-hpf-insights'\n file_url = helper.get_training_file_url(user, repo)\n resp = requests.get(file_url)\n assert resp.status_code == 200\n\n file_url = helper.get_training_file_url(user, repo, branch='training-code')\n resp = requests.get(file_url)\n assert resp.status_code == 200\n\n file_url = helper.get_training_file_url(\n user, repo, training_file_path='src/flask_endpoint.py')\n resp = requests.get(file_url)\n assert resp.status_code == 200\n\n\ndef test_load_hyper_params():\n # mock command line args\n helper.argv = ['helper.py', '{\"a\": 111, \"b\": \"some text\"}']\n hyper_params = helper.load_hyper_params()\n assert hyper_params.get('a') == 111\n assert hyper_params.get('b') == \"some text\"\n\n\ndef test_cache_dict_with_zero_max_size():\n cache_dict = helper.CacheDict(0)\n with pytest.raises(KeyError):\n cache_dict['key1'] = 'value1'\n assert len(cache_dict) == 0\n\n\ndef test_cache_dict_with_one_max_size():\n cache_dict = helper.CacheDict(1)\n cache_dict['key1'] = 'value1'\n cache_dict['key2'] = 'value2'\n assert len(cache_dict) == 1\n assert 'key2' in cache_dict\n assert 'key1' not in cache_dict\n\n\ndef test_cache_dict():\n # default max_len = 1024\n cache_dict = helper.CacheDict()\n for i in range(2000):\n cache_dict[i] = i * i\n assert len(cache_dict) == cache_dict.max_len\n assert cache_dict[i] == i * i\n del cache_dict[i]\n assert len(cache_dict) == cache_dict.max_len - 1\n assert cache_dict[cache_dict.max_len - 2] == pow(cache_dict.max_len - 2, 2)\n assert len(list(cache_dict)) == cache_dict.max_len - 1\n assert str(cache_dict.max_len - 2) in str(cache_dict)\n","repo_name":"fabric8-analytics/fabric8-analytics-rudra","sub_path":"tests/utils/test_helper.py","file_name":"test_helper.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"30866850939","text":"#! 
/usr/bin/env python3\r\n#\r\ndef blowup_deriv ( t, y ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## blowup_deriv() evaluates the right hand side of blowup_ode().\r\n#\r\n#  Licensing:\r\n#\r\n#    This code is distributed under the GNU LGPL license.\r\n#\r\n#  Modified:\r\n#\r\n#    10 November 2020\r\n#\r\n#  Author:\r\n#\r\n#    John Burkardt\r\n#\r\n#  Reference:\r\n#\r\n#    John D Cook,\r\n#    Approximating a solution that doesn't exist,\r\n#    https://www.johndcook.com/blog/2009/08/11/approximating-a-solution-that-doesnt-exist/\r\n#    11 August 2009.\r\n#\r\n#  Input:\r\n#\r\n#    real T, Y: the time and solution value.\r\n#\r\n#  Output:\r\n#\r\n#    real DYDT: the derivative value.\r\n#\r\n  dydt = y**2\r\n\r\n  return dydt\r\n\r\ndef blowup_euler ( n ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## blowup_euler() solves blowup_ode() using euler().\r\n#\r\n#  Licensing:\r\n#\r\n#    This code is distributed under the GNU LGPL license.\r\n#\r\n#  Modified:\r\n#\r\n#    10 November 2020\r\n#\r\n#  Author:\r\n#\r\n#    John Burkardt\r\n#\r\n#  Input:\r\n#\r\n#    integer N: the number of steps to take.\r\n#\r\n  import matplotlib.pyplot as plt\r\n  import numpy as np\r\n\r\n  print ( '' )\r\n  print ( 'blowup_euler():' )\r\n  print ( '  Use euler() to solve blowup_ode().' )\r\n#\r\n#  Get the parameters.\r\n#\r\n  t0, y0, tstop = blowup_parameters ( )\r\n\r\n  f = blowup_deriv\r\n  tspan = np.array ( [ t0, tstop ] )\r\n\r\n  t, y = euler ( f, tspan, y0, n )\r\n\r\n  print ( '' )\r\n  print ( '  Number of equal steps is %d' % ( n ) )\r\n\r\n  ye = blowup_exact ( t )\r\n#\r\n#  Plot the solution curve.\r\n#\r\n  plt.clf ( )\r\n  plt.plot ( t, y, 'ro', linewidth = 3 )\r\n  plt.plot ( t, ye, 'b-', linewidth = 3 )\r\n  plt.grid ( True )\r\n  plt.xlabel ( '<--- T --->' )\r\n  plt.ylabel ( '<--- X(T) --->' )\r\n  plt.title ( 'blowup_ode(): euler()' )\r\n  plt.legend ( ( 'Computed', 'Exact' ) )\r\n  filename = 'blowup_euler.png'\r\n  plt.savefig ( filename )\r\n  print ( '  Graphics saved as \"%s\"' % ( filename ) )\r\n  plt.show ( block = False )\r\n  plt.close ( )\r\n\r\n  return\r\n\r\ndef blowup_exact ( t ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## blowup_exact() evaluates the exact solution of blowup_ode().\r\n#\r\n#  Discussion:\r\n#\r\n#    y' = y^2\r\n#    dy/y^2 = dt                    (Separation of variables)\r\n#    -1/y = t + C                   (Antiderivatives)\r\n#    y = - 1 / ( t + C )\r\n#    C = - t0 - 1/y0\r\n#    y = - 1 / ( t - t0 - 1/y0 )    (Exact formula)\r\n#\r\n#  Licensing:\r\n#\r\n#    This code is distributed under the GNU LGPL license.\r\n#\r\n#  Modified:\r\n#\r\n#    29 April 2021\r\n#\r\n#  Author:\r\n#\r\n#    John Burkardt\r\n#\r\n#  Input:\r\n#\r\n#    real T(:): the evaluation times.\r\n#\r\n#  Output:\r\n#\r\n#    real Y(:): the exact solution values.\r\n#\r\n  import numpy as np\r\n\r\n  t0, y0, tstop = blowup_parameters ( )\r\n\r\n  if ( y0 == 0.0 ):\r\n    value = np.zeros ( t.shape )\r\n  else:\r\n    value = - 1.0 / ( t - t0 - 1.0 / y0 )\r\n\r\n  return value\r\n\r\ndef blowup_ode_test ( ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## blowup_ode_test() tests blowup_ode().\r\n#\r\n#  Licensing:\r\n#\r\n#    This code is distributed under the GNU LGPL license.\r\n#\r\n#  Modified:\r\n#\r\n#    10 November 2020\r\n#\r\n#  Author:\r\n#\r\n#    John Burkardt\r\n#\r\n  import platform\r\n\r\n  print ( '' )\r\n  print ( 'blowup_ode_test():' )\r\n  print ( '  Python version: %s' % ( platform.python_version ( ) ) )\r\n  print ( 
' Test blowup_ode().' )\r\n\r\n t0, y0, tstop = blowup_parameters ( )\r\n print ( '' )\r\n print ( ' parameters:' )\r\n print ( ' t0 = ', t0 )\r\n print ( ' y0 = ', y0 )\r\n print ( ' tstop = ', tstop )\r\n\r\n n = 40\r\n blowup_euler ( n )\r\n#\r\n# Terminate.\r\n#\r\n print ( '' )\r\n print ( 'blowup_ode_test():' )\r\n print ( ' Normal end of execution.' )\r\n return\r\n\r\ndef blowup_parameters ( t0_user = None, y0_user = None, \\\r\n tstop_user = None ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## blowup_parameters() returns the parameters of blowup_ode().\r\n#\r\n# Discussion:\r\n#\r\n# If input values are specified, this resets the default parameters.\r\n# Otherwise, the output will be the current defaults.\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 28 January 2022\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n# Input:\r\n#\r\n# real T0_USER: the initial time.\r\n#\r\n# real Y0_USER(4): the initial condition.\r\n#\r\n# real TSTOP_USER: the final time.\r\n#\r\n# Output:\r\n#\r\n# real T0: the initial time.\r\n#\r\n# real Y0(1): the initial condition.\r\n#\r\n# real TSTOP: the final time.\r\n#\r\n import numpy as np\r\n#\r\n# Initialize defaults.\r\n#\r\n if not hasattr ( blowup_parameters, \"t0_default\" ):\r\n blowup_parameters.t0_default = 0.0\r\n\r\n if not hasattr ( blowup_parameters, \"y0_default\" ):\r\n blowup_parameters.y0_default = 1.0\r\n\r\n if not hasattr ( blowup_parameters, \"tstop_default\" ):\r\n blowup_parameters.tstop_default = 0.95\r\n#\r\n# Update defaults if input was supplied.\r\n#\r\n if ( t0_user is not None ):\r\n blowup_parameters.t0_default = t0_user\r\n\r\n if ( y0_user is not None ):\r\n blowup_parameters.y0_default = y0_user\r\n\r\n if ( tstop_user is not None ):\r\n blowup_parameters.tstop_default = tstop_user\r\n#\r\n# Return values.\r\n#\r\n t0 = blowup_parameters.t0_default\r\n y0 = blowup_parameters.y0_default\r\n tstop = blowup_parameters.tstop_default\r\n \r\n return t0, y0, tstop\r\n\r\ndef euler ( dydt, tspan, y0, n ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## euler() approximates the solution to an ODE using Euler's method.\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license.\r\n#\r\n# Modified:\r\n#\r\n# 22 April 2020\r\n#\r\n# Author:\r\n#\r\n# John Burkardt\r\n#\r\n# Input:\r\n#\r\n# function dydt: points to a function that evaluates the right\r\n# hand side of the ODE.\r\n#\r\n# real tspan[2]: contains the initial and final times.\r\n#\r\n# real y0[m]: an array containing the initial condition.\r\n#\r\n# integer n: the number of steps to take.\r\n#\r\n# Output:\r\n#\r\n# real t[n+1], y[n+1,m]: the times and solution values.\r\n#\r\n import numpy as np\r\n\r\n if ( np.ndim ( y0 ) == 0 ):\r\n m = 1\r\n else:\r\n m = len ( y0 )\r\n\r\n tfirst = tspan[0]\r\n tlast = tspan[1]\r\n dt = ( tlast - tfirst ) / n\r\n t = np.zeros ( n + 1 )\r\n y = np.zeros ( [ n + 1, m ] )\r\n t[0] = tspan[0]\r\n y[0,:] = y0\r\n\r\n for i in range ( 0, n ):\r\n t[i+1] = t[i] + dt\r\n y[i+1,:] = y[i,:] + dt * ( dydt ( t[i], y[i,:] ) )\r\n\r\n return t, y\r\n\r\ndef timestamp ( ):\r\n\r\n#*****************************************************************************80\r\n#\r\n## timestamp() prints the date as a timestamp.\r\n#\r\n# Licensing:\r\n#\r\n# This code is distributed under the GNU LGPL license. 
\r\n#\r\n#  Modified:\r\n#\r\n#    21 August 2019\r\n#\r\n#  Author:\r\n#\r\n#    John Burkardt\r\n#\r\n  import time\r\n\r\n  t = time.time ( )\r\n  print ( time.ctime ( t ) )\r\n\r\n  return\r\n\r\nif ( __name__ == '__main__' ):\r\n  timestamp ( )\r\n  blowup_ode_test ( )\r\n  timestamp ( )\r\n\r\n","repo_name":"jjeongGrp/MathSubroutines","sub_path":"Python3/blowup_ode.py","file_name":"blowup_ode.py","file_ext":"py","file_size_in_byte":7014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"15088421796","text":"#!/usr/bin/python3\n\nwith open('24.in') as f:\n    parts = [i.splitlines() for i in f.read().split('inp w')[1:]]\n\nstack = []\nfor i, part in enumerate(parts):\n    add = int(part[5][6:])\n    if add > 0:\n        stack.append((i, int(part[-3][6:])))\n        continue\n    show = stack.pop()\n    add += show[1]\n    print('decimal %d + %d = decimal %d' % (i, -1 * add, show[0]))\n","repo_name":"fridokus/advent-of-code","sub_path":"2021/24.py","file_name":"24.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"}
{"seq_id":"34168519165","text":"import argparse\nimport cpe_utils\nimport json\nimport os\nimport re\nfrom tqdm import tqdm\nimport vm_automation\n\n\ndef get_vm_server(config_file):\n    if os.path.isfile(config_file):\n        with open(config_file) as config_file_handle:\n            config_map = json.load(config_file_handle)\n        if config_map['HYPERVISOR_TYPE'].lower() == \"esxi\":\n            vmServer = vm_automation.esxiServer.createFromConfig(config_map, 'esxi_automation.log')\n            vmServer.connect()\n        if config_map['HYPERVISOR_TYPE'].lower() == \"workstation\":\n            vmServer = vm_automation.workstationServer(config_map, 'workstation_automation.log')\n        return vmServer\n    return None\n\n\ndef vm_as_cpe_string(vm_name):\n    cpe_parts = {\n        \"ubuntu\" : {\n            \"vendor\" : \"canonical\",\n            \"product\" : \"ubuntu_linux\",\n            \"version_pattern\" : \".*ubuntu(\\\\d+).*\",\n            \"update\" : \"\"\n        },\n        \"fedora\" : {\n            \"vendor\" : \"fedoraproject\",\n            \"product\" : \"fedora\",\n            \"version_pattern\" : \".*fedora(\\\\d+).*\",\n            \"update\" : \"\"\n        },\n        \"centos\" : {\n            \"vendor\" : \"centos\",\n            \"product\" : \"centos\",\n            \"version_pattern\" : \".*centos(\\\\d+).*\",\n            \"update\" : \"\"\n        }\n    }\n\n    if \"x64\" in vm_name:\n        arch = \"x64\"\n    else:\n        arch = \"x86\"\n    \n    vm_name = vm_name[vm_name.index(\"linux\") + len(\"linux\"):]\n    os_pattern = re.compile(\"[a-z]+\")\n    os_name = os_pattern.match(vm_name)\n    if os_name:\n        os_name = os_name.group(0)\n    else:\n        return None\n\n    if os_name in cpe_parts:\n        version_pattern = re.compile(cpe_parts[os_name]['version_pattern'])\n        v = version_pattern.match(vm_name)\n        version = v.group(1)\n\n        if \"ubuntu\" in os_name:\n            version = version[:2] + \".\" + version[2:]\n\n        cpe_str = \":\".join([\"cpe:/o\", cpe_parts[os_name]['vendor'], cpe_parts[os_name]['product'],\n                            version, cpe_parts[os_name]['update'], arch])\n\n        return cpe_str\n    else:\n        return None\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-k\", \"--keyword\", help=\"VM search parameter\")\n    parser.add_argument(\"-o\", \"--output\", help=\"output file location [defaults to catalog.json]\")\n    parser.add_argument(\"hypervisorConfig\", help=\"json hypervisor config\")\n\n    args = parser.parse_args()\n\n    prefix = args.keyword\n\n    catalog_file = \"catalog.json\"\n    if args.output is not None:\n        catalog_file = args.output\n\n    vm_server = get_vm_server(config_file=args.hypervisorConfig)\n    if vm_server is None:\n        print (\"Failed to 
connect to VM environment\")\n exit(1)\n\n vm_list = []\n vm_server.enumerateVms()\n for vm in vm_server.vmList:\n if prefix in vm.vmName:\n vm_list.append(vm.vmName)\n cpe_catalog = {}\n\n if os.path.isfile(catalog_file):\n with open(catalog_file) as catalog_handle:\n cpe_catalog = json.load(catalog_handle)\n\n for name in tqdm(vm_list):\n if \"linux\" in name.lower(): \n cpe_str = vm_as_cpe_string(name.lower())\n if cpe_str:\n cpe = cpe_utils.CPE(cpe_str)\n vm_entry = {\n 'NAME': name,\n 'CPE': cpe_str,\n 'USERNAME': \"vagrant\",\n 'PASSWORD': \"vagrant\",\n 'OS': cpe.human()\n }\n cpe_catalog[vm_server.hostname + \"_\" + name] = vm_entry\n\n with open(catalog_file, \"w\") as catalog_handle:\n json.dump(cpe_catalog, catalog_handle, indent=2, sort_keys=True)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rapid7/metasploit-baseline-builder","sub_path":"helpers/generateLinuxCatalog.py","file_name":"generateLinuxCatalog.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"77"} +{"seq_id":"11710887377","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 7 13:18:53 2021\n\n@author: arun\n\"\"\"\n\nimport time\nimport datetime\nimport h5py\nimport numpy as np\nfrom random import randint\n\nfrom os import listdir\nfrom os.path import isfile, join\n# import matplotlib.pyplot as plt\n# import scipy.io as sio\nimport os\nos.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\"\nst_0 = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') \nstart_time_0=time.time()\n\n#works for mat file version 7.3 which is the new default.\n\n\n\nDataPath='/home/arun/Documents/MATLAB/ImageDB/PrintoutDB/DB33/'\n\nonlyfiles = [f for f in listdir(DataPath) if isfile(join(DataPath, f))]\nonlyfiles.sort()\nonlyfileslenrem=len(onlyfiles)-round(len(onlyfiles)*0.7)\nonlyfiles = onlyfiles[0:-onlyfileslenrem]\nmatfiles=[join(DataPath,f) for f in onlyfiles]\nmat_fname_ind=np.random.choice(len(matfiles),replace=False)\n\nmat_contents=h5py.File(matfiles[mat_fname_ind])\nmat_contents_list=list(mat_contents.keys())\n\nPlanCTCellRef=mat_contents['CTInfoCell']\nCTLen=np.shape(PlanCTCellRef)\nCTsl=np.zeros([CTLen[1],1])\nfor cti in range(CTLen[1]):\n CTmatsizref=mat_contents['CTInfoCell'][1,cti]\n CTLocR=mat_contents[CTmatsizref]\n CTLoc=CTLocR.value\n CTsiz=np.shape(CTLoc)\n if CTsiz[1]>300:\n CTsl[cti]=1\n else:\n CTsl[cti]=0\nCTindex=np.where(CTsl==1)\nCTindex=CTindex[0]\nCTindex=int(CTindex)\nPlanCTLocRef=mat_contents['CTInfoCell'][1, CTindex]\nPlanCTLocRef=mat_contents[PlanCTLocRef]\nPlanCTLoc=PlanCTLocRef.value\nPlanCTCellRef=mat_contents['CTInfoCell'][2, CTindex]\nPlanCTCellRef=mat_contents[PlanCTCellRef]\nPlanCT=PlanCTCellRef.value\nPlanCT=np.transpose(PlanCT,(2,1,0))\nbatch_size=10\nCTsiz1=PlanCT.shape\n# CT_rand_index=np.random.choice(CTsiz1[2],size=batch_size,replace=False)\n# batch_CT_img=np.zeros((CTsiz1[0],CTsiz1[1],len(CT_rand_index)))\n# for ri in range(len(CT_rand_index)):\n# batch_CT_img[:,:,ri]=PlanCT[:,:,CT_rand_index[ri]]\nPlanCTCellRef=mat_contents['CTInfoCell'][3, CTindex]\nPlanCTCellRef=mat_contents[PlanCTCellRef]\nPlanCTvoxel=PlanCTCellRef.value\nCBCTCellRef=mat_contents['CBCTInfocell']\nCBCLen=np.shape(CBCTCellRef)\n#Random CBCT scan selection\nCBCTi=randint(0,CBCLen[1]-1)\nCBCellRef=mat_contents['CBCTInfocell'][2, CBCTi]\nCBCellRef=mat_contents[CBCellRef]\nCBCT=CBCellRef.value\nCBCT=np.transpose(CBCT,(2,1,0))\nCBLocRef=mat_contents['CBCTInfocell'][1, 
CBCTi]\nCBLocRef=mat_contents[CBLocRef]\nCBCTLoc=CBLocRef.value\n#%%\n#Sequential CBCT scan selection\n# CBCTs=[]\n# for CBCTi in range(CBCLen[1]):\n#     # print(CBCTi)\n#     CBCellRef=mat_contents['CBCTInfocell'][4, CBCTi]\n#     CBCellRef=mat_contents[CBCellRef]\n#     CBCT=CBCellRef.value\n#     CBCT=np.transpose(CBCT,(2,1,0))\n#     CBCTs.append(CBCT)\n#     CBLocRef=mat_contents['CBCTInfocell'][1, CBCTi]\n#     CBLocRef=mat_contents[CBLocRef]\n#     CBCTLoc=CBLocRef.value\n#     CBCellRef=mat_contents['CBCTInfocell'][3, CBCTi]\n#     CBCellRef=mat_contents[CBCellRef]\n#     CBCTvoxel=CBCellRef.value\n# CBsiz=CBCT.shape\n# # CB_rand_pat_index=np.random.choice(CBCLen[1],size=batch_size,replace=True)\n# # batch_CB_img=np.zeros((CTsiz1[0],CTsiz1[1],len(CB_rand_pat_index)))\n# batch_CB_img=np.zeros((CTsiz1[0],CTsiz1[1],batch_size))\n# for cbi in range(batch_size):\n#     CB_rand_sl_index=np.random.choice(CBsiz[2])\n#     CB_rand_pat_index=np.random.choice(CBCLen[1],replace=False)\n#     print(CB_rand_pat_index)\n#     print(CB_rand_sl_index)\n#     batch_CB_img[:,:,cbi]=CBCTs[CB_rand_pat_index][:,:,CB_rand_sl_index]\n\n\n#%% \nprint('Script started at')\nprint(st_0)\nruntimeN0=(time.time()-start_time_0)/60\n# runtimeN0=(time.time()-start_time_0)\nprint('Script Total Time = %s min'%(runtimeN0))\nprint('Script ended at')\nst_0 = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\nprint(st_0)","repo_name":"duraiarun-p/cycleGAN","sub_path":"cyclegan3D_1.py","file_name":"cyclegan3D_1.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"71220321849","text":"# Now, for the case of the ...more suffix, we could use slicing to remove it. But first, it is important to check that the content actually has the suffix, thereby avoiding accidental loss of content. 
Let's see how this works in the teste.py file.\n\nfrom parsel import Selector\nimport requests\n\n\nresponse = requests.get(\"http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html\")\nselector = Selector(text=response.text)\n\n# Extract the description\ndescription = selector.css(\"#product_description ~ p::text\").get()\nprint(description)\n\n# \"Slice\" the description, removing the suffix\nsuffix = \"...more\"\nif description.endswith(suffix):\n    description = description[:-len(suffix)]\nprint(description)\n","repo_name":"gusttavocaruso/trybeExercises","sub_path":"MODULO.04_computerScience/BLOCO.35_WEB & CRAWLER/35.3 - SCRAPING/DATACLEANING/teste_02.py","file_name":"teste_02.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
{"seq_id":"71275102970","text":"from dotenv import load_dotenv\nimport json\nimport os\nfrom requests_oauthlib import OAuth1Session\n\ndotenv_path = os.path.join(os.path.dirname(__file__), '.env')\nload_dotenv(dotenv_path)\n\nCONSUMER_KEY = os.environ.get('CONSUMER_KEY')\nCONSUMER_SECRET = os.environ.get('CONSUMER_SECRET')\nACCESS_TOKEN = os.environ.get('ACCESS_TOKEN')\nACCESS_TOKEN_SECRET = os.environ.get('ACCESS_TOKEN_SECRET')\n\ntwitter = OAuth1Session(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n\ntweet = input('Tweet: ')\nparams = {'status': tweet}\nreq = twitter.post('https://api.twitter.com/1.1/statuses/update.json', params = params)\n\nif req.status_code != 200:\n    print('Tweet failed...')\nelse:\n    print('Tweet was successful!')","repo_name":"koluku/twitwi","sub_path":"twitwi.py","file_name":"twitwi.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"26887480018","text":"from leetcode.tree.binary_tree_traversals import TreeNode\nfrom typing import Optional\n\n\nclass Solution:\n    def rangeSumBST(self, root: Optional[TreeNode], low: int, high: int) -> int:\n        range_sum = 0\n\n        def helper(node):\n            nonlocal range_sum\n            if node:\n                if low <= node.val <= high:\n                    range_sum += node.val\n                if node.val > low:\n                    helper(node.left)\n                if node.val < high:\n                    helper(node.right)\n\n        helper(root)\n        return range_sum\n\n\nif __name__ == '__main__':\n    root_node1 = TreeNode(10)\n    root_node1.left = TreeNode(5)\n    root_node1.right = TreeNode(15)\n    root_node1.left.left = TreeNode(3)\n    root_node1.left.right = TreeNode(7)\n    root_node1.right.left = TreeNode(13)\n    root_node1.right.right = TreeNode(18)\n    root_node1.left.left.left = TreeNode(1)\n    root_node1.left.right.left = TreeNode(6)\n    print(Solution().rangeSumBST(root_node1, 6, 10))\n","repo_name":"pk0912/ProgrammingPractice","sub_path":"leetcode/tree/binary_search_tree/range_sum.py","file_name":"range_sum.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"6660269926","text":"from PIL import Image\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tqdm import tqdm\n\ndata_set_path = r'C:/Users/babymon/Desktop/데이터셋/사람얼굴/archive/img_align_celeba/img_align_celeba'\n\nimages = list()\n\nfor i in os.listdir(data_set_path)[0:50000]:\n    old_image = Image.open(f'{data_set_path}/{i}').crop((20, 30, 160, 180)).convert('L').resize((64, 64))\n    images.append(np.array(old_image))\n\n# plt.imshow(images[0])\n# plt.show()\n\n# print(images.shape)\n\n# image preprocessing\nimages = 
np.divide(images, 255)\nimages = images.reshape(50000, 64, 64, 1)  # expand the grayscale images to 4 dimensions\n# images.reshape( 5 ,)\n\nprint(images.shape)\n\n# build the discriminator model\ndiscriminator = tf.keras.models.Sequential([\n    tf.keras.layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same', input_shape=[64,64,1]),\n    tf.keras.layers.LeakyReLU(alpha=0.2),\n    tf.keras.layers.Dropout(0.4),\n    tf.keras.layers.Conv2D(64, (3,3), strides=(2, 2), padding='same'),\n    tf.keras.layers.LeakyReLU(alpha=0.2),\n    tf.keras.layers.Dropout(0.4),\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nnoise_shape = 100\n\n# build the generator model\ngenerator = tf.keras.models.Sequential([\n    tf.keras.layers.Dense(4 * 4 * 256, input_shape=(noise_shape,)),\n    tf.keras.layers.Reshape((4, 4, 256)),\n    tf.keras.layers.Conv2DTranspose(256, 3, strides=2, padding='same'),  # also look into UpSampling2D\n    tf.keras.layers.LeakyReLU(alpha=0.2),\n    tf.keras.layers.BatchNormalization(),\n    tf.keras.layers.Conv2DTranspose(128, 3, strides=2, padding='same'),\n    tf.keras.layers.LeakyReLU(alpha=0.2),\n    tf.keras.layers.BatchNormalization(),\n    tf.keras.layers.Conv2DTranspose(64, 3, strides=2, padding='same'),\n    tf.keras.layers.LeakyReLU(alpha=0.2),\n    tf.keras.layers.BatchNormalization(),\n    tf.keras.layers.Conv2DTranspose(1, 3, strides=2, padding='same', activation='sigmoid')\n])\n\ngenerator.summary()\n\nGAN = tf.keras.models.Sequential([generator, discriminator])\n\ndiscriminator.compile(optimizer='adam', loss='binary_crossentropy')\ndiscriminator.trainable = False\n\nGAN.compile(optimizer='adam', loss='binary_crossentropy')\n\n\ndef predict_pic(time: int, cycle: int):\n\n    show_img = plt\n    show_img.figure(f'results of round {str(cycle+1)}')\n    predict_value = generator.predict((lambda x, y : np.random.uniform(x, y, size=(20, 100)))(-1, 1))\n    # print(predict_value.shape)\n    for i in range(20):\n        show_img.subplot(4, 5, i+1)\n        show_img.imshow(predict_value[i].reshape(64, 64), cmap='gray')  # for color images: 64, 64, 3\n        show_img.axis('off')\n\n    show_img.tight_layout()\n    show_img.show(block=False)\n    show_img.pause(time)\n    show_img.close()\n\n\nx_data = images\n\n\nfor i in tqdm(range(300)):\n    print(f'current epoch: {i+1}')\n    predict_pic(5, i)\n\n    for j in range(50000//128):\n        if j % 100 == 0:\n            print(f'current batch: {j+1}')\n\n        # train the discriminator\n        real_images = x_data[j*128:(j+1)*128]\n        real_markings = np.ones(shape=(128, 1))\n        loss1 = discriminator.train_on_batch(real_images, real_markings)  # real images\n\n        random_value = np.random.uniform(-1, 1, size=(128, 100))\n        fake_images = generator.predict(random_value, verbose=0)\n        fake_markings = np.zeros(shape=(128, 1))\n\n        loss2 = discriminator.train_on_batch(fake_images, fake_markings)  # fake images\n        \n        # try shuffling real_images and fake_images together for training\n\n        # train the generator\n        loss3 = GAN.train_on_batch(random_value, real_markings)\n\n    print(f'final losses for this epoch - discriminator loss: {loss1 + loss2}, GAN loss: {loss3}')\n\n\n'''\nThings still to try:\nmodify and add to the layers of the GAN network\nuse more images, or feed in slightly perturbed images\ntry tricks such as label smoothing\ntry different settings for the noise (random numbers)\nlook at how GANs are built these days\n'''","repo_name":"surplusboy/machine_learning_ex","sub_path":"GAN_model_ex/tensor_a.py","file_name":"tensor_a.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"18127392915","text":"from re import search\nimport csv\nimport time\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.by import By\nimport json\nfrom webdriver_manager.chrome import 
ChromeDriverManager\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument(\"--headless\")\ndriver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=chrome_options)\noutputfile = open('xyz.csv', 'w')\ncsvwriter = csv.writer(outputfile)\nwith open('amfoss.json') as f:\n    data = json.loads(f.read())\nfor i in range(len(data)):\n    link = []\n    time.sleep(2)\n    query = data[i][\"School_Name_EN\"]\n    url = f\"https://www.google.com/search?q={query}\"\n    driver.get(url)\n    soup = BeautifulSoup(driver.page_source, 'html.parser')\n    search = soup.find('div', class_=\"yuRUbf\")\n    z = search.a.get('href')\n    z = str(z)\n    link.append(z)\n    print(query)\n    csvwriter.writerow(link)\n","repo_name":"Arindam200/Python_Projects","sub_path":"Projects/API projects/Google_Selenium_Searcher/Google_Search.py","file_name":"Google_Search.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"77"}
{"seq_id":"7623061233","text":"n = int(input())\nlis = list(map(int, input().split()))\np = 0\nv = 0\nd = []\nfor i in lis:\n    if lis.count(i) == i and i not in d:\n        p += 1\n        v += i\n        d.append(i)\nif p == 0:\n    print(\"-1\")\nelse:\n    s = v / p\n    print(\"%.2f\" % (s))\n","repo_name":"Happy-76/codemind-python","sub_path":"Average_of_super_elements.py","file_name":"Average_of_super_elements.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
{"seq_id":"42634124013","text":"\ndef main():\n    filename = input('Enter the file name: ')\n    with open(filename, 'w') as f:\n        data = None\n        while data != '':\n            data = input('Enter a line to write to the file, or an empty line to exit: ')\n            f.write(data + '\\n')\n\n\nmain()\n","repo_name":"AlexanderMaslikhin/python","sub_path":"lesson5/lesson5_dz1.py","file_name":"lesson5_dz1.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"29442130099","text":"def suma_divisores(a):\n    suma = 0\n\n    for i in range(1, a):\n        if a % i == 0:\n            suma += i\n\n    if suma == 1:\n        es_primo = True\n    else:\n        es_primo = False\n\n    return suma, es_primo\n\nif __name__ == \"__main__\":\n    a = int(input(\"Enter a positive integer: \"))\n    resultado, primo = suma_divisores(a)\n\n    print(f\"The sum of the divisors of {a} is: {resultado}\")\n    print(f\"The number {a} {'is prime' if primo else 'is not prime'}\")\n\n    ","repo_name":"pabloschwarzenberg/grader","sub_path":"tema3_ej1/tema3_ej1_dd724f9fce4a2e00b679294dc181be55.py","file_name":"tema3_ej1_dd724f9fce4a2e00b679294dc181be55.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"69824676410","text":"from logging import getLogger\nfrom typing import Any\n\nfrom aiohttp import ClientSession\n\nlog = getLogger(__name__)\n\n\nclass Aeza:\n    def __init__(\n        self,\n        token: str | None = None,\n        session: ClientSession = ClientSession(),\n        http_proxy: str | None = None,\n    ) -> None:\n        self.session = session\n        self.base_url = \"https://my.aeza.net/api/\"\n        self.http_proxy = http_proxy\n\n        self.headers = {}\n        if token is not None:\n            self.headers[\"X-API-Key\"] = token\n\n    async def _request(self, method: str, url: str, **kwargs: Any) -> dict[str, Any]:\n        if self.http_proxy is not None:\n            kwargs[\"proxy\"] = self.http_proxy\n        async with self.session.request(\n            method, self.base_url + url, 
headers=self.headers, **kwargs\n        ) as resp:\n            resp.raise_for_status()\n            return await resp.json()\n\n    async def get_product_group_statuses(self) -> dict[int, bool]:\n        out = {}\n        resp = await self._request(\"GET\", \"services/products\")\n        for group in resp[\"data\"][\"items\"]:\n            try:\n                id_ = group[\"id\"]\n                # the API encodes the flag as either the string \"true\" or a real boolean\n                disabled = group[\"group\"][\"payload\"].get(\"isDisabled\", False) in [\n                    \"true\",\n                    True,\n                ]\n                out[id_] = not disabled\n            except (KeyError, TypeError) as e:\n                if group is None:\n                    continue\n                log.debug(\n                    f\"Error in get_product_group_statuses, id: {group.get('id', 'ID not defined')}: {str(e)}\"\n                )\n        return out\n","repo_name":"cofob/aeza-assistant","sub_path":"aeza_assistant/aeza.py","file_name":"aeza.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"}
{"seq_id":"21920846946","text":"#!/usr/bin/env python\n\n# stdlib imports\nimport re\nimport os.path\n\n# third party imports\nimport pandas as pd\nimport numpy as np\n\n# local imports\nfrom losspager.utils.exception import PagerException\nfrom losspager.utils.country import Country\n\nDEFAULT_RATE = 1.17 / 100.0\n\n\ndef adjust_pop(population, tpop, tevent, rate):\n    \"\"\"Adjust input population between two input years given growth rate.\n\n    :param population:\n      Population starting value at time *tpop*.\n    :param tpop:\n      Year in which input population data was collected.\n    :param tevent:\n      Year to which population data should be adjusted.\n    :param rate:\n      Population growth rate value.\n    :returns:\n      Adjusted population value at time *tevent*.\n    \"\"\"\n    T = tpop - tevent\n    adjpop = np.round(population * np.power((1 + rate), (-1 * T)))\n    return adjpop\n\n\nclass PopulationGrowth(object):\n    def __init__(self, ratedict, default_rate=DEFAULT_RATE):\n        \"\"\"Initialize PopulationGrowth with a dictionary of growth rates over given time\n        spans, per country. 
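Rates are plain per-year fractions (fromUNSpreadsheet below divides the UN\n        percentages by 100). As a worked example of the update applied by\n        adjust_pop above: projecting 1000 people from 2010 to 2015 at rate 0.01\n        gives round(1000 * (1 + 0.01) ** -(2010 - 2015)) = 1051.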
\n\n :param ratedict:\n dictionary like: {841: {'end': [1955, 1960, 1965],\n 'rate': [0.01, 0.02, 0.03],\n 'start': [1950, 1955, 1960]},\n 124: {'end': [1955, 1960, 1965],\n 'rate': [0.02, 0.03, 0.04],\n 'start': [1950, 1955, 1960]}}\n Where 841 and 842 in this case are country codes (US and Canada), and the three \"columns\" for each \n country are the year start of each time interval, the year end of each time interval, and the growth \n rates for those time intervals.\n :param default_rate:\n Value to be used for growth rate when input country codes are not found in ratedict.\n \"\"\"\n # check the fields in the ratedict\n for key, value in ratedict.items():\n if 'start' not in value or 'end' not in value or 'rate' not in value:\n raise PagerException(\n 'All country rate dictionaries must contain keys \"start\",\"end\",\"rate\"')\n if not (len(value['start']) == len(value['end']) == len(value['rate'])):\n raise PagerException(\n 'Length of start/end year arrays must match length of rate arrays.')\n self._dataframe = pd.DataFrame(ratedict)\n self._default = default_rate\n\n @classmethod\n def fromDefault(cls):\n homedir = os.path.dirname(os.path.abspath(\n __file__)) # where is this module?\n excelfile = os.path.join(\n homedir, '..', 'data', 'WPP2015_POP_F02_POPULATION_GROWTH_RATE.xls')\n return cls.fromUNSpreadsheet(excelfile)\n\n @classmethod\n def fromUNSpreadsheet(cls, excelfile, default_rate=DEFAULT_RATE):\n \"\"\"Instantiate population growth rates from UN global spreadsheet.\n http://esa.un.org/unpd/wpp/Download/Standard/Population/\n\n :param excelfile:\n Path to Excel file containing UN population growth rate data per country.\n :param default_rate:\n Value to be used for growth rate when input country codes are not found in ratedict.\n :returns:\n PopulationGrowth instance.\n \"\"\"\n re_year = '[0-9]*'\n df = pd.read_excel(excelfile, header=16)\n ratedict = {}\n starts = []\n ends = []\n for col in df.columns:\n matches = re.findall(re_year, col)\n if len(matches) and len(matches[0]):\n starts.append(int(matches[0]))\n ends.append(int(matches[2]))\n\n ccode_idx = df.columns.get_loc('Country code')\n uscode = 840\n usrates = None\n country = Country()\n for idx, row in df.iterrows():\n key = row['Country code']\n rates = row.iloc[ccode_idx + 1:].values / 100.0\n if key == uscode:\n usrates = rates.copy()\n if country.getCountry(key) is None:\n continue\n ratedict[key] = {'start': starts[:], 'end': ends[:], 'rate': rates}\n\n # we have three non-standard \"country\" codes for California, eastern US, and western US.\n ratedict[902] = {'start': starts[:], 'end': ends[:], 'rate': usrates}\n ratedict[903] = {'start': starts[:], 'end': ends[:], 'rate': usrates}\n ratedict[904] = {'start': starts[:], 'end': ends[:], 'rate': usrates}\n\n return cls(ratedict, default_rate=default_rate)\n\n def getRate(self, ccode, year):\n \"\"\"Return population growth rate(s) for a given country code and year.\n\n :param ccode:\n Numeric country code.\n :param year:\n Integer year to be used to find growth rate (will be between start and end years,\n or before first start year or after last end year).\n :returns:\n Scalar growth rate.\n \"\"\"\n ccode = int(ccode)\n if ccode not in self._dataframe.columns:\n return self._default\n starts = np.array(self._dataframe[ccode]['start'])\n ends = np.array(self._dataframe[ccode]['end'])\n rates = np.array(self._dataframe[ccode]['rate'])\n if year is None:\n return dict(list(zip(starts, rates)))\n if year < starts.min():\n rate = rates[0]\n elif year > 
ends.max():\n            rate = rates[-1]\n        else:\n            idx = (np.abs(year - ends)).argmin()\n            rate = rates[idx]\n        return rate\n\n    def getRates(self, ccode):\n        \"\"\"Return population growth rates for a given country code.\n\n        :param ccode:\n          Numeric country code.\n        :returns:\n          Tuple of two lists of (start_years,rates).\n        \"\"\"\n        if ccode not in self._dataframe.columns:\n            raise PagerException(\n                'Country %s not found in PopulationGrowth data structure.' % ccode)\n        starts = np.array(self._dataframe[ccode]['start'])\n        rates = np.array(self._dataframe[ccode]['rate'])\n        return (starts, rates)\n\n    def adjustPopulation(self, population, ccode, tpop, tevent):\n        \"\"\"Adjust population based on growth rates.\n\n        :param population:\n          Number of people.\n        :param ccode:\n          Numeric country code.\n        :param tpop:\n          Year of population data collection.\n        :param tevent:\n          Year to which population data should be adjusted from tpop.\n        :returns:\n          Population adjusted for growth rates in years between tpop and tevent.\n        \"\"\"\n        if tpop == tevent:\n            return population\n        if tpop < tevent:\n            interval = 1\n        else:\n            interval = -1\n        newpop = population\n        for startpop in np.arange(tpop, tevent, interval):\n            endpop = startpop + interval\n            rate = self.getRate(ccode, startpop)\n            newpop = adjust_pop(newpop, startpop, endpop, rate)\n\n        return newpop\n","repo_name":"mhearne-usgs/pager","sub_path":"losspager/models/growth.py","file_name":"growth.py","file_ext":"py","file_size_in_byte":7160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"}
{"seq_id":"30124623447","text":"# While we can manually send and receive data over HTTP using the socket library,\n# there is a much simpler way to perform this common task in Python by using the\n# urllib library.\n\n# Using urllib, you can treat a web page much like a file. You simply indicate\n# which web page you would like to retrieve and urllib handles all of the HTTP\n# protocol and header details. 
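(urlopen returns a file-like object of bytes, which is why each line below is decode()d before use.) 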
The following is equivalent to 12.2:\n\nimport urllib.request\n\nfhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')\nfor line in fhand:\n print(line.decode().strip())\n\n# As an example, we can write a program to retrieve the data for romeo.txt and\n# compute the frequency of each word in the file as follows:\n\nfileOpen = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')\n\ncounts = dict()\nfor line in fileOpen:\n words = line.decode().split()\n for word in words:\n counts[word] = counts.get(word, 0) + 1\nprint(counts)\n\n# Refer to urllib documentation for more functionality:\n# https://docs.python.org/3/library/urllib.html","repo_name":"kylev114/PY4E","sub_path":"Chapter 12 Network Programs/12.4_urllibLibrary.py","file_name":"12.4_urllibLibrary.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72101227768","text":"from gi.overrides import GLib\n\nimport src.globals\nfrom src.api.api import send_p2p_message, send_p2g_message, get_group_name, get_group_member_num, get_contact_list, \\\n is_contact_group, get_nickname_by_id, get_p2p_messages_after_time, get_p2g_messages_after_time\n\nfrom src.utils import get_cached_user_id, append_cached_group_list, is_id_in_group_cache, \\\n append_to_cached_contact_list, get_cached_contact_list, get_cached_selected_contact_id, get_text_buffer_cache\nfrom src.utils.common_utils import write_log\nfrom src.utils.message_utils import *\n\n\ndef send_p2p_message_worker(receiver_id, content):\n send_p2p_message(receiver_id, content)\n\n\ndef send_group_message_worker(group_id, content):\n send_p2g_message(group_id, content)\n\n\ndef init_local_storage():\n contact_id_list = get_contact_list(get_cached_user_id())\n write_log(\"contact_list: \" + str(contact_id_list))\n for contact_id in contact_id_list:\n is_group_id = is_contact_group(contact_id)\n if is_group_id:\n write_log(\"group_id: \" + str(contact_id))\n append_cached_group_list(contact_id)\n for contact_id in contact_id_list:\n local_latest_message_time = get_local_latest_message_time(contact_id)\n if not is_id_in_group_cache(contact_id):\n p2p_messages_after_time = get_p2p_messages_after_time(contact_id, local_latest_message_time)\n for message in p2p_messages_after_time:\n append_message_storage(contact_id, False, message[\"content\"], message[\"create_time\"])\n latest_message = get_local_latest_message(contact_id)\n if latest_message is not None:\n local_latest_message_time = get_local_latest_message_time(contact_id)\n else:\n latest_message = {'message_content': \"\", 'is_sender': False, 'sent_time': int(round(time.time()*1000))}\n local_latest_message_time = latest_message['sent_time']\n nickname = get_nickname_by_id(contact_id)\n append_to_cached_contact_list(contact_id, nickname, latest_message['message_content'], local_latest_message_time)\n else:\n p2g_message_after_time = get_p2g_messages_after_time(contact_id, local_latest_message_time)\n for message in p2g_message_after_time:\n append_group_message_storage(contact_id, False, message[\"content\"], message[\"create_time\"], message[\"sender_name\"])\n latest_message = get_local_latest_message(contact_id)\n if latest_message is not None:\n local_latest_message_time = get_local_latest_message_time(contact_id)\n else:\n latest_message = {'message_content': \"\", 'is_sender': False, 'sent_time': int(round(time.time()*1000))}\n local_latest_message_time = latest_message['sent_time']\n group_name = 
get_group_name(contact_id)\n            append_to_cached_contact_list(contact_id, group_name, latest_message['message_content'], local_latest_message_time)\n    write_log(\"finished pulling messages, returning\")\n\n\ndef init_chat_window(chat_window):\n    write_log(\"starting chat window initialization\")\n    \"\"\"\n    Chat window initialization:\n    1. read the contact list from the cache\n    2. read the message history from the cache\n    3. map each contact's ContactItem to its message-history list\n    4. add the contact list and the message lists to the chat window's containers\n    \"\"\"\n    contact_list = get_cached_contact_list()\n    \"\"\"Fetch every local contact from the local cache\"\"\"\n    for contact in contact_list:\n        is_selected = False\n        contact_nickname = contact['nickname']\n        contact_id = contact['contact_id']\n        contact_sent_time = contact['sent_time']\n        contact_last_message = contact['last_message']\n        message_list = get_stored_messages(contact_id)\n\n        \"\"\"If this contact was selected during the previous session, show its history when the chat window opens and mark it as selected\"\"\"\n        if get_cached_selected_contact_id() == contact_id:\n            is_selected = True\n        \"\"\"Fill the locally stored message history into the message-list container\"\"\"\n        if is_id_in_group_cache(contact_id):\n            for message in message_list:\n                chat_window.insert_group_message(message['message_content'], message['is_sender'], message['sender_name'])\n            group_name = get_group_name(contact_id)\n            member_num = get_group_member_num(contact_id)\n            chat_window.message_header_bar.set_title(group_name + \" (\" + str(member_num) + \")\")\n        else:\n            for message in message_list:\n                chat_window.insert_message(message[\"message_content\"], message[\"is_sender\"])\n            \"\"\"Put the contact's nickname into the header bar\"\"\"\n            chat_window.message_header_bar.set_title(contact_nickname)\n\n        \"\"\"Restore whatever was left in the text buffer when the program last exited\"\"\"\n        text = get_text_buffer_cache(contact_id)\n        chat_window.text_box.get_buffer().set_text(text)\n        chat_window.insert_contact(contact_nickname, contact_last_message, contact_sent_time, contact_id, is_selected)\n    write_log(\"finished pulling messages, showing the window\")\n\n\ndef __insert_message_from_contact(chat_window, contact_id, sent_time, message_content):\n    \"\"\"\n    Append a freshly received message to the message-history JSON\n    :param chat_window: Gtk.Window\n    :param contact_id: the contact who sent the current user this message\n    :param sent_time: send time as a 13-digit millisecond UNIX timestamp\n    :param message_content: the message body\n    \"\"\"\n    append_message_storage(contact_id, False, message_content, sent_time)\n    is_selected = False\n    if src.globals.LAST_SELECTED_CONTACT.contact_id == contact_id:\n        is_selected = True\n    GLib.idle_add(chat_window.insert_message, message_content, False)\n    GLib.idle_add(src.globals.LAST_SELECTED_CONTACT.update_contact, message_content, sent_time, is_selected)\n    chat_window.scroll_flag = not chat_window.scroll_flag\n\n\ndef __insert_message_from_group(chat_window, group_id, sender_id, sender_name, sent_time, message_content):\n    \"\"\"\n    Append a freshly received message to the message-history JSON\n    :param chat_window: Gtk.Window\n    :param group_id: the group that sent the current user this message\n    :param sender_id: the message sender\n    :param sender_name: the sender's nickname\n    :param sent_time: send time as a 13-digit millisecond UNIX timestamp\n    :param message_content: the message body\n    \"\"\"\n    write_log(\"insert message from group\")\n    append_group_message_storage(group_id, False, message_content, sent_time, sender_name)\n    write_log(\"appended to cache\")\n    is_selected = False\n    if src.globals.LAST_SELECTED_CONTACT.contact_id == group_id:\n        is_selected = True\n    GLib.idle_add(chat_window.insert_group_message, message_content, False, sender_name)\n    GLib.idle_add(src.globals.LAST_SELECTED_CONTACT.update_contact, message_content, sent_time, is_selected)\n    chat_window.scroll_flag = not chat_window.scroll_flag\n\n\ndef parse_p2p_msg_api_result(chat_window, msg_list):\n    write_log(\"parse_p2p_msg_api_result: \" + str(msg_list))\n    for message in msg_list:\n        __insert_message_from_contact(chat_window,
message[\"senderID\"],\n message[\"create_time\"],\n message[\"content\"])\n\n\ndef parse_p2g_msg_api_result(chat_window, msg_list):\n write_log(\"parse_p2g_msg_api_result\")\n write_log(str(isinstance(msg_list, list)))\n for message in msg_list:\n __insert_message_from_group(chat_window,\n message[\"groupID\"],\n message[\"senderID\"],\n message[\"sender_name\"],\n message[\"create_time\"],\n message[\"content\"])\n","repo_name":"xiaoheng86/avo-chat-client","sub_path":"src/controllers/chat_controller.py","file_name":"chat_controller.py","file_ext":"py","file_size_in_byte":8077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27444089510","text":"from collections import deque\n\ndq = deque()\ndq.append(0)\ndq.append(1)\ndq.append(2)\ndq.appendleft(-1)\ndq.appendleft(-2)\n\nfor i in dq:\n print(i, end=\" \")\nprint()\n\ndq.pop()\nfor i in dq:\n print(i, end=\" \")\nprint()\n\ndq.popleft()\nfor i in dq:\n print(i, end=\" \")","repo_name":"rkskek1226/Algorithm","sub_path":"Data_Structure/Linear_DS/DoubleEndedQueue.py","file_name":"DoubleEndedQueue.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35347522062","text":"from unittest import TestCase\n\nfrom IntersectionOfTwoLinkedLists import IntersectionOfTwoLinkedLists, ListNode\n\n\nclass TestIntersectionOfTwoLinkedLists(TestCase):\n def test_getIntersectionNode(self):\n i = IntersectionOfTwoLinkedLists()\n\n self.assertIsNone(i.getIntersectionNode(None, None))\n\n node345 = ListNode(3)\n node345.next = ListNode(4)\n node345.next.next = ListNode(5)\n\n self.assertIsNone(i.getIntersectionNode(node345, ListNode(6)))\n\n node12345 = ListNode(1)\n node12345.next = ListNode(2)\n node12345.next.next = node345\n\n self.assertEqual(i.getIntersectionNode(node12345, node345), node345)\n","repo_name":"TonnyL/Windary","sub_path":"Python/IntersectionOfTwoLinkedListsTest.py","file_name":"IntersectionOfTwoLinkedListsTest.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":187,"dataset":"github-code","pt":"77"} +{"seq_id":"2506353047","text":"'''\n19943:图的拉普拉斯矩阵(matrix)\nhttp://cs101.openjudge.cn/practice/19943/\n\n'''\nnode, edge = [int(i) for i in input().split()]\nmatrix = []\nfor i in range(node):\n matrix.append([0] * node)\nfor fake_i in range(edge):\n i, j = [int(i) for i in input().split()]\n matrix[i][i] += 1\n matrix[j][j] += 1\n matrix[i][j] = -1\n matrix[j][i] = -1\n\nfor i in range(node):\n print(*matrix[i], sep=' ')","repo_name":"forxhunter/ComputingIntro","sub_path":"solutions/cs101_openjudge/19943.py","file_name":"19943.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"5545848540","text":"# Name of csv file for storing data\nCSV_FILE = \"phonebook.csv\"\n\n# Names of table columns\nHEADER_FIELDS = [\n \"Last name\",\n \"First Name\",\n \"Middle Name\",\n \"Company\",\n \"Phone (work)\",\n \"Phone (cell)\",\n]\n\n# Widths of colums (for print formating)\nTOTAL_WIDTH = 130\n\n# Minimum number of contacts to initiate paged output\nPAGED_OUT_THRESHOLD = 10\n","repo_name":"kgdpete2022/phonebook","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29333421819","text":"#Conversión 
# Input\nnumero_decimal = int(input(\"Enter an integer: \"))\n\nnumero_binario = 0\nmultiplicador = 1\n\n# Processing\nwhile numero_decimal != 0: # step 3\n    # steps 1, 4 and 5: weight the remainder by its place-value multiplier\n    numero_binario = numero_binario + numero_decimal % 2 * multiplicador\n    numero_decimal //= 2 # step 1\n    multiplicador *= 10 # step 5\n\n# Output\nprint(\"Result =\", numero_binario) ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej4/hito1_ej4_128398e4fa0d6b009b3a8f8b495f8dc2.py","file_name":"hito1_ej4_128398e4fa0d6b009b3a8f8b495f8dc2.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"34499609805","text":"import pickle, time\nfrom sys import stdin, stdout, stderr\nfrom collections import OrderedDict\nimport numpy as np\nimport theano as th\nimport theano.tensor as T\n\n\nclass ModelParams:\n    \"\"\"Base class for RNN variants.\n    NOTE: Not intended to be instantiated!\n    \"\"\"\n    # Parameter matrix names and ordering\n    # Defined by model subclass\n    pnames = []\n\n    def __init__(self, hyper, epoch=0, pos=0, pvalues=None):\n        self.hyper = hyper\n        self.epoch = epoch\n        self.pos = pos\n\n        if not pvalues:\n            pvalues = self._build_p()\n\n        # Initialize shared variables\n\n        # Create parameter dicts\n        # OrderedDict used to keep parameter access deterministic throughout\n        self.params = OrderedDict()\n        self.mparams = OrderedDict()\n\n        # Load parameter matrices and create rmsprop caches\n        for n in self.pnames:\n            self.params[n] = th.shared(name=n, value=pvalues[n].astype(th.config.floatX))\n            self.mparams['m'+n] = th.shared(name='m'+n, value=np.zeros_like(pvalues[n]).astype(th.config.floatX))\n\n        # Build Theano generation functions\n        self._built_g = False\n        self._built_t = False\n        self._build_g()\n\n    # Model-specific definitions of parameters, forward propagation, regularization, state initialization\n    def _build_p(self):\n        pass\n    def _forward_step(self, x_t, s_t):\n        pass\n    def _weight_cost(self, reg_lambda):\n        pass\n    def freshstate(self, batchsize):\n        pass\n\n    # Theano-generated model-dependent functions\n    def gen_chars(self, *args, **kwargs):\n        pass\n    def gen_chars_max(self, *args, **kwargs):\n        pass\n    def train_step_bat(self, *args, **kwargs):\n        pass\n    def errs_bat(self, *args, **kwargs):\n        pass\n    def err_bat(self, *args, **kwargs):\n        pass\n    def grad_bat(self, *args, **kwargs):\n        pass\n\n    # Cross-model definitions of generation functions\n    def _build_g(self):\n        \"\"\"Build Theano graph and define generation functions.\"\"\"\n\n        stdout.write(\"Compiling generation functions...\")\n        stdout.flush()\n        time1 = time.time()\n\n        # Local binding for convenience\n        forward_step = self._forward_step\n\n        ### SEQUENCE GENERATION ###\n\n        x_in = T.vector('x_in')\n        x_seq = T.matrix('x_seq')\n        s_in = T.matrix('s_in')\n        k = T.iscalar('k')\n        temperature = T.scalar('temperature')\n\n        rng = T.shared_randomstreams.RandomStreams(seed=(int(time.time()) % 1000000000))\n\n        # Generate output sequence based on input single onehot and given state.\n\n        # Main version:\n        # Chooses output char by multinomial, and feeds back in for next step.\n        # Scaled by temperature parameter before softmax (temperature 1.0 leaves\n        # softmax output unchanged).\n        # Returns matrix of one-hot vectors.\n        def generate_step(x_t, s_t, temp):\n            # Do next step\n            o_t1, s_t1 = forward_step(x_t, s_t)\n\n            # Get softmax\n            o_ts = T.nnet.softmax(o_t1 / temp)[-1]\n\n            # Randomly choose by multinomial 
distribution\n o_rand = rng.multinomial(n=1, pvals=o_ts, dtype=th.config.floatX)\n\n return o_rand, s_t1\n\n [o_chs, s_chs], genupdate = th.scan(\n fn=generate_step,\n outputs_info=[dict(initial=x_in), dict(initial=s_in)],\n non_sequences=temperature,\n n_steps=k)\n s_ch = s_chs[-1]\n\n self.gen_chars = th.function(\n inputs=[k, x_in, s_in, th.Param(temperature, default=0.5)], \n outputs=[o_chs, s_ch], \n name='gen_chars', \n updates=genupdate)\n\n # Alternate version:\n # As above, but chooses output char by argmax, and feeds back in.\n def generate_step_max(x_t, s_t):\n # Do next step\n o_t1, s_t1 = forward_step(x_t, s_t)\n\n # Get softmax\n o_ts = T.nnet.softmax(o_t1)[-1]\n\n # Now find selected index\n o_idx = T.argmax(o_ts)\n\n # Create one-hot\n o_ret = T.zeros_like(o_ts)\n o_ret = T.set_subtensor(o_ret[o_idx], 1.0)\n\n return o_ret, s_t1\n\n [o_chms, s_chms], _ = th.scan(\n fn=generate_step_max,\n outputs_info=[dict(initial=x_in), dict(initial=s_in)],\n n_steps=k)\n s_chm = s_chms[-1]\n\n self.gen_chars_max = th.function(\n inputs=[k, x_in, s_in], \n outputs=[o_chms, s_chm], \n name='gen_chars_max')\n\n # Sequence processing without generation:\n # Input is onehot-encoded string, output is sequence\n # of predictions and states at each step. Useful for\n # direct comparisons of output probabilities and \n # per-neuron activations\n def process_step(x_t, s_t, temp):\n # Do next step\n o_t1, s_t1 = forward_step(x_t, s_t)\n\n # Get softmax\n o_ts = T.nnet.softmax(o_t1 / temp)[-1]\n\n return o_ts, s_t1\n\n [o_seq, s_seq], _ = th.scan(\n fn=process_step,\n outputs_info=[None, dict(initial=s_in)],\n sequences=x_seq,\n non_sequences=temperature)\n\n self.seq_process = th.function(\n inputs=[x_seq, s_in, th.Param(temperature, default=0.5)],\n outputs=[o_seq, s_seq],\n name='seq_process')\n\n # And done!\n time2 = time.time()\n stdout.write(\"done!\\nCompilation took {0:.3f} s.\\n\\n\".format(time2 - time1))\n stdout.flush()\n self._built_g = True\n\n # Cross-model definitions of training functions\n def _build_t(self):\n \"\"\"Build Theano graph and define training functions.\"\"\"\n\n stdout.write(\"Compiling training functions...\")\n stdout.flush()\n time1 = time.time()\n\n # Local bindings for convenience\n forward_step = self._forward_step\n reg_cost = self._reg_cost\n\n # Scalar training parameters\n learnrate = T.scalar('learnrate')\n decayrate = T.scalar('decayrate')\n reg_lambda = T.scalar('reg_lambda')\n\n ### BATCH-SEQUENCE TRAINING ###\n\n # Batch inputs\n x_bat = T.tensor3('x_bat')\n y_bat = T.tensor3('y_bat')\n s_in_bat = T.tensor3('s_in_bat')\n\n # Step function\n def batch_step(x_t, y_t, s_t):\n o_t1, s_t = forward_step(x_t, s_t)\n # We can use the whole matrix from softmax for batches\n o_ts = T.nnet.softmax(o_t1)\n return o_ts, s_t\n\n [o_bat, s_seq_bat], _ = th.scan(\n batch_step, \n sequences=[x_bat, y_bat], \n truncate_gradient=self.hyper.bptt_truncate,\n outputs_info=[None, dict(initial=s_in_bat)])\n s_out_bat = s_seq_bat[-1]\n\n # We have to reshape the outputs, since Theano's categorical cross-entropy\n # function will only work with matrices or vectors, not tensor3s.\n # Thus we flatten along the sequence/batch axes, leaving the prediction\n # vectors as-is, compute cross-entropy, then reshape the errors back to \n # their proper dimensions.\n o_bat_flat = T.reshape(o_bat, (o_bat.shape[0] * o_bat.shape[1], -1))\n y_bat_flat = T.reshape(y_bat, (y_bat.shape[0] * y_bat.shape[1], -1))\n o_errs_bat = T.nnet.categorical_crossentropy(o_bat_flat, y_bat_flat)\n o_errs_res = 
T.reshape(o_errs_bat, (o_bat.shape[0], o_bat.shape[1]))\n\n # Next, we reshuffle to group sequences together instead\n # of batches, then sum the individual sequence errors.\n # (Hopefully Theano's auto-differentials follow this...)\n o_errs_shuf = o_errs_res.dimshuffle(1, 0)\n o_errs_sums = T.sum(o_errs_shuf, axis=1)\n # Regularization term (without averaging over samples (done outside Theano)).\n # reg_cost() defined per-model.\n reg_sum = reg_cost(reg_lambda)\n # Final cost (with regularization):\n cost_bat = T.sum(o_errs_sums) + reg_sum\n\n # Gradients\n dparams_bat = [ T.grad(cost_bat, p) for p in self.params.values() ]\n\n # rmsprop parameter updates\n uparams_bat = [ decayrate * mp + (1 - decayrate) * dp ** 2 for mp, dp in zip(self.mparams.values(), dparams_bat) ]\n\n # Gather updates\n train_updates_bat = OrderedDict()\n # Apply rmsprop updates to parameters\n for p, dp, up in zip(self.params.values(), dparams_bat, uparams_bat):\n train_updates_bat[p] = p - learnrate * dp / T.sqrt(up + 1e-6)\n # Update rmsprop caches\n for mp, up in zip(self.mparams.values(), uparams_bat):\n train_updates_bat[mp] = up\n\n # Batch training step function\n self.train_step_bat = th.function(\n inputs=[x_bat, y_bat, s_in_bat, \n th.Param(learnrate, default=0.001), \n th.Param(decayrate, default=0.95),\n th.Param(reg_lambda, default=0.0)],\n outputs=s_out_bat,\n updates=train_updates_bat,\n name='train_step_bat')\n\n ### ERROR CHECKING ###\n\n # Mostly for internal debug, returns unsummed error tensor and regularization cost\n self.errs_bat = th.function(\n inputs=[x_bat, y_bat, s_in_bat, th.Param(reg_lambda, default=0.0)], \n outputs=[o_errs_res, reg_sum, s_out_bat])\n\n # Full error sum, not averaged over sample size (done in outer non-Theano func)\n self.err_bat = th.function(\n inputs=[x_bat, y_bat, s_in_bat, th.Param(reg_lambda, default=0.0)], \n outputs=[cost_bat, s_out_bat])\n\n # Gradient calculations\n # We'll use this at some point for gradient checking\n self.grad_bat = th.function(\n inputs=[x_bat, y_bat, s_in_bat, th.Param(reg_lambda, default=0.0)], \n outputs=dparams_bat)\n\n ### Whew, I think we're done! 
###\n        time2 = time.time()\n        stdout.write(\"done!\\nCompilation took {0:.3f} s.\\n\\n\".format(time2 - time1))\n        stdout.flush()\n        self._built_t = True\n\n    @classmethod\n    def loadfromfile(cls, infile):\n        \"\"\"Load model parameters from file and rebuild model.\"\"\"\n\n        with np.load(infile) as f:\n            # Extract hyperparams and position\n            p = f['p']\n            hparams = pickle.loads(p.tobytes())\n            hyper, epoch, pos = hparams['hyper'], hparams['epoch'], hparams['pos']\n\n            # Load matrices\n            pvalues = { n:f[n] for n in cls.pnames }\n\n        # Create instance\n        if isinstance(infile, str):\n            stdout.write(\"Loaded model parameters from {0}\\n\".format(infile))\n        stdout.write(\"Rebuilding model...\\n\")\n        model = cls(hyper, epoch, pos, pvalues)\n\n        return model\n\n    def savetofile(self, outfile):\n        \"\"\"Save model parameters to file.\"\"\"\n\n        # Pickle non-matrix params into bytestring, then convert to numpy byte array\n        pklbytes = pickle.dumps({'hyper': self.hyper, 'epoch': self.epoch, 'pos': self.pos}, \n            protocol=pickle.HIGHEST_PROTOCOL)\n        p = np.frombuffer(pklbytes, dtype=np.uint8)\n\n        # Gather parameter matrices and names\n        pvalues = { n:m.get_value() for n, m in self.params.items() }\n\n        # Now save params and matrices to file\n        try:\n            np.savez_compressed(outfile, p=p, **pvalues)\n        except OSError:\n            raise\n        else:\n            if isinstance(outfile, str):\n                stdout.write(\"Saved model parameters to {0}\\n\".format(outfile))\n\n    def calc_loss(self, dataset, startpos=0, batchsize=16, num_examples=0, init_state=None):\n        \"\"\"Calculates average cross-entropy loss over given batchsize.\"\"\"\n\n        # First build training functions if not already done\n        if not self._built_t:\n            self._build_t()\n\n        step_state = init_state if isinstance(init_state, np.ndarray) else self.freshstate(batchsize)\n\n        if batchsize < 1:\n            raise NotImplementedError(\"Single-sequence training is no longer available.\")\n\n        data_len = dataset.batchepoch(batchsize)\n        valid_len = num_examples if num_examples else data_len\n        errors = np.zeros(valid_len)\n\n        # Use explicit indexing instead of fancy slicing so we can\n        # roll over properly\n        data_pos = startpos\n        for valid_pos in range(valid_len):\n            xbatch, ybatch = dataset.batch(data_pos, batchsize)\n            errors[valid_pos], step_state = self.err_bat(xbatch, ybatch, step_state, self.hyper.regcost)\n            data_pos += 1\n            # Advance position and overflow\n            if data_pos >= data_len:\n                data_pos = 0\n                # Roll state vector on batch axis, to keep continuity\n                step_state = np.roll(step_state, 1, axis=1)\n\n        # Return total loss divided by number of characters in sample\n        return np.sum(errors).item() / float(valid_len * batchsize * dataset.seq_len)\n\n    def train(self, dataset, batchsize=16, num_examples=0, callback_every=1000, callback=None, init_state=None):\n        \"\"\"Train model on given dataset for num_examples, with optional \n        batch size.\n\n        Optional callback function called after callback_every, with \n        model and current state as arguments.\n\n        If num_examples is 0, will train for full epoch.\n        \"\"\"\n\n        # Batched training only\n        if batchsize < 1:\n            raise NotImplementedError(\"Single-sequence training is no longer available.\")\n\n        # First build training functions if not already done\n        if not self._built_t:\n            self._build_t()\n\n        input_len = dataset.batchepoch(batchsize)\n        train_len = num_examples if num_examples else input_len\n\n        # Start with fresh state if none provided\n        step_state = init_state if isinstance(init_state, np.ndarray) else self.freshstate(batchsize)\n\n        # Debug\n        # print(\"Training with batchsize {0:d}, 
state shape {1}\".format(batchsize, repr(step_state.shape)))\n\n # Use explicit indexing instead of fancy slicing so we can \n # keep track, both for model status and checkpoint purposes\n for train_pos in range(train_len):\n # Learning step\n xbatch, ybatch = dataset.batch(self.pos, batchsize)\n step_state = self.train_step_bat(xbatch, ybatch, step_state, \n self.hyper.learnrate, self.hyper.decay, self.hyper.regcost)\n\n # Advance position and overflow\n self.pos += 1\n if self.pos >= input_len:\n self.epoch += 1\n self.pos = 0\n # Roll state vector on batch axis, to keep continuity\n step_state = np.roll(step_state, 1, axis=1)\n\n # Optional callback\n if callback and callback_every and (train_pos + 1) % callback_every == 0:\n # Make sure to only pass a slice of state if batched\n callback(self, step_state[:,0,:])\n\n # Return final state\n return step_state\n\n def traintime(self, dataset, batchsize=16, pos=0, init_state=None):\n \"\"\"Prints time for batch training step (default size 16).\"\"\"\n\n # First build training functions if not already done\n if not self._built_t:\n self._build_t()\n\n # Fresh state\n start_state = init_state if isinstance(init_state, np.ndarray) else self.freshstate(batchsize)\n\n # Get slice\n xbatch, ybatch = dataset.batch(pos, batchsize)\n\n # Time training step\n time1 = time.time()\n self.train_step_bat(xbatch, ybatch, start_state, \n self.hyper.learnrate, self.hyper.decay, self.hyper.regcost)\n time2 = time.time()\n\n stdout.write(\n \"Time for SGD/RMS learning batch of {0:d} sequences, {1:d} chars each: {2:.4f} ms\\n\".format(\n xbatch.shape[1], xbatch.shape[0], (time2 - time1) * 1000.0))\n\n # Time loss calc step\n time1 = time.time()\n self.err_bat(xbatch, ybatch, start_state, self.hyper.regcost)\n time2 = time.time()\n\n stdout.write(\"Time for loss calculation step of {0:d} chars: {1:.4f} ms\\n\".format(\n xbatch.shape[0], (time2 - time1) * 1000.0))\n\n def genchars(self, charset, numchars, init_state=None, seedch=None, \n print_seed=True, use_max=False, temperature=0.5):\n \"\"\"Generate string of characters from current model parameters.\n\n If use_max is True, will select most-likely character at each step.\n\n Probabilities can be optionally scaled by temperature during generation\n if use_max=False. 
\n \"\"\"\n\n # Fresh state\n start_state = init_state if isinstance(init_state, np.ndarray) else self.freshstate(0)\n\n # Seed given or random character to start (as one-hot)\n if seedch:\n seedidx = charset.idxofchar(seedch)\n else:\n try:\n seedidx = charset.semirandomidx()\n except AttributeError:\n seedidx = charset.randomidx()\n\n seedvec = charset.onehot(seedidx)\n\n # Get generated sequence\n if use_max:\n idxs, end_state = self.gen_chars_max(numchars, seedvec, start_state)\n else:\n idxs, end_state = self.gen_chars(numchars, seedvec, start_state, temperature)\n\n # Convert to characters\n chars = [ charset.charatidx(np.argmax(i)) for i in idxs ]\n\n # Now construct string\n if print_seed:\n retstr = charset.charatidx(np.argmax(seedvec))\n else:\n retstr = ''\n return retstr + \"\".join(chars), end_state\n\n\n","repo_name":"rneilson/rngru","sub_path":"rn_rnn_model.py","file_name":"rn_rnn_model.py","file_ext":"py","file_size_in_byte":17624,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"73872087607","text":"import numpy as np\r\nimport array\r\nclass myArray(array.array):\r\n arraymember1 = np.array([1,3,4,5])\r\n arraymember2 = np.array([2,4,5,6])\r\n def array_addition(self):\r\n resultarray = self.arraymember1 + self.arraymember2\r\n print(\"This array addition function returns the result array \\n\")\r\n return resultarray\r\n\r\narrayObj = myArray('u')\r\narrayObj.arraymember1 = np.array([[1,2,3,4],[34,54,36,67]])\r\narrayObj.arraymember2 = np.array([[2,38,95,26],[32,23,89,75]])\r\nresultarray = arrayObj.array_addition()\r\nprint(resultarray)","repo_name":"PelluriDeepthi/PelluriDeepthi","sub_path":"ArrayWrapper.py","file_name":"ArrayWrapper.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4621951901","text":"# coding:utf-8\nfrom enum import Enum\n\nfrom qfluentwidgets import (qconfig, QConfig, ConfigItem, OptionsConfigItem, BoolValidator,\n OptionsValidator, RangeConfigItem, RangeValidator,\n FolderListValidator, EnumSerializer, FolderValidator)\n\n\n\nclass Language(Enum):\n \"\"\" Language enumeration \"\"\"\n\n CHINESE_SIMPLIFIED = \"zh\"\n CHINESE_TRADITIONAL = \"hk\"\n ENGLISH = \"en\"\n AUTO = \"Auto\"\n\n\nclass Config(QConfig):\n \"\"\" Config of application \"\"\"\n\n # folders\n musicFolders = ConfigItem(\n \"Folders\", \"LocalMusic\", [], FolderListValidator())\n downloadFolder = ConfigItem(\n \"Folders\", \"Download\", \"app/download\", FolderValidator())\n\n # main window\n dpiScale = OptionsConfigItem(\n \"MainWindow\", \"DpiScale\", \"Auto\", OptionsValidator([1, 1.25, 1.5, 1.75, 2, \"Auto\"]), restart=True)\n language = OptionsConfigItem(\n \"MainWindow\", \"Language\", Language.AUTO, OptionsValidator(Language), EnumSerializer(Language), restart=True)\n\n # software update\n checkUpdateAtStartUp = ConfigItem(\"Update\", \"CheckUpdateAtStartUp\", True, BoolValidator())\n\n\nYEAR = 2023\nAUTHOR = \"软盘驱动程序\"\nVERSION = \"v0.1.1\"\nHELP_URL = \"https://github.com/clean-master/stable-diffusion-webui-launcher-directml\"\nREPO_URL = \"https://github.com/clean-master/stable-diffusion-webui-launcher-directml\"\nFEEDBACK_URL = \"https://github.com/clean-master/stable-diffusion-webui-launcher-directml/issues\"\nRELEASE_URL = \"https://github.com/clean-master/stable-diffusion-webui-launcher-directml/releases/latest\"\n\n\ncfg = Config()\nqconfig.load('app/config/config.json', 
cfg)","repo_name":"clean-master/stable-diffusion-webui-launcher-directml","sub_path":"app/common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"}
{"seq_id":"22084417882","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, HttpResponseRedirect, HttpResponse, redirect\nfrom django.contrib import messages\nfrom ..users.models import User\nfrom .models import Book, Review, Author\nimport bcrypt\n\n# Create your views here.\ndef add(request):\n    authors = Author.objects.all()\n    context = {\n        'authors': authors,\n    }\n    return render(request, 'books/new.html', context)\ndef create(request):\n    errors = Book.objects.book_validator(request.POST)\n    if len(errors):\n        for tag, error in errors.items():\n            messages.error(request, error, extra_tags=tag)\n        return redirect('/books/add')\n    else:\n        # POST values are strings, so cast before the numeric comparison\n        if int(request.POST['author']) > 0:\n            author = Author.objects.get(id = request.POST['author'])\n        else:\n            name = request.POST['new_author']\n            author = Author.objects.create(name = name)\n        title = request.POST['title']\n        review = request.POST['review']\n        rating = request.POST['rating']\n        id = request.session['id']\n        reviewer = User.objects.get(id = id)\n        book = Book.objects.create(title = title, author = author)\n        r = Review.objects.create(stars = rating, review = review, reviewer = reviewer, book = book)\n        return redirect('/dashboard')\ndef book(request, book_id):\n    book = Book.objects.get(id = book_id)\n    context = {\n        'id': request.session['id'],\n        'book': book,\n        'reviews': Review.objects.filter(book = book),\n    }\n    return render(request, 'books/book.html', context)\ndef review(request):\n    book_id = request.POST['book_id']\n    review = request.POST['review']\n    rating = request.POST['rating']\n    id = request.session['id']\n    reviewer = User.objects.get(id = id)\n    book = Book.objects.get(id = book_id)\n    Review.objects.create(stars = rating, review = review, reviewer = reviewer, book = book)\n    return redirect('/dashboard')\ndef delete(request, review_id):\n    review = Review.objects.get(id = review_id)\n    review.delete()\n    return redirect('/dashboard')","repo_name":"marmegh/enigma","sub_path":"beltreviewer/apps/books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"270301356","text":"# Silver 5_1436\n\n# An \"apocalypse number\" is a number that contains at least three consecutive 6s.\n# The smallest apocalypse number is 666, and the next larger ones are 1666, 2666, 3666, and so on.
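\n# (Brute force is enough here: Baekjoon 1436's published limit bounds N at\n# 10,000, and the '666' in str(start) membership check below costs only a few\n# digit comparisons per candidate.)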
\n\n# So Shom will title his first movie \"End of the World 666\",\n# and his second \"End of the World 1666\". In general, the N-th movie is\n# titled \"End of the World (the N-th smallest apocalypse number)\".\n# Write a program that prints the number in the title of Shom's N-th movie.\n# Shom always makes the series in order and makes no other movies.\n\nn = int(input())\nc = 0\nstart = 666\nwhile True:\n    if '666' in str(start):\n        c += 1\n        if c == n:\n            print(start)\n            break\n    start += 1","repo_name":"chaerui7967/Today_I_Learned","sub_path":"Baekjoon/movie_director_shom_210717.py","file_name":"movie_director_shom_210717.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"28905014989","text":"# coding: utf-8\nimport datetime\nfrom flask import Flask, redirect\n\napp = Flask(__name__)\n\n@app.route('/today')\ndef today():\n    return redirect(\n        'http://show-time.xyz/{}.html'.format(datetime.date.today().strftime('%Y%m%d')))\n\n@app.route('/tomorrow')\ndef tomorrow():\n    date = datetime.date.today() + datetime.timedelta(days=1)\n    return redirect(\n        'http://show-time.xyz/{}.html'.format(date.strftime('%Y%m%d')))\n\nif __name__ == '__main__':\n    app.run(port=9997)\n","repo_name":"maruchanman/__band_app","sub_path":"back/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"16022419445","text":"# ask the user for three integers\nnumber1 = input(\"Enter the first integer: \")\nnumber2 = input(\"Enter the second integer: \")\nnumber3 = input(\"Enter the third integer: \")\n\n# first calculation requested in the exercise\nproduct = ((int(number1) * 2) * (int(number2) / 2)) + int(number3)\nprint(int(product))\n\n# second calculation requested in the exercise\nsoma = (int(number1) * 3 + int(number3)) * int(number2) \nprint(soma)","repo_name":"kaynann/PYTHON","sub_path":"aula10.18/desafio07.py","file_name":"desafio07.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"17551942735","text":"# coding=utf-8\nimport time\nimport numpy as np\nimport logging\nimport os\nimport tensorflow as tf\nfrom tensorflow.contrib import slim\n\nfrom db_config import cfg\n\nimport lib.networks.model as model\nfrom lib.networks.losses import compute_loss, compute_acc\nfrom lib.dataset.dataloader import get_batch\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef make_dir(dir):\n    if not os.path.exists(dir):\n        os.makedirs(dir)\n\ndef tower_loss(images, gt_score_maps, gt_threshold_map, gt_score_mask,\n               gt_thresh_mask, reuse_variables):\n\n    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):\n        binarize_map, threshold_map, thresh_binary = model.model(images, is_training=True)\n\n        model_loss = compute_loss(binarize_map, threshold_map, thresh_binary,\n                                  gt_score_maps, gt_threshold_map, gt_score_mask, gt_thresh_mask)\n\n        total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n\n    # add summary\n    if reuse_variables is None:\n        tf.summary.image('gt/input_imgs', images)\n        tf.summary.image('gt/score_map', gt_score_maps)\n        tf.summary.image('gt/threshold_map', gt_threshold_map * 255)\n        tf.summary.image('gt/score_mask', gt_score_mask)\n        tf.summary.image('gt/thresh_mask', gt_thresh_mask)\n\n        tf.summary.image('pred/binarize_map', binarize_map)\n        tf.summary.image('pred/threshold_map', threshold_map * 
255)\n        tf.summary.image('pred/thresh_binary', thresh_binary)\n\n        tf.summary.scalar('model_loss', model_loss)\n        tf.summary.scalar('total_loss', total_loss)\n\n    return total_loss, model_loss, binarize_map, threshold_map, thresh_binary\n\n\ndef average_gradients(tower_grads):\n    average_grads = []\n    for grad_and_vars in zip(*tower_grads):\n        grads = []\n        for g, _ in grad_and_vars:\n            expanded_g = tf.expand_dims(g, 0)\n            grads.append(expanded_g)\n\n        grad = tf.concat(grads, 0)\n        grad = tf.reduce_mean(grad, 0)\n\n        v = grad_and_vars[0][1]\n        grad_and_var = (grad, v)\n        average_grads.append(grad_and_var)\n\n    return average_grads\n\n\ndef _train_logger_init():\n    \"\"\"\n    Initialize the training logger\n    :return:\n    \"\"\"\n    train_logger = logging.getLogger('train')\n    train_logger.setLevel(logging.DEBUG)\n\n    # add a file handler\n    log_file = os.path.join(cfg[\"TRAIN\"][\"TRAIN_LOGS\"], time.strftime('%Y%m%d%H%M', time.localtime(time.time())) + '.logs')\n    file_handler = logging.FileHandler(log_file, mode='w')\n    file_handler.setLevel(logging.DEBUG)\n    file_formatter = logging.Formatter('%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n    file_handler.setFormatter(file_formatter)\n    train_logger.addHandler(file_handler)\n\n    # add a console handler\n    consol_handler = logging.StreamHandler()\n    consol_handler.setLevel(logging.DEBUG)\n    consol_formatter = logging.Formatter('%(message)s')\n    consol_handler.setFormatter(consol_formatter)\n    train_logger.addHandler(consol_handler)\n    return train_logger\n\n\ndef main():\n    import os\n    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.TRAIN.VIS_GPU\n    if not tf.gfile.Exists(cfg[\"TRAIN\"][\"CHECKPOINTS_OUTPUT_DIR\"]):\n        tf.gfile.MkDir(cfg[\"TRAIN\"][\"CHECKPOINTS_OUTPUT_DIR\"])\n\n    train_logger = _train_logger_init()\n\n    input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')\n    input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps')\n    input_threshold_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_threshold_maps')\n\n    input_score_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_masks')\n    input_threshold_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_threshold_masks')\n\n    global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)\n\n    learning_rate = tf.train.exponential_decay(cfg[\"TRAIN\"][\"LEARNING_RATE\"], global_step, decay_steps=10000,\n                                               decay_rate=0.94, staircase=True)\n\n    if cfg.TRAIN.OPT == 'adam':\n        # learning_rate = tf.constant(cfg[\"TRAIN\"][\"LEARNING_RATE\"], tf.float32)\n        opt = tf.train.AdamOptimizer(learning_rate)\n    elif cfg.TRAIN.OPT == 'momentum':\n        opt = tf.train.MomentumOptimizer(learning_rate, 0.9)\n    else:\n        assert 0, 'error optimizer'\n    print('use ', cfg.TRAIN.OPT)\n\n    # add summary\n    tf.summary.scalar('learning_rate', learning_rate)\n\n    gpus = [str(i) for i in range(len(cfg.TRAIN.VIS_GPU.split(',')))]\n    input_images_split = tf.split(input_images, len(gpus))\n    input_score_maps_split = tf.split(input_score_maps, len(gpus))\n    input_threshold_maps_split = tf.split(input_threshold_maps, len(gpus))\n    input_score_masks_split = tf.split(input_score_masks, len(gpus))\n    input_threshold_masks_split = tf.split(input_threshold_masks, len(gpus))\n\n\n    tower_grads = []\n    reuse_variables = None\n    total_binarize_acc = 0\n    total_thresh_binary_acc = 0\n    for i, gpu_id in enumerate(gpus):\n        print('gpu_id', gpu_id)\n        with tf.device('/gpu:' + gpu_id):
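\n            # Classic multi-tower data parallelism: each GPU consumes one split of\n            # the batch and contributes its gradients to tower_grads; these are\n            # merged by average_gradients() before a single apply_gradients step.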
\n            with tf.name_scope('model_' + gpu_id) as scope:\n                gt_imgs = input_images_split[i]\n                gt_scores = input_score_maps_split[i]\n                gt_thresholds = input_threshold_maps_split[i]\n                gt_score_masks = input_score_masks_split[i]\n                gt_threshold_masks = input_threshold_masks_split[i]\n                total_loss, model_loss, binarize_map, threshold_map, thresh_binary = \\\n                    tower_loss(gt_imgs, gt_scores, gt_thresholds, gt_score_masks, gt_threshold_masks, reuse_variables)\n                binarize_acc, thresh_binary_acc = compute_acc(binarize_map, threshold_map, thresh_binary,\n                                                              gt_scores, gt_thresholds, gt_score_masks, gt_threshold_masks)\n                total_binarize_acc += binarize_acc\n                total_thresh_binary_acc += thresh_binary_acc\n                reuse_variables = True\n\n                batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))\n\n                grads = opt.compute_gradients(total_loss)\n                tower_grads.append(grads)\n\n    grads = average_gradients(tower_grads)\n    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n    # average the per-tower accuracies over the number of GPUs\n    avg_binarize_acc = total_binarize_acc / len(gpus)\n    avg_thresh_binary_acc = total_thresh_binary_acc / len(gpus)\n\n    summary_op = tf.summary.merge_all()\n\n    variable_averages = tf.train.ExponentialMovingAverage(cfg[\"TRAIN\"][\"MOVING_AVERAGE_DECAY\"], global_step)\n\n    variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n    with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):\n        train_op = tf.no_op(name='train_op')\n\n    saver = tf.train.Saver(tf.global_variables(), max_to_keep=cfg.TRAIN.SAVE_MAX)\n\n\n    train_logs_dir = os.path.join(cfg.TRAIN.TRAIN_LOGS, 'train')\n    val_logs_dir = os.path.join(cfg.TRAIN.TRAIN_LOGS, 'val')\n\n    make_dir(train_logs_dir)\n    make_dir(val_logs_dir)\n\n    train_summary_writer = tf.summary.FileWriter(train_logs_dir, tf.get_default_graph())\n    val_summary_writer = tf.summary.FileWriter(val_logs_dir, tf.get_default_graph())\n\n\n    init = tf.global_variables_initializer()\n\n    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n        try:\n\n            if cfg[\"TRAIN\"][\"RESTORE\"]:\n                train_logger.info('continue training from previous checkpoint')\n                ckpt = tf.train.get_checkpoint_state(cfg[\"TRAIN\"][\"RESTORE_CKPT_PATH\"])\n                train_logger.info('restore model path: %s', ckpt.model_checkpoint_path)\n                saver.restore(sess, ckpt.model_checkpoint_path)\n                train_logger.info(\"done\")\n            elif cfg[\"TRAIN\"][\"PRETRAINED_MODEL_PATH\"] is not None:\n                sess.run(init)\n                print(cfg[\"TRAIN\"][\"PRETRAINED_MODEL_PATH\"])\n                train_logger.info('load pretrain model: %s', cfg[\"TRAIN\"][\"PRETRAINED_MODEL_PATH\"])\n                variable_restore_op = slim.assign_from_checkpoint_fn(cfg[\"TRAIN\"][\"PRETRAINED_MODEL_PATH\"],\n                                                                     slim.get_trainable_variables(),\n                                                                     ignore_missing_vars=True)\n                variable_restore_op(sess)\n                train_logger.info(\"done\")\n\n            else:\n                sess.run(init)\n        except Exception as e:\n            raise RuntimeError('failed to load model weights') from e\n\n        train_data_generator = get_batch(num_workers=cfg.TRAIN.NUM_READERS,\n                                         img_dir=cfg.TRAIN.IMG_DIR,\n                                         label_dir=cfg.TRAIN.LABEL_DIR,\n                                         batchsize=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(gpus))\n\n        val_data_generator = get_batch(num_workers=10,\n                                       img_dir=cfg.EVAL.IMG_DIR,\n                                       label_dir=cfg.EVAL.LABEL_DIR,\n                                       batchsize=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(gpus))\n\n        test_data_generator = get_batch(num_workers=1,\n                                        img_dir=cfg.EVAL.IMG_DIR,\n                                        label_dir=cfg.EVAL.LABEL_DIR,\n                                        batchsize=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(gpus),\n                                        is_eval=True)\n\n        test_epoch = 0\n\n        start = time.time()\n        for step in range(cfg[\"TRAIN\"][\"MAX_STEPS\"]):\n            train_data = next(train_data_generator)\n\n            train_feed_dict = {input_images: train_data[0],\n                               
input_score_maps: train_data[1],\n input_threshold_maps: train_data[3],\n input_score_masks: train_data[2],\n input_threshold_masks: train_data[4]}\n\n ml, tl, _ = sess.run([model_loss, total_loss, train_op], feed_dict=train_feed_dict)\n if np.isnan(tl):\n train_logger.info('Loss diverged, stop training')\n break\n\n if step % 10 == 0:\n avg_time_per_step = (time.time() - start) / 10\n avg_examples_per_second = (10 * cfg[\"TRAIN\"][\"BATCH_SIZE_PER_GPU\"] * len(gpus)) / (time.time() - start)\n start = time.time()\n train_logger.info(\n '{}->Step {:06d}, model loss {:.4f}, total loss {:.4f}, {:.2f} seconds/step, {:.2f} examples/second'.format(\n cfg.TRAIN.VERSION, step, ml, tl, avg_time_per_step, avg_examples_per_second))\n\n if step % cfg[\"TRAIN\"][\"SAVE_CHECKPOINT_STEPS\"] == 0:\n saver.save(sess, os.path.join(cfg[\"TRAIN\"][\"CHECKPOINTS_OUTPUT_DIR\"],\n 'DB_' + cfg.BACKBONE + '_' + cfg.TRAIN.VERSION + '_model.ckpt'),\n global_step=global_step)\n\n if step % cfg[\"TRAIN\"][\"SAVE_SUMMARY_STEPS\"] == 0:\n _, tl, train_summary_str = sess.run([train_op, total_loss, summary_op], feed_dict=train_feed_dict)\n train_summary_writer.add_summary(train_summary_str, global_step=step)\n\n val_data = next(val_data_generator)\n val_feed_dict = {input_images: val_data[0],\n input_score_maps: val_data[1],\n input_threshold_maps: val_data[3],\n input_score_masks: val_data[2],\n input_threshold_masks: val_data[4]}\n eval_summary_str = sess.run(summary_op, feed_dict=val_feed_dict)\n\n val_summary_writer.add_summary(eval_summary_str, global_step=step)\n\n if step % cfg.EVAL.TEST_STEP == 0 and step != 0:\n temp_epoch = test_epoch\n train_logger.info('~~~~~~~~~~~~~~~~~~start to test~~~~~~~~~~~~~~~~~~~~~')\n avg_bc = []\n avg_tbc = []\n while temp_epoch==test_epoch:\n test_data = next(test_data_generator)\n test_feed_dict = {input_images: test_data[0],\n input_score_maps: test_data[1],\n input_threshold_maps: test_data[3],\n input_score_masks: test_data[2],\n input_threshold_masks: test_data[4]}\n test_epoch = test_data[5]\n bc, tbc = sess.run([avg_binarize_acc, avg_thresh_binary_acc],\n feed_dict=test_feed_dict)\n\n avg_bc.append(bc)\n avg_tbc.append(tbc)\n\n train_logger.info('avg binarize acc is :{}'.format(sum(avg_bc)/len(avg_bc)))\n train_logger.info('avg thresh binary acc is :{}'.format(sum(avg_tbc)/len(avg_tbc)))\n\n\nif __name__ == '__main__':\n\n main()\n\n","repo_name":"iamrishab/DB-tf","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":13145,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"77"} +{"seq_id":"20727963815","text":"\"\"\"\nRelated paras for concepts collector\nmainly used for pre-processing\n\"\"\"\nfrom typing import Callable\nimport attr\nfrom HistoMIL import logger\nfrom HistoMIL.DATA.Slide.concepts.WholeSlideImage import WholeSlideImage \n\nfrom HistoMIL.DATA.Database.data_aug import naive_transforms,only_naive_transforms\n\n\n##############################################################################\n# para for slide\n##############################################################################\n\n@attr.s(auto_attribs=True)\nclass SlideParas(object):\n \n folder:str=None\n fname:str = None\n\n##############################################################################\n# para for tissue\n##############################################################################\n@attr.s(auto_attribs=True)\nclass TissueParas(object):\n \"\"\"\n include all paras for tissue concepts in pre-processing and usage\n 
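A keyword-constructed override is the usual usage pattern, e.g.\n    TissueParas(seg_level=2, use_otsu=False) (attrs generates __init__ from the\n    annotated fields below); note the name default is computed once at class\n    definition, so it does not reflect per-instance overrides.\n    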
\"\"\"\n seg_level:int = 0 # level for segment tissue mask\n min_seg_level:int = None # min level for segment tissue mask if chose fast mode\n\n ref_patch_size:int = 256 # reference patch size for tissue mask\n\n #------> parameters for blurring\n mthresh:int = 7 # paras for Apply median blurring\n\n #------> parameters for otsu\n use_otsu:bool = True\n sthresh:int = 20 \n sthresh_up:int = 255\n\n \n #------> Morphological closing\n close:int = 0\n\n #------> parameters for contours in mask2contours()\n filter_params:dict = {'a_t':100,'a_h': 16, 'max_n_holes':8}\n\n # if there is more than one contours, exclude option:default empty list\n to_contours:bool = True\n exclude_ids:list = []\n keep_ids:list = []\n \n #------> create a name for instance\n name:str = f\"tissue_{seg_level}_otsu_{use_otsu}_contours_{to_contours}\"\n\ndef set_min_seg_level(tissue_para:TissueParas,slide:WholeSlideImage,\n min_seg_level:int=None):\n \"\"\"\n get minimum seg level for tissue mask\n \"\"\"\n if min_seg_level is None:\n tissue_para.seg_level = len(slide.meta.level_dims)-1\n else:\n tissue_para.seg_level = min(len(slide.meta.level_dims)-1,min_seg_level)\n logger.info(f\"TissuePara:: set min_seg_level to {tissue_para.seg_level},in {slide.meta.level_dims} \")\n return tissue_para\n\n##############################################################################\n# para for patch\n##############################################################################\n@attr.s(auto_attribs=True)\nclass PatchParas(object):\n \"\"\"\n include all paras for patch concepts in pre-processing and usage\n \"\"\"\n #------> parameters for patch\n patch_level:int = 0 # level for patch\n patch_size = (512,512) # patch size\n step_size:int = 512 # step size for patch\n\n #------> parameters for patch extraction\n from_contours:bool = True # extract patches from contours otherwise from tissue mask\n # debug: set mp to 1 to avoid not solved error \n mp_processor:int = 1 # number of processors for multiprocessing\n #------> parameters for patch extraction function \n contour_fn_name:str = \"four_pt\" # function name for contour extraction\n use_padding:bool = True # whether padding\n top_left = None # top left point for patch extraction area\n bot_right = None # bot right point for patch extraction area\n\n #------> name for instance\n name:str = f\"patch({patch_level})_size({patch_size[0]})_step({step_size})_contours({contour_fn_name})\"\n\n\n##############################################################################\n# para for faeture\n##############################################################################\n@attr.s(auto_attribs=True)\nclass FeatureParas(object):\n \"\"\"\n include all paras for feature concepts in pre-processing and usage\n \"\"\"\n #------> parameters for feature encoder\n model_name:str = \"resnet18\"\n\n model_instance = None\n img_size = None\n out_dim = None\n #-----> for inference part \n\n device:str = \"cuda\"\n trans:Callable = only_naive_transforms\n \n batch_size:int = 32\n\n #------> parameters for cluster\n cluster_nb:int = 200\n with_semantic_shifts:bool = False\n\n##############################################################################\n# para for collectorß\n##############################################################################\n@attr.s(auto_attribs=True)\nclass CollectorParas(object):\n \"\"\"\n include all paras for collector concepts in pre-processing and usage\n \"\"\"\n #------> parameters for collector\n slide:SlideParas = SlideParas() # get instance of slide paras\n 
tissue:TissueParas = TissueParas() # get instance of tissue paras\n patch:PatchParas = PatchParas() # get instance of patch paras\n feature:FeatureParas = FeatureParas()\n\nDEFAULT_CONCEPT_PARAS = CollectorParas()\n\n","repo_name":"secrierlab/HistoMIL","sub_path":"EXP/paras/slides.py","file_name":"slides.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"26906770008","text":"from pathlib import Path\nimport toml\nimport cv2\nfrom utils import CameraParam, LensUndistorter, ImageSaver, LensUndistorterWithKroi\n\n# Load Pathes\nBASE_DIR = Path(__file__).resolve().parent.parent\nDATA_DIR = Path(BASE_DIR, \"data\")\nCFG_PARAM_PATH = str(Path(DATA_DIR, \"camera_param.toml\"))\nRGB_IMAGE_PATH = str(Path(DATA_DIR, \"rgb_img.png\"))\nRESULT_SAVE_DIR = str(Path(BASE_DIR, \"results\"))\nresult_saver = ImageSaver(RESULT_SAVE_DIR)\n\n# Get config file and rgb image\ndict_param = toml.load(open(CFG_PARAM_PATH))\nrgb_img = cv2.imread(RGB_IMAGE_PATH)\n\n# Get camera parameter\ncamera_param = CameraParam.from_dict(dict_param[\"Rgb\"])\nK_rgb = camera_param.intrinsic_matrix\nD_rgb = camera_param.distortion\nimage_height, image_width = camera_param.size\n\n# Image Correction\nlens_undistorter = LensUndistorter(K_rgb, D_rgb, image_width, image_height)\nlens_undistorter_roi = LensUndistorterWithKroi(K_rgb, D_rgb, image_width, image_height)\nrgb_img_undistorted = lens_undistorter.correction(rgb_img)\nrgb_img_undistorted_roi = lens_undistorter_roi.correction(rgb_img)\n\n\nresult_saver.save_image(\"raw_image.png\", rgb_img)\nresult_saver.save_image(\"rgb_img_undistorted.png\", rgb_img_undistorted)\nresult_saver.save_image(\"rgb_img_undistorted_roi.png\", rgb_img_undistorted_roi)\n","repo_name":"yuki-inaho/test_getOptimalNewCameraMatrix","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3492854108","text":"import json\nimport logging\nimport os\nimport tarfile\nfrom io import TextIOWrapper\nfrom typing import IO\nfrom typing import Iterable\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\nfrom typing import Union\n\nfrom . import convert\nfrom . 
import siteinfo as si\n\nlog = logging.getLogger(__name__)\n\n\ndef replace_extensions(path: str, new_exts: Iterable = ()) -> str:\n \"\"\"\n >>> replace_extensions(\"/a/b/c/dump.njson.tar.gz\")\n '/a/b/c/dump'\n >>> replace_extensions(\"dump.njson.tar.gz\")\n 'dump'\n >>> replace_extensions(\"dump.njson.tar.gz\", new_exts=[\"slob\"])\n 'dump.slob'\n >>> replace_extensions(\"/a/b/c/dump.njson.tar.gz\", new_exts=[\"siteinfo\", \"json\"])\n '/a/b/c/dump.siteinfo.json'\n \"\"\"\n basename = os.path.basename(path)\n dirname = os.path.dirname(path)\n noext, *_ = basename.split(os.path.extsep)\n return os.path.join(dirname, os.path.extsep.join((noext, *new_exts)))\n\n\ndef get_outname(args):\n outname = args.output_file\n if outname is None:\n basename = os.path.basename(args.dump_file[0])\n outname = replace_extensions(basename, [\"slob\"])\n return outname\n\n\ndef get_siteinfo(args):\n siteinfo_path = args.siteinfo\n if not siteinfo_path:\n siteinfo_path = replace_extensions(args.dump_file, [\"siteinfo\", \"json\"])\n\n with open(siteinfo_path) as siteinfo_file:\n siteinfo_dict = json.load(siteinfo_file)\n\n return siteinfo_dict\n\n\ndef parse_loc_spec(s: str) -> Tuple[int, int]:\n if \":\" in s:\n fileno, lineno = s.split(\":\")\n return int(fileno), int(lineno)\n return 1, int(s)\n\n\ndef articles(\n dump_files: Sequence[str],\n info: si.Info,\n start_line_spec: str = \"1:1\",\n end_line_spec: Optional[str] = None,\n html_encoding=\"utf-8\",\n remove_embedded_bg=\"\",\n ensure_ext_image_urls=True,\n) -> Iterable[convert.ConvertParams]:\n\n start_file, start_line = parse_loc_spec(start_line_spec)\n if end_line_spec:\n end_file, end_line = parse_loc_spec(end_line_spec)\n else:\n end_file, end_line = None, None\n\n for dump_file in dump_files:\n dump_file = os.path.expanduser(dump_file)\n print(f\"Reading articles from {dump_file}\")\n files: Iterable[Union[TextIOWrapper, IO[bytes]]] = []\n\n if dump_file.endswith(\".tar.gz\") or dump_file.endswith(\".tar\"):\n if dump_file.endswith(\".tar.gz\"):\n tar = tarfile.open(dump_file, \"r:gz\")\n else:\n tar = tarfile.open(dump_file, \"r\")\n ctx_manager = tar\n files = (\n f for f in (tar.extractfile(member) for member in tar) if f is not None\n )\n else:\n ctx_manager = open(dump_file)\n files = [ctx_manager]\n\n with ctx_manager:\n for k, f in enumerate(files):\n file_number = k + 1\n if file_number < start_file:\n continue\n if end_file and file_number > end_file:\n break\n for i, line in enumerate(f):\n line_number = i + 1\n j = 0\n if line_number < start_line:\n if i % 1000 == 0:\n print(\".\", end=\"\", flush=True)\n j += 1\n if j % 50 == 0:\n print(flush=True)\n j = 0\n continue\n if end_line and line_number > end_line:\n break\n try:\n data = json.loads(line)\n html = data[\"article_body\"][\"html\"]\n title = data[\"name\"]\n redirects = data.get(\"redirects\", ())\n aliases = [r[\"name\"] for r in redirects]\n print(f\"{file_number}:{line_number} {title} ({len(html)})\")\n yield convert.ConvertParams(\n title=title,\n aliases=aliases,\n text=html,\n rtl=info.rtl,\n server=info.server,\n articlepath=\"./\", # TODO needs to be arg?\n site_articlepath=info.articlepath,\n encoding=html_encoding,\n remove_embedded_bg=remove_embedded_bg,\n ensure_ext_image_urls=ensure_ext_image_urls,\n )\n except:\n log.exception(f\"Failed to read line 
{i}\")\n","repo_name":"itkach/mw2slob","sub_path":"mw2slob/dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"77"} +{"seq_id":"26132631056","text":"# Create a string and save im a variable\noriginal_string = input(\"Please, insert a sentence: \")\n\n# Use loop to extract and alternate case in the string\nnew_alt_char_string = \"\"\n\n# Use enumerate to access the indexes and control better the item/case alternation\nfor index, item in enumerate(original_string):\n if index % 2 == 0:\n new_alt_char_string = new_alt_char_string + item.lower()\n else:\n new_alt_char_string = new_alt_char_string + item.upper()\n\nprint(new_alt_char_string)\n\n# With the same string but making each alternative word lower and upper case\nsplit_string = original_string.split(\" \")\nnew_alt_word_string = [] # Split converts a string into an array\n\nfor index, item in enumerate(split_string):\n if index % 2 == 0:\n new_alt_word_string.append(item.lower()) # Use .append to manipulate the array\n else:\n new_alt_word_string.append(item.upper())\n\nprint(\" \".join(new_alt_word_string)) # Use .join to include the empty spaces","repo_name":"tmitidieri/python-projects-hyperion-training","sub_path":"T17/alternative.py","file_name":"alternative.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74027833529","text":"import re\nimport numpy as np\nimport gensim\nimport requests\nimport json\nfrom scipy import spatial\n\ndata = []\nwith open('./avas_list.txt') as inputfile:\n for line in inputfile:\n data.append(line)\n\nprint(\"Loaded function data\")\n\nmodel = gensim.models.KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary=True, limit=500000)\nprint(\"Loaded model\")\nindex2word_set = set(model.wv.index2word)\n\ndef avg_feature_vector(sentence, model, num_features, index2word_set):\n words = sentence.split()\n feature_vec = np.zeros((num_features, ), dtype='float32')\n n_words = 0\n for word in words:\n if word in index2word_set:\n n_words += 1\n feature_vec = np.add(feature_vec, model[word])\n if (n_words > 0):\n feature_vec = np.divide(feature_vec, n_words)\n return feature_vec\n\ndef make_list(name):\n words = []\n if('_' in name): #if snake case\n name = name.lower()\n words = name.split('_')\n else: #identify if camel case\n word = \"\"\n for c in name:\n if(c.islower()):\n word +=c\n else:\n words.append(word)\n word = \"\"\n word += c.lower()\n words.append(word)\n return words\n\ndef make_sentence(words):\n sentence = \"\"\n for w in words:\n sentence += w\n sentence += \" \"\n return sentence[:-1]\n\ndef similarity_sentences(s1, s2):\n s1_afv = avg_feature_vector(s1, model=model, num_features=300, index2word_set=index2word_set)\n s2_afv = avg_feature_vector(s2, model=model, num_features=300, index2word_set=index2word_set)\n sim = 1 - spatial.distance.cosine(s1_afv, s2_afv)\n return sim\n\n#s1 = make_sentence(make_list('remove'))\n#s2 = make_sentence(make_list('delete'))\n#print(similarity_sentences(s1,s2))\n\n\ndef camel_to_snake(name):\n list = make_list(name)\n new_name = \"\"\n for w in list:\n new_name += w\n new_name += \"_\"\n return new_name[:-1]\n\ndef snake_to_camel(name):\n list = make_list(name)\n new_name = \"\"\n pp = False\n for w in list:\n c = w[0].upper() if pp else w[0].lower()\n pp = True\n new_name += c\n new_name += w[1:]\n return 
new_name\n\ndef change_case(name):\n if('_' in name): #this is snake\n return snake_to_camel(name)\n return camel_to_snake(name)\n\n\ndef find_synonyms(word):\n\n #dev\n return [\"sum\",\"total\",\"append\"]\n\n p = make_sentence(make_list(word))\n s_list = []\n\n\n #r = requests.get('https://wordsapiv1.p.mashape.com/words/'+word+'/synonyms'\n # , headers={\"x-rapidapi-host\": \"wordsapiv1.p.rapidapi.com\",\n # \t\"x-rapidapi-key\": \"\"} )\n\n #print(json.loads(r.content))\n #synonym_list = json.loads(r.content)['synonyms']\n\n r = requests.get('https://words.bighugelabs.com/api/2/3d61b2dab0e22df66fd693006de7a367/'+word+'/json')\n\n j = json.loads(r.content)\n synonym_list = []\n for (key,val) in j.items():\n if('syn' in val.keys()):\n synonym_list += val['syn']\n #synonym_list = j['noun']['syn'] + j['verb']['syn']\n\n for s in synonym_list:\n if(s.count(' ')>0):\n continue\n p1 = make_sentence(make_list(s))\n sim = similarity_sentences(p,p1)\n obj = ( s, sim)\n if(not np.isnan(sim)):\n s_list.append(obj)\n #print(s_list)\n\n s_list.sort(key = lambda synonym: synonym[1] )\n firsts = [t[0] for t in s_list]\n return firsts[-3:]\n\ndef getReplacementsName(name):\n #for each word in the name, get the replacements\n words = make_list(name)\n replace_dict = []\n for w in words:\n w_replacements = find_synonyms(w)\n w_replacements.append(w)\n w_replacements = list(set(w_replacements))\n replace_dict.append(w_replacements)\n\n a = replace_dict[0]\n for b in replace_dict[1:]:\n o = []\n for ia in a:\n for ib in b:\n o.append(ia+\"_\"+ib)\n a = o\n ca = a.copy()\n for poss in a:\n ca.append(change_case(poss))\n return ca\n\n\ndef extractName(regex):\n i = regex.find(\"def\")\n before = regex[:(i+4)]\n\n i += 4\n name = \"\"\n while True:\n name+=regex[i]\n i+=1\n if(i>=len(regex)):\n break\n if(i 0.7):\n fast_regex += '|('+d_before+d_def+d_after+')'\n found = True\n #if(not found):\n # print(\"Nothing good and fast\")\n #else:\n # print(\"GOOD:\",fast_regex)\n #look for the synonyms\n r = replaceFunctionNames(regex) + '|'+fast_regex\n return r\n\n#print(extractName(\"def delete_selected\"))\nprint(lookup('def removeSelected'))\n#print(lookup('somestuff def base64ToInt\\([a-z]*\\): func'))\n#print(lookup('somestuff def checkErr: func'))\n#print(lookup('somestuff def add_one[a-z]*: func'))\n#print(replaceFunctionNames('somestuff def addOne\\(\\): func'))\n","repo_name":"avaspataru/hackcambridge101","sub_path":"phrase_similarity.py","file_name":"phrase_similarity.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32748885174","text":"import os, csv\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lab1.settings')\n\nimport django\n\ndjango.setup()\n\nfrom films.models import Movie, Genre, Tag, Rating\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nMOVIES_DIR = os.path.join(BASE_DIR, 'lab1/data/movies.csv')\nTAGS_DIR = os.path.join(BASE_DIR, 'lab1/data/tags.csv')\nRATINGS_DIR = os.path.join(BASE_DIR, 'lab1/data/ratings.csv')\nLINKS_DIR = os.path.join(BASE_DIR, 'lab1/data/links.csv') \n\n\nmovies = csv.reader(open(MOVIES_DIR), delimiter=',')\ntags = csv.reader(open(TAGS_DIR), delimiter=',')\nratings = csv.reader(open(RATINGS_DIR), delimiter=',')\nlinks = csv.reader(open(LINKS_DIR), delimiter=',')\n\n\n# for n in range(1, 100): # movieId,title,genres\n# # movie = Movie.objects.create(\n# # movieID=movies[n][0],\n# # title=movies[n][1],\n# # )\n# movie = Movie()\n# 
movie.movieID = movies[n][0]\n# movie.title = movies[n][1]\n# movie.save()\n# genres = movies[n][2].split('|')\n# for g in genres:\n# genre, created = Genre.objects.get_or_create(name=g)\n# if not created:\n# genre.save()\n# movie.genres.add(genre)\n\n# for n in range(1, 100):\n# movie = Movie.objects.get(movieID=links[n][0])\n# # print links[n][1], links[n][2]\n# movie.imdbId = links[n][1]\n# movie.tmdbId = links[n][2]\n# movie.save()\n \n\n\nfor row in movies: # movieId,title,genres\n if row[0] != 'movieId':\n movie = Movie()\n movie.movieID = row[0]\n movie.title = row[1]\n movie.save()\n \n genres = row[2].split('|')\n# for g in genres:\n# genre = addGenre(g)\n# movie.genres.add(genre)\n for g in genres:\n genre, created = Genre.objects.get_or_create(name=g)\n if not created:\n genre.save()\n movie.genres.add(genre)\n\nfor row in links: # movieId,imdbId,tmdbId\n if row[0] != 'movieId':\n movie = Movie.objects.get(movieID=row[0])\n if row[1] != '':\n movie.imdbId = row[1]\n if row[2] != '':\n movie.tmdbId = row[2]\n movie.save()\n\n\nfor row in tags: # userId,movieId,tag,timestamp\n if row[0] != 'userId':\n tag = Tag()\n tag.content = row[2]\n tag.movie = Movie.objects.get(movieID=row[1])\n tag.save()\n\nfor row in ratings: # userId,movieId,rating,timestamp\n if row[0] != 'userId':\n rating = Rating()\n rating.rate = row[2]\n rating.movie = Movie.objects.get(movieID=row[1])\n rating.save()\n\n\n\n\n \n \n\n","repo_name":"vicrosa25/AII","sub_path":"populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4170946500","text":"from typing import List\nfrom fastapi import Depends, HTTPException,Request\nfrom sqlalchemy.orm import Session \nfrom Database import get_db\nfrom Database.models import models\nfrom responsables.Schemas.Create import EstanciaCreate\nfrom uuid import uuid4\nfrom fastapi_jwt_auth import AuthJWT\n\nclass EstanciaController:\n\n def __init__(self, db:Session = Depends(get_db),AuthJWT:AuthJWT = Depends()):\n self.db = db\n self.auth_jwt = AuthJWT\n\n async def get_estancias(self):\n self.auth_jwt.jwt_required()\n user_data = self.auth_jwt.get_raw_jwt()\n data = self.db.query(models.Estancia) \\\n .filter(models.Estancia.re.any(models.Responsable.id_responsable == user_data.get(\"id_responsable\"))) \\\n .order_by(models.Estancia.fecha_ingreso.desc()).all()\n if not data:\n raise HTTPException(status_code=404, detail=\"Item not found\")\n return data\n\n \n async def get_estancia(self,id_estancia:int):\n self.auth_jwt.jwt_required()\n #self.auth_jwtjwt_optional()\n user_data = self.auth_jwt.get_raw_jwt()\n \n data = self.db.query(models.Estancia) \\\n .filter(models.Estancia.re.any(models.Responsable.id_responsable == user_data.get(\"id_responsable\"))) \\\n .filter(models.Estancia.id_estancia == id_estancia) \\\n .first()\n if not data:\n raise HTTPException(status_code=404, detail=\"Item not found\")\n return data\n\n async def create_estancia(self,estancia: EstanciaCreate):\n identificador = uuid4()\n data = estancia.dict()\n data['identificador'] = identificador.hex\n db_item = models.Estancia(**data)\n self.db.add(db_item)\n self.db.commit()\n\n \n \n\n\n","repo_name":"devlfx/SalaBackend","sub_path":"responsables/Controllers/EstanciaController.py","file_name":"EstanciaController.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"9526591241","text":"import socket\r\nimport select\r\n\r\n# function section\r\n\r\n\r\ndef reliable_send(message, ip):\r\n global received, sock_send, sock_receive\r\n sock_receive.bind((UDP_IP_r_proxy, UDP_PORT_r_proxy))\r\n sock_receive.setblocking(0)\r\n sock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n received = 2 # 0 just send 1 receive ok 2 time out/send\r\n callSend = 1\r\n fragment = 0\r\n if len(message) > 6500:\r\n callSend = int(len(message) / 6500) + 1\r\n fragment = 1 # 1 moreFragment 0 o.w\r\n for x in range(0, callSend):\r\n start = x * 6500\r\n end = (x + 1) * 6500\r\n print(callSend)\r\n if x == callSend - 1:\r\n fragment = 0\r\n FragmentedMESSAGE = str(x) + '*' + str(fragment) + '*' + MESSAGE[start: end] + '*' + str(\r\n ip) + \"*\" + make_parity(MESSAGE[start: end])\r\n print(\"send packet : \" + FragmentedMESSAGE)\r\n if reliable_send_fragmented(FragmentedMESSAGE):\r\n print(\"send succsecfully packet : \" + str(x))\r\n print(\"\\n\")\r\n x += 1\r\n received = 2\r\n else:\r\n print(\"can not send packet number : \" + str(x))\r\n # parity ip/port/split dns\r\n return False\r\n sock_send.close()\r\n sock_receive.close()\r\n return True\r\n\r\n\r\ndef reliable_send_fragmented(message):\r\n counter = 0\r\n global received\r\n while counter < 15:\r\n if received == 0:\r\n result = receive_http()\r\n if received == 1:\r\n counter = 15\r\n return True\r\n if received == 2:\r\n send_http(message)\r\n counter += 1\r\n\r\n if counter == 15 and received == 2:\r\n print(\"proxy is not ready to answer\")\r\n return False\r\n\r\n\r\ndef check_parity(message):\r\n # m[2] data - m[4] parity\r\n temp = str(message)\r\n m = temp[2:-1].split('*')\r\n p = 0\r\n for i in m[2]:\r\n p += ord(i)\r\n parity = bin(p)\r\n parity = parity.split('b')\r\n if m[4] == parity[1]:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef make_parity(message):\r\n print(message)\r\n m = bytes(message, \"utf-8\")\r\n message = str(m)\r\n print(message)\r\n parity = 0\r\n p = 0\r\n for i in message[2:-1]:\r\n p += ord(i)\r\n parity = bin(p)\r\n parity = parity.split('b')\r\n return parity[1]\r\n\r\n\r\ndef send_http(message):\r\n global received\r\n # print(\"send packet\")\r\n # print(\"UDP target IP:\", UDP_IP_s)\r\n # print(\"UDP target port:\", UDP_PORT_s)\r\n # print(\"message:\", message)\r\n sock_send.sendto(bytes(message, \"utf-8\"), (UDP_IP_s_proxy, UDP_PORT_s_proxy))\r\n received = 0\r\n\r\n\r\ndef receive_http():\r\n global received\r\n print(\"client waiting for answer ...\")\r\n ready = select.select([sock_receive], [], [], 1)\r\n if ready[0]:\r\n receive_data, addr = sock_receive.recvfrom(1024) # buffer size is 1024 bytes\r\n # print(\"client receive message \")\r\n if check_parity(receive_data):\r\n received = 1\r\n assert isinstance(receive_data, object)\r\n show_result(receive_data)\r\n return receive_data\r\n else:\r\n received = 2\r\n print(\"parity error\")\r\n return 0\r\n\r\n else:\r\n received = 2\r\n print(\"time out \")\r\n return 0\r\n\r\n\r\ndef show_result(message):\r\n assert isinstance(message, object)\r\n print(\"received message:\", message)\r\n\r\n\r\ndef receive_http_proxy():\r\n global TCP_IP_s_server, sock_receive, sock_send\r\n sock_receive = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n sock_receive.bind((UDP_IP_r_proxy, UDP_PORT_r_proxy))\r\n sock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n hope = 1\r\n temp = receive_http_fragmented()\r\n if temp != str(-1):\r\n # print(temp)\r\n TCP_IP_s_server = 
str(temp[3])\r\n myMessage = str(temp[2])\r\n while temp[1] == str(1):\r\n temp = receive_http_fragmented()\r\n if temp[0] == str(hope):\r\n myMessage += temp[2]\r\n hope += 1\r\n print(\"defragment finish\")\r\n return myMessage\r\n else:\r\n print(\"parity error , remove the packet from buffer...\")\r\n sock_receive.close()\r\n sock_send.close()\r\n\r\n\r\ndef receive_http_fragmented():\r\n print(\"client is waiting for response packet ...\")\r\n notReceive = True\r\n while notReceive:\r\n data, addr = sock_receive.recvfrom(6500) # buffer size is 6500 bytes\r\n print(\"receive packet\")\r\n assert isinstance(data, object)\r\n print(\"received message:\", data)\r\n notReceive = False\r\n\r\n if check_parity(data):\r\n print(data)\r\n temp = str(data)\r\n m = temp[2:-1].split('*')\r\n send_ack_http_proxy(data)\r\n return m\r\n else:\r\n return -1\r\n\r\n\r\ndef send_ack_http_proxy(data):\r\n print(\"send ack to proxy\")\r\n global sock_send\r\n sock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n print(\"UDP target IP:\", UDP_IP_s_proxy)\r\n print(\"UDP target port:\", UDP_PORT_s_proxy)\r\n print(\"message:\", data)\r\n print(\"\\n\")\r\n sock_send.sendto(data, (UDP_IP_s_proxy, UDP_PORT_s_proxy))\r\n sock_send.close()\r\n\r\n\r\n# send part initiation\r\nUDP_IP_s_proxy = \"127.0.0.1\" # \"185.211.88.22\"\r\nUDP_PORT_s_proxy = 5005\r\nsock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n\r\n# receive part initiation\r\nUDP_IP_r_proxy = \"127.0.0.1\" # \"185.211.88.22\"\r\nUDP_PORT_r_proxy = 5006\r\nsock_receive = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\r\n\r\nTCP_IP_s_server = \"\"\r\n# code section\r\nreceived = 2 # 0 just send 1 receive ok 2 time out/send\r\n# MESSAGE = \"GET / HTTP/1.0\\r\\n\\r\\n\"\r\n# DES_IP = input(\"enter destionation IP : \")\r\n# MESSAGE = input(\"enter your http message : \")\r\nDES_IP = \"www.aut.ac.ir\"\r\nMESSAGE = \"GET / HTTP/1.0\\r\\n\\r\\n\"\r\nreliable_send(MESSAGE, DES_IP)\r\nprint(\"send with no problem\")\r\nresult = receive_http_proxy()\r\nprint(result)\r\n# parity ip/port/split dns\r\n\r\n# http type setting numberOfPacke * moreFragment * message * IPDestination * parity\r\n","repo_name":"Yasaman1997/Computer_Networks","sub_path":"Python/new_test_pkg/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29328649759","text":"#Aprobación de créditos\ningreso=int(input(\"¿cúal es tu ingreso?:\"))\nnacimiento=int(input(\"¿qué año naciste?:\"))\nhijos=int(input(\"¿cuántos hijos tienes?:\"))\npertenencia=int(input(\"¿hace cuántos años estás en este banco?:\"))\nestadocivil=input(\"¿cuál es tu estado civil?:\") \nC= estadocivil\nS= estadocivil\nvive=input(\"¿dónde vives? 
(si es en campo escriba R, si es en ciudad escriba U):\")\nR = vive\nU = vive\nif pertenencia > 10 and hijos >= 2:\n print(\"APROBADO\")\nelif estadocivil == C and hijos > 3 and ((2018 - nacimiento)> 45 or (2018 - nacimiento)< 55):\n print(\"APROBADO\")\nelif ingreso >2500000 and estadocivil==S and vive == U:\n print(\"APROBADO\")\nelif ingreso > 3500000 and pertenencia <5:\n print(\"APROBADO\")\nelif vive== R and estadocivil==C and hijos < 2:\n print(\"APROBADO\")\nelse:\n print(\"RECHAZADO\")","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej3/hito1_ej3_731250392801519201fcd3e41f9cb6ee.py","file_name":"hito1_ej3_731250392801519201fcd3e41f9cb6ee.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8961094132","text":"from twilio.rest import TwilioRestClient\nimport os\n\ndef send_text_message(message, phone_num):\n\t\"\"\"Sends a text message to the given phone number.\n\n\tIs called when Ronnie's 'text address' link is clicked.\n\t\"\"\"\n\t\n\tACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')\n\tAUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')\n\tTWILIO_NUMBER = os.environ.get('TWILIO_NUMBER')\n\n\tclient = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)\n\n\tm = client.messages.create(\n\t\tto=phone_num,\n\t\tfrom_=TWILIO_NUMBER,\n\t\tbody=message,\n\t\t)\n\n\treturn m.sid","repo_name":"mfbalder/ChatappFeedmeBot-HB","sub_path":"send_message.py","file_name":"send_message.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"77"} +{"seq_id":"602438345","text":"from flojoy import flojoy, OrderedPair\nfrom time import sleep\nfrom typing import Optional\nimport serial\nimport numpy as np\nfrom datetime import datetime\n\n\n@flojoy(deps={\"pyserial\": \"3.5\"})\ndef SERIAL_TIMESERIES(\n default: Optional[OrderedPair] = None,\n comport: str = \"/dev/ttyUSB0\",\n baudrate: int = 9600,\n num_readings: int = 100,\n record_period: int = 1,\n) -> OrderedPair:\n \"\"\"The SERIAL_TIMESERIES node extracts simple time-dependent 1D data from an Arduino or a similar serial device.\n\n Parameters\n ----------\n num_readings : int\n Number of points to record.\n record_period : float\n Length between two recordings in seconds.\n baudrate : int\n Baud rate for the serial device.\n comport : string\n COM port of the serial device.\n\n num_readings * record_period :\n Is roughly the run length in seconds.\n \"\"\"\n\n ser = serial.Serial(comport, timeout=1, baudrate=baudrate)\n readings = []\n times = []\n # The first reading is commonly empty.\n s = ser.readline().decode()\n\n for i in range(num_readings):\n ts = datetime.now()\n s = ser.readline().decode()\n # Some readings may be empty.\n if s != \"\":\n reading = s[:-2].split(\",\")\n if len(reading) == 1:\n reading = reading[0]\n readings.append(reading)\n\n ts = datetime.now()\n seconds = float(\n ts.hour * 3600 + ts.minute * 60 + ts.second + ts.microsecond / 10**6\n )\n\n times.append(seconds)\n\n if len(times) > 0:\n time1 = seconds - times[i]\n else:\n # Estimate execution time.\n time1 = 0.1\n\n if time1 < record_period:\n sleep(record_period - time1)\n\n times = np.array(times)\n try:\n times -= times[0]\n except IndexError:\n raise IndexError(\"No data detected from the Arduino\")\n\n readings = np.array(readings)\n readings = readings.astype(\"float64\")\n\n return OrderedPair(x=times, 
y=readings)\n","repo_name":"flojoy-io/nodes","sub_path":"IO/PROTOCOLS/SERIAL/BASIC/SERIAL_TIMESERIES/SERIAL_TIMESERIES.py","file_name":"SERIAL_TIMESERIES.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"4375416156","text":"# \n\nfrom __future__ import nested_scopes\n\n\ndef interpret(formula, dictionary):\n \"\"\" Interpretation einer Formel in Postfix-Form\n Erlaubte Operatoren: AND, OR, NOT\n Das dictionary enthält die auszuführenden Funktionen \"\"\"\n\n stack = []\n for token in formula.split():\n if token == \"AND\":\n p = stack.pop()\n q = stack.pop()\n stack.append(lambda x: q(x) & p(x))\n elif token == \"OR\":\n p = stack.pop()\n q = stack.pop()\n stack.append(lambda x: q(x) | p(x))\n elif token == \"NOT\":\n p = stack.pop()\n stack.append(lambda x: not p(x))\n else:\n stack.append(dictionary[token])\n return stack.pop()\n","repo_name":"johsieders/potpourri","sub_path":"fttp/src/interpreters/formula.py","file_name":"formula.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13504100674","text":"\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\nhttps://github.com/pypa/sampleproject\n\"\"\"\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\nimport pathlib\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\n# long_description = (here / 'README.md').read_text(encoding='utf-8')\n\n# Arguments marked as \"Required\" below must be included for upload to PyPI.\n# Fields marked as \"Optional\" may be commented out.\n\nsetup(\n    name='supermarket',\n    version='1.0.0',\n    description='A Python project to demonstrate APM-Logs correlation',\n    author='Emanuil Tolev',\n    author_email='etolev@elastic.co',\n\n    # You can just specify package directories manually here if your project is\n    # simple. 
Or you can use find_packages().\n #\n # Alternatively, if you just want to distribute a single Python file, use\n # the `py_modules` argument instead as follows, which will expect a file\n # called `my_module.py` to exist:\n #\n # py_modules=[\"my_module\"],\n #\n packages=find_packages(where='.'), # Required\n python_requires='>=3.5, <4'\n)\n","repo_name":"emanuil-tolev/logs-traces-correlation","sub_path":"supermarket/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9554769058","text":"import codecs\nimport json\nimport re\nimport logging\nfrom datetime import datetime\nfrom urllib.request import urlopen\nfrom typing import Optional, Tuple\n\nfrom ulauncher.config import API_VERSION\nfrom ulauncher.utils.version import satisfies\nfrom ulauncher.modes.extensions.ExtensionManifest import ExtensionManifest\n\nlogger = logging.getLogger()\n\nCommit = Tuple[str, str]\n\n\nclass ExtensionRemoteError(Exception):\n pass\n\n\nclass InvalidExtensionUrlWarning(Exception):\n pass\n\n\nclass ExtensionNetworkError(Exception):\n pass\n\n\nclass ExtensionIncompatibleWarning(Exception):\n pass\n\n\ndef json_fetch(url):\n try:\n return json.loads(urlopen(url).read())\n except Exception as e:\n # If json.loads fails, treat it as a network error too.\n # It should never happen as all these API endpoint are exclusively JSON\n raise ExtensionNetworkError(f'Could not access repository resource \"{url}\"') from e\n\n\nclass ExtensionRemote:\n url_match_pattern = r\"^(?:git@|https:\\/\\/)(?P[^\\/]+)\\/(?P[^\\/]+)\\/(?P[^\\/]+)\"\n date_format = '%Y-%m-%dT%H:%M:%S%z'\n\n def __init__(self, url):\n self.url = url.lower()\n match = re.match(self.url_match_pattern, self.url, re.I)\n if not match:\n raise InvalidExtensionUrlWarning(f'Invalid URL: {url}')\n\n self.user = match.group(\"user\")\n self.repo = match.group(\"repo\")\n self.host = match.group(\"host\")\n\n if \".\" not in self.host:\n self.extension_id = f\"{self.host}.{self.user}.{self.repo}\"\n else:\n domain, tld = self.host.rsplit(\".\", 1)\n self.extension_id = f\"{tld}.{domain}.{self.user}.{self.repo}\"\n\n if self.host == \"github.com\":\n self.host_api = \"https://api.github.com\"\n self.date_format = '%Y-%m-%dT%H:%M:%SZ'\n elif self.host == \"gitlab.com\":\n host_api = \"https://gitlab.com/api/v4\"\n projects = json_fetch(f\"{host_api}/users/{self.user}/projects?search={self.repo}\")\n project = next((p for p in projects if p[\"name\"] == self.repo), None)\n\n self.host_api = f\"{host_api}/projects/{project['id']}/repository\"\n self.date_format = '%Y-%m-%dT%H:%M:%S.%f%z'\n else:\n self.host_api = f\"https://{self.host}/api/v1\"\n\n def get_download_url(self, commit: str) -> str:\n if self.host == \"gitlab.com\":\n return f'https://{self.host}/{self.user}/{self.repo}/-/archive/{commit}/{self.repo}-{commit}.tar.gz'\n return f'https://{self.host}/{self.user}/{self.repo}/archive/{commit}.tar.gz'\n\n def fetch_file(self, file_path) -> Optional[str]:\n # This saves us a request compared to using the \"raw\" file API that needs to know the branch\n file_api_url = f\"{self.host_api}/repos/{self.user}/{self.repo}/contents/{file_path}\"\n if self.host == \"gitlab.com\":\n file_api_url = f\"{self.host_api}/files/{file_path}?ref=HEAD\"\n\n file_data = json_fetch(file_api_url)\n\n if file_data and file_data.get(\"content\") and file_data.get(\"encoding\"):\n return codecs.decode(file_data[\"content\"].encode(), 
file_data[\"encoding\"]).decode()\n\n return None\n\n def get_compatible_commit_from_tags(self) -> Optional[Commit]:\n \"\"\"\n This method is new for v6, but intentionally undocumented because we still want extension\n devs to use the old way until Ulauncher 5/apiv2 is fully phased out\n \"\"\"\n tags = {}\n # pagination is only implemented for GitHub (default 30, max 100)\n tags_url = f\"{self.host_api}/repos/{self.user}/{self.repo}/tags?per_page=100\"\n if self.host == \"gitlab.com\":\n # GitLab's API allows to filter out tags starting with our prefix\n tags_url = f\"{self.host_api}/tags?search=^apiv\"\n\n try:\n tags_data = json_fetch(tags_url)\n\n for tag in tags_data or []:\n if tag[\"name\"].startswith(\"apiv\") and satisfies(API_VERSION, tag[\"name\"][4:]):\n commit = tag[\"commit\"]\n version = tag[\"name\"][4:]\n id = commit.get(\"sha\", commit.get(\"id\")) # id fallback is needed for GitLab\n commit_time = commit.get(\"created\", commit.get(\"created_at\"))\n tags[version] = (id, commit_time)\n\n if tags:\n id, commit_time = tags[max(tags)]\n if id and self.host == \"github.com\": # GitHub's tag API doesn't give any dates\n commit_data = json_fetch(f\"{self.host_api}/repos/{self.user}/{self.repo}/commits/{id}\")\n commit_time = commit_data[\"commit\"][\"committer\"][\"date\"]\n if id and commit_time:\n date = datetime.strptime(commit_time, self.date_format)\n return id, date.isoformat()\n\n except Exception as e:\n logger.warning(\"Unexpected error retrieving version from tags '%s' (%s: %s)\", self.url, type(e).__name__, e)\n\n return None\n\n def get_commit(self, ref: str = \"HEAD\") -> Commit:\n if self.host == \"gitlab.com\":\n url = f\"{self.host_api}/commits/{ref}\"\n elif self.host == \"github.com\":\n url = f\"{self.host_api}/repos/{self.user}/{self.repo}/commits/{ref}\"\n else:\n # Gitea/Codeberg API differs from GitHub here, but has the same API\n url = f\"{self.host_api}/repos/{self.user}/{self.repo}/git/commits/{ref}\"\n\n try:\n response = json_fetch(url)\n id = response.get(\"sha\") or response.get(\"id\")\n commit_time = response.get(\"created_at\") or response[\"commit\"][\"committer\"][\"date\"]\n date = datetime.strptime(commit_time, self.date_format)\n return id, date.isoformat()\n except (KeyError, TypeError) as e:\n raise ExtensionRemoteError(f'Could not fetch reference \"{ref}\" for {self.url}.') from e\n\n def get_latest_compatible_commit(self) -> Commit:\n \"\"\"\n Finds first version that is compatible with users Ulauncher version.\n Returns a commit hash and datetime.\n \"\"\"\n manifest = ExtensionManifest(json.loads(self.fetch_file(\"manifest.json\") or \"{}\"))\n\n if satisfies(API_VERSION, manifest.api_version):\n return self.get_commit()\n\n tag = self.get_compatible_commit_from_tags()\n if tag:\n return tag\n\n if satisfies(\"2.0\", manifest.api_version):\n logger.warning(\"Falling back on using API 2.0 version for %s.\", self.repo)\n return self.get_commit()\n\n raise ExtensionIncompatibleWarning(f\"{manifest.name} does not support Ulauncher API v{API_VERSION}.\")\n","repo_name":"otisdog8/Ulauncher","sub_path":"ulauncher/modes/extensions/ExtensionRemote.py","file_name":"ExtensionRemote.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"29313262844","text":"import math\nimport json\nimport requests\nimport itertools\nimport numpy as np\nimport time\nimport pickle\nimport tqdm\n\nfrom datetime import datetime, timedelta\nprint('import complete')\n\ndef 
make_request(uri, max_retries = 5):\n\n def fire_away(uri):\n response = requests.get(uri)\n assert response.status_code == 200\n return json.loads(response.content)\n current_tries = 1\n while current_tries < max_retries:\n try:\n time.sleep(1)\n response = fire_away(uri)\n return response\n except:\n time.sleep(1)\n current_tries += 1\n return fire_away(uri)\n\n\ndef pull_posts_for(subreddit, start_at, end_at):\n\n def map_posts(posts):\n return list(map(lambda post: {\n 'id': post['id'],\n 'created_utc': post['created_utc'],\n 'permalink': post['permalink'],\n }, posts))\n\n SIZE = 100 # maximum request amount to pushshift.io at once\n URI_TEMPLATE = r'https://api.pushshift.io/reddit/search/submission/?subreddit={}&after={}&before={}&limit={}&fields=id,created_utc,permalink'\n\n post_collections = map_posts( \\\n make_request( \\\n URI_TEMPLATE.format( \\\n subreddit, start_at, end_at, SIZE))['data'])\n n = len(post_collections)\n while n == SIZE:\n time.sleep(1)\n last = post_collections[-1]\n new_start_at = last['created_utc'] - (10)\n\n more_posts = map_posts( \\\n make_request( \\\n URI_TEMPLATE.format( \\\n subreddit, new_start_at, end_at, SIZE))['data'])\n\n n = len(more_posts)\n post_collections.extend(more_posts)\n\n # remove duplicates\n res = []\n [res.append(x) for x in post_collections if x not in res]\n\n return res\n\n############################################################################################################\n\ndays = 3\nsubreddit = 'citiesskylines'\nend_at = math.ceil(datetime.utcnow().timestamp())\nstart_at = math.floor((datetime.utcnow() - \\\n timedelta(days=days)).timestamp())\nprint(f'from {start_at} to {end_at}, {days} days @ r/{subreddit}')\n\nposts = pull_posts_for(subreddit, start_at, end_at)\n\nprint(len(posts))\n\nf = open(\"./data/post_filtered_pickle\", \"wb\")\npickle.dump(posts, f)\nf.close()\n","repo_name":"maxjo020418/OKBHscraper","sub_path":"pushshiftio_post.py","file_name":"pushshiftio_post.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"43507790285","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.optim import SGD, Adam\r\nimport torch.utils.data as Data\r\nimport torchvision.transforms as transforms\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\nfilename = \"G:\\data\\spambase.csv\" # 读取文件位置\r\nspam = pd.read_csv(filename) # (4600,58) 4600个样本,每个样本有58个特征\r\n# print(spam.head())\r\nX = spam.iloc[:, 0:57].values # 去掉最后一列标签列\r\ny = spam.spam.values\r\n\r\n# 数据归一化\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=123) # 将数据分为训练集和测试集\r\nscales = MinMaxScaler(feature_range=(0, 1)) # 将数据缩放到0,1\r\nX_train_s = scales.fit_transform(X_train) # 对X_train_s 缩放,下同\r\nX_test_s = scales.transform(X_test) #\r\n\r\n# 使用箱线图对比邮件的每个特征分布\r\ncolname = spam.columns.values[:-1]\r\nplt.figure(figsize=(20, 14))\r\nfor ii in range(len(colname)):\r\n plt.subplot(7, 9, ii+1)\r\n sns.boxplot(x=y_train, y=X_train[:,ii])\r\n plt.title(colname[ii])\r\nplt.subplots_adjust(hspace=0.4)\r\nplt.savefig('box.png')\r\nplt.show()\r\n\r\n\r\n# 搭建MLP网络\r\nclass MLPclassifica(nn.Module):\r\n def __init__(self):\r\n super(MLPclassifica, self).__init__() 
#构造方法必须���\r\n\r\n # Sequential()表示将括号里的层链接起来,下面nn.Linear表示输入有57个神经元,输出有30个神经元,存在偏置神经元(默认开启)\r\n # 然后将输出结果带入ReLu函数,Linear与Relu合在一起起名为hidden1,上层的输出为下层的输入\r\n self.hidden1 = nn.Sequential(\r\n nn.Linear(\r\n in_features=57,\r\n out_features=30,\r\n bias=True,\r\n ),\r\n nn.ReLU()\r\n )\r\n\r\n self.hidden2 = nn.Sequential(\r\n nn.Linear(30, 10),\r\n nn.ReLU()\r\n )\r\n\r\n self.classifica = nn.Sequential(\r\n nn.Linear(10, 2),\r\n nn.Sigmoid()\r\n )\r\n\r\n def forward(self, x): # 定义前向传播函数\r\n fc1 = self.hidden1(x)\r\n fc2 = self.hidden2(fc1)\r\n output = self.classifica(fc2)\r\n\r\n return fc1, fc2, output\r\n\r\n\r\n# 数据转为张量\r\nX_train_t = torch.from_numpy(X_train_s.astype(np.float32))\r\ny_train_t = torch.from_numpy(y_train.astype(np.int64))\r\n\r\nX_test_t = torch.from_numpy(X_test_s.astype(np.float32))\r\ny_test_t = torch.from_numpy(y_test.astype(np.int64))\r\n\r\ntrain_data = Data.TensorDataset(X_train_t, y_train_t)\r\n# 定义一个数据加载器,会将数据分批次喂给神经网络,这里定义的一批为64个样本\r\ntrain_loader = Data.DataLoader(\r\n dataset=train_data, # 数据是什么\r\n batch_size=64, # 每批多少个\r\n shuffle=True, # 是否打乱数据\r\n #num_workers=2\r\n)\r\n\r\n# 我们的网络结构是个类,将其实例化一下\r\nmlpc = MLPclassifica()\r\n\r\n# 定义优化器,使用Adam优化算法,可自动调节学习率\r\noptimizer = torch.optim.Adam(mlpc.parameters(), lr=0.01)\r\n\r\nloss_func = nn.CrossEntropyLoss() # 定义损失函数为二分类损失函数\r\n\r\nmax_epoch = 15 # 训练轮次\r\ntrain_loss_list = [] # 定义一个空列表,等下来存储训练的损失\r\naccuracy_list = [] #同上,来存储精度\r\n\r\nfor epoch in range(max_epoch):\r\n\r\n for step,(b_x,b_y) in enumerate(train_loader):\r\n _, _, output = mlpc(b_x) # 将b_x喂给神经网络,得到输出\r\n train_loss = loss_func(output, b_y) # 根据输出计算损失函数\r\n optimizer.zero_grad() # torch中每次求导梯度会叠加,所以我们在反向传播的过程中先将梯度清零再求导\r\n train_loss.backward() # 求导\r\n optimizer.step() # 更新参数\r\n print(train_loss)\r\n\r\n niter = epoch * len(train_loader)+step+1\r\n\r\n if niter % 25 == 0:\r\n train_loss_list.append(train_loss.detach().numpy()) # 没经过25次迭代记录一次损失值\r\n _, _, output = mlpc(X_test_t)\r\n _, pre_index = torch.max(output, 1)\r\n test_accuracy = accuracy_score(y_test, pre_index) # 计算精度\r\n accuracy_list.append(test_accuracy)\r\n\r\nplt.subplot(2,1,1) #画loss\r\nplt.plot(train_loss_list)\r\nplt.title('loss')\r\n\r\nplt.subplot(2,1,2) #画精度表\r\nplt.title('accracy')\r\nplt.plot(accuracy_list)\r\nplt.savefig('train.png')\r\nplt.show()\r\n\r\n#torch.save(mlpc, \"spam_model.pkl\") #保存模型的网络结构与参数\r\n#torch.save(mlpc.state_dict(), \"spam_state_dict.pkl\") # 仅保存所有的参数\r\n","repo_name":"saber805/spam_classify","sub_path":"trian_spam_classifica.py","file_name":"trian_spam_classifica.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5875091163","text":"from modules.db import db\nfrom flask import session, flash\nfrom os import abort\n\ndef get():\n sql = \"SELECT * FROM schools WHERE id=:school_id\"\n result = db.session.execute(sql, {\"school_id\": session[\"school\"]})\n school = result.fetchone()\n return school\n\ndef create(form):\n if session[\"csrf_token\"] != form[\"csrf_token\"]:\n abort(403)\n schoolname = form[\"schoolname\"]\n info = form[\"info\"]\n address = form[\"address\"]\n phone = form[\"phone\"]\n www = form[\"www\"]\n if len(schoolname) < 3 or len(info) < 10 or len(address) < 10 or len(phone) < 4 or len(www) < 3:\n flash(\"Tarkista, että kaikki kentät ovat oikein täytetty\", \"error\")\n return False\n sql = \"INSERT INTO schools (schoolname, info, address, phone, www, visible) VALUES (:schoolname, :info, :address, :phone, :www, 'true') 
RETURNING id\"\n result = db.session.execute(sql, {\"schoolname\":schoolname, \"info\": info, \"address\": address, \"phone\": phone, \"www\": www})\n school_id = result.fetchone()[0]\n sql = \"INSERT INTO schooladmins (user_id, school_id) VALUES (:user_id, :school_id)\"\n db.session.execute(sql, {\"user_id\":session[\"user_id\"], \"school_id\": school_id})\n db.session.commit()\n session[\"school\"] = school_id\n return True\n\ndef edit(form):\n if session[\"csrf_token\"] != form[\"csrf_token\"]:\n abort(403)\n schoolname = form[\"schoolname\"]\n info = form[\"info\"]\n address = form[\"address\"]\n phone = form[\"phone\"]\n www = form[\"www\"]\n if len(schoolname) < 3 or len(info) < 10 or len(address) < 10 or len(phone) < 4 or len(www) < 3:\n flash(\"Tarkista, että kaikki kentät ovat oikein täytetty\", \"error\")\n return False\n sql = \"SELECT schoolname FROM schools WHERE id=:id\"\n result = db.session.execute(sql, {\"id\": session[\"school\"]})\n oldname = result.fetchone()[0]\n if oldname != schoolname: # someone wants to change the name of the school\n sql = \"SELECT * FROM schools WHERE schoolname=:schoolname\"\n result = db.session.execute(sql, {\"schoolname\": schoolname})\n if result.fetchone():\n flash(\"Tämä koulunimi on jo käytössä muualla\", \"error\")\n return False\n sql = \"UPDATE schools SET schoolname=:schoolname, info=:info, address=:address, phone=:phone, www=:www WHERE id=:id\"\n db.session.execute(sql, {\"schoolname\": schoolname, \"info\": info, \"address\": address, \"phone\": phone, \"www\": www, \"id\": session[\"school\"]})\n db.session.commit()\n return True\n","repo_name":"rundtjan/kielipelisovellus","sub_path":"modules/schoolz.py","file_name":"schoolz.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19006237386","text":"from collections import deque\nclass Solution:\n \n #Function to return list containing vertices in Topological order.\n def topoSort(self, V, adj):\n # Code here\n indeg = [0]*V\n ans = []\n for i in range(V):\n for x in adj[i]:\n indeg[x] += 1\n \n pq = deque()\n for i in range(V):\n if indeg[i] == 0:\n pq.append(i)\n \n while(len(pq)) > 0:\n t = pq.popleft()\n ans.append(t)\n for x in adj[t]:\n indeg[x] -= 1\n if indeg[x] == 0:\n pq.append(x)\n \n return ans","repo_name":"godspell/Data_Structure_and_Algorithms","sub_path":"Graphs/Topological sort/ans2.py","file_name":"ans2.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3149402862","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport numpy as np\nimport time\nimport multiprocessing as mp\nimport os, sys\nfrom itertools import repeat\n\nimport LGTp as lgt\n\nprint(\"default_rng:\",np.random.default_rng())\n\ncpu_count = os.cpu_count()\nprint(\"os cpu_count:\",cpu_count)\n\n\n# calculate equilibrating phase\n# Change only here\n#N = 6\n#N_t = 12\n#run_n = 4\n#beta_id = \"b050to200s40\"\n#n_conf = 200\n\nif __name__ == '__main__':\n\n\targs = sys.argv # N, N_t, run_n, beta_id, n_conf, data_dir\n\n\tprint('Argument : {}'.format(args))\n\t\n\tif len(args) != 8:\n\t\traise SyntaxError(\"Check args : N, N_t, run_n, beta_id, prec, data_dir, fig_dir\")\n\n\tN = int(args[1]) # Spatial lattice point number \n\tN_t = int(args[2]) # Temporal lattice point number\n\trun_n = int(args[3]) # run id\n\tbeta_id = str(args[4]) # beta set id\n\tprec = float(args[5]) # target precision\n\tdata_dir 
= str(args[6]) # data save directory\n\tfig_dir = str(args[7]) # figure save directory\n\n\tstart_b = float(beta_id[1:4])*0.01\n\tend_b = float(beta_id[6:9])*0.01\n\tsteps = int(beta_id[-2:])\n\n\tmax_steps = 500\n\t\n\tbeta_list = np.linspace(start_b,end_b,steps)\n\tprint(\"generating U1-%d \"%(N)+beta_id)\n\n\tnt = len(beta_list)\n\n\tensem = []\n\n\t# for b in range(nt):\n\tdef simulate(b):\n\t# start = time.time()\n\t\t\t#seed = int(beta_list[b]*1000)\n\t\t\tseed = int((time.time() % 1)*1000)\n\n\t\t\tu1 = lgt.Lattice([N,N,N,N_t])\n\t\t\tu1.init_fields('U1','Cold',seed)\n\t\t\t\n\t\t\tbare_parameters = u1.bare_parameter_generator()\n\t\t\tbare_parameters['beta'] = beta_list[b]\n\t\t\t\n\t\t\tg = lgt.action(u1,bare_parameters)\n\t\t\tO = g.polyakovLoopR_nb # Target observable\n\n\t\t\tt_eq, t_ac, _, _ = lgt.calc_teq_tac(bare_parameters,\n\t\t\t\t\tO, \n\t\t\t\t\tu1, \n\t\t\t\t\ttol=prec, \n\t\t\t\t\tmax_steps=max_steps, \n\t\t\t\t\tverbose=True, \n\t\t\t\t\tfig_dir=fig_dir, \n\t\t\t\t\tuse_lat=True)\n\t\t\t\n\t\t\tt_eq = int(np.round(t_eq+0.5))\n\t\t\tt_ac = int(np.round(t_ac+0.5))\n\t\t\t\n\t\t\tprint(\"beta\",beta_list[b],\" teq : \",t_eq,\" tac : \",t_ac)\n\t\t\t\n\t\t\tif t_ac > max_steps:\n\t\t\t\t\treturn\n\n\t\t\t# Finish thermalizing if t_eq > max_steps\n\t\t\tif t_eq > max_steps*3:\n\t\t\t\trem_eq = max_steps*2\n\t\t\telse:\n\t\t\t\trem_eq = t_eq - max_steps\n\n\t\t\tfor i in range(rem_eq):\n\t\t\t\tlgt.metropolis(u1,bare_parameters)\n\t\t\t\n\t\t\tconf = []\n\t\t\t\t\t\n\t\t\t# Generate minimum number of configurations\n\t\t\tO_mean = O(u1.field)\n\t\t\tO_hist = []\n\t\t\tO_diff_hist = []\n\t\t\tfor i in range(100):\n\t\t\t\tO_mean_old = O_mean\n\t\t\t\t\n\t\t\t\tfor t in range(2*t_ac):\n\t\t\t\t#for t in range(t_ac):\n\t\t\t\t\tlgt.metropolis(u1,bare_parameters)\n\t\t\t\tconf.append(u1.field)\n\n\t\t\t\tO_hist.append(O(u1.field))\n\t\t\t\tO_mean = np.mean(O_hist)\n\t\t\t\tO_diff = np.abs(O_mean - O_mean_old)\n\t\t\t\tO_diff_hist.append(O_diff)\n\n\t\t\t# Generate conf of target precision\n\t\t\twhile np.mean(O_diff_hist[-100:]) > prec and len(O_diff_hist) < max_steps*3:\n\t\t\t\t\n\t\t\t\tO_mean_old = O_mean\n\t\t\t\t\n\t\t\t\tfor t in range(2*t_ac):\n\t\t\t\t#for t in range(t_ac):\n\t\t\t\t\tlgt.metropolis(u1,bare_parameters)\n\t\t\t\tconf.append(u1.field)\n\n\t\t\t\tO_hist.append(O(u1.field))\n\t\t\t\tO_mean = np.mean(O_hist)\n\t\t\t\tO_diff = np.abs(O_mean - O_mean_old)\n\t\t\t\tO_diff_hist.append(O_diff)\n\t\t\t\n\t\t\tbeta = beta_list[b]\n\t\t\tconf_name = data_dir+'/U1_b%0.3fN%dtac%dS%d.npy' %(beta,N,t_ac,seed)\n\t\t\tnp.save(conf_name, conf)\n\n\t# Test run\n\tprint(\"starting test run\")\n\tstart = time.time()\n\tsimulate(0)\n\tdur = time.time() - start\n\n\tn_ensem = len(beta_list)\n\tn_core = cpu_count\n\texpected_dur = n_ensem*dur/n_core\n\n\tprint(\"test run duration : %.5f sec\"%(dur))\n\tprint(\"for %d ensemble ~ %d sec ~ %0.3f hour\"%(n_ensem,n_ensem*dur,n_ensem*dur/3600.))\n\tprint(\"with %d core, expecting : %0.3f hour\"%(n_core, expected_dur/3600))\n\n\n\tnow = time.ctime(time.time())\n\texpected_end = time.ctime(time.time() + expected_dur)\n\n\tprint(\"starting at \"+now)\n\tprint(\"expected end time : \"+expected_end)\n\n\tstart = time.time()\n\n\tp = mp.Pool(n_core)\n\tres = p.map(simulate, range(nt)[1:])\n\tp.close()\n\tp.join()\n\n\tdue = time.time() - start\n\tprint(\"time 
span:\",due)\n\n\tprint(due/3600)\n\n","repo_name":"chanjure/LGTp","sub_path":"scripts/U1_auto_conf_gen.v5.py","file_name":"U1_auto_conf_gen.v5.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73573130487","text":"from django.db import models\nfrom common.models import CommonModel\n\n# Create your models here.\n\n\nclass Review(CommonModel):\n user = models.ForeignKey(\n \"users.User\",\n on_delete=models.CASCADE,\n )\n # boarder = models.ForeignKey(\n # \"boarders.Boarder\",\n # null=True,\n # blank=True,\n # on_delete=models.SET_NULL,\n # related_name=\"reviews\",\n # )\n sitter = models.ForeignKey(\n \"sitters.Sitter\",\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"reviews\",\n )\n payload = models.TextField()\n rating = models.PositiveIntegerField()\n\n def __str__(self):\n return f\"{self.user} / {self.rating}тнР\"\n","repo_name":"bellakim0843/pawfect_match_backend","sub_path":"reviews/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29311608609","text":"#Juego adivina mi número\nprint(\"Intenta adivinar mi numero, esta entre el 1 y el 20\")\na=int(input(\"En que numero estoy pensando :\"))\nimport random\nb = (random.randrange(20))\nx = 1\nwhile x < 5:\n if b > a:\n print(\"Mi numero es mayor\")\n a=int(input(\"En que numero estoy pensando :\"))\n elif b < a:\n print(\"Mi numero es menor\")\n a=int(input(\"En que numero estoy pensando :\"))\n elif a == b:\n print(\"Adivinaste, mi numero era\",(b))\n break\n x = x + 1\nprint(\"No adivinaste, mi número era\",(b))\n\n \n ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej12/hito1_ej12_b42ebbf40e5d7a3362012473908552d4.py","file_name":"hito1_ej12_b42ebbf40e5d7a3362012473908552d4.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18543968961","text":"import socket\nimport threading\n\nhost = socket.gethostname()\nport = 6666\nbuff = 1024\n\nclient_sock = socket.socket()\nclient_sock.connect((host, port))\n\ndef recieve():\n while True:\n rMsg = client_sock.recv(buff).decode()\n if not rMsg:\n print('Ending connection')\n break\n print()\n print(\"revd:\", rMsg)\n\ndef send():\n while True:\n sMsg = input()\n client_sock.send(sMsg.encode())\n\nt1 = threading.Thread(target=send, name=1)\nt2 = threading.Thread(target=recieve, name=2)\n\nt1.start()\nt2.start()","repo_name":"mihirs16/Computer-Networks","sub_path":"Full Duplex/full_dup_client.py","file_name":"full_dup_client.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3924529049","text":"\n\nfrom gensim.test.utils import common_dictionary, common_corpus\nfrom gensim.models import LsiModel\n\n\n\n\n\nimport jieba\nimport jieba.posseg as pseg\nimport gensim\nimport json\nfrom gensim import corpora\nimport time\nfrom algorithm.base import dbs\n\ndef keywords_save():\n # 把所有keyword写入文件\n keywords = open('keywords.txt', encoding='utf-8', mode='w')\n\n sql = \"\"\"select keyword_paper from doclda\"\"\"\n result = dbs.getTuples(sql)\n for i in range(0, len(result)):\n if (result[i][0]):\n keywords.write(result[i][0] + ',')\n\ndef userdict_extract():\n \"\"\"\n 抽取关键字作为用户字典\n :return: 存储在 userdict.txt里面\n \"\"\"\n 
keywords_save()\n\n # 把keyword读出来, 并且统计词频写入userdict.txt里面\n wordDict = {}\n keywordsLst = open('keywords.txt', encoding='utf-8', mode='r').read().split(',')\n userdict = open('userdict.txt', encoding='utf-8', mode='w')\n\n # 统计词频放入词典\n for word in keywordsLst:\n if(word in wordDict):\n wordDict[word] += 1\n else:\n wordDict[word] = 1\n # 把词典写入文件\n for word in wordDict:\n userdict.write(word + ' ' + str(wordDict[word]) + ' n' + '\\n')\n\nprint('查询教师院系')\nsql='select id,institution from teacher'\nlist=dbs.getTuples(sql)\ninstitution_dict={}\nfor institution in list:\n if institution[1] not in institution_dict.keys():\n institution_dict[institution[1]]=[]\n institution_dict[institution[1]].append(institution[0])\n else :\n institution_dict[institution[1]].append(institution[0])\n\nmax=0\nmin=100\nfor v in institution_dict:\n l=len(institution_dict[v])\n if l>max:\n max=l\n if l=20:\n num_topics=10\n num_words=(num_topics-2)*2+10\n print('本院系文章总数为%d,即将分为主题数%d个,关键字%d个......' % (len(corpus),num_topics,num_words))\n # ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=num_topics, id2word=dictionary, passes=50)\n # result = ldamodel.print_topics(num_topics=num_topics, num_words=num_words)\n # doc_lda = ldamodel[corpus]\n model = LsiModel(corpus, id2word=dictionary,num_topics=num_topics,)\n doc_lda = model[corpus]\n result = model.print_topics(num_topics=num_topics, num_words=num_words)\n time2 = time.time()\n print('模型训练用时:', time2 - time1)\n print('LDA模型训练完成。插入数据库......')\n\n\n for n in range(len(doc_lda)):\n Topic=doc_lda[n]\n if len(Topic)==0:\n prams = (institution_paper_list[n][0], institution + \"其他\", json.dumps({}, ensure_ascii=False),\n json.dumps({}, ensure_ascii=False))\n sql = 'insert into lda2 values(%s,%s,%s,%s)'\n list = dbs.exe_sql(sql, prams)\n continue\n c1 = sorted(Topic, key=lambda x: x[1], reverse=True)\n\n wordTopic = [i[1] for i in result if int(c1[0][0]) == i[0]]\n\n d=strToMap(wordTopic[0])\n t={}\n for key in DocWord[n]:\n if key in d.keys():\n t[key]=d[key]\n topic=c1[0][0]\n prams=(institution_paper_list[n][0],institution+str(topic),json.dumps(d,ensure_ascii=False),json.dumps(t,ensure_ascii=False))\n sql='insert into lda2 values(%s,%s,%s,%s)'\n list = dbs.exe_sql(sql, prams)\n\n\n\n\n","repo_name":"ischenrui/eds","sub_path":"algorithm/lsi.py","file_name":"lsi.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6621816263","text":"import asyncio\nfrom .db import model\nfrom .db import create_session\nfrom sqlalchemy import or_,and_, desc, asc\nimport queue\nimport sys\nimport logging\nimport datetime\nimport weakref\nfrom concurrent.futures import ThreadPoolExecutor\nimport concurrent\nfrom . 
import webserver\n#asyncio.tasks._DEBUG = True\n\n\nclass Job(object):\n def __init__(self, entry):\n self.entry = entry\n\n def __lt__(self, other):\n if self.entry and getattr(other, 'entry', None):\n return not self.entry.priority.__lt__(other.entry.priority)\n return False\n\n def __repr__(self):\n return \"\" %(self.entry and self.entry.id or self.entry.name)\n\n\n\nclass Daemon(object):\n log = logging.getLogger(\"daemon\")\n\n def __init__(self, manager, check_interval = 10, queue_size=20):\n self.manager = manager\n self.jobs = asyncio.PriorityQueue(queue_size)\n self.check_interval = check_interval\n self.in_check = set()\n self.workpool = ThreadPoolExecutor(5)\n self.loop = manager.loop\n self.manager.loop = self.loop\n self.blacklist = set()\n self.first_run = True\n\n @asyncio.coroutine\n def do_job(self):\n while True:\n try:\n job = yield from self.jobs.get()\n #yield from asyncio.sleep(1000)\n entry = job.entry\n entry.state = model.EntryState.started\n session = create_session()\n session.add(entry)\n self.log.info(\"check entry: %s\" %entry.full_path)\n if entry.plugin is None:\n self.log.debug(\"detect plugin for entry: %s\" %entry.id)\n (plugin, prio) = self.manager.get_backend_for_entry(entry)\n if not plugin:\n self.log.info(\"can't find plugin to handle url %s\" %(entry))\n entry.set_error(\"can't find plugin to handle url\", unhandled=True)\n continue\n entry.plugin = plugin.name\n session.commit()\n self.log.debug(\"use plugin for entry %s: %s (prio=%s)\" %(entry.id, plugin.name, prio))\n else:\n plugin = self.manager.get_backend(entry.plugin)\n if not plugin:\n self.log.error(\"entry has plugin that does not exist\")\n self.blacklist.add(entry.id)\n # FIXME, blacklist entry until restart\n return\n\n rv = plugin.do_entry(entry)\n def call_done(future):\n asyncio.Task(self.job_done(future))\n #rv.add_done_callback(self.job_done)\n rv.add_done_callback(call_done)\n yield from rv\n except Exception as e:\n self.log.exception(e)\n #raise asyncio.tasks.Return(job)\n\n def job_done(self, future):\n entry, rv = future.result()\n if not rv:\n self.log.error(\"job failed: %s\", str(entry))\n try:\n self.in_check.remove(entry.id)\n except KeyError:\n self.log.debug(\"entry should have been in in_check\")\n else:\n dm = yield from self.manager.get_download_manager(entry.collection)\n yield from dm.entry_done(entry)\n try:\n self.in_check.remove(entry.id)\n except KeyError:\n self.log.debug(\"entry should have been in in_check\")\n\n\n @asyncio.coroutine\n def got_entries(self, entries):\n if not entries:\n return\n try:\n for entry in entries:\n if entry.id in self.in_check:\n self.log.debug(\"entry still processed: %s\" %entry.full_path)\n continue\n\n self.in_check.add(entry.id)\n #self.in_check.add(entry)\n #embed()\n #print(\"qlen\", self.jobs.qsize())\n #asyncio.Task(self.do_job())\n yield from self.jobs.put(Job(entry))\n #print(\"%%%%%%\")\n #print(rv)\n\n #entry.next_check = next_check\n #session.add(entry)\n\n except Exception as e:\n self.log.exception(e)\n #for i in session.query(model.Entry).filter(or_(model.Entry.next_check==None,\n #model.Entry.next_check= DATE('{}') \" \\\n \"AND f_mensaje <= DATE('{}')) \".format(p_clave, f_ini, f_fin)\n\n df = pd.read_sql_query(query, conexion)\n if not df.empty:\n print(df)\n return df\n else:\n messagebox.showerror(\"Error\",\"No hay datos para mostrar. 
Primero cargar la Base de Datos\")\n return(\"Error: No hubo coincidencia con tu búsqueda\")\n\ndef consultar_comentarios_cantidad(conexion, p_clave):\n query = \"SELECT usuario.nick_usuario, count(mensaje.text_mensaje) as cantidad \" \\\n \"FROM mensaje \" \\\n \"INNER JOIN usuario ON usuario.id_usuario = mensaje.id_usuario \" \\\n \"GROUP BY mensaje.id_usuario \" \\\n \"HAVING text_mensaje like '%{}%' \" \\\n \"ORDER BY cantidad DESC\".format(p_clave)\n\n df = pd.read_sql_query(query, conexion)\n if not df.empty:\n return df\n else:\n messagebox.showerror(\"Error\",\"No hay datos para mostrar. Primero cargar la Base de Datos\")\n return(\"Error: No hubo coincidencia con tu búsqueda\")\n\ndef consultar_media_mensajes(conexion, f_ini, f_fin):\n query = \"SELECT red_social.nom_red_social, mensaje.f_mensaje \" \\\n \"FROM mensaje \" \\\n \"INNER JOIN red_social ON red_social.id_red_social = mensaje.id_red_social \" \\\n \"WHERE f_mensaje >= DATE('{}') \" \\\n \"AND f_mensaje <= DATE('{}') \".format(f_ini, f_fin)\n\n df = pd.read_sql_query(query, conexion)\n if not df.empty:\n df[\"f_mensaje\"] = pd.to_datetime(df[\"f_mensaje\"])\n df[\"dia\"] = df[\"f_mensaje\"].dt.date\n df = df.loc[:, [\"nom_red_social\", \"dia\"]]\n m_dia = df.groupby([\"nom_red_social\", \"dia\"])[\"dia\"].count().reset_index(name='Mensajes')\n total_mensajes = m_dia[\"Mensajes\"].sum()\n m_dia['media_mensajes'] = m_dia['Mensajes']/total_mensajes\n print(m_dia)\n m_dia.plot(x='dia', y=\"media_mensajes\", kind='bar', figsize=(12, 8))\n plt.xticks(rotation=30)\n plt.xlabel('Días')\n plt.ylabel('Porcentaje')\n plt.title('Media de mensajes por día', size=18)\n plt.show()\n else:\n messagebox.showerror(\"Error\",\"No hay datos para mostrar. Primero cargar la Base de Datos\")\n return(\"Error: No hubo coincidencia con tu búsqueda\")\n\ndef stadisticas_mensaje(conexion, word):\n query= \"SELECT (red_social.nom_red_social) as Red_Social, count(mensaje.text_mensaje) as Cantidad \" \\\n \"FROM mensaje \" \\\n \"INNER JOIN red_social ON red_social.id_red_social = mensaje.id_red_social \" \\\n \"WHERE mensaje.text_mensaje like '%{}%' \" \\\n \"GROUP BY red_social.nom_red_social\".format(word)\n\n df= pd.read_sql_query(query, conexion)\n if not df.empty:\n return df\n else:\n messagebox.showerror(\"Error\",\"No hay datos para mostrar. 
Primero cargar la Base de Datos\")\n return(\"Error: No hubo coincidencia con tu búsqueda\")","repo_name":"villa85/curso_python_2","sub_path":"Proyecto_Final_23E_Yuniel_Villalon/obtener_datos/consultar_datos.py","file_name":"consultar_datos.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29255508539","text":"\nimport argparse\n\ndef count_lines(line):\n n_lines = 1\n return(n_lines)\n \ndef count_words(line):\n n_words = 0\n while \" \" in line:\n line = line.replace(\" \",\" \")\n words = line.strip().split(\" \")\n if words != ['']:\n n_words += len(words)\n return(n_words)\n \ndef count_chars(line):\n n_chars = len(line) + 1\n return(n_chars)\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Count lines, words and characters.')\n parser.add_argument('file_path', type=str, help='name of the file to be counted')\n parser.add_argument('-l', dest= \"lines\", action=\"store_true\", help='count lines')\n parser.add_argument('-c', dest= \"characters\", action=\"store_true\", help='count chars')\n parser.add_argument('-w', dest= \"words\", action=\"store_true\", help='count words')\n\n args = parser.parse_args()\n return(args)\n\ndef open_file(file_path):\n try:\n data_file = open(file_path, 'r')\n return(True, data_file)\n except OSError:\n return(False, 'File not found')\n\nif __name__ == '__main__':\n args = parse_args()\n data_file = open_file(args.file_path)\n if data_file[0] == True:\n lines = 0\n words = 0\n chars = 0\n for data_line in data_file[1]:\n if args.lines:\n lines += count_lines(data_line)\n if args.words:\n words += count_words(data_line)\n if args.characters:\n chars += count_chars(data_line)\n data_file[1].close()\n if args.lines:\n print(\"Number of lines: \" + str(lines))\n if args.words:\n print(\"Number of words: \" + str(words))\n if args.characters:\n print(\"Number of characters: \" + str(chars))\n else:\n print(data_file[1])\n\n","repo_name":"janusz-krauze/word_counter","sub_path":"word_counter.py","file_name":"word_counter.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36518508609","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User, Group\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom datetime import datetime\ntry:\n from django.contrib.sites.shortcuts import get_current_site\nexcept ImportError:\n from django.contrib.sites.models import get_current_site\nfrom django.core import signing\nfrom django.core.files.storage import FileSystemStorage\nfrom django.core.mail import send_mail\nfrom django.core.validators import validate_email\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render\nfrom django.template import loader\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import App_Course\nfrom instructor.models import Instructor, Course, Student\nfrom tutor_admin.models import Term\nfrom ta_tutor.models import Session\nfrom survey.models import Survey\nfrom student.models import Student as StudentAccount\n\nfrom pusher import Pusher, pusher\nimport codecs, json, sys, pyexcel as pe\nfrom collections import defaultdict\nfrom xlrd import XLRDError\n\n# LOAD HOME PAGE\ndef 
index(request):\n return render(request, 'home/home.html')\n\n# CONTACT US PAGE\ndef contact(request):\n context = {\n 'contact': ['Email: UtsaTutorLab@gmail.com'],\n 'title': \"Contact Us\",\n }\n return render(request, 'home/contact.html', context)\n\n# LOGIN USER, REDIRECT TO THEIR PROFILE\ndef submit_login(request):\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n if user.groups.filter(name='Student').exists():\n return HttpResponse(\n json.dumps(\"/student\"),\n content_type=\"application/json\"\n )\n if user.groups.filter(name='Tutor').exists():\n return HttpResponse(\n json.dumps(\"/ta_tutor\"),\n content_type=\"application/json\"\n )\n if user.groups.filter(name='Tutor_Admin').exists():\n return HttpResponse(\n json.dumps(\"/tutor_admin\"),\n content_type=\"application/json\"\n )\n if user.groups.filter(name='Instructor').exists():\n return HttpResponse(\n json.dumps(\"/instructor\"),\n content_type=\"application/json\"\n )\n if username == 'admin' or username == 'bifrost_larry':\n return HttpResponse(\n json.dumps(\"/admin\"),\n content_type=\"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps(\"false-1\"),\n content_type=\"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps(\"false-2\"),\n content_type=\"application/json\"\n )\n\n# LOGOUT USER\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect('/')\n\n# REDIRECT TO USER PROFILE\ndef profile(request):\n user = request.user\n if user is not None:\n if user.is_active:\n if user.groups.filter(name='Student').exists():\n return HttpResponseRedirect('/../student/')\n if user.groups.filter(name='Tutor').exists():\n return HttpResponseRedirect('/../ta_tutor/')\n if user.groups.filter(name='Instructor').exists():\n return HttpResponseRedirect('/../instructor/')\n if user.groups.filter(name='Tutor_Admin').exists():\n return HttpResponseRedirect('/../instructor/')\n if user.username == 'admin' or user.username == 'bifrost_larry':\n return HttpResponseRedirect('/../admin/')\n else:\n return HttpResponseRedirect('/')\n else:\n return HttpResponseRedirect('/')\n\n@csrf_exempt\ndef pusher_authentication(request):\n\tpusher_client = pusher.Pusher(app_id=settings.PUSHER_APP_ID,key=settings.PUSHER_KEY,secret=settings.PUSHER_SECRET)\n\tpusher_client.trigger(u'ch1',u'enqueue',{})\t\n\n\treturn HttpResponse(\"Ooh secret\")\n\n# SHOWS ALL TUTORS SCHEDULES\ndef ta_schedule(request):\n context = {\n 'title': \"Tutor Schedule\",\n }\n return render(request, 'home/schedule.html', context)\n\n\n\n\n@login_required(login_url='/admin/')\ndef admin_import(request):\n if request.user.is_active:\n if not request.user.is_superuser:\n return HttpResponseRedirect('/profile')\n if request.method == \"GET\":\n return render(request, \"home/admin_import.html\")\n if request.method == \"POST\" and request.FILES['file']:\n xlsFile = request.FILES['file']\n i_first_name = i_last_name = i_user_name = i_email = class_name = class_num = s_first_name = s_last_name = s_user_name = ' '\n num_i = num_c = num_s = 0\n try:\n fs = FileSystemStorage()\n filename = fs.save(xlsFile.name, xlsFile)\n print(\"File name =\", xlsFile)\n sheet = pe.get_sheet(file_name=fs.path(xlsFile.name), name_columns_by_row=0)\n records = sheet.to_records()\n for record in records:\n keys = sorted(record.keys())\n for key in keys:\n if key == \"Instructor First Name\":\n 
print(str(record[key]))\n i_first_name = record[key]\n elif key == \"Instructor Last Name\": \n print(str(record[key]))\n i_last_name = record[key]\n elif key == \"Instructor Username\": \n print(str(record[key]))\n i_user_name = record[key]\n elif key == \"Instructor Email\":\n print(str(record[key]))\n i_email = record[key]\n elif key == \"Class Name\":\n print(str(record[key]))\n class_name = record[key]\n elif key == \"Class Number\":\n print(str(record[key]))\n class_num = record[key]\n elif key == \"Student abc123\":\n print(str(record[key]))\n s_user_name = record[key]\n elif key == \"Student First Name\":\n print(str(record[key]))\n s_first_name = record[key]\n elif key == \"Student Last Name\":\n print(str(record[key]))\n s_last_name = record[key]\n\n # Get or create user\n user, user_created = User.objects.get_or_create(username=i_user_name, first_name=i_first_name, last_name=i_last_name, email=i_email)\n group = Group.objects.get(name='Instructor')\n group.user_set.add(user)\n # Get or create current instructor\n cur_instructor,created = Instructor.objects.get_or_create(user=user, first_name=i_first_name, last_name=i_last_name, email=i_email)\n cur_instructor.save()\n\n if(user_created):\n # send email to setup password\n send_activation(request, user.username, user.email)\n num_i+=1\n\n # Get or create current course and associate with instructor\n cur_course, course_created = Course.objects.get_or_create(course_num=class_num, course_name=class_name)\n cur_course.save()\n cur_course.Instructor = cur_instructor\n cur_course.save()\n if(course_created):\n num_c+=1 \n \n # Get or create current student and associate with course\n cur_student, student_created = Student.objects.get_or_create(first_name=s_first_name, last_name=s_last_name, studentID=s_user_name)\n cur_student.save()\n cur_student.courses.add(cur_course)\n cur_student.save()\n if(student_created):\n num_s+=1\n \n fs.delete(xlsFile.name)\n data = {\n \"bool\":\"true\",\n\t\t \"i_created\":num_i,\n \"c_created\":num_c,\n \"s_created\":num_s\n }\n return HttpResponse(\n json.dumps(data),\n content_type=\"application/json\"\n )\n\n\n except XLRDError:\n print(\"xlrd error\")\n lastCol = firstCol = userCol = 0\n i_last_name = i_first_name = i_email = class_name = class_num = s_first_name = s_last_name = s_user_name = \"\"\n fs = FileSystemStorage()\n filename = fs.save(xlsFile.name, xlsFile)\n with codecs.open(fs.path(xlsFile.name), encoding='UTF-16') as f:\n for rowx, row in enumerate(f):\n if row.endswith(u'\\r\\n'): row = row[:-2]\n data = row.split(u'\\t ,')\n for colx, datum in enumerate(data):\n info = datum.strip(\"'\\\"\")\n if(rowx == 0):\n if( info == 'Instructor First Name'):\n print(info)\n iFirstCol = colx\n elif( info == 'Instructor Last Name'):\n print(info + str(colx))\n iLastCol = colx\n elif( info == 'Instructor Email'):\n print(info + str(colx))\n iEmailCol = colx\n elif( info == 'Class Name'):\n print(info + str(colx))\n cNameCol = colx\n elif( info == 'Class Number'):\n print(info + str(colx))\n cNumCol = colx\n elif( info == 'Student First Name'):\n print(info + str(colx))\n sFirstCol = colx\n elif( info == 'Student Last Name'):\n print(info + str(colx))\n sLastCol = colx\n elif( info == 'Student abc123'):\n print(info + str(colx))\n sUserCol = colx\n else:\n if(colx == iLastCol):\n # print(\"Instructor last name = col[\" + str(colx) +\"]\", info)\n i_last_name = info\n elif(colx == iFirstCol):\n # print(\"Instructor first name = col[\" + str(colx) +\"]\", info)\n i_first_name = info\n elif(colx == 
iEmailCol):\n # print(\"Instructor Email = col[\" + str(colx) +\"]\", info)\n i_email = info\n elif(colx == cNameCol):\n # print(\"Class name = col[\" + str(colx) +\"]\", info)\n class_name = info\n elif(colx == cNumCol):\n # print(\"Class num = col[\" + str(colx) +\"]\", info)\n class_num = info\n elif(colx == sUserCol):\n # print(\"username = col[\" + str(colx) +\"]\", info)\n s_user_name = info\n elif(colx == sFirstCol):\n # print(\"Student first name = col[\" + str(colx) +\"]\", info)\n s_first_name = info\n elif(colx == sLastCol):\n # print(\"Student last name = col[\" + str(colx) +\"]\", info)\n s_last_name = info\n\n if(rowx > 0):\n # Get or create user\n user, user_created = User.objects.get_or_create(username=i_user_name, first_name=i_first_name, last_name=i_last_name, email=i_email)\n group = Group.objects.get(name='Instructor')\n group.user_set.add(user)\n # Get or create current instructor\n cur_instructor,created = Instructor.objects.get_or_create(first_name=i_first_name, last_name=i_last_name, email=i_email)\n cur_instructor.save()\n # Get or create current course and associate with instructor\n cur_course, created = Course.objects.get_or_create(course_num=class_num, course_name=class_name)\n cur_course.save()\n cur_course.Instructor = cur_instructor\n cur_course.save()\n # Get or create current student and associate with course\n cur_student,created = Student.objects.get_or_create(first_name=s_first_name, last_name=s_last_name, studentID=s_user_name)\n cur_student.save()\n cur_student.courses.add(cur_course)\n cur_student.save()\n \n except Exception as e:\n print(\"Error in upload:\", e)\n\n if(fs.exists(filename)):\n # print(\"deleting file 2: \", xlsFile.name)\n fs.delete(xlsFile.name)\n if(fs.exists(filename)):\n # print(\"deleting file 1: \", filename)\n fs.delete(filename)\n \n data = {\n 'bool': 'false'\n }\n\n return HttpResponse(\n json.dumps(data),\n content_type = \"application/json\"\n )\n \ndef admin_purge(request):\n if request.method == \"GET\":\n terms = Term.objects.all()\n context = {\n 'terms':terms\n }\n return render(request, \"home/admin_purge.html\", context)\n \n if request.method == \"POST\":\n \n ######### DELETE TERMS, SESSIONS, SURVEYS #########\n\n termList = request.POST.getlist('selectedTerms[]')\n terms = []\n data = {}\n\n if \"None\" in termList:\n if len(termList) > 1:\n data['term-issue'] = \"None selected in term selection list\"\n data['bool-term'] = \"false\"\n else:\n data['term-issue'] = \"No surveys or student-tutor sessions deleted\"\n data['bool-term'] = \"true\"\n else: \n for term in termList:\n terms.append(Term.objects.get(name=term))\n surveys = Survey.objects.all()\n sessions = Session.objects.all()\n surveysToDelete = []\n sessionsToDelete = []\n for term in terms:\n for survey in surveys:\n if term.inTerm(survey.date_completed.date()):\n surveysToDelete.append(survey)\n for session in sessions:\n if term.inTerm(session.sessionID.date()):\n sessionsToDelete.append(session)\n \n # Delete surveys and minus count from tutor\n for survey in surveysToDelete:\n survey.tutor.survey_count -= 1\n survey.tutor.save()\n survey.delete()\n # Delete sessions\n for session in sessionsToDelete:\n session.delete()\n # Delete terms\n for term in terms:\n term.delete()\n \n ######## DELETE COURSES AND STUDENTS AND STUDENT ACCOUNTS ###########\n Course.objects.all().delete()\n Student.objects.all().delete()\n for student in StudentAccount.objects.all():\n if student.student.last_login.date() < datetime.today().date().replace(year = 
datetime.today().year - 1):\n student.user.delete()\n\n data['bool-term'] = 'true'\n\n return HttpResponse(\n json.dumps(data),\n content_type = \"application/json\"\n )\n\n@login_required(login_url='/admin/')\ndef admin_manage(request):\n if request.user.is_active:\n if not request.user.is_superuser:\n return HttpResponseRedirect('/profile')\n if request.method == \"GET\":\n instructors = Instructor.objects.all()\n tutor_admins = []\n for instructor in instructors:\n if instructor.user:\n if instructor.user.groups.filter(name=\"Tutor_Admin\"):\n tutor_admins.append(instructor)\n context = {\n \"instructors\": instructors,\n \"tutor_admins\": tutor_admins\n }\n return render(request, \"home/admin_manage.html\", context)\n if request.method == \"POST\":\n action = request.POST.get(\"action\")\n instructors = request.POST.getlist(\"selectedInstructors[]\")\n \n if \"None\" not in instructors:\n if action == \"delete\":\n try:\n for instructor in instructors:\n cur_instructor = Instructor.objects.get(email=instructor)\n cur_instructor.user.delete()\n data = {\n \"bool\":\"true\",\n \"msg\":\"Instructor(s) deleted\"\n }\n except ObjectDoesNotExist:\n data = {\n \"bool\":\"false\",\n \"msg\":\"Could not delete instructor (Does Not Exist)\"\n }\n elif action == \"addAdmin\":\n group = Group.objects.get(name='Tutor_Admin')\n for instructor in instructors:\n cur_instructor = Instructor.objects.get(email=instructor)\n group.user_set.add(cur_instructor.user)\n data = {\n \"bool\":\"true\",\n \"msg\":\"Instructor(s) given Tutor-Admin status\"\n }\n elif action == \"remAdmin\":\n group = Group.objects.get(name='Tutor_Admin')\n for instructor in instructors:\n cur_instructor = Instructor.objects.get(email=instructor)\n group.user_set.remove(cur_instructor.user)\n data = {\n \"bool\":\"true\",\n \"msg\":\"Instructor(s) revoked of Tutor-Admin status\"\n }\n else:\n data = {\n \"bool\":\"false\",\n \"msg\": \"None selected in instructor selection\"\n }\n\n return HttpResponse(\n json.dumps(data),\n content_type = \"application/json\"\n )\n \ndef send_activation(request, username, email):\n try:\n # GET EMAIL TEMPLETS\n email_body = 'home/email_temps/activation_body.txt'\n email_subject = 'home/email_temps/activation_subject.txt'\n user = User.objects.get(username=username)\n instructor = Instructor.objects.get(user = user)\n token = signing.dumps(username, salt=settings.SECRET_KEY)\n instructor.token = token\n instructor.save()\n \n # CONTEXT FOR EMAIL\n context = {\n 'site': get_current_site(request),\n 'username': user.get_full_name(),\n 'token': token,\n 'secure': request.is_secure(),\n }\n body = loader.render_to_string(email_body, context).strip()\n subject = loader.render_to_string(email_subject, context).strip()\n send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [email])\n return True\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n return False\n","repo_name":"UtsaTutorLab/TutorLabProject","sub_path":"tutorlab/tutorlab/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71638317690","text":"#!C:\\Program Files\\Python310\\python.exe\nprint(\"content-type: text/html\\n\\n\")\n\nimport sys\n\nsys.path.append(\"C:\\\\Users\\\\tyree\\\\AppData\\\\Roaming\\\\Python\\\\Python310\\\\site-packages\")\nimport speech_recognition as sr\n\n\ndef main():\n # obtain audio from the microphone\n r = sr.Recognizer()\n with sr.Microphone() as source:\n 
print(\"Say something!\")\n audio = r.listen(source)\n # recognize speech using Google Speech Recognition\n try:\n # the default google API (no keys needed)\n speech = r.recognize_google(audio)\n # print(speech)\n return speech\n\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n except sr.RequestError as e:\n print(\n \"Could not request results from Google Speech Recognition service; {0}\".format(\n e\n )\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Alx-nder/virtualTourWebsite","sub_path":"chatbot_module/speech_module.py","file_name":"speech_module.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27321065049","text":"class Solution:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n if len(nums) == 1:\n return [nums]\n \n elif len(nums) == 2:\n if nums[0] != nums[1]: \n return [nums, list(reversed(nums))] \n return [nums]\n \n all_perms = []\n for index, num in enumerate(nums):\n nums_without_current_num = nums[:index]\n if index + 1 <= len(nums):\n nums_without_current_num.extend(nums[index + 1:])\n \n permutuation_without_current_num = self.permuteUnique(nums_without_current_num) \n \n for perm in permutuation_without_current_num:\n all_perms.append([num] + perm) \n \n unique_perms_set = set()\n for perm in all_perms:\n unique_perms_set.add(tuple(perm)) \n \n unique_perms = []\n for perm in unique_perms_set:\n unique_perms.append(list(perm))\n \n return unique_perms","repo_name":"meraf00/Competitive-Programming","sub_path":"0047-permutations-ii/0047-permutations-ii.py","file_name":"0047-permutations-ii.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29308076592","text":"print(\"数字,日期和时间5\")\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\nfrom dateutil.rrule import *\n#创建一周的列表\nweekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday']\nweekends = ['Saturday', 'Sunday']\n\n#初始化\ndef get_previous_byday(dayname, start_date=None):\n if start_date is None:\n start_date = datetime.today()\n day_num = start_date.weekday()\n day_num_target = weekdays.index(dayname)\n days_ago = (7 + day_num - day_num_target) % 7\n if days_ago == 0:\n days_ago = 7\n target_date = start_date - timedelta(days=days_ago)\n return target_date\n\n\ndef last_friday():\n print(datetime.today())\n print(get_previous_byday('Monday'))\n print(get_previous_byday('Tuesday'))\n print(get_previous_byday('Friday'))\n print(get_previous_byday('Saturday'))\n # 显式的传递开始日期\n print(get_previous_byday('Sunday', datetime(2012, 12, 21)))\n\n # 使用dateutil模块\n d = datetime.now()\n # 下一个周五\n print(d + relativedelta(weekday=FR))\n # 上一个周五\n print(d + relativedelta(weekday=FR(-1)))\n # 下一个周六, 为什么如果今天是周六,下一个/上一个都返回今天的日期??\n print(d + relativedelta(weekday=SA))\n # 上一个周六\n print(d + relativedelta(weekday=SA(-1)))\n\n\nif __name__ == '__main__':\n last_friday()\n\nfrom datetime import datetime, date, timedelta\nimport calendar\n\ndef get_month_range(start_date=None):\n if start_date is None:\n start_date = date.today().replace(day=1)\n _, days_in_month = calendar.monthrange(start_date.year, start_date.month)\n end_date = start_date + timedelta(days=days_in_month)\n return (start_date, end_date)\n def date_range(start, stop, step):\n while start < stop:\n yield start\n start += step\n def 
month_range():\n a_day = timedelta(days=1)\n first_day, last_day = get_month_range()\n while first_day < last_day:\n print(first_day)\n first_day += a_day\n # 使用生成器\n for d in date_range(datetime(2012, 9, 1), datetime(2012, 10, 1),\n timedelta(hours=6)):\n print(d)\n if __name__ == '__main__':\n month_range()\n\nfrom datetime import datetime, timedelta\nfrom pytz import timezone\nimport pytz\n\n\ndef tz_local():\n d = datetime(2012, 12, 21, 9, 30, 0)\n print(d)\n\n # Localize the date for Chicago\n central = timezone('US/Central')\n loc_d = central.localize(d)\n print(loc_d)\n\n # Convert to Bangalore time\n bang_d = loc_d.astimezone(timezone('Asia/Kolkata'))\n print(bang_d)\n\n\n # 夏令时\n d = datetime(2013, 3, 10, 1, 45)\n loc_d = central.localize(d)\n print(loc_d)\n later = loc_d + timedelta(minutes=30)\n print(later)\n # 使用normalize修正这个问题\n later = central.normalize(loc_d + timedelta(minutes=30))\n print(later)\n\n # 一个普遍策略是先转换为UTC时间,使用UTC时间来进行计算\n print(loc_d)\n utc_d = loc_d.astimezone(pytz.utc)\n print(utc_d)\n\n later_utc = utc_d + timedelta(minutes=30)\n # 转回到本地时间\n print(later_utc.astimezone(central))\n\n # 根据ISO 3166国家代码查找时区名称\n print(pytz.country_timezones['IN'])\n\nif __name__ == '__main__':\n tz_local()","repo_name":"TheRealMilesLee/Computer-Science-Learning","sub_path":"Python相关/Python_CookBook/数字,日期和时间/数字,日期和时间 5.py","file_name":"数字,日期和时间 5.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"10343926285","text":"from django.shortcuts import render\nfrom django.template import Context, Template\nfrom django.template import loader\nfrom django.http import HttpResponse\n\n\ndef index(request):\n t = loader.get_template('start_page.html')\n context = {\n 'variable':'var',\n 'gbimg':'gbcolor.jpg'\n }\n return HttpResponse(t.render(context, request))\n\ndef map(request):\n t= loader.get_template('map.html')\n context = {\n 'gbimg':'map.png'\n }\n return HttpResponse(t.render(context, request))\n\n# Create your views here.\n","repo_name":"nidzik/PythonDjango","sub_path":"rush00/rush00/rush00/moviemon/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74746638327","text":"elemento = int(input('Insira o valor do elemento a ser buscado: '))\n\nindice = 0\n\nlista = [5,8,3,1,0,2]\n\nfor i in range(len(lista)):\n if elemento == lista[i]:\n print(i)\n\n \n","repo_name":"bihellzin/monitoria-p1","sub_path":"aulas-monitoria/07-10/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"3723093846","text":"import sys\n\nsys.stdin = open('specialsort.txt')\n\nfor testcase in range(int(input())):\n n = int(input())\n nums = list(map(int, input().split()))\n\n print(f'#{testcase + 1}', end=' ')\n for _ in range(5):\n maxnum = nums[0]\n minnum = nums[0]\n \n for num in nums:\n if num > maxnum:\n maxnum = num\n if num < minnum:\n minnum = num\n print(f'{maxnum} {minnum}', end=' ')\n \n trash = nums.pop(nums.index(maxnum))\n trash = nums.pop(nums.index(minnum))\n print()\n","repo_name":"hani2057/algorithm","sub_path":"swea/8월/0811/specialsort.py","file_name":"specialsort.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"22381092606","text":"import pandas as pd\nimport numpy as np\n\n\n## data source : https://www.eia.gov/tools/faqs/faq.php?id=74&t=11\ndataElectric = pd.read_excel('data/annual_generation_state.xls')\ndataCarbon = pd.read_excel('data/emission_annual.xls')\n\n### preprocessing data:\ndataCarbon= dataCarbon[dataCarbon['Year']==2018]\ndataCarbon = dataCarbon[dataCarbon['State']!='DC']\ndataCarbon = dataCarbon[dataCarbon['State']!= 'US-TOTAL']\ndataCarbon = dataCarbon[dataCarbon['Energy Source'] == 'All Sources']\ndataCarbon = np.asarray(dataCarbon)\n\n## for electricity data :\ndataElectric.reindex(['a','b','c','d','e','f'])\nb=dataElectric.columns\ncolumns = ['year','state','type','resource','generation']\ndic,i = {},0\nfor j in range(len(columns)):\n dic[b[j]] = columns[j]\nE = dataElectric.rename(columns=dic)\nE = E[E['year']==2018]\nE\nE = E[E['resource'] == 'Total']\nE = E[E['state'] != 'DC']\nE = E[E['state'] != 'US-Total']\nE = E[E['type'] == 'Total Electric Power Industry']\nE = np.asarray(E)\ndataElectric = E\n#################\n\n\ndef emissionDict(dataCarbon) :\n \"\"\"\n @dataCarbon : Carbon emission data\n @return : a dictionary with key = state name, value = CO2 emission\n \"\"\"\n assert isinstance(dataCarbon, pd.DataFrame)\n prev = 'AK'\n index,sumOfEmissions = 0,0\n emissionDict = {}\n for i in range(len(C)) :\n item = C[i]\n if item[1]!=prev :\n emissionDict[prev] = sumOfEmissions\n sumOfEmissions = item[4]\n prev = item[1]\n else :\n sumOfEmissions += item[4]\n emissionDict['Wyoming'] = sumOfEmissions\n return emissionDict\n\n\ndef ele_generation(dataElectric) :\n \"\"\"\n @dataElectric : electricity generation in each state\n @return : a dictionary with key = state name, value = electricity generation\n \"\"\"\n assert isinstance(dataElectric,pd.DataFrame)\n generationDict = {}\n for i in range(len(dataElectric)) :\n item = dataElectric[i]\n generationDict[item[1]] = item[4]\n return generationDict\n\n\ndef co2_per_mwh(generationDict,emissionDict) :\n \"\"\"\n @dataElectric : annual electricity generation in each state\n @dataCarbon : annual CO2 emission generation in each state\n @return : a dictionary with key = states name, value : CO2 emission per mwh electricity\n \"\"\"\n assert isinstance(generationDict,dict)\n assert isinstance(emissionDict,dict)\n perMPH = {}\n for name in generationDict :\n perMPH[name] = emissionDict[name]*1.0 / generationDict[name]\n return perMPH\n\n\ndef generate_csv(perMPH) :\n \"\"\"\n This function writes a csv file with state name as index and\n the value of CO2 generation per mwh electricity as column\n @perMPH : a dictionary\n \"\"\"\n my_dict = perMPH\n with open('co2_mwh.csv', 'w') as f:\n f.write('states,co2/mwh\\n')\n for key in my_dict.keys():\n f.write(\"%s,%s\\n\"%(key,my_dict[key]))\n","repo_name":"anurag1paul/electric_vehicles_analysis","sub_path":"data_analysis/environment_data_process.py","file_name":"environment_data_process.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74424183608","text":"import json \nfrom celery import shared_task \nfrom guided_redaction.jobs.models import Job\nfrom guided_redaction.job_run_summaries.api import (\n JobRunSummariesViewSet,\n JobRunSummariesGenerateViewSet,\n)\n\n@shared_task\ndef create_manual_jrs(job_uuid):\n job = Job.objects.get(pk=job_uuid)\n if job:\n job.status = 'running'\n job.save()\n worker = JobRunSummariesViewSet()\n response = 
worker.process_create_request(json.loads(job.request_data))\n job.response_data = json.dumps(response.data)\n job.status = 'success'\n job.save()\n\n@shared_task\ndef create_automatic_jrs(job_uuid):\n job = Job.objects.get(pk=job_uuid)\n if job:\n job.status = 'running'\n job.save()\n worker = JobRunSummariesGenerateViewSet()\n response = worker.process_create_request(json.loads(job.request_data))\n job.response_data = json.dumps(response.data)\n job.status = 'success'\n job.save()\n","repo_name":"dcaulton/guided_redaction","sub_path":"api/guided_redaction/job_run_summaries/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40899057875","text":"from tensorflow.keras.layers import Activation, Dense, Input, Concatenate, Flatten, InputLayer, Embedding\nfrom tensorflow.keras.models import Model, Sequential\nimport tensorflow as tf\nimport os\n\n\n\ndef build_multi_input_model(shape_vec, shape_mat):\n \"\"\"Build (and compile) multi input network.\n Args: \n shape_vec: Shape of the input vector\n shape_mat: Shape of the input matrix\n shape_out: Shape of the output vector\n Returns:\n model: Keras model\n \"\"\"\n\n # first branch for the\n inp1 = Input(shape=(1,), name='Country_ID')\n model1 = Embedding(23, 2, name='Country_Embedding')(inp1)\n model1 = Flatten()(model1)\n\n # second branch for the vector input\n inp2 = Input(shape=shape_vec, name=\"Date_and_Regimes\")\n\n # third branch for the matrix input\n inp3 = Input(shape=shape_mat, name=\"Ensemble\")\n model3 = Flatten()(inp3)\n \n # concatenate the two inputs\n x = Concatenate(axis=1)([model1, inp2, model3])\n\n # add the hiddden layers\n x = Dense( 100 , activation='linear' , name=\"Combined_Hidden_Layer_1\" )( x )\n x = Dense( 100 , activation='relu' , name=\"Combined_Hidden_Layer_2\" )( x )\n x = Dense( 100 , activation='relu' , name=\"Combined_Hidden_Layer_3\" )( x )\n\n x = Dense( 2 , activation='linear' , name=\"Output_Layer\" )(x)\n\n # returns the Model\n return Model([inp1, inp2, inp3], outputs=x)\n\n\ndef printModel(model, dir='', name='my_model.png'):\n tf.keras.utils.plot_model(model, to_file=os.path.join(dir , name), show_shapes=True,\n show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96)\n\ndef reset_weights(model):\n for layer in model.layers: \n if hasattr(layer,'init'):\n input_dim = layer.input_shape[1]\n new_weights = layer.init((input_dim, layer.output_dim),name='{}_W'.format(layer.name))\n layer.trainable_weights[0].set_value(new_weights.get_value())","repo_name":"muellerelias/nnpostprocessing","sub_path":"model/build_model.py","file_name":"build_model.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71770193208","text":"def BFS(s):\r\n queue = []\r\n queue.append(s)\r\n visited[s] = True\r\n dist[s] = 0\r\n while queue:\r\n s = queue.pop(0)\r\n for i in graph[s]:\r\n if visited[i] == False:\r\n visited[i] = True\r\n queue.append(i)\r\n dist[i] = dist[s]+1\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n q = int(input())\r\n for i in range(q):\r\n n , m = map(int,input().split())\r\n graph = [[] for x in range(n)]\r\n dist = [-1 for x in range(n)]\r\n visited = [False for x in range(n)]\r\n for _ in range(m):\r\n u,v = map(lambda x: int(x)-1,input().split())\r\n graph[u].append(v)\r\n graph[v].append(u)\r\n s = int(input()) - 1\r\n BFS(s)\r\n # print(dist)\r\n for i in 
range(n):\r\n if i == s:\r\n continue\r\n if dist[i] != -1:\r\n print(dist[i]*6,end=\" \")\r\n else:\r\n print(-1,end=\" \")\r\n print()\r\n","repo_name":"GenesisBlock3301/Data-Structure-and-Algorithm","sub_path":"Graph Theory/Breadth First Search Shortest Reach (hackerrank).py","file_name":"Breadth First Search Shortest Reach (hackerrank).py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4960846173","text":"import sys\nimport json\nfrom flask import Flask, Response, request\n\nfrom ecarton_code_challenge.lib.convert import convert_chars\n\napp = Flask('code_challenge')\n\n@app.route('/convert', methods=['POST'])\ndef convert():\n\n request_data = json.loads(request.data)\n\n converted = convert_chars(request_data)\n\n resp = Response(\n response=json.dumps(converted),\n mimetype='application/json',\n status=200)\n\n return resp\n\n\ndef create_app():\n return app\n","repo_name":"evert2410/engie","sub_path":"ecarton_code_challenge/lib/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33134410532","text":"#!/usr/bin/python\n\nimport serial\nimport serial.tools.list_ports\nimport time\nfrom modules.utils import timeit\n\n\nclass SerialComWorker():\n \"\"\"\n Class to handle the serial communication between the PC and the EDF signal generator\n\n This class will be in charge of managing the ports and sending the data to the device\n \"\"\"\n def __init__(self, config):\n self.config_params_ = config\n print(\"Serial communication worker initialized\")\n\n def listSerialPorts(self):\n \"\"\"\n Method to create a list of all corresponding EDF signal generator devices.\n\n Callback for the GUI interaction\n \"\"\"\n self.generator_devices_ = self.searchCommPortsWindows_()\n user_device_list = []\n if self.generator_devices_:\n # Create list to be displayed to user\n for device in self.generator_devices_:\n user_device_list.append(str(device.device))\n return user_device_list\n else:\n return []\n\n def selectCommPort(self, user_chosen_device):\n \"\"\"\n Method to save the selected comm port.\n\n Callback for the GUI interaction\n \"\"\"\n # Check that devices are loaded\n if self.generator_devices_:\n # Go through loaded devices and check if name is in user_chosen_device\n for device in self.generator_devices_:\n if device.name in user_chosen_device:\n print(\"Selected port: \" + device.name)\n self.chosen_device_ = device\n\n @timeit\n def beginTransmision(self, bytes_packages: list, channels_amount, sample_rate):\n \"\"\"\n Method to start the transmition to the generator.\n\n Callback for the GUI interaction\n \"\"\"\n config_sample_rate_pkg = self.createConfigPackage_(self.config_params_[\"config_sample_rate\"], sample_rate)\n config_channel_amount_pkg = self.createConfigPackage_(self.config_params_[\"config_channels_amount\"], channels_amount)\n config_reset_all_dacs_pkg = self.createConfigPackage_(self.config_params_[\"config_reset_all_dacs\"], channels_amount)\n data_pkgs = [bytes_packages[i:i+64] for i in range(0,len(bytes_packages),64)]\n\n try:\n # Start serial connection\n serial_connection = serial.Serial(self.chosen_device_.name, baudrate=115200, bytesize=serial.EIGHTBITS, write_timeout=5)\n\n # Write sample rate config\n serial_connection.write(serial.to_bytes(config_sample_rate_pkg))\n time.sleep(0.1)\n\n # Write amount of channels config\n 
serial_connection.write(serial.to_bytes(config_channel_amount_pkg))\n\n for byte_pkg in data_pkgs:\n #for j in range(channels_amount):\n serial_connection.write(b\"\".join(byte_pkg))\n\n\n # When simulation ended, we reset outputs and configs of DACs:\n serial_connection.write(serial.to_bytes(config_reset_all_dacs_pkg))\n \n\n # End serial connection\n serial_connection.close()\n return True\n except serial.SerialTimeoutException:\n print(\"Serial write operation timed out, try resetting the device\")\n return False\n\n\n ###### Private ######\n\n \"\"\"\n List of key-value pairs of EDF signal generators found. Should contain:\n Name: Identifier for the device\n Device: String used to open and close the port (COMx for Windows)\n \"\"\"\n generator_devices_ = []\n chosen_device_ = \"\" # Selected serial communication port\n\n def searchCommPortsWindows_(self):\n \"\"\"\n Method to look for connected EDF signal generator devices in Windows\n\n Returns a list of serial comm devices with key-value pairs containing information about it\n\n It uses the PID 0483 to identify the STMicroelectronics device and 5740 for the Virtual COMM port\n \"\"\"\n generator_devices = []\n ports = serial.tools.list_ports.comports()\n for port in ports:\n if (\"0483\" and \"5740\") in port.hwid:\n device = {}\n device[\"Name\"] = port.name\n device[\"Device\"] = port.device\n generator_devices.append(port)\n return generator_devices\n\n def createConfigPackage_(self, config_num: int, config_data: int):\n \"\"\"\n This method creates a custom configuration package to send config_data to the microcontroller.\n \"\"\"\n enum_pkg = int(config_num).to_bytes(2, byteorder=\"big\", signed=False)\n data_pkg = int(config_data).to_bytes(2, byteorder=\"big\", signed=False)\n return b\"\".join([enum_pkg, data_pkg])\n","repo_name":"Gonzalor95/TProfesional_EEG","sub_path":"PyEDF-APP/modules/SerialComWorker.py","file_name":"SerialComWorker.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4681056653","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('signup-admin/', views.signup_admin, name='signup_admin'),\n path('signin-admin/', views.signin_admin, name='signin_admin'),\n path('home/', views.home, name='home' ),\n path('student-home/', views.student_home, name='student_home' ),\n path('accounts/login/', views.home, name='home' ),\n path('logout/', views.logout, name='logout'),\n\n path('add-student/', views.add_student, name='add_student' ),\n path('view-student/', views.view_students, name='view_students' ),\n path('delete//', views.delete_student, name='delete_student' ),\n path('edit//', views.edit_student, name='edit_student' ),\n\n path('add-teacher/', views.add_teacher, name='add_teacher' ),\n path('view-teacher/', views.view_teachers, name='view_teachers' ),\n path('deletet//', views.delete_teacher, name='delete_teacher' ),\n path('editt//', views.edit_teacher, name='edit_teacher' ),\n\n path('add-department/', views.add_department, name='add_department' ),\n path('view-department/', views.view_departments, name='view_departments' ),\n path('deleted//', views.delete_department, name='delete_department' ),\n\n path('v-student/', views.v_students, name='v_students' ),\n path('v-teacher/', views.v_teachers, name='v_teachers' ),\n path('v-department/', views.v_departments, name='v_departments' ),\n]\n","repo_name":"Kamlesh-KD/University-Management-System","sub_path":"system/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29361989619","text":"# Algoritmo para obtener signo zodiacal.\n\ndia_nacimiento = int(input(\"Ingrese dia de nacimiento : \"))\nmes_nacimiento = int(input(\"Ingrese mes de nacimiento : \"))\n\n# Transforma valores.\nmes_dia = int((\"00\"+str(mes_nacimiento))[-2:] + (\"00\"+str(dia_nacimiento))[-2:])\n\n# Diccionario con zodiaco.\nzodiaco = {\n 1222: \"capricornio\",\n 1122: \"sagitario\",\n 1023: \"escorpio\",\n 923: \"libra\",\n 823: \"virgo\",\n 723: \"leo\",\n 621: \"cancer\",\n 521: \"geminis\",\n 420: \"tauro\",\n 321: \"aries\",\n 219: \"piscis\",\n 120: \"acuario\",\n 0: \"capricornio\"\n}\n\n# Diccionario con zodiaco.\nfor i in zodiaco:\n if mes_dia >= i:\n print(\" \"+zodiaco[i])\n break\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej7/hito1_ej7_aca352e1a4a5b448a93d844e71d52fa5.py","file_name":"hito1_ej7_aca352e1a4a5b448a93d844e71d52fa5.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18502481021","text":"\"\"\"\nGiven a string s, return true if the s can be palindrome after deleting at most one\ncharacter from it.\n\nExample 1:\nInput: s = \"aba\"\nOutput: true\n\nExample 2:\nInput: s = \"abca\"\nOutput: true\nExplanation: You could delete the character 'c'.\n\nExample 3:\nInput: s = \"abc\"\nOutput: false\n\n\nConstraints:\n\n1 <= s.length <= 105\ns consists of lowercase English letters.\n\"\"\"\n\n\n# Time: O(n)\n# Space: O(1)\ndef valid_palindrome(s):\n def verify(s, left, right, deleted):\n while left < right:\n if s[left] != s[right]:\n if deleted:\n return False\n else:\n return verify(s, left + 1, right, True) or verify(s, left, right - 1, True)\n else:\n left += 1\n right -= 1\n return True\n\n return verify(s, 0, len(s) - 1, False)\n\n\n# Another Solution ---------------------------------------------------------------------------\n# Time: O(n)\n# Space: O(1)\ndef valid_palindrome_v2(s):\n low = 0\n high 
= len(s) - 1\n    while low < high:\n        if s[low] != s[high]:\n            return is_palindrome(s, low + 1, high) or is_palindrome(s, low, high - 1)\n        low += 1\n        high -= 1\n    return True\n\n\ndef is_palindrome(string, low, high):\n    while low < high:\n        if string[low] != string[high]:\n            return False\n        low += 1\n        high -= 1\n    return True\n\n\nif __name__ == \"__main__\":\n    print(valid_palindrome_v2(\"abcba\"))\n","repo_name":"candaceleach41/algo_ds_coding_prep","sub_path":"easy/valid_palindrome_ii.py","file_name":"valid_palindrome_ii.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31725076494","text":"# Dictionaries are unordered.\n\nmy_dict = {\n    'key1': 1,\n    'key2': None,\n    'key3': 3.14,\n    'key4': [1,2,3],\n}\n# Iterate over keys\n# for x in my_dict:\n#     print(x)\n\n# Iterate over values\n# for x in my_dict.values():\n#     print(x)\n\n# Unpacking values\n# a, b, c, d = my_dict.values()\n# print(a, b, c, d)\n\n# Unpacking each tuple in the dictionary\n# for t in my_dict.items():\n#     print(t)\n\n# Unpacking key, value pairs\n# for k, v in my_dict.items():\n#     print(k, v)\n\n# ** unpacks k/v pairs into another dictionary. Can only be used on the right hand side. Notice how 'h':5 overrode 'h':4.\nmy_dict_1 = {'p': 1, 'y': 2}\nmy_dict_2 = {'t': 3, 'h': 4}\nmy_dict_3 = {'h': 5, 'o': 6, 'n': 7}\nmerged_dict = {**my_dict_1, **my_dict_2, **my_dict_3}\nprint(merged_dict)","repo_name":"alexdavidkim/Python3-Notes","sub_path":"iterables_sequence_types/dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73288238968","text":"# Here's an example of stuff to copy and paste into an interactive Python\n# interpreter to get a connection loaded.\n# Or you can load it with 'python -i interactive_mode.py'.\n\n# Set some variables.\n\nbmrc = \"~/.bmrc\"\nsite = \"www\"\nbmutilspath = \"./lib\"\n\n# Import everything, make a connection, and try to log in.\n\nimport json\nimport os\nimport sys\nsys.path.append(os.path.expanduser(bmutilspath).rstrip(\"/\"))\nimport bmutils\nbmconnection = bmutils.BMClientParser(os.path.expanduser(bmrc), site)\nif not bmconnection.verify_login():\n    print(\"Could not login\")\n    \n# At this point you can do whatever you want. 
Here's how to load a game,\n# and print its info in nice JSON.\n\ngamenumber = 3038\n\ngame = bmconnection.wrap_load_game_data(gamenumber)\nprint(json.dumps(game, sys.stdout, indent=1, sort_keys=True))\n","repo_name":"buttonmen-dev/buttonmen","sub_path":"tools/api-client/python/interactive_mode.py","file_name":"interactive_mode.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"77"} +{"seq_id":"35429763319","text":"import pyinputplus as pyip\nfrom datetime import date\nimport calendar\nimport openpyxl\nimport glob\nimport csv\n\n\ndef get_current_date():\n \"\"\"Get current date with goal format of: dd MMM YYYY\"\"\"\n date_year = date.today().year\n date_month = date.today().month\n month_abbr = calendar.month_abbr[date_month]\n date_day = date.today().day\n return f'{date_day} {month_abbr} {date_year}'\n\ndef get_user_input(message):\n \"\"\"Get input from the user with an individualized message and return the user's input.\"\"\"\n output = \"\"\n while True:\n output = input(message)\n print(f\"You entered {output}; is this correct?\")\n verify = pyip.inputMenu([\"Yes\", \"No\"], numbered=True)\n if verify == \"Yes\":\n break\n return output\n\ndef choose_excel_file():\n \"\"\"Showing the user all of the Excel files in the current working directory and asking them to select one to\n load if they have an ongoing file they are adding to.\"\"\"\n excel_files_in_directory = glob.glob('*.xlsx')\n print(\"The following Excel workbooks are in this folder:\")\n i = 1\n for file in excel_files_in_directory:\n print(f\"{i}: {file}\")\n i += 1\n load_current_file = pyip.inputMenu(['Yes', 'No'],\n \"\\nDo you want to pick one of these files to load for the output file?\\n\",\n numbered=True)\n if load_current_file == 'Yes':\n output = pyip.inputMenu(excel_files_in_directory, numbered=True)\n return output\n else:\n return 'None'\n\ndef choose_file(message):\n \"\"\"Asking the user to clarify which csv file correlates to VAX ID and which to VAX Reports data.\"\"\"\n files = glob.glob('*.csv')\n print(message)\n output = pyip.inputMenu(files, numbered=True)\n return output\n\n# Variables for counting\ntotal_occurrences = 0\ntotal_deaths = 0\ntotal_er_visits = 0\ntotal_hospitalizations = 0\ntotal_covid_vax_occurrences = 0\ntotal_covid_vax_deaths = 0\ntotal_covid_vax_er_visits = 0\ntotal_covid_vax_hospitalizations = 0\n\n# VAX file structure: Column 0 - VAERS_ID, Column 1 - VAX_TYPE, Column 2 - VAX_MANU,\n# Column 3 - VAX_LOT, Column 4 - VAX_DOSE_SERIES, Column 5 - VAX_ROUTE, Column 6 - VAX_SITE,\n# Column 7 - VAX_NAME\nVAX_file = choose_file('Which file has the vaccine ID information (Ex: VAERSVAX)?')\nDATA_file = choose_file(\"Which file has the vaccine report data (Ex: VAERSDATA)?\")\n\n# Choose and read into a list the VAX data.\nvax_data = []\nwith open(VAX_file, 'r', encoding='windows-1252') as file:\n reader = csv.reader(file, delimiter=',')\n headers = next(reader)\n for row in reader:\n vax_data.append(row)\n\n# Setting up a dictionary to read all the VAX data into.\n# Key is VAX_NAME, value is a list of VAERS_ID\nvax_data_initial = {}\n\nvax_count_variable = 0\nwhile vax_count_variable < len(vax_data):\n vax_name = vax_data[vax_count_variable][7]\n vax_id = vax_data[vax_count_variable][0]\n if vax_name in vax_data_initial:\n vax_data_initial[vax_name].append(vax_id)\n else:\n vax_data_initial[vax_name] = [vax_id]\n vax_count_variable += 1\n\n# Setup a dictionary for each VAERS_ID entry.\n# Determining 
whether the report is due to death.\nvax_reports = {}\n\n# DATA file structure:\n# Column 0 - VAERS_ID\n# Column 1 - RECVDATE\n# Column 2 - STATE\n# Column 3 - AGE_YRS\n# Column 4 - CAGE_YR\n# Column 5 - CAGE_MO\n# Column 6 - SEX\n# Column 7 - RPT_DATE\n# Column 8 - SYMPTOM_TEXT\n# Column 9 - DIED\n# Column 10 - DATEDIED\n# Column 11 - L_THREAT\n# Column 12 - ER_VISIT\n# Column 13 - HOSPITAL\n# Column 14 - HOSPDAYS\n# Column 15 - X_STAY\n# Column 16 - DISABLE\n# Column 17 - RECOVD\n# Column 18 - VAX_DATE\n# Column 19 - ONSET_DATE\n# Column 20 - NUMDAYS\nvax_data_data = []\nwith open(DATA_file, 'r', encoding='windows-1252') as file:\n reader = csv.reader(file, delimiter=',')\n headers = next(reader)\n for row in reader:\n vax_data_data.append(row)\n\ndata_count_variable = 0\nwhile data_count_variable < len(vax_data_data):\n vaers_id = vax_data_data[data_count_variable][0]\n reported_death = 0\n reported_er_visit = 0\n reported_hospitalization = 0\n if vax_data_data[data_count_variable][9] == \"Y\":\n reported_death += 1\n if vax_data_data[data_count_variable][12] == \"Y\":\n reported_er_visit += 1\n if vax_data_data[data_count_variable][13] == \"Y\":\n reported_hospitalization += 1\n\n # Add VAERS_ID to dictionary.\n vax_reports[vaers_id] = [reported_death, reported_er_visit, reported_hospitalization]\n data_count_variable += 1\n\nvax_data_by_type = []\nfor vaccine_type in vax_data_initial:\n vaccine_name = vaccine_type\n total_reported_occurrences = 0\n total_reported_deaths = 0\n total_reported_er_visits = 0\n total_reported_hospitalizations = 0\n for report_id in vax_data_initial[vaccine_type]:\n total_reported_occurrences += 1\n # 0 - reported_death, 1 - reported_er_visit, 2 - reported_hospitalization\n total_reported_deaths += vax_reports[report_id][0]\n total_reported_er_visits += vax_reports[report_id][1]\n total_reported_hospitalizations += vax_reports[report_id][2]\n\n # Add parsed data to list.\n vax_data_by_type.append([vaccine_name, # 0\n total_reported_occurrences, # 1\n total_reported_deaths, # 2\n total_reported_er_visits, # 3\n total_reported_hospitalizations]) # 4\n\n # Update totals.\n total_occurrences += total_reported_occurrences\n total_deaths += total_reported_deaths\n total_er_visits += total_reported_er_visits\n total_hospitalizations += total_reported_hospitalizations\n\n # Update COVID19 vaccine totals.\n if vaccine_type.__contains__('COVID19'):\n total_covid_vax_occurrences += total_reported_occurrences\n total_covid_vax_deaths += total_reported_deaths\n total_covid_vax_er_visits += total_reported_er_visits\n total_covid_vax_hospitalizations += total_reported_hospitalizations\n\nsorted_vax_data_list = sorted(vax_data_by_type, key=lambda vax_deaths: vax_deaths[2], reverse=True)\n\n# A variable for the date of the current data.\ndata_date = get_user_input(\"What's the date for this data (it's in the name of the zip folder)? \")\n\n# Check to see if output Excel already exists.\n# Load sheet if exists, else create new file.\nchosen_file = choose_excel_file()\noutput_wb = \"\"\nif chosen_file == 'None':\n output_wb = openpyxl.Workbook()\n chosen_file = get_user_input(\"What would you like to name the file? 
\")\nelse:\n output_wb = openpyxl.load_workbook(chosen_file)\n\noutput_wb_sheet = output_wb.create_sheet(index=0, title=data_date)\noutput_wb_sheet.merge_cells('A1:D1')\noutput_wb_sheet['A1'] = f\"VAERS Data from: {data_date}; Parsed on: {get_current_date()}\"\noutput_wb_sheet['A2'] = \"Vaccine Type\"\noutput_wb_sheet['B2'] = \"Number of Reports\"\noutput_wb_sheet['C2'] = \"Deaths Reported\"\noutput_wb_sheet['D2'] = \"ER Visits Reported\"\noutput_wb_sheet['E2'] = \"Hospitalizations Reported\"\n\nrow_to_write_to = 3 # Starting at 3 since the date is going in 1 and headers in 2.\nfor vaccine in sorted_vax_data_list:\n # Write values to Excel.\n output_wb_sheet[f'A{row_to_write_to}'] = vaccine[0]\n output_wb_sheet[f'B{row_to_write_to}'] = vaccine[1]\n output_wb_sheet[f'C{row_to_write_to}'] = vaccine[2]\n output_wb_sheet[f'D{row_to_write_to}'] = vaccine[3]\n output_wb_sheet[f'E{row_to_write_to}'] = vaccine[4]\n row_to_write_to += 1\n\n# Writing out the totals and comparing COVID19 to everything else.\noutput_wb_sheet['G2'] = \"Total Deaths\"\noutput_wb_sheet['G3'] = total_deaths\noutput_wb_sheet['G5'] = \"COVID19 Vaccine Deaths\"\noutput_wb_sheet['G6'] = total_covid_vax_deaths\noutput_wb_sheet['G8'] = \"Non-COVID Vaccine Deaths\"\noutput_wb_sheet['G9'] = total_deaths - total_covid_vax_deaths\noutput_wb_sheet['G11'] = \"Total ER Visits\"\noutput_wb_sheet['G12'] = total_er_visits\noutput_wb_sheet['G14'] = \"COVID19 ER Visits\"\noutput_wb_sheet['G15'] = total_covid_vax_er_visits\noutput_wb_sheet['G17'] = \"Non-COVID ER Visits\"\noutput_wb_sheet['G18'] = total_er_visits - total_covid_vax_er_visits\noutput_wb_sheet['G20'] = \"Total Hospitalizations\"\noutput_wb_sheet['G21'] = total_hospitalizations\noutput_wb_sheet['G23'] = \"COVID19 Hospitalizations\"\noutput_wb_sheet['G24'] = total_covid_vax_hospitalizations\noutput_wb_sheet['G26'] = \"Non-COVID Hospitalizations\"\noutput_wb_sheet['G27'] = total_hospitalizations - total_covid_vax_hospitalizations\noutput_wb_sheet['I2'] = \"Total Reports\"\noutput_wb_sheet['I3'] = total_occurrences\noutput_wb_sheet['I5'] = \"COVID19 Reports\"\noutput_wb_sheet['I6'] = total_covid_vax_occurrences\noutput_wb_sheet['I8'] = \"Non-COVID Reports\"\noutput_wb_sheet['I9'] = total_occurrences - total_covid_vax_occurrences\n\n# Clean up the spreadsheet.\nsheets = output_wb.sheetnames\nif 'Sheet' in sheets:\n del output_wb['Sheet']\n\nif chosen_file.endswith('.xlsx'):\n output_wb.save(chosen_file)\n output_wb.close()\nelse:\n output_wb.save(f'{chosen_file}.xlsx')\n output_wb.close()","repo_name":"calebwsaunders/VAERS_verification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22309893642","text":"import os\r\nfrom PIL import Image\r\n\r\n# Set the path and change working directory to the path of the images.\r\npath = \"test\"\r\nos.chdir(path)\r\n\r\n# Set some constants for the desired size of the x axis for the image and the logo filename.\r\nX_FIT_SIZE = 800\r\nLOGO_FILENAME = \"testing.png\"\r\n\r\n# Open the logo and also set some variables for its width and height.\r\nlogoIm = Image.open(LOGO_FILENAME)\r\nlogoWidth, logoHeight = logoIm.size\r\n\r\n# Create 2 new folders in the directory, don't raise an error if the folder already exists.\r\nos.makedirs(\"With Logo\", exist_ok=True)\r\nos.makedirs(\"Without Logo\", exist_ok=True)\r\n\r\n# Loop over all files in the working directory.\r\nfor filename in os.listdir('.'):\r\n 
if not (filename.endswith('.png') or filename.endswith('.jpg')) or filename == LOGO_FILENAME:\r\n continue # Skip non-image files and the logo file itself.\r\n\r\n # If the file passes through the check, open the image and save its width and height\r\n im = Image.open(filename)\r\n width, height = im.size\r\n\r\n # Check if image needs to be resized.\r\n if width > X_FIT_SIZE or width < X_FIT_SIZE:\r\n\r\n # Calculate the new width and height to resize to.\r\n height = int((X_FIT_SIZE / width) * height)\r\n width = X_FIT_SIZE\r\n\r\n # Resize the image.\r\n print(\"Resizing {0}...\".format(filename))\r\n im = im.resize((width, height))\r\n\r\n # Save the changes for the image without the logo.\r\n im.save(os.path.join(\"Without Logo\", filename))\r\n\r\n # Create 4 instances of the image, so we can edit each one and paste the logo on a different\r\n # corner each time without keeping the old one. We need to do this so we don't reference\r\n # the exact im Image because then every change to imBR affects im and vice-versa.\r\n imBR = im.resize((width, height))\r\n imBL = im.resize((width, height))\r\n imTL = im.resize((width, height))\r\n imTR = im.resize((width, height))\r\n\r\n # Add the logo to the image and save the image as the name + corner of logo.\r\n # This is being done for all 4 corners.\r\n # The last line of code in the group of code for each corner, puts the\r\n # location of the logo between the name and the extension (.png or .jpg).\r\n\r\n # Add logo to bottom right corner.\r\n print('Adding logo to the bottom right corner of {0}...'.format(filename))\r\n imBR.paste(logoIm, (width - logoWidth, height - logoHeight), logoIm)\r\n imBR.save(os.path.join('With Logo', \"{0}-BottomRight{1}\".format(filename[:-4], filename[-4:])))\r\n\r\n # Add logo to bottom left corner.\r\n print('Adding logo to the bottom left corner of {0}...'.format(filename))\r\n imBL.paste(logoIm, (0, height - logoHeight), logoIm)\r\n imBL.save(os.path.join('With Logo', \"{0}-BottomLeft{1}\".format(filename[:-4], filename[-4:])))\r\n\r\n # Add logo tp top left corner.\r\n print('Adding logo to the top left corner of {0}...'.format(filename))\r\n imTL.paste(logoIm, (0, 0), logoIm)\r\n imTL.save(os.path.join('With Logo', \"{0}-TopLeft{1}\".format(filename[:-4], filename[-4:])))\r\n\r\n # Add logo to top right corner.\r\n print('Adding logo to the top right corner of {0}...'.format(filename))\r\n imTR.paste(logoIm, (width - logoWidth, 0), logoIm)\r\n imTR.save(os.path.join('With Logo', \"{0}-TopRight{1}\".format(filename[:-4], filename[-4:])))\r\n","repo_name":"AxillV/image-watermark-creator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"2933893624","text":"import os\nimport bs4\nimport pandas as pd\nimport time as t\nimport requests as rq\nimport webbrowser as web\n\nclass Scrape:\n def __init__(self):\n self.count = 1\n self.url = \"\"\n self.result = \"\"\n self.stage2 = \"\"\n self.e = \"\"\n self.lizt = []\n self.wait_anims = [\"Loading. < ÓwÓ <\", \n \"Loading.. ~< -w- <\", \n \"Loading... > ÒwÒ =>\", \n \"loading.. > -w- >~\"]\n \n def WelcomeAndCheck(self):\n print(\"Welcome to web scraper\")\n t.sleep(5)\n while True:\n try:\n self.url = input(\"Your url ? 
: \")\n self.respond = rq.get(self.url)\n self.result = self.respond.status_code\n for i in self.wait_anims:\n os.system(\"clear\")\n print(i)\n t.sleep(1.5)\n if self.result == 200:\n print(\"Success\")\n t.sleep(1)\n self.processing()\n break\n else:\n raise Exception()\n except:\n print(\"cannot connect to server, try again or check your url\")\n \n def processing(self):\n os.system(\"clear\")\n print(\"stage 1 passed\")\n self.stage2 = bs4.BeautifulSoup(self.respond.text, \"html.parser\")\n while True:\n try:\n self.stage3asktag = str(input(\"tag? : \"))\n self.stage3askclassortag = str(input(\"class or tag : \"))\n self.stage3askclassname = str(input(\"name of class/id? : \"))\n \n self.stage3 = self.stage2.find_all(self.stage3asktag, {self.stage3askclassortag : self.stage3askclassname})\n \n if self.stage3:\n self.lizt = []\n for i in self.stage3:\n self.e = i.text\n self.lizt.append(self.e)\n print(f\"Prewiew : {self.lizt}\")\n self.check_save_xl()\n else:\n raise Exception()\n except:\n print(\"cannot scrape\")\n t.sleep(2)\n os.system(\"clear\")\n \n def check_save_xl(self):\n os.system(\"clear\")\n print(\"do you want to save as xl?\")\n while True:\n try:\n os.system(\"clear\")\n self.save_check_xl = str(input(\"Excel [y/n]\")).lower()\n \n if self.save_check_xl == \"y\":\n self.excel()\n break\n elif self.save_check_xl == \"n\":\n self.check_save_txt()\n else:\n raise Exception()\n except:\n print(\"only type y or n\")\n\n def excel(self):\n os.system(\"clear\")\n self.name_content = str(input(\"the name of column? : \"))\n self.dataframe = pd.DataFrame({self.name_content : self.lizt})\n self.filename = str(input(\"file name? : \"))\n self.dataframe.to_excel(f\"{self.filename}.xlsx\", index=False)\n self.check_save_txt()\n \n def check_save_txt(self):\n os.system(\"clear\")\n print(\"do you want to save as xl or txt file?\")\n while True:\n try:\n self.save_check_txt = input(\".txt? [y/n]\").lower()\n if self.save_check_txt == \"y\":\n self.text()\n elif self.save_check_txt == \"n\":\n pass\n else:\n raise Exception()\n except:\n print(\"only type y or n\")\n \n def text(self):\n self.txt_filename = input(\"file name? 
: \")\n with open(f\"{self.txt_filename}.txt\", \"w\") as file:\n for i in self.stage3:\n self.e = i.text\n file.write(self.e + \"\\n\")\n \n \n def start(self):\n self.WelcomeAndCheck()\n\ntest = Scrape()\ntest.start()\n","repo_name":"Sphxre173/URLscraper-v1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31099460502","text":"import fileinput\nimport getopt\nimport sys\n\ndef fake_link():\n opts, args = getopt.getopt(sys.argv[1:], 'o:s:')\n for opt, arg in opts:\n if opt == '-o':\n out = arg\n\n with open(out, 'wb') as ofp, fileinput.input(files=args, mode='rb') as ifp:\n for line in ifp:\n if not line.startswith(b'#link'):\n ofp.write(line)\n\ndef fake_win32_link():\n args = sys.argv[1:]\n while args:\n arg = args[0]\n if arg == '-o':\n out = args[1]\n args = args[2:]\n continue\n if arg[0] not in '/-':\n break\n args = args[1:]\n if arg.lower().startswith('/out:'):\n out = arg[5:]\n with open(args[0], 'rb') as ifp, open(out, 'wb') as ofp:\n for line in ifp:\n if not line.startswith(b'#link'):\n ofp.write(line)\n\nif __name__ == '__main__':\n if sys.platform == 'win32':\n fake_win32_link()\n else:\n fake_link()\n sys.exit(0)\n","repo_name":"SCons/scons","sub_path":"test/fixture/mylink.py","file_name":"mylink.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":1830,"dataset":"github-code","pt":"77"} +{"seq_id":"15963174933","text":"from aldryn_apphooks_config.fields import AppHookConfigField\nfrom aldryn_apphooks_config.models import AppHookConfig\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db import models\nfrom cms.models.fields import PlaceholderField\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nimport datetime\nfrom cms_appconfig import AdventCalendarConfig\nimport random\n\ndef placeholder_name(self):\n return _('Advent calendar') + ' ' + unicode(self.day)\n\n\nclass AdventCalenderDay(models.Model):\n app_config = AppHookConfigField(AdventCalendarConfig, verbose_name=_('calendar'), default=None)\n day = models.DateField(verbose_name=_('date'))\n placeholder = PlaceholderField(placeholder_name)\n order = models.IntegerField(verbose_name=_('display order'), default=0)\n\n def __str__(self):\n return _('Advent calendar') + ' ' + self.day.strftime('%Y-%m-%d')\n\n class Meta:\n verbose_name = _('Advent calendar day')\n verbose_name_plural = _('Advent calendar days')\n\n@receiver(post_save, sender=AdventCalendarConfig)\ndef create_advent_calender_days(sender, instance, created, **kwargs):\n if created:\n calendar_days = 24\n order = range(calendar_days)\n random.shuffle(order)\n for day in range(calendar_days):\n date = instance.start_date + datetime.timedelta(days=day)\n AdventCalenderDay.objects.create(\n app_config=instance,\n day=unicode(date),\n order=order[day]\n )\n","repo_name":"Maskinteknologsektionen/Website","sub_path":"advent_calendar/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70778484409","text":"from pyqtgraph import PlotWidget,GraphicsLayoutWidget\nfrom PyQt5 import QtWidgets, QtWidgets\nimport numpy as np\nimport pyqtgraph as pg\nimport bisect\nfrom Model.streamManager import StreamManager\nfrom scipy.signal import savgol_filter as sgf\nimport scipy.integrate as 
igt\n\nclass AnalyseViewModel:\n def __init__(self,analyseTabView,config):\n self.config = config\n self.tabView = analyseTabView\n self.setUpHandels()\n self.initControls()\n\n def setUpHandels(self):\n\n # get handels\n # combo boxes\n self.chnComboBox = self.tabView.findChild(QtWidgets.QComboBox,\"chnNumComboBox\")\n self.orientationComboBox = self.tabView.findChild(QtWidgets.QComboBox,\"orientationComboBox\")\n # buttons\n self.resetButton = self.tabView.findChild(QtWidgets.QPushButton,\"resetButton\")\n self.selectRoiButton = self.tabView.findChild(QtWidgets.QPushButton,\"selectRoiButton\")\n self.filterButton = self.tabView.findChild(QtWidgets.QPushButton,\"filterButton\")\n self.analyseButton = self.tabView.findChild(QtWidgets.QPushButton,\"analyseButton\")\n self.massCompButton = self.tabView.findChild(QtWidgets.QPushButton,\"massCompButton\")\n # labels\n self.preBurnLabel = self.tabView.findChild(QtWidgets.QLabel,\"preBurnLabel\")\n self.postBurnLabel = self.tabView.findChild(QtWidgets.QLabel,\"postBurnLabel\")\n self.startTimeLabel = self.tabView.findChild(QtWidgets.QLabel,\"startTimeLabel\")\n self.stopTimeLabel = self.tabView.findChild(QtWidgets.QLabel,\"stopTimeLabel\")\n self.idtLabel = self.tabView.findChild(QtWidgets.QLabel,\"idtLabel\")\n self.irtLabel = self.tabView.findChild(QtWidgets.QLabel,\"irtLabel\")\n self.atLabel = self.tabView.findChild(QtWidgets.QLabel,\"atLabel\")\n self.btLabel = self.tabView.findChild(QtWidgets.QLabel,\"btLabel\")\n self.maxThrustLabel = self.tabView.findChild(QtWidgets.QLabel,\"maxThrustLabel\")\n self.spImpulsLabel = self.tabView.findChild(QtWidgets.QLabel,\"spImpulsLabel\")\n self.totImpulsLabel = self.tabView.findChild(QtWidgets.QLabel,\"totImpulsLabel\")\n # line edits\n self.windowLineEdit = self.tabView.findChild(QtWidgets.QLineEdit,\"windowLineEdit\")\n self.orderLineEdit = self.tabView.findChild(QtWidgets.QLineEdit,\"orderLineEdit\")\n self.fuelMassLineEdit = self.tabView.findChild(QtWidgets.QLineEdit,\"fuelMassLineEdit\")\n # check boxes\n self.massCompCheckBox = self.tabView.findChild(QtWidgets.QCheckBox,\"massCompCheckBox\")\n self.calcMassCheckBox = self.tabView.findChild(QtWidgets.QCheckBox,\"calcMassCheckBox\")\n # graph view\n self.graphView = self.tabView.findChild(GraphicsLayoutWidget,\"analyseGraphView\")\n self.roi = None\n self.inf1 = None\n\n def initControls(self):\n self.chnComboBox.addItems([\"Channel {}\".format(num) for num in range(1,9,1)])\n self.orientationComboBox.addItems([\"upwards\",\"downwards\",\"horizontal\"])\n self.initGraph()\n self.selectRoiButton.state = \"selectRoi\"\n # connections\n self.resetButton.clicked.connect(self.resetGraphView)\n self.selectRoiButton.clicked.connect(self.selectRegions)\n self.analyseButton.clicked.connect(self.analyse)\n self.filterButton.clicked.connect(self.applyFilter)\n self.massCompButton.clicked.connect(self.computeMassCompensation)\n\n def resetGraphView(self):\n chnNum = self.chnComboBox.currentIndex()+1\n with StreamManager.numDataLock:\n if len(StreamManager.numData[chnNum])>=50:\n self.x = np.array(StreamManager.numData[0])\n self.y = np.array(StreamManager.numData[chnNum])\n\n scale = float(self.config.chnConfigs[chnNum-1].scale)\n offset = float(self.config.chnConfigs[chnNum-1].offset)\n self.y_ = self.y * scale + offset\n self.curve.setData(y=self.y_,x=self.x)\n if self.roi is None:\n self.roi = pg.LinearRegionItem([min(self.x),max(self.x)])\n self.Plt.addItem(self.roi)\n else:\n self.roi.setRegion([min(self.x),max(self.x)])\n self.roi.show()\n 
self.selectRoiButton.setText(\"Select Region of Interest\")\n self.selectRoiButton.state = \"selectRoi\"\n self.selectRoiButton.show()\n if self.inf1 is not None:\n self.inf1.hide()\n self.inf2.hide()\n\n def initGraph(self):\n win: GraphicsLayoutWidget = self.graphView\n self.Plt = win.addPlot(title=\"\",col=0,row=0)\n self.curve = self.Plt.plot(pen=(1,2*1.3))\n\n def selectRegions(self):\n if self.selectRoiButton.state == \"selectRoi\":\n self.cropDataToRegion()\n self.updateGraph()\n self.selectRoiButton.state = \"selectPreBurnData\"\n self.selectRoiButton.setText(\"Select Pre Burn Values\")\n elif self.selectRoiButton.state == \"selectPreBurnData\":\n self.getPreBurnValues()\n self.selectRoiButton.state = \"selectPostBurnData\"\n self.selectRoiButton.setText(\"Select Post Burn Values\")\n elif self.selectRoiButton.state == \"selectPostBurnData\":\n self.getPostBurnValues()\n self.calculateStartStopTime()\n\n def cropDataToRegion(self):\n x1, x2 = self.roi.getRegion()\n idx1 = max(bisect.bisect_left(self.x,x1),0)\n idx2 = min(bisect.bisect_right(self.x,x2),len(self.x)-1)\n self.x = self.x[idx1:idx2]\n self.y_ = self.y_[idx1:idx2]\n print(self.roi.getRegion())\n\n def getPreBurnValues(self):\n x1, x2 = self.roi.getRegion()\n idx1 = max(bisect.bisect_left(self.x,x1),0)\n idx2 = min(bisect.bisect_right(self.x,x2),len(self.x)-1)\n self.preBurnData = self.y_[idx1:idx2]\n self.preBurnValue = self.preBurnData.mean()\n self.preBurnStd = self.preBurnData.std()\n self.preBurnLabel.setText(\"{:.2f}\".format(self.preBurnValue))\n\n def getPostBurnValues(self):\n x1, x2 = self.roi.getRegion()\n idx1 = max(bisect.bisect_left(self.x,x1),0)\n idx2 = min(bisect.bisect_right(self.x,x2),len(self.x)-1)\n self.postBurnData = self.y_[idx1:idx2]\n self.postBurnValue = self.postBurnData.mean()\n self.postBurnStd = self.postBurnData.std()\n self.postBurnLabel.setText(\"{:.2f}\".format(self.postBurnValue))\n\n def calculateStartStopTime(self):\n # try to find the start value\n for i in range(len(self.x)):\n value = self.y_[i]\n if value > (max(self.preBurnData) + 2*self.preBurnStd):\n self.startTime = self.x[i]\n self.startTimeLabel.setText(\"{:.2f}\".format(self.startTime))\n break\n\n # try to find the stop value\n for i in range(len(self.x)):\n value = self.y_[(i+1)*-1] # inverse the search direction\n if value > (max(self.postBurnData) + 2*self.postBurnStd):\n self.stopTime = self.x[(i+1)*-1]\n self.stopTimeLabel.setText(\"{:.2f}\".format(self.stopTime))\n break\n if self.inf1 is None:\n self.inf1 = pg.InfiniteLine(angle=90, label='start time={:1.2f}'.format(self.startTime),\n labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,50), 'movable': True})\n self.inf2 = pg.InfiniteLine(angle=90, label='stop time={:1.2f}'.format(self.stopTime),\n labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,50), 'movable': True})\n self.Plt.addItem(self.inf1)\n self.Plt.addItem(self.inf2)\n self.inf1.setPos([self.startTime,0])\n self.inf2.setPos([self.stopTime,0])\n else:\n self.inf1.setPos([self.startTime,0])\n self.inf2.setPos([self.stopTime,0])\n self.inf1.show()\n self.inf2.show()\n self.roi.hide()\n #self.curve.setData(fillLevel = min(self.y_))\n\n def updateGraph(self):\n self.curve.setData(x=self.x,y=self.y_)\n\n def analyse(self):\n # 1. get max thrust value\n self.maxThrust = max(self.y_)\n self.maxThrust_Newton = self.maxThrust * 9.81\n print(\"max thrust:{:0.2f}\".format(self.maxThrust))\n # 2. 
get left 10% thrust time\n firstFound = False\n for i in range(len(self.x)):\n value = self.y_[i]\n if value > (self.maxThrust*0.1) and not firstFound:\n self.burnStartTime = self.x[i]\n print(\"burn time:{:0.2f}\".format(self.burnStartTime))\n firstFound =True\n elif value > (self.maxThrust*0.75):\n self.riseTime = self.x[i]\n print(\"rise time:{:0.2f}\".format(self.riseTime))\n break\n\n # 3. get right 10% thrust time\n firstFound = False\n for i in range(len(self.x)):\n value = self.y_[(i+1)*-1]\n if value > (self.maxThrust*0.1) and not firstFound:\n self.burnStopTime = self.x[(i+1)*-1]\n print(\"burn out time:{:0.2f}\".format(self.burnStopTime))\n firstFound =True\n elif value > (self.maxThrust*0.75):\n self.fallTime = self.x[(i+1)*-1]\n print(\"fall time:{:0.2f}\".format(self.fallTime))\n break\n\n # 4. get total impuls in Ns\n idx1 = max(bisect.bisect_left(self.x,self.startTime),0)\n idx2 = min(bisect.bisect_right(self.x,self.stopTime),len(self.x)-1)\n y_corr = (self.y_[idx1:idx2]-self.preBurnValue) * 9.81\n self.totImpuls = np.trapz(y= y_corr,x=self.x[idx1:idx2])\n print(\"Impuls:{:0.2f} Ns\".format(self.totImpuls))\n # 5. get specific impuls\n m_tot = self.preBurnValue - self.postBurnValue\n self.spImpuls = self.totImpuls / (m_tot * 9.81)\n print(\"spezific Impuls:{:0.2f} s\".format(self.spImpuls))\n # 6. update interface\n self.idtLabel.setText(\"{:0.2f} s\".format(self.burnStartTime - self.startTime))\n self.irtLabel.setText(\"{:0.2f} s\".format(self.riseTime - self.burnStartTime))\n self.btLabel.setText(\"{:0.2f} s\".format(self.fallTime-self.burnStartTime))\n self.atLabel.setText(\"{:0.2f} s\".format(self.burnStopTime-self.burnStartTime))\n self.maxThrustLabel.setText(\"{:0.2f} N\".format(self.maxThrust_Newton))\n self.totImpulsLabel.setText(\"{:0.2f} Ns\".format(self.totImpuls))\n self.spImpulsLabel.setText(\"{:0.2f} s\".format(self.spImpuls))\n\n def applyFilter(self):\n try:\n windowSize = int(self.windowLineEdit.text())\n order = int(self.orderLineEdit.text())\n self.y_ = sgf(self.y_,windowSize,order,mode=\"nearest\")\n self.updateGraph()\n except Exception as err:\n print(\"Ein Fehler ist aufgetreten!\")\n print(err)\n\n def computeMassCompensation(self):\n # iterativly compute the mass flow and correct the sensor data\n # algorithm by David Madlener\n m_tot = self.preBurnValue - self.postBurnValue\n t0 = self.startTime\n t1 = self.stopTime\n idx0 = bisect.bisect_left(self.x,t0)\n idx1 = bisect.bisect_right(self.x,t1)\n S = self.y_[idx0:idx1] # get sensor data (kg)\n t = self.x[idx0:idx1] # get time (s)\n P_old = 0\n m = np.ones(len(S)) * self.preBurnValue # initialize m(t) with constant pre burn values\n delta_P = 1\n e = 0.00001\n\n while delta_P > e:\n F = S - m # compute thrust F (kg)\n P_new = np.trapz(y=F,x=t) # integrate thrust (kg s)\n m_dot = -1 * F * (m_tot/P_new)\n m = igt.cumtrapz(m_dot,t,initial=m[0])\n delta_P = abs(P_new-P_old)\n P_old = P_new\n #print(\"delta_P:{}\".format(delta_P))\n\n self.y_[:idx0] = self.y_[:idx0] - self.preBurnValue\n self.y_[idx0:idx1] = S - m - self.preBurnValue\n self.y_[idx1:] = self.y_[idx1:] - self.postBurnValue\n self.updateGraph()\n","repo_name":"deets/unifhy-rocket-engine-test-stand","sub_path":"modul3/ViewModel/analyseViewModel.py","file_name":"analyseViewModel.py","file_ext":"py","file_size_in_byte":12002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"39970117596","text":"import importlib\nfrom contracting.execution import runtime\nfrom contractdb.driver import 
ContractDBDriver\nfrom contracting.execution.module import install_database_loader\nfrom contracting.db.encoder import encode\n\nimport ecdsa\nimport logging\nimport hashlib\n\n## Create new executor that takes a transaction JSON thing and executes it. It also enforces the stamps, etc.\n# if that is set in the environment variables\n\nexpected_tx_keys = {'sender', 'signature', 'payload'}\nexpected_tx_batch_keys = {'sender', 'signature', 'payload', 'index'}\nexpected_payload_keys = {'contract', 'function', 'arguments'}\n\nMALFORMED_TX = 1\nINVALID_SIG = 2\nPY_EXCEPTION = 3\n\n\nclass Engine:\n def __init__(self, stamps_enabled=False, timestamps_enabled=False, driver=ContractDBDriver()):\n install_database_loader()\n\n self.driver = driver\n\n self.log = logging.getLogger('Engine')\n self.stamps_enabled = stamps_enabled\n self.timestamps_enabled = timestamps_enabled\n\n def verify_tx_structure(self, tx: dict, part_of_batch=False):\n expected_keys = expected_tx_keys if not part_of_batch else expected_tx_batch_keys\n if tx.keys() ^ expected_keys != set():\n return False\n\n if tx['payload'].keys() ^ expected_payload_keys != set():\n return False\n\n if self.stamps_enabled and not tx['payload'].get('stamps'):\n return False\n\n if self.timestamps_enabled and not tx['payload'].get('timestamp'):\n return False\n\n return True\n\n @staticmethod\n def verify_tx_signature(tx: dict):\n tx_payload = encode(tx['payload'])\n tx_payload_bytes = tx_payload.encode()\n\n signature = bytes.fromhex(tx['signature'])\n pk = bytes.fromhex(tx['sender'])\n\n vk = ecdsa.VerifyingKey.from_string(pk, curve=ecdsa.NIST256p, hashfunc=hashlib.sha256)\n try:\n vk.verify(signature, tx_payload_bytes)\n except ecdsa.BadSignatureError:\n return False\n return True\n\n # key = nacl.signing.VerifyKey(pk)\n # try:\n # key.verify(tx_payload_bytes, signature)\n # except nacl.exceptions.BadSignatureError:\n # return False\n # return True\n\n def run(self, tx: dict, environment={}, part_of_batch=False):\n tx_output = {\n 'status': 0,\n 'updates': {},\n 'result': None,\n }\n\n # Add additional KV pair if stamps are enabled\n if self.stamps_enabled:\n tx_output['cost'] = 0\n\n # Verify the structure of the tx\n if not self.verify_tx_structure(tx, part_of_batch):\n self.log.error(\"Malformed transaction {}\".format(tx))\n tx_output['status'] = MALFORMED_TX\n return tx_output\n\n # Verify the signature of the tx\n if not self.verify_tx_signature(tx):\n self.log.error(\"Invalid signature for the transaction {}\".format(tx))\n tx_output['status'] = INVALID_SIG\n return tx_output\n\n # Extract the payload to pass as execution arguments\n payload = tx.get('payload')\n\n # Set the runtime driver (we might be able to remove this)\n runtime.rt.env.update({'__Driver': self.driver})\n runtime.rt.env.update(environment)\n\n runtime.rt.context._base_state = {\n 'signer': tx['sender'],\n 'caller': tx['sender'],\n 'this': tx['payload']['contract'],\n 'owner': self.driver.get_owner(tx['payload']['contract'])\n }\n\n try:\n # Access the payload values and load them from the database\n module = importlib.import_module(payload.get('contract'))\n func = getattr(module, payload.get('function'))\n tx_output['result'] = func(**payload.get('arguments'))\n\n except Exception as e:\n tx_output['result'] = str(e)\n tx_output['status'] = PY_EXCEPTION\n\n # Get the current cache of sets for the tx output\n\n _driver = runtime.rt.env.get('__Driver')\n\n tx_output['updates'] = _driver.sets\n\n # Clear them for the next execution\n _driver.clear_sets()\n\n 
runtime.rt.clean_up()\n\n return tx_output\n","repo_name":"Lamden/contractdb","sub_path":"contractdb/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"19384957109","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport django.contrib.gis.db.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('search', '0001_initial'),\n ('feedback', '__first__'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ImageComment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('comment', models.TextField(verbose_name='Comment')),\n ('tag_friend', models.CharField(max_length=1024, null=True, verbose_name='Tag Friends', blank=True)),\n ('like_count', models.IntegerField(default=0, max_length=100, verbose_name='like count')),\n ('is_deleted', models.BooleanField(default=False, verbose_name='Deleted Comment')),\n ('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date Added')),\n ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-id'],\n 'verbose_name': 'ImageComment',\n 'verbose_name_plural': 'ImageComments',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ImageCommentLike',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date Added')),\n ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),\n ('image_comment', models.ForeignKey(related_name=b'like_image_comment', to='uploadimages.ImageComment')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ImageLike',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date Added')),\n ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='UploadImage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('image', models.ImageField(upload_to=b'upload_images', null=True, verbose_name='Image')),\n ('google_images', models.TextField(null=True, verbose_name='Google Images')),\n ('review_images', models.ImageField(upload_to=b'upload_images', null=True, verbose_name='Review Image')),\n ('tag_friend', models.CharField(max_length=1024, null=True, verbose_name='Tag Friends', blank=True)),\n ('special_feature', models.TextField(max_length=1024, null=True, verbose_name='Special Feature', blank=True)),\n ('location', django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, verbose_name='Review Location', geography=True)),\n ('is_verified', models.BooleanField(default=False, verbose_name='Upload Image Verified')),\n ('is_credited', models.BooleanField(default=False, verbose_name='Credit 
on Uploaded Image')),\n ('comment_count', models.IntegerField(default=0, max_length=100, verbose_name='comment count')),\n ('like_count', models.IntegerField(default=0, max_length=100, verbose_name='like count')),\n ('with_whom', models.CharField(max_length=1024, null=True, verbose_name='With Friend', blank=True)),\n ('is_deleted', models.BooleanField(default=False, verbose_name='Deleted Image')),\n ('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date Added')),\n ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),\n ('owner', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),\n ('place', models.ForeignKey(to='search.PlaceDetail', db_column=b'place_id')),\n ('review', models.ForeignKey(to='feedback.ReviewRating', null=True)),\n ],\n options={\n 'ordering': ['-id'],\n 'verbose_name': 'UploadImage',\n 'verbose_name_plural': 'UploadImages',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='imagelike',\n name='upload_image',\n field=models.ForeignKey(related_name=b'like_image', to='uploadimages.UploadImage'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='imagecomment',\n name='upload_image',\n field=models.ForeignKey(related_name=b'image_comment', to='uploadimages.UploadImage'),\n preserve_default=True,\n ),\n ]\n","repo_name":"bharat-gera/Nautlus","sub_path":"uploadimages/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"70559851129","text":"from typing import Any, Dict, List, Tuple, Type\nimport ctypes\nimport numpy as np\n\nfrom vxpy.core import logger\nimport vxpy.core.devices.camera as vxcamera\nimport vxpy.core.ipc as vxipc\nfrom vxpy.core.devices.camera import CameraDevice\nfrom vxpy.definitions import *\nfrom vxpy.ext_lib.tis_windows import tisgrabber as tis\n\nlog = logger.getLogger(__name__)\n\nic = ctypes.cdll.LoadLibrary('tisgrabber_x64.dll')\ntis.declareFunctions(ic)\nic.IC_InitLibrary(0)\n\n\nclass CallbackUserdata(ctypes.Structure):\n def __init__(self):\n ctypes.Structure.__init__(self)\n\n\nclass TISCamera(vxcamera.CameraDevice):\n \"\"\"TheImagingSource camera using the tisgrabber.dll for Windows OS\n \"\"\"\n\n def __repr__(self):\n return f'{TISCamera.__name__} {self.properties[\"model\"]} {self.properties[\"serial\"]}'\n\n manufacturer = 'TIS'\n\n # NOTE: TIS MAY only support 8-bit images for now?\n sink_formats = {'Y800': (1, np.uint8), # (Y8) 8-bit monochrome\n 'RGB24': (3, np.uint8), # 8-bit RGB\n 'RGB32': (4, np.uint8), # 8-bit RGBA\n # 'UYVY': (2, np.uint16),\n 'Y16': (1, np.uint16)} # 16-bit monochrome\n\n def __init__(self, *args, **kwargs):\n vxcamera.CameraDevice.__init__(self, *args, **kwargs)\n\n self._frame: np.ndarray = None\n\n self.metadata = {}\n self.settings = {}\n\n self.last_snap = vxipc.get_time()\n self.new_image = False\n\n def get_settings(self) -> Dict[str, Any]:\n if len(self.settings) == 0:\n settings = {**self.properties, 'exposure': 0.01, 'gain': 1.0}\n return settings\n return self.settings\n\n @property\n def frame_rate(self) -> float:\n return self.properties['frame_rate']\n\n @property\n def width(self) -> float:\n return self.properties['width']\n\n @property\n def height(self) -> float:\n return self.properties['height']\n\n @classmethod\n def get_camera_list(cls) -> List[CameraDevice]:\n camera_list = []\n devicecount = ic.IC_GetDeviceCount()\n for i in range(0, 
devicecount):\n model = tis.D(ic.IC_GetDevice(i))\n uniquename = tis.D(ic.IC_GetUniqueNamefromList(i))\n serial = uniquename.replace(model, '').strip(' ')\n props = {'serial': serial, 'model': model,\n 'width': 640, 'height': 480, 'frame_rate': 60.0}\n cam = TISCamera(**props)\n camera_list.append(cam)\n\n return camera_list\n\n def _open(self) -> bool:\n\n # Open (empty) device\n self.h_grabber = ic.IC_CreateGrabber()\n\n # Set callback\n self.userdata = CallbackUserdata()\n self._frame_ready_callback = ic.FRAMEREADYCALLBACK(self._fetch_and_convert_buffer)\n ic.IC_SetFrameReadyCallback(self.h_grabber, self._frame_ready_callback, self.userdata)\n\n return True\n\n def _fetch_and_convert_buffer(self, h_grabber, p_buffer, frame_number, p_data):\n width = ctypes.c_long()\n height = ctypes.c_long()\n bits_per_pixel = ctypes.c_int()\n color_format = ctypes.c_int()\n\n # Query the image description values\n ic.IC_GetImageDescription(h_grabber, width, height, bits_per_pixel, color_format)\n\n # Calculate the buffer size\n bytes_per_pixel = int(bits_per_pixel.value / 8.0)\n buffer_size = width.value * height.value * bytes_per_pixel\n\n source_format = self.properties['format']\n if buffer_size > 0:\n image = ctypes.cast(p_buffer, ctypes.POINTER(ctypes.c_ubyte * buffer_size))\n _dtype = self.sink_formats[source_format][1]\n _shape = (height.value, width.value, bytes_per_pixel // _dtype().nbytes)\n self._frame = np.ndarray(buffer=image.contents,\n dtype=_dtype,\n shape=_shape)\n\n self.new_image = True\n\n def _get_property_value_range(self, property_name):\n value_min = ctypes.c_float()\n value_max = ctypes.c_float()\n ic.IC_GetPropertyAbsoluteValueRange(self.h_grabber, tis.T(property_name), tis.T('Value'), value_min, value_max)\n\n return value_min.value, value_max.value\n\n def _set_property(self, property_name, value):\n limits = self._get_property_value_range(property_name)\n if not limits[0] <= value <= limits[1]:\n log.warning(f'Cannot set value of property {property_name} to {value} '\n f'on camera device {self}. 
Out of range {limits}')\n return\n\n # Set\n log.debug(f'Set property value of property {property_name} to {value} on device {self}')\n ic.IC_SetPropertyAbsoluteValue(self.h_grabber, tis.T(property_name), tis.T('Value'), ctypes.c_float(value))\n\n # Verify\n new_value = ctypes.c_float()\n ic.IC_GetPropertyAbsoluteValue(self.h_grabber, tis.T(property_name), tis.T('Value'), new_value)\n value_min, value_max = self._get_property_value_range(property_name)\n log.debug(f'New property value for {property_name} is {new_value.value:.5f} '\n f'({value_min:.5f} - {value_max:.5f}) on device {self}')\n\n def _set_property_switch(self, property_name, switch_name, value):\n # Set\n ic.IC_SetPropertySwitch(self.h_grabber, tis.T(property_name), tis.T(switch_name), value)\n log.debug(f'Set property switch {switch_name} of property {property_name} to {value} on device {self}')\n\n # Verify\n new_value = ctypes.c_long()\n ic.IC_GetPropertySwitch(self.h_grabber, tis.T(property_name), tis.T(switch_name), new_value)\n log.debug(f'New property switch value {property_name}:{switch_name} '\n f'is {new_value.value} on device {self}')\n\n def _start_stream(self) -> bool:\n # Open device by model and serial\n model = self.properties['model']\n serial = self.properties['serial']\n ic.IC_OpenDevByUniqueName(self.h_grabber, tis.T(f'{model} {serial}'))\n\n # Setting\n source_format = self.properties['format']\n format_str = f'{source_format} ({self.width}x{self.height})'\n ic.IC_SetVideoFormat(self.h_grabber, tis.T(format_str))\n ic.IC_SetFrameRate(self.h_grabber, ctypes.c_float(self.frame_rate))\n\n # Set to continuous mode\n ic.IC_SetContinuousMode(self.h_grabber, 0)\n\n # Set trigger enable\n ic.IC_SetPropertySwitch(self.h_grabber, tis.T('Trigger'), tis.T('Enable'), 1)\n\n # Set properties\n self._set_property_switch('Gain', 'Auto', 0)\n self._set_property_switch('Exposure', 'Auto', 0)\n self._set_property('Exposure', self.properties['exposure'])\n self._set_property('Gain', self.properties['gain'])\n\n # Start\n ic.IC_StartLive(self.h_grabber, 0)\n\n return True\n\n def next_snap(self) -> bool:\n current_time = vxipc.get_time()\n\n do_next = current_time >= self.last_snap + 1. / self.frame_rate\n\n if do_next:\n self.last_snap = current_time\n\n return do_next\n\n def snap_image(self) -> None:\n ic.IC_PropertyOnePush(self.h_grabber, tis.T('Trigger'), tis.T('Software Trigger'))\n\n def next_image(self) -> bool:\n return self.new_image\n\n def get_image(self) -> np.ndarray:\n self.new_image = False\n return self._frame\n\n def _end_stream(self) -> bool:\n ic.IC_StopLive(self.h_grabber)\n return True\n\n def _close(self) -> bool:\n pass\n\nif __name__ == '__main__':\n pass\n","repo_name":"thladnik/vxPy","sub_path":"vxpy/devices/camera/tis_windows_tisgrabber.py","file_name":"tis_windows_tisgrabber.py","file_ext":"py","file_size_in_byte":7540,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"71597758328","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2 as cv2\nimport csv as csv\nimport tensorflow as tf\nfrom keras.models import Sequential, Model\nfrom keras.layers import Lambda, Cropping2D\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout \nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D\nimport sklearn\nfrom sklearn.model_selection import train_test_split\n\n\n### D A T A G E N E R A T I O N \n### 1.) 
Load the data from the driving log file\n# The csv file is structured like this: \n# center - left - right - steering - throttle - brake - speed\nlogFileLines = []\nwith open (\"./data/driving_log.csv\") as log: \n reader = csv.reader(log)\n next(reader)\n for line in reader: \n logFileLines.append(line) \n### 2.) Split the data into training and validation set\ntrainingData, validationData = train_test_split(logFileLines, test_size=0.2) \n### 3.) Define a generator which provides data batches more (memory) efficiently than just loading and storing the entire data set\ndef dataGenerator(data, batchSize=32): \n numDataSamples = len(data)\n while True:\n # Randomize data\n np.random.shuffle(data)\n # Return (i.e. yield) a batch every time the dataGenerator gets called\n for offset in range(0, numDataSamples, batchSize):\n batchData = data[offset:offset+batchSize]\n # Extract image links for center, left and right images\n # Extract steering values \n centerImgLinks = []\n leftImgLinks = []\n rightImgLinks = []\n steeringCenter = []\n steeringLeft = []\n steeringRight = [] \n for line in batchData: \n centerImgLinks.append(\"./data/\" + line[0])\n leftImgLinks.append(\"./data/\" + (line[1])[1:])\n rightImgLinks.append(\"./data/\" + (line[2])[1:])\n # Use left and right camera images to pretend the AV is swerved to either left or right\n # Adapt the steering by a correction factor of 0.2 in order to get the AV back to the center\n steeringCenterValue = float(line[3])\n steeringLeftValue = steeringCenterValue + 0.2\n steeringRightValue = steeringCenterValue - 0.2\n steeringCenter.append(steeringCenterValue)\n steeringLeft.append(steeringLeftValue)\n steeringRight.append(steeringRightValue)\n # Load actual images\n centerImages = []\n leftImages = []\n rightImages = []\n for centerImgLink, leftImgLink, rightImgLink in zip(centerImgLinks, leftImgLinks, rightImgLinks): \n centerImages.append(plt.imread(centerImgLink))\n leftImages.append(plt.imread(leftImgLink))\n rightImages.append(plt.imread(rightImgLink))\n # Stack images and steering values together respectively\n images = centerImages + leftImages + rightImages\n steerings = steeringCenter + steeringLeft + steeringRight\n # Augment the data by flipping the image and inverse the corresponding steering \n augmentedImages = []\n augmentedSteerings = []\n for img, steerVal in zip(images, steerings): \n flippedImg = np.fliplr(img)\n flippedSteerVal = - steerVal\n augmentedImages.append(img)\n augmentedImages.append(flippedImg)\n augmentedSteerings.append(steerVal)\n augmentedSteerings.append(flippedSteerVal) \n # Return (yield) the training batch \n X_train = np.array(augmentedImages) \n y_train = np.array(augmentedSteerings)\n yield sklearn.utils.shuffle(X_train, y_train) \n\n \n### B U I L D T H E M O D E L A R C H I T E C T U R E \nmodel = Sequential()\n# L a y e r 0 (P R E P R O C E S S I N G) \n# Lambda layer as preprocessing unit (normalization and mean centering)\n# Cropping layer to remove the above part of the images (which might be rather noise for the NN) \nmodel.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((60,20), (0,0))))\n# L a y e r 1\n# Convolution and MaxPool --> Input: 80x320x3 --> Layer 1 --> Output: 40x160x24 \nmodel.add(Conv2D(kernel_size=(5,5), filters=24, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), padding='valid'))\n# L a y e r 2\n# Convolution and MaxPool --> Input: 40x160x24 --> Layer 2 --> Output: 20x80x36 
\nmodel.add(Conv2D(kernel_size=(5,5), filters=36, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), padding='valid'))\n# L a y e r 3\n# Convolution and MaxPool --> Input: 20x80x36 --> Layer 3 --> Output: 10x40x48\nmodel.add(Conv2D(kernel_size=(5,5), filters=48, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), padding='valid'))\n# L a y e r 4\n# Convolution and MaxPool --> Input: 10x40x48 --> Layer 4 --> Output: 5x20x64\nmodel.add(Conv2D(kernel_size=(3,3), filters=64, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), padding='valid'))\n# L a y e r 5\n# Convolution and MaxPool --> Input: 5x20x64 --> Layer 5 --> Output: 2x10x64\nmodel.add(Conv2D(kernel_size=(3,3), filters=64, padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), padding='valid'))\n# L a y e r 6\n# Flatten Layer --> Input: 2x10x64 --> Layer 4 Output: 1280\nmodel.add(Flatten())\n# L a y e r 7\n# Dense (Fully Connected) and Relu --> Input 1280 --> Layer 7 --> Output: 320\nmodel.add(Dense(320))\nmodel.add(Activation('relu'))\n# L a y e r 8\n# Dense (Fully Connected) and Relu --> Input 320 --> Layer 8 --> Output: 160\nmodel.add(Dense(160))\nmodel.add(Activation('relu'))\n# L a y e r 9 \n# Dense (Fully Connected) --> Input 160 --> Layer 9 --> Output: 16\nmodel.add(Dense(16))\nmodel.add(Activation('relu'))\n# L a y e r 10 (O u t p u t)\n# Dense (Fully Connected) --> Input 16 --> Layer 10 --> Output: 1\nmodel.add(Dense(1))\n\n\n### T R A I N T H E M O D E L\n# Define data generator for training and validation batches\nbatchSize = 32\ntrainingDataGenerator = dataGenerator(trainingData, batchSize)\nvalidationDataGenerator = dataGenerator(validationData, batchSize)\n# Use mean squared error function as loss and the adam optimizer (stochastic gradient descent)\nmodel.compile(loss=\"mse\", optimizer=\"adam\")\n# Training\nbehavioralCloningModel = model.fit_generator(trainingDataGenerator, steps_per_epoch=np.ceil(len(trainingData)/batchSize), \\\n validation_data=validationDataGenerator, validation_steps=np.ceil(len(validationData)/batchSize), \\\n epochs=10, verbose=1)\n# Save the model\nmodel.save(\"model.h5\")\n\n\n\n","repo_name":"dschmoeller/BehavioralCloningDeepNNsKeras","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1779391306","text":"# importing Libraries\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport seaborn as sns\r\n\r\n# Importing the dataset\r\nos.chdir('E:\\\\Programing\\\\UdemyML\\\\Machine Learning A-Z Template Folder\\\\Part 4 - Clustering\\\\Section 24 - K-Means Clustering')\r\ndf = pd.read_csv('Mall_Customers.csv')\r\nprint(df)\r\nx = df.iloc[:,[3,4]].values\r\n\r\n# Using Elbow Method\r\nfrom sklearn.cluster import KMeans\r\nwcss = []\r\nfor i in range(1,11):\r\n k = KMeans(n_clusters=i,init='k-means++',max_iter=300,n_init=10,random_state=0)\r\n k.fit(x)\r\n wcss.append(k.inertia_)\r\n\r\nsns.set()\r\nplt.plot(range(1,11),wcss)\r\nplt.title('Elbow Method ')\r\nplt.xlabel('No.of.Clusters')\r\nplt.ylabel('WCSS Score')\r\nplt.show()\r\n\r\n# Fitting The Model To 5 Clusters\r\nk = KMeans(n_clusters=5, init='k-means++', max_iter=300, n_init=10, random_state=0)\r\ny_k = k.fit_predict(x)\r\nprint(y_k)\r\n\r\n# Scatter Plot The Clusters\r\nplt.scatter(x[y_k==0,0],x[y_k==0,1],c='red',label = 'Cluster 
1')\r\nplt.scatter(x[y_k==1,0],x[y_k==1,1],c='blue',label = 'Cluster 2')\r\nplt.scatter(x[y_k==2,0],x[y_k==2,1],c='green',label = 'Cluster 3')\r\nplt.scatter(x[y_k==3,0],x[y_k==3,1],c='yellow',label = 'Cluster 4')\r\nplt.scatter(x[y_k==4,0],x[y_k==4,1],c='cyan',label = 'Cluster 5')\r\nplt.xlabel('Annual Income')\r\nplt.ylabel('Spending Score')\r\nplt.title('Clustering Of Mall Clients')\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n","repo_name":"gemyhamed/Udemy_ML_C-MyownWork","sub_path":"K-means Clustring/KMeans Clustring.py","file_name":"KMeans Clustring.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42423650345","text":"text = []\r\nwith open('final.txt', 'r', encoding='utf-8') as f:\r\n\tfor line in f:\r\n\t\ttext.append(line.split('|')[2])\r\nword_dic = {}\r\n\r\nfor line in text:\r\n\tline_split = line.translate(str.maketrans('','','!(),-.[]_،؟!@#$\\n')).split(' ')\r\n\tfor word in line_split:\r\n\t\tif word in word_dic:\r\n\t\t\tword_dic[word] += 1\r\n\t\telse:\r\n\t\t\tword_dic[word] = 1\r\nword_dic_sorted = {k: v for k, v in sorted(word_dic.items(), key=lambda item: item[1], reverse=True)}\r\nwith open('word_count.txt', 'w', encoding='utf-8') as w:\r\n\tfor rank ,(word, count) in enumerate(word_dic_sorted.items()):\r\n\t\tw.write('{}-word: {}, count: {}\\n'.format(rank+1, word, count))\r\n\r\n","repo_name":"shenasa-ai/persian-tts","sub_path":"top-words.py","file_name":"top-words.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"77"} +{"seq_id":"42836011316","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 13 11:08:16 2020\r\n\r\n@author: prnvb\r\n\"\"\"\r\n\r\nfrom model import build_encoder, build_decoder_densenet, build_decoder_efnb2,\\\r\n build_decoder_efnb3, build_decoder_efnb4\r\nfrom keras.layers import Dense, Input, Dropout, Multiply, Add, Concatenate\r\nfrom keras.models import Model\r\n\r\nfrom utils import LATENT_DIM,NUM_CLASSES,INPUT_SHAPE\r\n\r\ndef build_classifier(encoder,dropout_rate=0.4):\r\n input_image = Input(shape=INPUT_SHAPE)\r\n embedding = encoder(input_image)\r\n #out = Dense(int(LATENT_DIM/2),activation='relu')(embedding)\r\n if dropout_rate>0:\r\n\t embedding = Dropout(0.3)(embedding)\r\n out = Dense(NUM_CLASSES,activation='softmax')(embedding)\r\n classifier = Model(input_image,out)\r\n classifier.name = 'Classifier'\r\n return classifier\r\n\r\ndef build_classifier_v2(encoder,input_shape):\r\n input_image = Input(shape=input_shape)\r\n embedding = encoder(input_image)\r\n #out = Dense(int(LATENT_DIM/2),activation='relu')(embedding)\r\n #out = Dropout(0.3)(out)\r\n out = Dense(NUM_CLASSES,activation='softmax')(embedding)\r\n classifier = Model(input_image,out)\r\n classifier.name = 'Classifier'\r\n return classifier\r\n\r\n\r\ndef build_conditioner():\r\n input_label_condition_vector = Input(shape=(NUM_CLASSES,))\r\n x = Dense(256,activation='relu')(input_label_condition_vector)\r\n #x = Dropout(0.2)(x)\r\n x = Dense(LATENT_DIM,activation='relu')(x)\r\n model = Model(input_label_condition_vector,x)\r\n return model\r\n\r\ndef build_c2ae(model_name): #encoder\r\n \r\n H_gamma = build_conditioner()\r\n H_gamma.name = 'H_gamma'\r\n H_beta = build_conditioner()\r\n H_beta.name = 'H_beta'\r\n \r\n #input_image = Input(shape=INPUT_SHAPE)\r\n #z = encoder(input_image)\r\n \r\n #condition_type_input = Input(shape=(1,))\r\n \r\n z = 
Input(shape=(LATENT_DIM,))\r\n \r\n l_m = Input(shape=(NUM_CLASSES,))\r\n gamma_m = H_gamma(l_m)\r\n beta_m = H_beta(l_m)\r\n z_l_m = Multiply()([z,gamma_m])\r\n z_l_m = Add()([z_l_m,beta_m])\r\n \r\n \r\n l_nm = Input(shape=(NUM_CLASSES,))\r\n gamma_nm = H_gamma(l_nm)\r\n beta_nm = H_beta(l_nm)\r\n z_l_nm = Multiply()([z,gamma_nm])\r\n z_l_nm = Add()([z_l_nm,beta_nm])\r\n \r\n if model_name == 'densenet121':\r\n decoder = build_decoder_densenet(LATENT_DIM)\r\n \r\n if model_name == 'efnb2':\r\n decoder = build_decoder_efnb2(LATENT_DIM)\r\n \r\n if model_name == 'efnb3':\r\n decoder = build_decoder_efnb3(LATENT_DIM)\r\n \r\n if model_name == 'efnb4':\r\n decoder = build_decoder_efnb4(LATENT_DIM)\r\n \r\n match_recon = decoder(z_l_m)\r\n nonmatch_recon = decoder(z_l_nm)\r\n \r\n out = Concatenate(axis=-1)([match_recon,nonmatch_recon])\r\n \r\n #c2ae = Model(inputs=[input_image,l_j],outputs=reconstruction)\r\n #c2ae = Model(inputs=[z,l_j,condition_type_input],outputs=reconstruction)\r\n \r\n c2ae = Model(inputs=[z,l_m,l_nm],outputs=out)\r\n \r\n return c2ae, decoder, H_gamma, H_beta#, condition_type_input #, encoder\r\n\r\nif __name__ == '__main__':\r\n encoder = build_encoder(LATENT_DIM)\r\n classifier = build_classifier(encoder)\r\n c2ae, _, decoder, H_gamma, H_beta = build_c2ae(encoder)\r\n \r\n encoder.summary()\r\n decoder.summary()\r\n H_gamma.summary()\r\n H_beta.summary()\r\n c2ae.summary()\r\n","repo_name":"pranavbudhwant/ISIC","sub_path":"c2ae/c2ae.py","file_name":"c2ae.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7229180673","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\n\npage_dist = dict()\nresponse = requests.get('http://old.iachina.cn/upload/product/20091207050241328.html')\nresponse.encoding = 'gbk'\nresponse = response.text\np_list = BeautifulSoup(response,\"lxml\").find_all('p')\nlevel = 0\nfor p in p_list:\n\n try :\n if p['align'] == \"center\":\n title = p.get_text()\n print(\"title : \"+title)\n except:\n print(p.get_text())\n\n\n\n\n","repo_name":"xiaoweiab/learn1","sub_path":"translate/dealbaoxian/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"4962613615","text":"# author:lzt\n# date: 2019/12/12 10:50\n# file_name: lock_test\n# 有100张票 3个窗口同时开卖 每卖出一张 票数-1 直到100张票卖完为止\nfrom threading import Thread\nimport time\nimport threading\n\ntickets = 100\n\n# 获取一把锁\nlock1 = threading.Lock()\n\n\ndef window1():\n global tickets\n while tickets > 0:\n lock1.acquire()\n # 二次判断:检测数据有没有在等待期间发生变化\n if tickets > 0:\n # 打印票面\n print(\"window1卖出票号:\", tickets)\n # time.sleep(0.02)\n # 票数-1\n tickets -= 1\n lock1.release()\n\n\ndef window2():\n global tickets\n while tickets > 0:\n lock1.acquire()\n if tickets > 0:\n # 打印票面\n print(\"window2卖出票号:\", tickets)\n # time.sleep(0.1)\n # 票数-1\n tickets -= 1\n lock1.release()\n\n\ndef window3():\n global tickets\n while tickets > 0:\n lock1.acquire()\n if tickets > 0:\n # 打印票面\n print(\"window3卖出票号:\", tickets)\n # time.sleep(0.05)\n # 票数-1\n tickets -= 1\n lock1.release()\n\n\nt1 = Thread(target=window1)\nt2 = Thread(target=window2)\nt3 = 
Thread(target=window3)\n\nt1.start()\nt2.start()\nt3.start()\n","repo_name":"1987617587/lsh_py","sub_path":"basics/day29/lzt/lock_test.py","file_name":"lock_test.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"40273269722","text":"import os\nfrom setuptools import setup\n\n\ntry:\n descr = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()\nexcept IOError:\n descr = ''\n\ntry:\n from pypandoc import convert\n descr = convert(descr, 'rst', format='md')\nexcept ImportError:\n pass\n\nsetup_parameters = dict(\n name=\"pims_nd2\",\n version=\"1.1\",\n description=\"An image reader for nd2 (NIS Elements) multidimensional images\",\n author=\"Casper van der Wel\",\n install_requires=['pims>=0.3'],\n author_email=\"caspervdw@gmail.com\",\n url=\"https://github.com/soft-matter/pims_nd2\",\n packages=['pims_nd2'],\n include_package_data=True,\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: C\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS\"],\n platforms=['MacOS X', 'Windows', 'Linux CentOs 6.5/7', 'Linux Debian 7/8'],\n long_description=descr)\n\nsetup(**setup_parameters)\n","repo_name":"soft-matter/pims_nd2","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"77"} +{"seq_id":"73471847288","text":"import setuptools\n\nREQUIRED = [\n \"numpy\",\n \"pandas\",\n \"scikit-learn\"\n]\n\nsetuptools.setup(\n name=\"lambdata-isaacgrove\",\n version=\"0.8\",\n packages=setuptools.find_packages(),\n # Project uses reStructuredText, so ensure that the docutils get\n # installed or upgraded on the target machine\n install_requires=REQUIRED,\n # metadata to display on PyPI\n author=\"isaacgrove\",\n author_email=\"isaacgrove333@gmail.com\",\n description=\"Lambda DS Unit 3 lambdata - helper functions\",\n keywords=\"\",\n url=\"\", # project home page, if any\n classifiers=[\n \"License :: OSI Approved :: MIT License\"\n ]\n # could also include long_description, download_url, etc.\n)","repo_name":"isaacgrove/unit3-day1-lambdata","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5430453039","text":"from firebase_admin import db\nfrom rest_framework.response import Response\nfrom apps.metrics.helpers.combine_metrics_helper.combine_metrics import SearchNode\n\ndef handleEditName(data):\n uid = data['user_id']\n project_index = data['project_index']\n arch_index = int(data['arch_index'])\n version_index = data['ver_index']\n url = '/users/' + uid + '/projects/' + str(project_index)\n\n old_name = data['old_name']\n new_name = data['new_name']\n\n arch_ref = db.reference(url + '/architectures')\n arch_arr = arch_ref.get()\n\n list_t = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t']\n nodes = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['nodes']\n try:\n for t in list_t:\n if t['name'] == old_name:\n t.update({\n 'name': 
str(new_name).upper()\n })\n for node in nodes:\n if(node['data']['id'] in t['composite_component']):\n print(node['data']['id'])\n node['data'].update({\n 'composite': str(new_name).upper()\n })\n break\n\n\n # Se actualiza la lista t\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t'] = list_t\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['nodes'] = nodes\n # Se actualiza la bd\n # arch_arr[int(arch_index)]['versions'][int(version_index)]['elements'] = elements\n project_ref = db.reference(url)\n project_ref.update({\n 'architectures': arch_arr\n })\n\n return Response(data={\"ok\": True})\n except Exception as e:\n print('Error:', e)\n return Response({\"ok\":False})\n\n\n# Permite editar el componente compuesto al que pertenece un nodo\ndef handleEditNodeCompositeComponent(data):\n uid = data['user_id']\n project_index = data['project_index']\n arch_index = int(data['arch_index'])\n version_index = data['ver_index']\n url = '/users/' + uid + '/projects/' + str(project_index)\n\n nodeData = data['node']\n composite_component = data['new_name']\n\n arch_ref = db.reference(url + '/architectures')\n arch_arr = arch_ref.get()\n\n list_t = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t']\n nodes = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['nodes']\n\n try:\n fullNode = SearchNode(nodeData, nodes) # me quede sin nombres jeje\n\n aux = False\n # Si el nodo pertenece con anterioridad a otro componente compuesto entonces lo saco de esa lista t\n if 'composite' in fullNode['data']:\n print('pertenecia a otro componente')\n for lt in list_t:\n for index, cc in enumerate(lt['composite_component']):\n if cc == fullNode['data']['name']:\n lt['composite_component'].pop(index)\n aux = True\n break\n\n if aux:\n print('break')\n break\n\n for t in list_t:\n if t['name'] == composite_component:\n t['composite_component'].append(nodeData)\n\n for node in nodes :\n if(node['data']['id'] == nodeData):\n node['data'].update({\n 'composite': t['name'],\n 'bg': t['bg']\n })\n\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t'] = list_t\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['nodes'] = nodes\n\n\n project_ref = db.reference(url)\n project_ref.update({\n 'architectures': arch_arr\n })\n return Response(data={'ok': True})\n except Exception as e:\n print(e)\n return Response(data={'ok': False})\n\n# Genera la tabla de los componentes compuestos\ndef handleCompositeComponentBoard(data):\n uid = data['user_id']\n project_index = data['project_index']\n arch_index = int(data['arch_index'])\n version_index = data['ver_index']\n url = '/users/' + uid + '/projects/' + str(project_index)\n\n arch_ref = db.reference(url + '/architectures')\n arch_arr = arch_ref.get()\n\n edges = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['edges']\n nodes = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['nodes']\n list_t = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t']\n\n # print(len(edges))\n # print(len(nodes))\n # print(len(list_t))\n try:\n for item in list_t:\n # Required interfaces\n ca = []\n # Provided interfaces\n ce = []\n\n for component in item['composite_component']:\n for edge in edges:\n sourceNode = SearchNode(edge['data']['source'], nodes)\n targetNode = SearchNode(edge['data']['target'], nodes)\n\n if component == sourceNode['data']['id']:\n if 'composite' not in 
targetNode['data']:\n composite = ''\n else:\n composite = targetNode['data']['composite']\n if sourceNode['data']['composite'] != composite:\n if edge['scratch']['index'] not in ce and edge['scratch']['index'] not in ca:\n ce.append(edge['scratch']['index'])\n\n\n if component == targetNode['data']['id']:\n if 'composite' not in sourceNode['data']:\n composite = ''\n else:\n composite = sourceNode['data']['composite']\n\n if targetNode['data']['composite'] != composite:\n if edge['scratch']['index'] not in ca and edge['scratch']['index'] not in ce:\n ca.append(edge['scratch']['index'])\n\n # print('--------NEXT---------')\n item.update({\n 'required_interfaces': ca,\n 'provided_interfaces': ce,\n 'description': ''\n })\n\n # Actualizo la lista t\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t'] = list_t\n project_ref = db.reference(url)\n # Actualizo los datos en la base de datos\n project_ref.update({\n 'architectures': arch_arr\n })\n\n return Response(data={'ok': True})\n except Exception as e:\n print(e)\n return Response(data={'ok': False})\n\n# TODO\n# ? Hace falta limpiar las tablas\n# Edita la descripción de los componentes compuestos\ndef handleEditCompositeComponentDescription(data):\n uid = data['user_id']\n project_index = data['project_index']\n arch_index = int(data['arch_index'])\n version_index = data['ver_index']\n url = '/users/' + uid + '/projects/' + str(project_index)\n\n cc_name = data['name']\n description = data['description']\n\n arch_ref = db.reference(url + '/architectures')\n arch_arr = arch_ref.get()\n\n list_t = arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t']\n\n try:\n for item in list_t:\n if item['name'] == cc_name:\n item.update({\n 'description': description\n })\n arch_arr[int(arch_index)]['versions'][int(version_index)]['elements']['list_t'] = list_t\n project_ref = db.reference(url)\n # Actualizo los datos en la base de datos\n project_ref.update({\n 'architectures': arch_arr\n })\n return Response(data={'ok': True})\n except Exception as e:\n print(e)\n return Response(data={'ok': False})\n","repo_name":"Leopgf/tesis-back","sub_path":"apps/metrics/helpers/combine_metrics_helper/composite_component_handler.py","file_name":"composite_component_handler.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27528022819","text":"\"\"\"These are the actions primary related to DialogDomain, but any module can use them. Basically it provides ways for visually inserting, editting and\r\nupdating the db:domain table.\"\"\"\r\nimport output\r\nimport wx\r\nimport session\r\nfrom errors import *\r\nimport DialogEditDomain\r\nfrom table_domain import t_domain\r\n \r\ndef insert():\r\n \"\"\"Calls the edit dialog in insert mode. Returns True of False whether a record has been inserted or not.\"\"\"\r\n result = False\r\n dlg = DialogEditDomain.create(None)\r\n dlg.set_mode(\"insert\")\r\n try:\r\n dlg.ShowModal()\r\n if dlg.result == wx.ID_OK:\r\n result = True\r\n else:\r\n raise error_abort(\"Insert canceled.\")\r\n finally:\r\n dlg.Destroy()\r\n return result\r\n\r\n\r\ndef edit(id):\r\n \"\"\"Calls the edit dialog in edit mode. 
Returns True of False whether a record has been edited or not.\"\"\"\r\n result = False\r\n dlg = DialogEditDomain.create(None)\r\n dlg.set_mode(\"edit\")\r\n dlg.set_id(id)\r\n try:\r\n dlg.ShowModal()\r\n if dlg.result == wx.ID_OK:\r\n result = True\r\n else:\r\n raise error_abort(\"Edit canceled.\")\r\n finally:\r\n dlg.Destroy()\r\n return result\r\n\r\ndef delete(id):\r\n if wx.MessageBox(\"Are you sure, mate?\", \"Confirm delete\", wx.YES_NO, None) == wx.YES:\r\n # raise error_x(\"Sorry, this action is too dangerous to be performed!\")\r\n t_domain.delete(id)\r\n else:\r\n raise error_abort(\"Delete not confirmed.\")\r\n\r\n","repo_name":"trevisanj/sheware","sub_path":"act_domain.py","file_name":"act_domain.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25015054524","text":"from database.models import Command, Result, Request, db, CharField\nfrom datetime import datetime\nimport psycopg2\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\nfrom loguru import logger\nfrom loader import bot\nfrom telebot.types import Message\n\n\ndef check_database() -> None:\n \"\"\"\n Функция проверяет наличие базы данных, если БД не существует, то создает её.\n В конце создает таблицы.\n \"\"\"\n logger.add('debug_in_database.log', level='DEBUG', format=\"{time} {level} {message}\", rotation=\"10 KB\",\n compression=\"zip\")\n con = psycopg2.connect(\"user='postgres' host='localhost' password='12345'\")\n dbname = 'history'\n\n con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n try:\n cur.execute('CREATE DATABASE ' + dbname)\n logger.info('DATABASE created')\n\n except psycopg2.ProgrammingError as err:\n logger.exception(err)\n logger.error('DATABASE already exists')\n\n finally:\n with db:\n db.create_tables([Request, Command, Result])\n\n\ndef insert_in_requests(user_id: int, time: datetime) -> int:\n \"\"\"\n Cоздаёт запись в таблице requests\n :param user_id: id пользователя\n :param time: время когда пользователь сделал запрос\n :return: айди записи для создания связи между таблицами\n \"\"\"\n with db:\n request = Request.create(user_id=user_id, time=time)\n logger.info('INSERT in requests')\n return request.id\n\n\ndef insert_in_commands(request_id, command_name: str, city_name: str,\n data_in: str, data_out: str, quantity: str,\n min_price: CharField = None, max_price: CharField = None, min_distance: CharField = None,\n max_distance: CharField = None) -> int:\n \"\"\"\n Cоздаёт запись в таблице commands\n :param request_id: айди прошлого запроса(requests)\n :param command_name: имя команды\n :param city_name: названия города\n :param data_in: дата заезда\n :param data_out: дата выезда\n :param quantity: кол-во отелей\n :param min_price: мин. цена (optional)\n :param max_price: макс. цена (optional)\n :param min_distance: мин. дистанция до центра (optional)\n :param max_distance: макс. 
distance to the center (optional)\n :return: record id used to link the tables\n \"\"\"\n with db:\n command = Command.create(request_id=request_id, command_name=command_name, city_name=city_name,\n min_price=min_price, max_price=max_price, min_distance=min_distance,\n max_distance=max_distance, data_in=data_in, data_out=data_out, quantity=quantity)\n logger.info('INSERT in commands')\n return command.id\n\n\ndef insert_in_results(command_id, hotel: str, address: str, price: str,\n distance: str, total_price: str, url: str) -> None:\n \"\"\"\n Creates a record in the results table\n :param command_id: id of the previous request (commands)\n :param hotel: hotel name\n :param address: address\n :param price: price per night\n :param distance: distance to the center\n :param total_price: total amount of money\n :param url: link to the hotel\n :return: None\n \"\"\"\n with db:\n Result.insert(command_id=command_id, hotel=hotel, address=address, price=price,\n distance=distance, total_price=total_price, url=url).execute()\n logger.info('INSERT in results')\n\n\n@logger.catch()\ndef select_user_history(message: Message):\n \"\"\"\n Fetches the last 5 requests of the user's history from the database,\n then processes them and converts them to text.\n Finally, sends the user each command together with the hotels found by that command.\n :param message: the user's message (it gives us the id\n and lets us send text from this function)\n \"\"\"\n with db:\n keys = Request.select().where(Request.user_id == message.from_user.id).limit(5).order_by(Request.time.desc())\n for key in keys:\n command = Command.select().where(Command.request_id == key).get()\n text1 = (f'Время: {str(key.time)[0:19]} Команда: {command.command_name}\\n'\n f'Город: {command.city_name}, с {command.data_in} по {command.data_out}')\n if command.command_name == 'beastdeal':\n text1 += (f'параметры поиска:\\n'\n f'минимальная цена: {command.min_price} и максимальная цена: {command.max_price}\\n'\n f'минимальное расстояние: {command.min_distance}'\n f' и максимальное расстояние: {command.max_distance}\\n')\n bot.send_message(message.chat.id, text1)\n\n history = Result.select().where(Result.command_id == key)\n for one_story in history:\n text2 = (f'Название отеля: {one_story.hotel}, цена за ночь: {one_story.price}\\n'\n f'Расстояние до центра {one_story.distance}\\n'\n f'Полная стоимость проживания {one_story.total_price}\\n'\n f'Адрес: {one_story.address}\\nСсылка на страницу отеля: {one_story.url}')\n\n bot.send_message(message.chat.id, text2, disable_web_page_preview=True)\n\n\n\n","repo_name":"banrj/telegram_travel_bot","sub_path":"database/database_commands.py","file_name":"database_commands.py","file_ext":"py","file_size_in_byte":6223,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43798845662","text":"import itertools as it\nimport ubelt as ub\nimport pathlib\nimport time\nimport os\nimport stat\n\n\ndef ensure_selenium_chromedriver():\n \"\"\"\n os.environ['webdriver.chrome.driver'] = ensure_selenium_chromedriver()\n \"\"\"\n import requests\n import zipfile\n timeout = 5.0\n\n def latest_version():\n rsp = requests.get('http://chromedriver.storage.googleapis.com/LATEST_RELEASE', timeout=timeout)\n if rsp.status_code != 200:\n raise Exception\n version = rsp.text.strip()\n return version\n\n # version = latest_version()\n # version = '91.0.4472.19'\n # version = '90.0.4430.24'\n version = '92.0.4515.107'\n\n known_hashs = {\n 
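# Download-hash prefixes keyed by the pinned chromedriver version; they are\n # passed to ub.grabdata as hash_prefix below, with an 'unknown-version'\n # sentinel as the fallback for unpinned versions.\n 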
'91.0.4472.19': '49622b740b1c7e66b87179a2642f6c57f21a97fc844c84b30a48',\n '90.0.4430.24': 'b85313de6abc1b44f26a0e12e20cb66657b840417f5ac6018946',\n '92.0.4515.107': '844c0e04bbbfd286617af2d7facd3d6cf7d3491b1e78120f8e0',\n }\n url = 'http://chromedriver.storage.googleapis.com/{}/chromedriver_linux64.zip'.format(version)\n bin_dpath = pathlib.Path(ub.expandpath('~/.local/bin'))\n download_dpath = bin_dpath / f'chromedriver_{version}'\n download_dpath.mkdir(exist_ok=True, parents=True)\n\n zip_fpath = ub.grabdata(\n url, hash_prefix=known_hashs.get(version, 'unknown-version'),\n dpath=download_dpath,\n )\n zip_fpath = pathlib.Path(zip_fpath)\n # dpath = zip_fpath.parent\n\n # TODO: version the binary\n chromedriver_fpath_real = download_dpath / 'chromedriver'\n chromedriver_fpath_link = bin_dpath / 'chromedriver'\n\n if not chromedriver_fpath_real.exists() or not chromedriver_fpath_link.exists():\n # Also check hash?\n\n zfile = zipfile.ZipFile(str(zip_fpath))\n try:\n fpath = zfile.extract(\n 'chromedriver', path=chromedriver_fpath_real.parent)\n finally:\n zfile.close()\n\n chromedriver_fpath_real_ = pathlib.Path(fpath)\n assert chromedriver_fpath_real_.exists()\n ub.symlink(chromedriver_fpath_real_, chromedriver_fpath_link,\n overwrite=True)\n\n if not ub.WIN32:\n print('add permission chromedriver_fpath_real_ = {!r}'.format(chromedriver_fpath_real_))\n st = os.stat(chromedriver_fpath_real_)\n os.chmod(chromedriver_fpath_real_, st.st_mode | stat.S_IEXEC)\n\n os.environ['PATH'] = os.pathsep.join(\n ub.oset(os.environ['PATH'].split(os.pathsep)) |\n ub.oset([str(chromedriver_fpath_link.parent)]))\n return chromedriver_fpath_link\n\n\ndef run_pvpoke_simulation(mons, league='auto'):\n \"\"\"\n Args:\n mons (List[pypogo.Pokemon]): pokemon to simulate.\n Must have IVS, movesets, level, etc... 
fields populated.\n \"\"\"\n from selenium import webdriver\n from selenium.webdriver.common.keys import Keys\n # from selenium.webdriver.support.ui import Select\n import pandas as pd\n # import pypogo\n\n if league == 'auto':\n for mon in mons:\n if mon.cp <= 1500:\n league = 'great'\n elif mon.cp <= 2500:\n league = 'ultra'\n elif mon.level <= 41:\n league = 'master-classic'\n elif mon.level <= 51:\n league = 'master'\n else:\n raise AssertionError\n break\n # for mon in mons:\n # mon.populate_all\n mon_cachers = {}\n have_results = {}\n to_check_mons = []\n for mon in mons:\n mon._slug = mon.slug()\n mon_cachers[mon._slug] = cacher = ub.Cacher(\n 'pvpoke_sim', depends=[mon._slug, league], appname='pypogo')\n mon_results = cacher.tryload()\n if mon_results is None:\n to_check_mons.append(mon)\n else:\n have_results[mon._slug] = mon_results\n\n if to_check_mons:\n # Requires the driver be in the PATH\n ensure_selenium_chromedriver()\n\n url = 'https://pvpoke.com/battle/matrix/'\n driver = webdriver.Chrome()\n driver.get(url)\n time.sleep(2.0)\n\n if league == 'great':\n league_box_target = 'Great League (CP 1500)'\n meta_text = 'Great League Meta'\n elif league == 'ultra':\n league_box_target = 'Ultra League (Level 50)'\n meta_text = 'Ultra League Meta'\n # meta_text = 'Premier Cup Meta'\n # meta_text = 'Remix Cup Meta'\n # meta_text = 'Premier Classic Cup Meta'\n elif league == 'master-classic':\n league_box_target = 'Master League (Level 40)'\n meta_text = 'Master League Meta'\n elif league == 'master':\n league_box_target = 'Master League (Level 50)'\n meta_text = 'Master League Meta'\n else:\n raise NotImplementedError\n\n leage_select = driver.find_elements_by_class_name('league-select')[0]\n leage_select.click()\n leage_select.send_keys(league_box_target)\n leage_select.click()\n leage_select.send_keys(Keys.ENTER)\n\n # leage_select.text.split('\\n')\n # leage_select.send_keys('\\n')\n # leage_select.send_keys('\\n')\n\n def add_pokemon(mon):\n add_poke1_button = driver.find_elements_by_class_name('add-poke-btn')[0]\n add_poke1_button.click()\n\n select_drop = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/select')\n\n if 1:\n import xdev\n all_names = select_drop.text.split('\\n')\n distances = xdev.edit_distance(mon.display_name(), all_names)\n chosen_name = all_names[ub.argmin(distances)]\n else:\n chosen_name = mon.name\n\n search_box = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/input')\n search_box.send_keys(chosen_name)\n\n advanced_ivs_arrow = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/a/span[1]')\n advanced_ivs_arrow.click()\n\n level40_cap = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[2]')\n level41_cap = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[3]')\n level50_cap = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[4]')\n level51_cap = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[2]/div[2]/div[5]')\n\n if mon.level >= 51:\n level51_cap.click()\n elif mon.level >= 50:\n level50_cap.click()\n elif mon.level >= 41:\n level41_cap.click()\n elif mon.level >= 40:\n level40_cap.click()\n\n level_box = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/input')\n level_box.click()\n level_box.clear()\n level_box.clear()\n 
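# NOTE: clear() is called twice above; presumably a guard against input\n        # widgets that repopulate a default value after the first clear.\n        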
level_box.send_keys(str(mon.level))\n\n iv_a = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/div/input[1]')\n iv_d = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/div/input[2]')\n iv_s = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[9]/div/div[1]/div/input[3]')\n\n # TODO\n # driver.find_elements_by_class_name('move-select')\n\n iv_a.clear()\n iv_a.send_keys(str(mon.ivs[0]))\n\n iv_d.clear()\n iv_d.send_keys(str(mon.ivs[1]))\n\n iv_s.clear()\n iv_s.send_keys(str(mon.ivs[2]))\n\n # USE_MOVES = 1\n if mon.moves is not None:\n # mon.populate_all()\n\n fast_select = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[10]/select[1]')\n fast_select.click()\n fast_select.send_keys(mon.pvp_fast_move['name'])\n fast_select.send_keys(Keys.ENTER)\n\n charge1_select = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[10]/select[2]')\n charge1_select.click()\n charge1_select.send_keys(mon.pvp_charge_moves[0]['name'])\n charge1_select.send_keys(Keys.ENTER)\n\n charge2_select = driver.find_element_by_xpath('/html/body/div[5]/div/div[3]/div[1]/div[2]/div[10]/select[3]')\n charge2_select.click()\n charge2_select.send_keys(mon.pvp_charge_moves[1]['name'])\n charge2_select.send_keys(Keys.ENTER)\n\n save_button = driver.find_elements_by_class_name('save-poke')[0]\n save_button.click()\n\n quickfills = driver.find_elements_by_class_name('quick-fill-select')\n quickfill = quickfills[1]\n quickfill.text.split('\\n')\n quickfill.click()\n quickfill.send_keys(meta_text)\n quickfill.click()\n\n for mon in to_check_mons:\n add_pokemon(mon)\n\n shield_num_to_text = {\n 0: 'No shields',\n 1: '1 shield',\n 2: '2 shields',\n }\n\n shield_case_to_data = {}\n\n for atk_num_shields, def_num_sheids in it.product(shield_num_to_text, shield_num_to_text):\n shield_selectors = driver.find_elements_by_class_name('shield-select')\n shield_selectors[2].click()\n shield_selectors[2].send_keys(shield_num_to_text[atk_num_shields])\n shield_selectors[2].send_keys(Keys.ENTER)\n\n shield_selectors[3].click()\n shield_selectors[3].send_keys(shield_num_to_text[def_num_sheids])\n shield_selectors[3].send_keys(Keys.ENTER)\n\n #shield_selectors[0].click()\n\n battle_btn = driver.find_elements_by_class_name('battle-btn')[0]\n battle_btn.click()\n\n # Clear previous downloaded files\n dlfolder = pathlib.Path(ub.expandpath('$HOME/Downloads'))\n for old_fpath in list(dlfolder.glob('_vs*.csv')):\n old_fpath.unlink()\n\n time.sleep(2.0)\n\n # Download new data\n dl_btn = driver.find_element_by_xpath('//*[@id=\"main\"]/div[4]/div[9]/div/a')\n dl_btn.click()\n\n while len(list(dlfolder.glob('_vs*.csv'))) < 1:\n pass\n\n new_fpaths = list(dlfolder.glob('_vs*.csv'))\n assert len(new_fpaths) == 1\n fpath = new_fpaths[0]\n\n data = pd.read_csv(fpath, header=0, index_col=0)\n shield_case_to_data[(atk_num_shields, def_num_sheids)] = data\n\n for idx, mon in enumerate(to_check_mons):\n mon_results = {ss: scores.iloc[idx] for ss, scores in shield_case_to_data.items()}\n cacher = mon_cachers[mon._slug]\n cacher.save(mon_results)\n have_results[mon._slug] = mon_results\n\n _tojoin = ub.ddict(list)\n _joined = ub.ddict(list)\n for mon_results in have_results.values():\n for ss, scores in mon_results.items():\n _tojoin[ss].append(scores)\n\n for ss, vals in _tojoin.items():\n _joined[ss] = pd.concat([v.to_frame().T for v in vals])\n _joined.default_factory = None\n results = _joined\n return 
results\n","repo_name":"Erotemic/pypogo","sub_path":"pypogo/pvpoke_driver.py","file_name":"pvpoke_driver.py","file_ext":"py","file_size_in_byte":11341,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"5294664414","text":"import unittest\nimport stats as s\n\nclass TestStatsMethods(unittest.TestCase):\n\n\tdef test_compute_avg(self):\n\t\texpected = 2.5\n\t\ttest = s.compute_avg([1,2,3,4])\n\t\tself.assertEqual(test, expected)\n\n\tdef test_compute_min(self):\n\t\texpected = 1\n\t\ttest = s.compute_min([1,2,3,4])\n\t\tself.assertEqual(test, expected)\n\n\tdef test_compute_max(self):\n\t\texpected = 4\n\t\ttest = s.compute_max([1,2,3,4])\n\t\tself.assertEqual(test, expected)\n\nif __name__ == '__main__':\n\tunittest.main()\n\nunittest.main()","repo_name":"cmoussa1/Travis-CI-for-Python","sub_path":"unittest_stats.py","file_name":"unittest_stats.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23944934821","text":"#!/usr/bin/env python\n# import pytest\n\n\nclass Virus(object):\n '''Properties and attributes of the virus used in Simulation.'''\n\n def __init__(self, name, repro_rate, mortality_rate):\n self.name = name\n self.repro_rate = repro_rate\n self.mortality_rate = mortality_rate\n\n# ERIK's test\ndef test_virus_instantiation():\n '''Check to make sure that the virus instantiator is working.'''\n virus = Virus(\"Ebola\", 0.22, 0.7)\n assert virus.name == \"Ebola\"\n assert virus.repro_rate == 0.22\n assert virus.mortality_rate == 0.7\n\n# MAKHMUD's test\ndef test_virus_tuberculosis():\n virus = Virus(\"Tuberculosis\", 0.55, 0.67)\n assert virus.name == \"Tuberculosis\"\n assert virus.repro_rate == 0.55\n assert virus.mortality_rate == 0.67\n","repo_name":"makhmudislamov/HerdImmunityMakeSchool-Refactored","sub_path":"virus.py","file_name":"virus.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"25204048428","text":"\"\"\" Another chatgpt stab at geodesics in de Sitter Space \n\n\n\"\"\"\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Define parameters\nH = 1.0 # Hubble constant\nL = 1.0 # de Sitter radius\nm = 0.1 # mass of particle\ntmax = 5.0 # maximum time\nN = 1000 # number of time steps\ndt = tmax / N # time step size\n\n# Define initial conditions\nx0 = 0.0\ny0 = L\npx0 = m * np.sqrt((H*L)**2 - 1.0) # ho-hum this is zero.\npy0 = 0.0\n\n# results\nresults = None\n\n# Define the differential equations for x, y, px, and py\ndef f(t, X):\n x, y, px, py = X\n \n r = np.sqrt(x**2 + y**2)\n f_x = px / (m * r)\n f_y = py / (m * r)\n f_px = -m * H**2 * x / r**3\n f_py = -m * H**2 * y / r**3\n return np.array([f_x, f_y, f_px, f_py])\n\ndef main():\n # Solve the differential equations using the Runge-Kutta method\n\n t = 0.0\n X = np.array([x0, y0, px0, py0])\n xvals = [x0]\n yvals = [y0]\n tvals = [t]\n Xvals = [dict(t=t, x=x0, y=y0, px=px0, py=py0)]\n while t < tmax:\n k1 = dt * f(t, X)\n k2 = dt * f(t + 0.5*dt, X + 0.5*k1)\n k3 = dt * f(t + 0.5*dt, X + 0.5*k2)\n k4 = dt * f(t + dt, X + k3)\n X = X + (k1 + 2.0*k2 + 2.0*k3 + k4) / 6.0\n xvals.append(X[0])\n yvals.append(X[1])\n tvals.append(t)\n x, y, px, py = X\n Xvals.append(dict(t=t, x=x, y=y, px=px, py=py))\n t += dt\n\n global results\n results = Xvals\n \n # Plot the geodesic\n #plt.plot(xvals, yvals)\n plt.plot(tvals, xvals, label='x')\n 
plt.plot(tvals, yvals, label='y')\n plt.plot(tvals, list(x['py'] for x in Xvals), label='py')\n plt.plot(tvals, list(x['px'] for x in Xvals), label='px')\n plt.plot(tvals, list(math.sqrt(x['x']**2 + x['y']**2) for x in Xvals), label='r')\n #plt.xlim(-2*L, 2*L)\n #plt.ylim(-2*L, 2*L)\n #plt.gca().set_aspect('equal', adjustable='box')\n plt.legend()\n plt.xlabel('t')\n plt.show()\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"swfiua/gotu","sub_path":"gotu/aidss2.py","file_name":"aidss2.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39263293153","text":"import pygame\nfrom PIL import Image as PilImage, ImageSequence\nfrom typing import List\nfrom pygame import Surface\nimport os\n\nfrom game.Entity.Image import Image\n\nclass ImageService:\n IMAGE_FORMAT_GIF = 'GIF'\n FRAME_TYPE_RGBA = 'RGBA'\n\n def __init__(self) -> None:\n self.imageSurfaceMap = {}\n\n def getImageFrameSurfaceList(self, imagePath: str) -> List[Surface]:\n result = []\n\n pilImage = PilImage.open(imagePath)\n if pilImage.format == self.IMAGE_FORMAT_GIF and pilImage.is_animated:\n for frame in ImageSequence.Iterator(pilImage):\n result.append(self.convertPilImageToSurface(frame.convert(self.FRAME_TYPE_RGBA)))\n else:\n result.append(self.convertPilImageToSurface(pilImage))\n\n return result\n\n def convertPilImageToSurface(self, pilImage) -> Surface:\n return pygame.image.fromstring(pilImage.tobytes(), pilImage.size, pilImage.mode).convert_alpha()\n\n def scaleImageSurface(self, imageSurface: Surface, width: int, height: int):\n return pygame.transform.scale(imageSurface, (width, height))\n\n def scaleImageSurfaceList(self, imageSurfaceList: List[Surface], width: int, height: int) -> list:\n result = []\n\n for imageSurface in imageSurfaceList:\n result.append(self.scaleImageSurface(imageSurface, width, height))\n\n return result\n\n def buildImage(self, path: str, width: int, height: int) -> Image:\n imageFrameSurfaceList = self.getImageFrameSurfaceList(path)\n imageFrameSurfaceList = self.scaleImageSurfaceList(\n imageFrameSurfaceList,\n width,\n height\n )\n\n self.imageSurfaceMap[path] = imageFrameSurfaceList\n\n image = Image(path)\n\n return image\n\n def buildImageList(self, path: str, width: int, height: int) -> List[Image]:\n result = []\n\n for fileName in os.listdir(path):\n result.append(self.buildImage(\"%s%s\" % (path, fileName), width, height))\n\n return result\n\n","repo_name":"white-rabbit-1-sketch/helicopter","sub_path":"Service/System/ImageService.py","file_name":"ImageService.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"39354750456","text":"#!/usr/bin/env python\n\n\"\"\"Tests for `xbitinfo` package.\"\"\"\nimport os\n\nimport numpy as np\nimport pytest\nimport xarray as xr\nfrom numpy.testing import assert_allclose, assert_equal\nfrom xarray.core import formatting\nfrom xarray.core.dataarray import DataArray\nfrom xarray.core.dataset import Dataset\nfrom xarray.core.variable import Variable\nfrom xarray.testing import assert_identical\n\nimport xbitinfo as xb\n\n\ndef assert_different(a, b):\n \"\"\"Raises an AssertionError if two objects are equal. 
This will match\n    data values, dimensions and coordinates, but not names or attributes\n    (except for Dataset objects for which the variable names must match).\n    Arrays with NaN in the same location are considered equal.\n    Parameters\n    ----------\n    a : xarray.Dataset, xarray.DataArray or xarray.Variable\n        The first object to compare.\n    b : xarray.Dataset, xarray.DataArray or xarray.Variable\n        The second object to compare.\n    See Also\n    --------\n    assert_identical, assert_allclose, Dataset.equals, DataArray.equals\n    numpy.testing.assert_array_equal\n    \"\"\"\n    __tracebackhide__ = True\n    assert type(a) == type(b)\n    if isinstance(a, (Variable, DataArray)):\n        assert not a.equals(b), formatting.diff_array_repr(a, b, \"equals\")\n    elif isinstance(a, Dataset):\n        assert not a.equals(b), formatting.diff_dataset_repr(a, b, \"equals\")\n    else:\n        raise TypeError(f\"{type(a)} not supported by assertion comparison\")\n\n\ndef bitinfo_assert_equal(bitinfo1, bitinfo2):\n    assert list(bitinfo1.keys()) == list(bitinfo2.keys()), print(\n        f\"lhs = {bitinfo1.keys()} vs rhs = {bitinfo2.keys()}\"\n    )\n    for v in bitinfo1.keys():\n        assert_equal(bitinfo1[v], bitinfo2[v])\n\n\ndef bitinfo_assert_allclose(bitinfo1, bitinfo2, **kwargs):\n    assert list(bitinfo1.keys()) == list(bitinfo2.keys()), print(\n        f\"lhs = {bitinfo1.keys()} vs rhs = {bitinfo2.keys()}\"\n    )\n    for v in bitinfo1.keys():\n        assert_allclose(bitinfo1[v], bitinfo2[v], **kwargs)\n\n\ndef bitinfo_assert_different(bitinfo1, bitinfo2):\n    \"\"\"Fail unless the two bitinfo values differ somewhere.\"\"\"\n    assert (bitinfo1 != bitinfo2).any()\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_returns_dataset(implementation):\n    \"\"\"Test xb.get_bitinformation returns xr.Dataset.\"\"\"\n    ds = xr.tutorial.load_dataset(\"rasm\")\n    assert isinstance(\n        xb.get_bitinformation(ds, implementation=implementation, axis=0), xr.Dataset\n    )\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_dim(implementation):\n    \"\"\"Test xb.get_bitinformation is sensitive to dim.\"\"\"\n    ds = xr.tutorial.load_dataset(\"rasm\")\n    bitinfo0 = xb.get_bitinformation(ds, axis=0, implementation=implementation)\n    bitinfo2 = xb.get_bitinformation(ds, axis=2, implementation=implementation)\n    assert_different(bitinfo0, bitinfo2)\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_dim_string_equals_axis_int(implementation):\n    \"\"\"Test xb.get_bitinformation understands xarray dimension names the same way as integer axes.\"\"\"\n    ds = xr.tutorial.load_dataset(\"rasm\")\n    bitinfox = xb.get_bitinformation(ds, dim=\"x\", implementation=implementation)\n    bitinfo2 = xb.get_bitinformation(ds, axis=2, implementation=implementation)\n    assert_identical(bitinfox, bitinfo2)\n\n\ndef test_get_bitinformation_masked_value(implementation=\"julia\"):\n    \"\"\"Test xb.get_bitinformation is sensitive to masked_value.\"\"\"\n    ds = xr.tutorial.load_dataset(\"rasm\")\n    bitinfo = xb.get_bitinformation(ds, dim=\"x\", implementation=implementation)\n    bitinfo_no_mask = xb.get_bitinformation(\n        ds, dim=\"x\", masked_value=\"nothing\", implementation=implementation\n    )\n    bitinfo_no_mask_None = xb.get_bitinformation(\n        ds, dim=\"x\", masked_value=None, implementation=implementation\n    )\n    assert_identical(bitinfo_no_mask, bitinfo_no_mask_None)\n    assert_different(bitinfo, bitinfo_no_mask)\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef 
test_get_bitinformation_set_zero_insignificant(implementation):\n \"\"\"Test xb.get_bitinformation is sensitive to set_zero_insignificant.\"\"\"\n ds = xr.tutorial.load_dataset(\"air_temperature\")\n dim = \"lon\"\n bitinfo = xb.get_bitinformation(ds, dim=dim, implementation=implementation)\n bitinfo_szi_False = xb.get_bitinformation(\n ds, dim=dim, set_zero_insignificant=False, implementation=implementation\n )\n try:\n bitinfo_szi_True = xb.get_bitinformation(\n ds, dim=dim, set_zero_insignificant=True, implementation=implementation\n )\n assert_identical(bitinfo, bitinfo_szi_True)\n except NotImplementedError:\n assert implementation == \"python\"\n if implementation == \"python\":\n assert_identical(bitinfo, bitinfo_szi_False)\n elif implementation == \"julia\":\n assert_different(bitinfo, bitinfo_szi_False)\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_confidence(implementation):\n \"\"\"Test xb.get_bitinformation is sensitive to confidence.\"\"\"\n ds = xr.tutorial.load_dataset(\"air_temperature\")\n dim = \"lon\"\n bitinfo = xb.get_bitinformation(ds, dim=dim, implementation=implementation)\n try:\n bitinfo_conf99 = xb.get_bitinformation(\n ds, dim=dim, confidence=0.99, implementation=implementation\n )\n bitinfo_conf50 = xb.get_bitinformation(\n ds, dim=dim, confidence=0.5, implementation=implementation\n )\n assert_different(bitinfo_conf99, bitinfo_conf50)\n assert_identical(bitinfo, bitinfo_conf99)\n except AssertionError:\n assert implementation == \"python\"\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_label(rasm, implementation):\n \"\"\"Test xb.get_bitinformation serializes when label given.\"\"\"\n ds = rasm\n xb.get_bitinformation(\n ds, dim=\"x\", label=\"./tmp_testdir/rasm\", implementation=implementation\n )\n assert os.path.exists(\"./tmp_testdir/rasm.json\")\n # second call should be faster\n xb.get_bitinformation(\n ds, dim=\"x\", label=\"./tmp_testdir/rasm\", implementation=implementation\n )\n os.remove(\"./tmp_testdir/rasm.json\")\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\n@pytest.mark.parametrize(\"dtype\", [\"float64\", \"float32\", \"float16\"])\ndef test_get_bitinformation_dtype(rasm, dtype, implementation):\n \"\"\"Test xb.get_bitinformation returns correct number of bits depending on dtype.\"\"\"\n ds = rasm.astype(dtype)\n v = list(ds.data_vars)[0]\n dtype_bits = dtype.replace(\"float\", \"\")\n assert len(xb.get_bitinformation(ds, dim=\"x\")[v].coords[\"bit\" + dtype_bits]) == int(\n dtype_bits\n )\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_multidim(rasm, implementation):\n \"\"\"Test xb.get_bitinformation runs on all dimensions by default\"\"\"\n ds = rasm\n bi = xb.get_bitinformation(ds, implementation=implementation)\n # check length of dimension\n assert bi.dims[\"dim\"] == len(ds.dims)\n bi_time = bi.sel(dim=\"time\").Tair.values\n bi_x = bi.sel(dim=\"x\").Tair.values\n bi_y = bi.sel(dim=\"y\").Tair.values\n assert any(bi_time != bi_x)\n assert any(bi_time != bi_y)\n assert any(bi_y != bi_x)\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_different_variables_dims(rasm, implementation):\n \"\"\"Test xb.get_bitinformation runs with variables of different dimensionality\"\"\"\n ds = rasm\n # add variable with different dimensionality\n ds[\"Tair_mean\"] = ds.Tair.mean(dim=\"time\")\n bi = 
xb.get_bitinformation(ds, implementation=implementation)\n assert all(np.isnan(bi.Tair_mean.sel(dim=\"time\")))\n bi_Tair_mean_x = bi.Tair_mean.sel(dim=\"x\")\n bi_Tair_x = bi.Tair.sel(dim=\"x\")\n assert_different(bi_Tair_mean_x, bi_Tair_x)\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_different_dtypes(rasm, implementation):\n ds = rasm\n ds[\"Tair32\"] = ds.Tair.astype(\"float32\")\n ds[\"Tair16\"] = ds.Tair.astype(\"float16\")\n bi = xb.get_bitinformation(ds, implementation=implementation)\n for bitdim in [\"bit16\", \"bit32\", \"bit64\"]:\n assert bitdim in bi.dims\n assert bitdim in bi.coords\n\n\n@pytest.mark.parametrize(\"implementation\", [\"julia\", \"python\"])\ndef test_get_bitinformation_dim_list(rasm, implementation):\n bi = xb.get_bitinformation(rasm, dim=[\"x\", \"y\"], implementation=implementation)\n assert (bi.dim == [\"x\", \"y\"]).all()\n\n\ndef test_get_bitinformation_keep_attrs(rasm):\n bi = xb.get_bitinformation(rasm, dim=[\"x\", \"y\"]).Tair\n assert \"units\" in bi.attrs\n assert bi.attrs[\"units\"] == 1\n for a in rasm.Tair.attrs.keys():\n assert bi.attrs[\"source_\" + a] == rasm.Tair.attrs[a], print(bi.attrs)\n\n\n@pytest.mark.parametrize(\n \"ds,dim,axis\",\n [\n (pytest.lazy_fixture(\"ugrid_demo\"), None, -1),\n (pytest.lazy_fixture(\"icon_grid_demo\"), \"ncells\", None),\n (pytest.lazy_fixture(\"air_temperature\"), \"lon\", None),\n (pytest.lazy_fixture(\"rasm\"), \"x\", None),\n (pytest.lazy_fixture(\"ROMS_example\"), \"eta_rho\", None),\n (pytest.lazy_fixture(\"era52mt\"), \"time\", None),\n (pytest.lazy_fixture(\"eraint_uvz\"), \"longitude\", None),\n ],\n)\ndef test_implementations_agree(ds, dim, axis):\n \"\"\"Test whether the python and julia implementation retrieve the same results\"\"\"\n bi_python = xb.get_bitinformation(\n ds,\n dim=dim,\n axis=axis,\n implementation=\"python\",\n set_zero_insignificant=False,\n overwrite=True,\n masked_value=None,\n )\n bi_julia = xb.get_bitinformation(\n ds,\n dim=dim,\n axis=axis,\n implementation=\"julia\",\n set_zero_insignificant=False,\n overwrite=True,\n masked_value=None,\n )\n bitinfo_assert_allclose(bi_python, bi_julia, rtol=1e-4)\n","repo_name":"observingClouds/xbitinfo","sub_path":"tests/test_get_bitinformation.py","file_name":"test_get_bitinformation.py","file_ext":"py","file_size_in_byte":10000,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"77"} +{"seq_id":"70505270329","text":"from threading import Lock\nimport random\n\nfrom abs_estimator import AbsEstimator\n\n\nclass SumEst(AbsEstimator):\n _ITERATION_NUMBER = 100\n _POOL_SAMPLE_SIZE = 1000\n _ITERATION_NUMBER_INFORMATION = \"Number of iterations\"\n _POOL_SAMPLE_SIZE_INFORMATION = \"Size of the query pool sample\"\n _PAIR_QUERY_INDEX = 0\n _PAIR_DOCUMENT_INDEX = 1\n\n @property\n def experiment_details(self):\n additional_information = {SumEst._ITERATION_NUMBER_INFORMATION: SumEst._ITERATION_NUMBER,\n SumEst._POOL_SAMPLE_SIZE_INFORMATION: SumEst._POOL_SAMPLE_SIZE}\n return additional_information\n\n @property\n def common_api(self):\n return self.__common_api\n\n @common_api.setter\n def common_api(self, val):\n self.__common_api = val\n\n def __init__(self, common_api):\n self.__common_api = common_api\n\n def estimate(self):\n super().estimate()\n estimation_acc = 0\n query_pool = self.common_api.read_query_pool()\n pool_size = self._estimate_pool_size(query_pool)\n for i in range(0, SumEst._ITERATION_NUMBER):\n query_document_pair = 
self._select_query_document_pair(query_pool)\n            document = query_document_pair[SumEst._PAIR_DOCUMENT_INDEX]\n            query = query_document_pair[SumEst._PAIR_QUERY_INDEX]\n            document_inverse_degree = self._calculate_document_inverse_degree(document, query_pool)\n            degree_query = self._calculate_degree_query(query)\n            partial_estimation = pool_size * degree_query * document_inverse_degree\n            estimation_acc += partial_estimation\n            self.common_api.report_progress(i, SumEst._ITERATION_NUMBER)\n        estimation = estimation_acc / SumEst._ITERATION_NUMBER\n        return estimation\n\n    def _verify_match(self, query, document):\n        content = document.content.lower()\n        if content.find(query.lower()) != -1:\n            return True\n        return False\n\n    def _select_query_document_pair(self, query_pool):\n        list_size = len(query_pool)\n        while True:\n            random_index = random.randrange(list_size)\n            random_query = query_pool[random_index]\n            try:\n                document_list = self.common_api.download(random_query).results\n            except Exception:\n                continue\n            valid_list = []\n            for document in document_list:\n                if self._verify_match(random_query, document):\n                    valid_list.append(document)\n            if len(valid_list) > 0:\n                random_index = random.randrange(len(valid_list))\n                random_document = valid_list[random_index]\n                return [random_query, random_document]\n\n    def _get_matching_query_list(self, document, query_pool):\n        lock = Lock()\n        matching_query_list = []\n\n        def iteration(query):\n            nonlocal document, matching_query_list, lock\n            if self._verify_match(query, document):\n                with lock:\n                    matching_query_list.append(query)\n\n        self.common_api.execute_in_parallel(query_pool, iteration)\n        return matching_query_list\n\n    def _calculate_degree_query(self, query):\n        lock = Lock()\n        count = 0\n\n        def iteration(document):\n            nonlocal query, count, lock\n            if self._verify_match(query, document):\n                with lock:\n                    count += 1\n\n        document_list = self.common_api.download(query).results\n        self.common_api.execute_in_parallel(document_list, iteration)\n        return count\n\n    def _estimate_pool_size(self, query_pool):\n        count = 0\n        query_pool_size = len(query_pool)\n        lock = Lock()\n\n        # noinspection PyUnusedLocal\n        def iteration(iteration_number):\n            nonlocal query_pool, query_pool_size, count, lock\n            random_index = random.randrange(0, query_pool_size)\n            query = query_pool[random_index]\n            document_list = self.common_api.download(query).results\n            for document in document_list:\n                if self._verify_match(query, document):\n                    with lock:\n                        count += 1\n                    return\n\n        self.common_api.execute_in_parallel(range(0, SumEst._POOL_SAMPLE_SIZE), iteration)\n        return len(query_pool) * count / SumEst._POOL_SAMPLE_SIZE\n\n    def _calculate_document_inverse_degree(self, document, query_pool):\n        matching_query_list = self._get_matching_query_list(document, query_pool)\n        i = 1\n        while True:\n            random_index = random.randrange(0, len(matching_query_list))\n            query = matching_query_list[random_index]\n            try:\n                document_list = self.common_api.download(query).results\n            except Exception:\n                continue\n            for item in document_list:\n                if item.identifier == document.identifier:\n                    return i / len(matching_query_list)\n            i += 1\n","repo_name":"fpbfabio/estimation_methods","sub_path":"sum_est.py","file_name":"sum_est.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37562572928","text":"\"\"\"\nProject: Tf motor speed control panel\n@Author: EDVS\n\"\"\"\n\n#%%\n# import libraries \nimport sys\nfrom PyQt5.QtCore import *\nfrom 
PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtSerialPort\n\nimport time\n\n# Author of the library: Stefan Holstein \n# inspired by: https://github.com/Werkov/PyQt4/blob/master/examples/widgets/analogclock.py\nfrom analoggaugewidget import AnalogGaugeWidget\n\nclass Main_App(QMainWindow):\n\n    def __init__(self,parent=None,*args):\n        super(Main_App,self).__init__(parent=parent)\n\n        self.ancho = 450 \n        self.altura = 800\n        self.run = True\n\n        # --- VARIABLES FOR READING THE SENSORS--------\n        self.velocidad_M1 = 0\n        self.velocidad_M2 = 0\n        self.corriente_M1 = 0\n        self.corriente_M2 = 0\n\n        self.setFixedSize(self.ancho,self.altura)\n        self.setWindowTitle(\"panel de control\")\n        self.General = QLabel(self)\n        self.General.setGeometry(0,0,self.ancho,self.altura)\n        self.General.setStyleSheet(\"border-radius: 3px; border: none; background-color: #000000;\")\n\n\n        self.box_Panel = QLabel(self.General)\n        self.tv_tituloPANEL = QLabel(\"PANEL DE CONTROL\",self.box_Panel)\n        \n        self.compotenes = QWidget(self.box_Panel)\n        self.name_dispsitivo = QLabel('Dispositivos:',self.compotenes)\n        \n        \n        self.list_Puertos = QComboBox(self)\n        \n        #----------Panel box---------#\n        self.box_Panel.setGeometry(QRect(10, 10,self.ancho-20, self.altura-20))\n        self.box_Panel.setStyleSheet(\" border-radius: 15px; background-color: #101010;\")\n        \n        #----------Control panel box---------#\n        \n        font = QFont()\n        font.setPointSize(13)\n        font.setBold(True)\n        self.tv_tituloPANEL.setFont(font)\n        self.tv_tituloPANEL.setStyleSheet(\"border: none; color: #C2185B;\")\n        self.tv_tituloPANEL.setGeometry(100, 10, 250, 40)\n\n        ## --- COMPONENTS ---------------\n        self.compotenes.setGeometry(5,50,420,46)\n        self.compotenes.setStyleSheet(\" border-radius: 5px; border:1px solid #607D8B;\")\n\n        #-------devices-----\n        font = QFont()\n        font.setPointSize(11)\n        self.name_dispsitivo.setFont(font)\n        self.name_dispsitivo.setStyleSheet(\" border-radius: 15px; border: none;color:#1565C0\")\n        self.name_dispsitivo.setGeometry(10,3,120,40)\n\n        #---------Port list--------------#\n        font.setPointSize(10)\n        self.list_Puertos.setFont(font)\n        self.list_Puertos.setGeometry(135, 65, 150, 35)\n        ports = [\"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\"]\n        \n        self.list_Puertos.addItems(ports)\n        \n        self.list_Puertos.setStyleSheet(\"QListView {background-color: #B3E5FC;}\")\n        self.list_Puertos.setStyleSheet(\"border-radius: 2px; border:1px solid #1565C0;color:#4CAF50; background-color: transparent;\")\n        \n\n        \n        # ----- button list ports-------------\n        font.setPointSize(11)\n\n        self.button = QPushButton(self.compotenes)\n        self.button.setFont(font)\n        self.button.setMouseTracking(True)\n        self.button.setText(\"Conectar\")\n        self.button.setCursor(Qt.PointingHandCursor)\n        self.button.setAutoDefault(False)\n        self.button.setGeometry(300, 6, 100, 34)\n        self.button.setCheckable(True)\n        self.button.clicked.connect(self.Mensaje)\n        self.button.setStyleSheet(\"background-color: rgb(251, 192, 45); border-radius: 5px; border: 1px solid rgb(100,100,100);\")\n\n        \n\n        # ----------- PROGRESS BAR--------------#\n        \n        self.C_bar = QWidget(self.box_Panel)\n        self.C_bar.setGeometry(20, 100,390,300)\n        self.C_bar.setStyleSheet(\" border-radius: 10px; background-color: black; border:none;\")\n\n        self.frame_1 = QFrame(self.C_bar)\n        self.frame_1.setGeometry(10, 10,160,160)\n        self.frame_1.setFrameShape(QFrame.StyledPanel)\n        self.frame_1.setFrameShadow(QFrame.Raised)\n        self.sensor_M1= AnalogGaugeWidget(self.frame_1)\n        self.sensor_M1.setMinimumSize(QSize(150, 
150))\n        \n        \n        self.frame_2 = QFrame(self.C_bar)\n        self.frame_2.setGeometry(220, 10,160,160)\n        self.frame_2.setFrameShape(QFrame.StyledPanel)\n        \n\n        self.frame_2.setFrameShadow(QFrame.Raised)\n        self.sensor_M2= AnalogGaugeWidget(self.frame_2)\n        self.sensor_M2.setMinimumSize(150, 150)\n        self.sensor_M2.value_min = -60\n        self.sensor_M2.value_max = 60\n        self.sensor_M2.units = \"deg\"\n\n        # ---Label----------\n        self.LedDirecion = QLabel(self.C_bar)\n        self.LedDirecion.setGeometry(175, 20,30,30)\n        self.LedDirecion.setStyleSheet(\" border-radius: 15px; background-color: black; border: 1px solid #CFD8DC;\")\n        \n        # +++++++++++++++++++++++Label for reading the current sensor++++++++++++++++++++++++\n        self.img_LogoCarrito = QLabel(self.C_bar)\n        self.img_LogoUPC = QLabel(self.C_bar)\n        #----------------LOGO UPC---------#\n        self.img_LogoCarrito.setGeometry(10,180, 120, 100)\n        self.img_LogoCarrito.setPixmap(QPixmap(\"imagenes/carrito.png\"))\n        self.img_LogoCarrito.setStyleSheet(\"background-color: black ;border:none;\")\n        \n        self.img_LogoCarrito.setScaledContents(True)\n\n        #----------LOGO AESS---------#\n        self.img_LogoUPC.setGeometry(250, 180, 100, 100)\n        self.img_LogoUPC.setPixmap(QPixmap(\"imagenes/LOGO_UPC.png\"))\n        self.img_LogoUPC.setStyleSheet(\"border:none;\")\n        \n        self.img_LogoUPC.setScaledContents(True)\n\n\n        # ----DEFINE MOTOR 1 SET POINT (right motor)-------\n        \"\"\"self.corr_M1 = QWidget(self.C_bar)\n        self.corr_M1.setGeometry(110,200,165,50)\n        self.corr_M1.setStyleSheet(\" border-radius: 10px; border: 1px solid #FFEE58;\")\n\n        self.L_corrD = QLabel(\"Corriente MI: (mA):\",self.corr_M1)\n        self.L_corrD.setGeometry(5,2,150,20)\n        self.L_corrD.setAlignment(Qt.AlignCenter)\n        self.L_corrD.setStyleSheet(\"border: none; color: #F5F5F5\")\n\n        self.mA_M1 = QLabel(str(self.corriente_M1),self.corr_M1)\n        self.mA_M1.setGeometry(5,24,150,20)\n        self.mA_M1.setAlignment(Qt.AlignCenter)\n        self.mA_M1.setStyleSheet(\"border: none; color: #4CAF50\")\n        font.setPointSize(10)\n        self.mA_M1.setFont(font)\"\"\"\n\n\n        # -----------BUTTONS TO CONTROL DIRECTION AND SPEED-----\n        \n        self.botones = QWidget(self.box_Panel)\n        self.botones.setGeometry(20, 410,390,350)\n        self.botones.setStyleSheet(\" border-radius: 10px; border: none; background-color: black\")\n\n        # ----DEFINE MOTOR 1 SET POINT (right motor)-------\n        self.SP_M1 = 0\n        self.SP_M2 = 0\n\n        self.motor1 = QWidget(self.botones)\n        self.motor1.setGeometry(10,10,120,50)\n        self.motor1.setStyleSheet(\" border-radius: 10px; border: 1px solid #E91E63;\")\n\n        self.L_motorD = QLabel(\"MOTOR VEL (rpm):\",self.motor1)\n        self.L_motorD.setGeometry(5,2,110,20)\n        self.L_motorD.setStyleSheet(\"border: none; color: #F5F5F5\")\n\n        self.RMP_M1 = QLabel(str(self.SP_M1),self.motor1)\n        self.RMP_M1.setGeometry(10,24,100,20)\n        self.RMP_M1.setAlignment(Qt.AlignCenter)\n        self.RMP_M1.setStyleSheet(\"border: none; color: #4CAF50\")\n        font.setPointSize(10)\n        self.RMP_M1.setFont(font)\n\n        \n        # ----DEFINE MOTOR 2 SET POINT (left motor)-------\n        self.motor2 = QWidget(self.botones)\n        self.motor2.setGeometry(260,10,120,50)\n        self.motor2.setStyleSheet(\" border-radius: 10px; border: 1px solid #E91E63;\")\n\n        self.L_motorI = QLabel(\"MOTOR POS (deg):\",self.motor2)\n        self.L_motorI.setGeometry(5,2,110,20)\n        self.L_motorI.setStyleSheet(\"border: none; color: #F5F5F5\")\n\n        self.RMP_M2 = QLabel(str(self.SP_M2),self.motor2)\n        self.RMP_M2.setGeometry(10,24,100,20)\n        self.RMP_M2.setAlignment(Qt.AlignCenter)\n        self.RMP_M2.setStyleSheet(\"border: none; color: #4CAF50\")\n        
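# The shared QFont object is reused at 10pt for this numeric read-out too.\n        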
font.setPointSize(10)\n        self.RMP_M2.setFont(font)\n\n        # *************** BUTTONS ********************\n        h_1 = 80\n        w_1 = 80\n        cx = 160\n        cy = 160\n\n        # --------------- FORWARD BUTTON-------------\n        self.b_upper = QPushButton(self.botones)\n        self.b_upper.setGeometry(cx, cy-h_1, w_1, h_1)\n        self.b_upper.setMouseTracking(True)\n        self.b_upper.setIcon(self.style().standardIcon(getattr(QStyle, \"SP_ArrowUp\")))\n        self.b_upper.setIconSize(QSize(h_1,w_1))\n        self.b_upper.setCursor(Qt.PointingHandCursor)\n        self.b_upper.setAutoDefault(False)\n        \n        #self.b_upper.clicked.connect(self.Mup)\n        self.b_upper.pressed.connect(self.Mup)\n        self.b_upper.released.connect(self.stopCount)\n        self.b_upper.setStyleSheet(\"border-radius: 30px;\")\n        self.b_upper.setCheckable(True)\n\n        # --------------- REVERSE BUTTON------------- \n        self.b_Back = QPushButton(self.botones)\n        self.b_Back.setGeometry(cx, cy+h_1, w_1, h_1)\n        self.b_Back.setIcon(self.style().standardIcon(getattr(QStyle, \"SP_ArrowDown\")))\n        self.b_Back.setMouseTracking(True)\n        self.b_Back.setIconSize(QSize(h_1,w_1))\n        self.b_Back.setCursor(Qt.PointingHandCursor)\n        self.b_Back.setAutoDefault(False)\n\n        #self.b_Back.clicked.connect(self.MDown)\n        self.b_Back.pressed.connect(self.MDown)\n        self.b_Back.released.connect(self.stopCount)\n        self.b_Back.setStyleSheet(\"border-radius: 30px;\")\n\n        # --------------- TURN-LEFT BUTTON------------- \n        self.b_left = QPushButton(self.botones)\n        self.b_left.setGeometry(cx+w_1+10, cy, w_1, h_1)\n        self.b_left.setIcon(self.style().standardIcon(getattr(QStyle, \"SP_ArrowRight\")))\n        self.b_left.setMouseTracking(True)\n        self.b_left.setIconSize(QSize(h_1,w_1))\n        self.b_left.setCursor(Qt.PointingHandCursor)\n        self.b_left.setAutoDefault(False)\n\n        #self.b_left.clicked.connect(self.MLeft)\n        self.b_left.pressed.connect(self.MLeft)\n        self.b_left.released.connect(self.stopCount)\n        self.b_left.setStyleSheet(\"border-radius: 30px;\")\n\n        # --------------- TURN-RIGHT BUTTON------------- \n\n        self.b_right = QPushButton(self.botones)\n        self.b_right.setGeometry(cx-w_1-10, cy, w_1, h_1)\n        self.b_right.setIcon(self.style().standardIcon(getattr(QStyle, \"SP_ArrowLeft\")))\n        self.b_right.setMouseTracking(True)\n        self.b_right.setIconSize(QSize(h_1,w_1))\n        self.b_right.setCursor(Qt.PointingHandCursor)\n        self.b_right.setAutoDefault(False)\n\n        #self.b_right.clicked.connect(self.Mright)\n        self.b_right.pressed.connect(self.Mright)\n        self.b_right.released.connect(self.stopCount)\n        self.b_right.setStyleSheet(\"border-radius: 30px;\")\n\n\n        \n        #-------Timer interrupt every 50 ms to update the set point\n        self.direction =''\n        self.timer1 = QTimer()\n        self.timer1.setInterval(50)\n        self.timer1.timeout.connect(self.contador)\n        self.timer1.stop() # start stopped (static display)\n\n    # ======================= FUNCTIONS ============================\n    \n    def contador(self):\n        if self.direction== 'UP':\n\n            self.SP_M1 = self.SP_M1+1\n            \n            if self.SP_M1>=821:\n                self.SP_M1 =821\n            \n            \n            \n        elif self.direction== 'DW':\n            \n            self.SP_M1 = self.SP_M1-1\n            \n            if self.SP_M1<=-821:\n                self.SP_M1=-821\n            \n        elif self.direction== 'LF':\n            self.SP_M2 = self.SP_M2 +1\n            if self.SP_M2>=45 :\n                self.SP_M2=45\n            \n            \n        elif self.direction== 'RT':\n            self.SP_M2 = self.SP_M2-1\n\n            if self.SP_M2<=-45 :\n                self.SP_M2=-45\n        \n        self.RMP_M1.setText(str(self.SP_M1))\n        self.RMP_M2.setText(str(self.SP_M2))\n        #texto1 = 'SP:' + str(self.SP_M1) + ';'+ str(self.SP_M2)\n        #self.serial.write(texto1.encode())\n\n    def stopCount(self):\n        self.timer1.stop()\n        
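# On button release: stop the 50 ms ramp timer and transmit the final\n        # set point over serial once, rather than on every timer tick.\n        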
self.Write_SetPoint()\n        \n    def Mup(self):\n        self.direction = 'UP' # forward\n        self.timer1.start()\n\n    def MDown(self):\n        self.direction = 'DW' # reverse\n        self.timer1.start()\n\n    def MLeft(self):\n        self.direction = 'LF' # turn left\n        self.timer1.start()\n    \n    def Mright(self):\n        self.direction = 'RT' # turn right\n        self.timer1.start()\n    \n\n    def Mensaje(self,checked):\n        mensaje = QMessageBox(self)\n        mensaje.setWindowTitle(\"Mensaje\")\n        mensaje.setStyleSheet(\"background-color: rgb(38, 198, 218);color: black\")\n        font = QFont()\n        font.setPointSize(10)\n        mensaje.setFont(font)\n\n        #baud_rate = 9600\n        Port = self.list_Puertos.currentText()\n        self.serial = QtSerialPort.QSerialPort(Port,baudRate=9600,readyRead=self.ReadValuesSensor)\n        self.button.setText(\"Desconectar\" if checked else \"Conectar\")\n        if checked:\n            if not self.serial.isOpen():\n                if not self.serial.open(QIODevice.ReadWrite):\n                    self.button.setChecked(False)\n                #self.timer.start()\n                \n\n        \n        else:\n            self.serial.close()\n            #self.timer.stop()\n            self.contador()\n        \n        mensaje.setText(\"La conexion fue realizada con éxito \")\n        mensaje.move(self.pos().x()+50, self.pos().y()+150)\n        mensaje.exec()\n\n    \n    def Write_SetPoint(self):\n        \n        texto1 = 'SP:' + str(int(self.SP_M1*(255/821))) + ';'+ str(int((255/2)*(int(self.SP_M2)/60+1)))\n        self.serial.write(texto1.encode())\n        # print(texto)\n        # SP:-NN;-MN\n        # (255/2)(int(self.SP_M2)/60+1)\n        print(texto1)\n\n    def ReadValuesSensor(self):\n\n        while self.serial.canReadLine():\n            cad = self.serial.readLine().data().decode().strip()\n            print(cad)\n            if \":\" in cad:\n                #print(cad)\n                pos=cad.index(\":\")\n                label=cad[:pos]\n                value=cad[pos+1:]\n                if label == 'velo1':\n                    self.velocidad_M1 = int(value)\n                if label == 'velo2':\n                    self.velocidad_M2 = int(value)\n                \n                if label == 'corr1':\n                    self.corriente_M1 = int(value)\n                if label == 'corr2':\n                    self.corriente_M2 = int(value)\n\n                self.update_data()\n\n    def update_data (self):\n        self.sensor_M1.update_value(abs(int(self.velocidad_M1*(821/1023))))\n        self.sensor_M2.update_value(int(int(self.velocidad_M2)*(120/1024)-60))\n        #(255/2)(int(self.SP_M2)/60+1)\n        self.mA_M1.setText(str(round(self.corriente_M1*(5000000/(1023*752)),2)))\n\n        if (self.SP_M1<0):\n            self.LedDirecion.setStyleSheet(\"background-color: red\")\n        else:\n            self.LedDirecion.setStyleSheet(\"background-color: green\")\n        \n\n\ndef main():\n    app = QApplication(sys.argv)\n    ex = Main_App()\n    ex.show()\n    sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n    main()\n\n\n#%%","repo_name":"dvsivle/proyecto-diseno-o-de-controlador-de-motores","sub_path":"DISEÑO DE CONTROLADOR DE MOTORES/MICROCONTROLADOR-INTERFACE_APP/APP_CONTROL_CARRITO/AppVelocityControl.py","file_name":"AppVelocityControl.py","file_ext":"py","file_size_in_byte":15504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7152198298","text":"#!/usr/bin/env 
python\nimport unittest\nimport gevent\nimport requests\nfrom gevent import monkey\nmonkey.patch_socket()\n\n\nclass TestProxy(unittest.TestCase):\n def test_proxy(self):\n local_proxy = {\"http\": \"http://127.0.0.1:8399\"}\n\n def get():\n r = requests.get(\"http://www.baidu.com\", proxies=local_proxy)\n self.assertEqual(r.status_code, 200)\n\n gevent_list = []\n for i in xrange(5):\n gevent_list.append(gevent.spawn(get))\n gevent.joinall(gevent_list)\n","repo_name":"loadlj/rzproxy","sub_path":"tests/test_proxy.py","file_name":"test_proxy.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"77"} +{"seq_id":"17649401385","text":"import base64\n\nDATABASE_NAME = 'ocean'\nDATABASE_USER = 'oceanuser'\nDATABASE_PASSWORD = 'ocean@123'\nDATABASE_HOST = '127.0.0.1'\nDATABASE_PORT = '5432'\nFRONTEND_URL = 'https://www.testoceanplatform.com/'\nBACKEND_URL = 'https://www.testoceanplatform.com/api/v1/ocean/admin'\n\n# # Xero keys and URL's\n# SIGNUP_SCOPE = 'offline_access+openid+profile+email+accounting.transactions+' \\\n# 'accounting.contacts+accounting.settings+' \\\n# 'accounting.attachments+accounting.reports.read'\n# REDIRECT_URI = 'https://b522-2409-4073-2e93-77db-147a-4fab-45a9-e65.ngrok.io'\n# # REDIRECT_URI='http://localhost:8001/account/token/'\n# CLIENT_ID = '0F28E5B43A7445BCA5DE7B8D2D64A965'\n# CLIENT_SECRET = 'iRxAhGllAUITY-ktKLAY5v37s2IT29NeaBvMo00RSpY8DjRh'\nSTATE = '123'\n#\n\n#\nAUTH_URL_GENERATOR = 'https://login.xero.com/identity/connect/authorize?response_type=code'\nTOKEN_URL = 'https://identity.xero.com/connect/token'\nCONNECTION_URL = 'https://api.xero.com/connections'\nBALANCE_SHEET_URL = 'https://api.xero.com/api.xro/2.0/Reports/BalanceSheet'\nPROFIT_LOSS_URL = 'https://api.xero.com/api.xro/2.0/Reports/ProfitAndLoss'\nBANK_SUMMARY_URL = 'https://api.xero.com/api.xro/2.0/Reports/BankSummary'\nREFRESHING_URL = 'https://identity.xero.com/connect/token'\nUSER_DETAILS = 'https://api.xero.com/api.xro/2.0/Users'\nCONTACT_DETAILS = 'https://api.xero.com/api.xro/2.0/Contacts'\n\n# CLIENT_ID = \"D9B541ECA6E34916AB838BF8E641F8F1\"\n# CLIENT_SECRET = \"phFzovy45PMf0zsEx_Tt7OxoT8Z77Bl45JJbzydz5cGtsn2_\"\n\n\nCLIENT_ID = \"12F7583836C942418227E7EAC79D11D6\"\nCLIENT_SECRET = \"l9llhAyLiv0gViFV4R1A-qMs9BD8ANXsYPbNRUmzASkWqtnO\"\n\nSIGNUP_SCOPE = \"offline_access+openid+profile+email\"\nSIGN_UP_REDIRECT_URI = \"http://localhost:8001/account/xero/callback/\"\n\ntoken_value = CLIENT_ID + ':' + CLIENT_SECRET\nBASIC_TOKEN = base64.urlsafe_b64encode(token_value.encode()).decode()\n\n# AWS SNS keys\n\n# AWS_ACCESS_KEY = \"AKIAVXLDNFMCUBMJOS24\"\n# AWS_SECRET_ACCESS_KEY = \"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\"\n# AWS_TOPIC_ARN = \"arn:aws:sns:ap-south-1:393734859525:OCEAN-TOPIC\"\nREGION_NAME = \"ap-south-1\"\n# AWS_TOPIC_ARN = \"arn:aws:sns:ap-south-1:393734859525:TEST\"\n# AWS_TOPIC_ARN = \"arn:aws:sns:ap-south-1:393734859525:TEST-OTP\"\n\nAWS_ACCESS_KEY = 'AKIAVXLDNFMCR4GVDT3E'\nAWS_SECRET_ACCESS_KEY = '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'\nAWS_TOPIC_ARN = 'arn:aws:sns:ap-south-1:393734859525:OTPCHECK'\n\n# Codat constants\nCODAT_API_KEY = 'NVfJAZiDLd6oZ65LOrKCxp459SBa1s1jb3azmkfd'\nCODAT_AUTHORIZATION_KEY = 'Basic TlZmSkFaaURMZDZvWjY1TE9yS0N4cDQ1OVNCYTFzMWpiM2F6bWtmZA=='\n\nAUTH_PROVIDERS = {\n \"email\": \"email\", \"xero\": \"xero\", \"google\": \"google\"\n}\n# Social Authentication Status\nINITIATED = \"INITIATED\"\nUPDATED_DETAILS = \"UPDATED_DETAILS\"\nCOMPLETED = 
\"COMPLETED\"\nCOMPLETE_PROFILE = \"COMPLETE_PROFILE\"","repo_name":"AKSHAY-KR99/ocean-imp","sub_path":"ocean_dev/ocean_dev/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40854360830","text":"# 摄像头实时人脸识别\n\n# Author: coneypo\n# Blog: http://www.cnblogs.com/AdaminXie\n# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera\n\n# Created at 2018-05-11\n# Updated at 2018-10-29\n\nimport dlib # 人脸处理的库 Dlib\nimport numpy as np # 数据处理的库 numpy\nimport cv2 # 图像处理的库 OpenCv\nimport pandas as pd # 数据处理的库 Pandas\nimport time\nimport os\n\nimport redis\nimport pickle\n\nclass Redis:\n @staticmethod\n def connect():\n r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)\n return r\n\n #将内存数据二进制通过序列号转为文本流,再存入redis\n @staticmethod\n def set_data(r,key,data,ex=None):\n r.set(key,pickle.dumps(data),ex)\n\n # 将文本流从redis中读取并反序列化,返回返回\n @staticmethod\n def get_data(r,key):\n data = r.get(key)\n if data is None:\n return None\n\n return pickle.loads(data)\n\n\n# 人脸识别模型,提取 128D 的特征矢量\n# face recognition model, the object maps human faces into 128D vectors\nfacerec = dlib.face_recognition_model_v1(\"static/data_dlib/dlib_face_recognition_resnet_model_v1.dat\")\n\n\n# 计算两个向量间的欧式距离\ndef return_euclidean_distance(feature_1, feature_2):\n feature_1 = np.array(feature_1)\n feature_2 = np.array(feature_2)\n dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))\n print(\"e_distance: \", dist)\n\n if dist > 0.4:\n return \"diff\"\n else:\n return \"same\"\n\n\n# 处理存放所有人脸特征的 CSV\npath_features_known_csv = \"static/features_all.csv\"\ncsv_rd = pd.read_csv(path_features_known_csv, header=None)\n\n# 存储的特征人脸个数\n# print(csv_rd.shape[0])\n\n# 用来存放所有录入人脸特征的数组\nfeatures_known_arr = []\nfeatures_known_name = []\n\n# 读取已知人脸数据\n# known faces\nfor i in range(csv_rd.shape[0]):\n features_someone_arr = []\n for j in range(0, len(csv_rd.loc[i, :])):\n # for j in range(0, len(csv_rd.ix[i, :])):\n # print(csv_rd.loc[i, :][j])\n features_someone_arr.append(csv_rd.loc[i, :][j])\n # features_someone_arr.append(csv_rd.ix[i, :][j])\n # print(features_someone_arr)\n name = features_someone_arr.pop()\n features_known_name.append(name)\n features_known_arr.append(features_someone_arr)\nprint(\"Faces in Database:\", len(features_known_arr))\n\n# Dlib 检测器和预测器\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor('static/data_dlib/shape_predictor_68_face_landmarks.dat')\n\n# 创建 cv2 摄像头对象\ncap = cv2.VideoCapture(1)\n# cap.open(\"rtsp://admin:Aa123456@192.180.0.180/Streaming/Channels/103\")\n\n# cap.set(propId, value)\n# 设置视频参数,propId 设置的视频参数,value 设置的参数值\ncap.set(3, 100)\n\n# 返回一张图像多张人脸的 128D 特征\ndef get_128d_features(img_gray):\n faces = detector(img_gray, 1)\n if len(faces) != 0:\n face_des = []\n for i in range(len(faces)):\n shape = predictor(img_gray, faces[i])\n face_des.append(facerec.compute_face_descriptor(img_gray, shape))\n else:\n face_des = []\n return face_des\n\n\n# cap.isOpened() 返回 true/false 检查初始化是否成功\nwhile cap.isOpened():\n\n flag, img_rd = cap.read()\n kk = cv2.waitKey(1)\n\n # 取灰度\n img_gray = cv2.cvtColor(img_rd, cv2.COLOR_RGB2GRAY)\n # print(img_gray)\n # 人脸数 faces\n faces = detector(img_gray, 0)\n\n # 待会要写的字体\n font = cv2.FONT_HERSHEY_COMPLEX\n\n cv2.putText(img_rd, \"Press 'q': Quit\", (20, 450), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)\n\n # 存储人脸名字和位置的两个 list\n # list 1 (faces): store the name of faces Jack unknown unknown Mary\n # list 2 
(pos_namelist): store the positions of faces   12,1    1,21    1,13    31,1\n\n    # store the names of all faces\n    pos_namelist = []\n    name_namelist = []\n    features_known_arr2 = []\n\n    other = os.listdir('static/data_faces_from_camera/other')\n    others=[]\n    for i in range(len(other)):\n        if(other[i] != '.DS_Store'):\n            others.append(other[i])\n    now = int(round(time.time(), 2) * 1000)\n    if len(others)>0:\n        last = max(others)[:-4]\n    else:\n        last = 0\n    code = 800\n    # print(last)\n\n    # print(int(last)+code)\n    # press q to quit\n    if kk == ord('q'):\n        break\n    else:\n        # faces detected\n        if len(faces) != 0:\n            # get the features of every face in the current frame and store them in features_cap_arr\n            features_cap_arr = []\n            for i in range(len(faces)):\n                shape = predictor(img_rd, faces[i])\n                features_cap_arr.append(facerec.compute_face_descriptor(img_rd, shape))\n\n            # iterate over all faces captured in the image\n            for k in range(len(faces)):\n                # place the name below the rectangle\n                # determine the coordinates of the name\n                # default everyone to unknown first\n                name_namelist.append(\"unknown\")\n\n                # name coordinates for each captured face\n                pos_namelist.append(tuple([faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))\n                print(features_known_arr)\n                # for a given face, iterate over all stored face features\n                for i in range(len(features_known_arr)):\n                    # features_known_arr2 = features_known_arr\n                    print(\"with person_\", str(i+1), \"the \", end='')\n                    # name = features_known_arr2[i].pop()\n\n                    # print(features_known_arr2[i])\n\n                    # compare this face against all stored face data\n                    compare = return_euclidean_distance(features_cap_arr[k], features_known_arr[i])\n\n                    if compare == \"same\":  # found a similar face\n                        name_namelist[k] = features_known_name[i]\n                    # name_namelist[k] = \"person_\" + str(i+1)\n                    # else: an unfamiliar face; save a snapshot for later processing\n                    else:\n                        print(now)\n                        print(last)\n                        #\n                        if((now) > int(last)+code or (int(last) == 0)):\n                            # reset the face counter\n                            cnt_ss = 0\n                            path_make_dir = \"static/data_faces_from_camera/\"\n                            for kd, d in enumerate(faces):\n                                # compute the rectangle size\n                                height = (d.bottom() - d.top())\n                                width = (d.right() - d.left())\n                                hh = int(height / 2)\n                                ww = int(width / 2)\n                                color_rectangle = (255, 255, 255)\n                                if (d.right() + ww) > 640 or (d.bottom() + hh > 480) or (d.left() - ww < 0) or (\n                                        d.top() - hh < 0):\n                                    cv2.putText(img_rd, \"OUT OF RANGE\", (20, 300), font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)\n                                    save_flag = 1\n                                    color_rectangle = (0, 0, 255)\n                                else:\n                                    save_flag = 1\n                                    color_rectangle = (0, 255, 255)\n                                # create an empty image sized to the face\n                                im_blank = np.zeros((int(height * 2), width * 2, 3), np.uint8)\n                                if save_flag:\n                                    cnt_ss += 1\n                                    # print(cnt_ss)\n                                    if(height * 2<720):\n                                        for ii in range(height * 2):\n                                            if(width * 2<720):\n                                                for jj in range(width * 2):\n                                                    if(d.top() - hh + ii<720):\n                                                        im_blank[ii][jj] = img_rd[d.top() - hh + ii][d.left() - ww + jj]\n                                    cv2.imwrite(path_make_dir + \"/other/\" + str(now) + \".jpg\", im_blank)\n                                    print(\"写入本地:\", path_make_dir + \"/other/\" + str(now) + \".jpg\")\n\n            # rectangles\n            for kk, d in enumerate(faces):\n                # print(d.left(), d.top())\n                # print(d.right(), d.bottom())\n                # draw the rectangle\n                # if(name_namelist[kk]!='unknown'):\n                #     cv2.rectangle(img_rd, tuple([d.left(), d.top()]), tuple([d.right(), d.bottom()]), (0, 255, 255), 2)\n                # else:\n                cv2.rectangle(img_rd, tuple([d.left(), d.top()]), tuple([d.right(), d.bottom()]), (0, 0, 255),\n                                  2)\n                # cv2.rectangle(img_rd,\n                #               tuple([d.left() - ww, d.top() - hh]),\n                #               tuple([d.right() + ww, d.bottom() + hh]),\n                #               color_rectangle, 2)\n\n            # write the face name below the face rectangle\n            for i in range(len(faces)):\n                cv2.putText(img_rd, name_namelist[i], pos_namelist[i], font, 0.8, (0, 255, 255), 1, cv2.LINE_AA)\n\n    # store the recognized faces in Redis\n    # r = Redis.connect()\n    # if(len(name_namelist)>0):\n    #     Redis.set_data(r, 'name', name_namelist)\n    print(\"Name list now:\", name_namelist, \"\\n\")\n\n        
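# Draw the HUD text overlays before handing the frame to imshow().\n        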
cv2.putText(img_rd, \"Face Recognition\", (20, 40), font, 1, (0, 0, 0), 1, cv2.LINE_AA)\n cv2.putText(img_rd, \"Faces: \" + str(len(faces)), (20, 100), font, 1, (0, 0, 255), 1, cv2.LINE_AA)\n cv2.imshow(\"camera\", img_rd)\n# 释放摄像头\ncap.release()\n\n# 删除建立的窗口\ncv2.destroyAllWindows()\n","repo_name":"liu279/face_recognize","sub_path":"face_reco_from_camera.py","file_name":"face_reco_from_camera.py","file_ext":"py","file_size_in_byte":10064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18141561032","text":"\ndef sqrt(a, threshold = 0.00000001, maxIter = 50):\n\t'''Calculate the square root of 'a' using newtons method'''\n\tXi = 1.0 #a starting guess\n\tDelta = 1.0\n\tcnt = 1\n\twhile Delta > threshold and cnt <= maxIter:\n\t\tnewXi = (Xi + a / Xi) / 2\n\t\tDelta = abs(newXi - Xi)\n\t\tXi = newXi\n\t\tcnt += 1\n\treturn Xi\n\n","repo_name":"Gholtes/Algorithms","sub_path":"sqrt.py","file_name":"sqrt.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35520359899","text":"class Nstacks:\n\n def __init__(self,k,n):\n self.k=k # #of stacks\n self.n=n # size of all stacks\n\n self.arr=[0]*self.n #initialise and arr with k stacks\n\n self.top=[-1]*self.k # all k stacks are empty\n\n self.free=0 # top of free stack\\\n\n self.next= [i+1 for i in range(self.n)] # point to next ele\n self.next[self.n -1]=-1 # point till last ele\n \n def isEmpty(self,sn):\n return self.top[sn]==-1\n\n def isFull(self):\n return self.free ==-1\n\n def push(self,item,sn):\n if self.isFull():\n print(\"STACK OVERFLOWN\")\n return\n \n insert_at=self.free #insert at the first free pos\n\n self.free=self.next[self.free] # move free pos\n self.arr[insert_at]=item #insert the item in free pos\n self.next[insert_at]=self.top[sn] # move top pos\n self.top[sn]=insert_at #new top\n\n def pop(self,sn):\n if self.isEmpty(sn):\n print(\"STACK UNDERFLOWN\")\n return None\n \n topOfStack=self.top[sn] # item at top of stack\n self.top[sn]=self.next[self.top[sn]] # new top\n self.next[topOfStack] #old top is moved to free pos\n self.free=topOfStack\n\n return self.arr[topOfStack]\n\n def printStack(self,sn):\n topIndex=self.top[sn]\n while topIndex!=-1:\n print(self.arr[topIndex])\n topIndex=self.next[topIndex]\n\n def printAll(self):\n \n for i in range(self.n):\n print(self.arr[i])\n\n\nif __name__=='__main__':\n\n NS=Nstacks(4,16)\n\n NS.push(1000,0)\n NS.push(800,0)\n NS.push(900,0)\n NS.push(700,0)\n\n NS.push(121,1)\n NS.push(189,1)\n NS.push(165,1)\n NS.push(132,1)\n\n NS.push(265,2)\n NS.push(244,2)\n NS.push(211,2)\n NS.push(278,2)\n\n NS.push(369,3)\n NS.push(344,3)\n NS.push(311,3)\n NS.push(355,3)\n\n\n print(\"*\"*10) \n print(\"*\"*10)\n NS.printAll()\n print(\"*\"*10)\n print(\"*\"*10)\n\n print(\"\")\n print(\"\")\n\n print(\"*\"*10)\n NS.printStack(0)\n print(\"*\"*10)\n NS.printStack(1)\n print(\"*\"*10)\n NS.printStack(2)\n print(\"*\"*10)\n NS.printStack(3)\n print(\"*\"*10)\n\n\n print(\"popped from 0 \",NS.pop(0))\n print(\"popped from 1 \",NS.pop(1))\n print(\"popped from 2 \",NS.pop(2))\n print(\"popped from 3 \",NS.pop(3))\n\n\n print(\"*\"*10)\n NS.printAll()\n","repo_name":"Abrar-04/DSA-Practice","sub_path":"06.Stacks/450.Stacks/Nstacks.py","file_name":"Nstacks.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32584052738","text":"import 
logging\nimport json\nfrom datetime import datetime\n\nimport ckan.lib.helpers as h\nimport ckan.model as model\nimport ckan.plugins as p\nfrom ckan.plugins import implements, toolkit\n# NOTE: the two imports below are assumed (CKAN 2.x-era APIs) so that the names\n# used in _create_task resolve; adjust them to the target CKAN version if needed.\nfrom ckan.lib.celery_app import celery\nfrom ckan.lib.dictization.model_dictize import resource_dictize\nfrom ckanext.linkfinder.model import make_uuid\nfrom ckan.logic import get_action\n\nlog = logging.getLogger('ckanext.linkfinder')\n\nclass LinkFinderPlugin(p.SingletonPlugin):\n    implements(p.IConfigurer, inherit=True)\n    implements(p.ITemplateHelpers, inherit=True)\n    implements(p.IDomainObjectModification, inherit=True)\n\n    def update_config(self, config):\n        toolkit.add_template_directory(config, 'templates')\n        toolkit.add_public_directory(config, 'public')\n\n    def get_helpers(self):\n        \"\"\"\n        A dictionary of extra helpers that will be available to provide\n        ga report info to templates.\n        \"\"\"\n        return {\n            'linkfinder_installed': lambda: True,\n        }\n\n    def notify(self, entity, operation=None):\n        \"\"\"\n        if not isinstance(entity, model.Resource):\n            return\n\n        if operation:\n            if operation == model.DomainObjectOperation.new:\n                self._create_task(entity)\n        else:\n            # if operation is None, resource URL has been changed, as the\n            # notify function in IResourceUrlChange only takes 1 parameter\n            self._create_task(entity)\n        \"\"\"\n\n    def _create_task(self, resource):\n        user = get_action('get_site_user')({'model': model,\n                                            'ignore_auth': True,\n                                            'defer_commit': True}, {})\n        context = json.dumps({\n            'site_url': self.site_url,\n            'apikey': user.get('apikey')\n        })\n        data = json.dumps(resource_dictize(resource, {'model': model}))\n\n        task_id = make_uuid()\n        task_status = {\n            'entity_id': resource.id,\n            'entity_type': u'resource',\n            'task_type': u'qa',\n            'key': u'celery_task_id',\n            'value': task_id,\n            'error': u'',\n            'last_updated': datetime.now().isoformat()\n        }\n        task_context = {\n            'model': model,\n            'user': user.get('name'),\n        }\n\n        get_action('task_status_update')(task_context, task_status)\n        celery.send_task(\"qa.update\", args=[context, data], task_id=task_id)\n","repo_name":"datagovuk/ckanext-linkfinder","sub_path":"ckanext/linkfinder/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"71874049529","text":"import errtee\nimport re, urllib.request\nimport json\nimport os\n\n\"\"\"\nReads the list of files in http://www.apache.org/dist/\n\nCreates:\n../../site/json/foundation/releases.json\nFormat:\n{ top-level dir: { release-id: date}, ... }\n\nThe release id is derived from the filename by removing common suffixes etc, see cleanFilename()\nThe date comes from the first entry\n\n../../site/json/foundation/releases-files.json\nFormat:\n{ top-level dir: { release-id: [list of files for that release-id]}, ... 
}\n\nTODO: it would probably be more efficient to parse the output of\nsvn ls -R https://dist.apache.org/repos/dist/release/\nCould cache the output based on the last changed date\n\nOr use an rsync listing:\nrsync --list-only -r rsync.apache.org::apache-dist\nNote that rsync excludes hashes, sigs and KEYS files; however they are not needed here.\n\"\"\"\n\nreleases = {}\nfiles = {}\nmainurl = \"http://www.apache.org/dist/\"\n\nx = 0\n\n# don't try to maintain history for the moment...\n#try:\n#    with open(\"../../site/json/foundation/releases.json\") as f:\n#        releases = json.loads(f.read())\n#        f.close()\n#except Exception as err:\n#    print(\"Could not read releases.json, assuming blank slate\")\n\ndef getDirList(url):\n    try:\n        data = urllib.request.urlopen(url).read().decode('utf-8')\n        # match the entry name, the trailing '/' directory marker and the date\n        # from each line of an httpd index page\n        for entry, xd, xdate in re.findall(r\"<a href=\\\"([^\\\"]+?)(/?)\\\">.+</a>\\s+(\\d\\d\\d\\d-\\d\\d-\\d\\d)\", data, re.MULTILINE | re.UNICODE):\n            yield(entry, xdate, xd)\n    except:\n        pass\n\ndef cleanFilename(filename):\n    \"\"\"\n    Attempts to determine the release id to which a file belongs\n    Strips extensions such as .tgz etc, then suffixes such as -sources\n    Replaces qualifiers such as -assembly-, -parent- by '-'\n    Returns the simplified filename .\n    \"\"\"\n    for suffix in ['.tgz', '.gz', '.bz2', '.xz', '.zip', '.rar', '.tar', 'tar', '.deb', '.rpm', '.dmg', '.egg', '.gem', '.pom', '.war', '.exe',\n                   '-scala2.11', '-cdh4', '-hadoop1', '-hadoop2', '-hadoop2.3', '-hadoop2.4', '-all',\n                   '-src', '_src', '.src', '-sources', '_sources', '-source', '-bin', '-dist',\n                   '-source-release', '-source-relase', '-apidocs', '-javadocs', '-javadoc', '_javadoc', '-tests', '-test', '-debug', '-uber',\n                   '-macosx', '-distribution', '-example', '-manual', '-native', '-win', '-win32', '-linux', '-pack', '-packaged', '-lib', '-current', '-embedded',\n                   '-py', '-py2', '-py2.6', '-py2.7', '-no', 'unix-distro', 'windows-distro', 'with', '-dep', '-standalone', '-war', '-webapp', '-dom', '-om', '-manual', '-site',\n                   '-32bit', '-64bit', '-amd64', '-i386', '_i386', '.i386', '-x86_64', '-minimal', '-jettyconfig', '-py2.py3-none-any', 'newkey', 'oldkey', 'jars', '-jre13', '-hadoop1', '-hadoop2', '-project',\n                   '-with-dependencies', '-client', '-server', '-doc', '-docs', 'server-webapps', '-full', '-all', '-standard', '-for-javaee', '-for-tomcat',\n                   'hadoop1-scala2', '-deployer', '-fulldocs', '-windows-i64', '-windows-x64', '-embed', '-apps', '-app', '-ref', '-installer', '-bundle', '-java']:\n        if filename[len(filename)-len(suffix):] == suffix:\n            filename = filename[0:len(filename)-len(suffix)]\n    for repl in ['-assembly-', '-minimal-', '-doc-', '-src-', '-webapp-', '-standalone-', '-parent-', '-project-', '-win32-']:\n        filename = filename.replace(repl, '-')\n    return filename\n\ndef cleanReleases(committeeId):\n    if len(releases[committeeId]) == 0:\n        del releases[committeeId]\n        del files[committeeId]\n\ndef parseDir(committeeId, path):\n    print(\" %s...\" % path)\n    if len(path) > 100:\n        print(\"WARN too long path: recursion?\")\n        return\n    for f, d, xd in getDirList(\"%s/%s\" % (mainurl, path)):\n        if xd:\n            if (\"/%s\" % f) not in path and f.lower() not in ['binaries', 'repos', 'updatesite', 'current', 'stable', 'stable1', 'stable2', 'binary', 'notes', 'doc', 'eclipse', 'patches', 'docs', 'changes', 'features', 'tmp', 'cpp', 'php', 'ruby', 'py', 'py3', 'issuesfixed', 'images', 'styles', 'wikipages']:\n                parseDir(committeeId, \"%s/%s\" % (path, f))\n        # Note: this eliminates binary archives; not sure whether that is intentional or not.\n        elif not 
re.search(r\"(MD5SUM|SHA1SUM|\\.md5|\\.mds|\\.sh1|\\.sh2|\\.sha|\\.asc|\\.sig|\\.bin|\\.pom|\\.jar|\\.whl|\\.pdf|\\.xml|\\.xsd|\\.html|\\.txt|\\.cfg|\\.ish|\\.pl|RELEASE.NOTES|LICENSE|KEYS|CHANGELOG|NOTICE|MANIFEST|Changes|readme|x86|amd64|-manual\\.|-docs\\.|-docs-|-doc-|Announcement|current|-deps|-dependencies|binary|-bin-|-bin\\.|-javadoc-|-distro|rat_report)\", f, flags=re.IGNORECASE):\n filename = cleanFilename(f)\n if len(filename) > 1:\n if filename not in releases[committeeId]:\n releases[committeeId][filename] = d\n files[committeeId][filename] = []\n print(\" - %s\\t\\t\\t%s\" % (filename, f))\n files[committeeId][filename].append(\"%s/%s\" % (path, f))\n\n\nfor committeeId, d, xdir in getDirList(mainurl):\n if committeeId != 'incubator':\n if committeeId not in ['xml', 'zzz', 'maven-repository']:\n print(\"Parsing /dist/%s content:\" % committeeId)\n releases[committeeId] = releases[committeeId] if committeeId in releases else {}\n files[committeeId] = {}\n parseDir(committeeId, committeeId)\n cleanReleases(committeeId)\n else:\n for podling, d, xd in getDirList(\"%s/incubator/\" % mainurl):\n print(\"Parsing /dist/incubator-%s content:\" % podling)\n committeeId = \"incubator-%s\" % podling\n releases[committeeId] = releases[committeeId] if committeeId in releases else {}\n files[committeeId] = {}\n parseDir(committeeId, \"incubator/%s\" % podling)\n cleanReleases(committeeId)\n\nprint(\"Writing releases.json\")\nwith open(\"../../site/json/foundation/releases.json\", \"w\") as f:\n json.dump(releases, f, sort_keys=True, indent=0)\n f.close()\nwith open(\"../../site/json/foundation/releases-files.json\", \"w\") as f:\n json.dump(files, f, sort_keys=True, indent=0)\n f.close()\n\nprint(\"All done!\")","repo_name":"ep-infosec/33_apache_comdev-projects","sub_path":"scripts/cronjobs/parsereleases.py","file_name":"parsereleases.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27238118836","text":"import itertools\nimport operator\nimport random\nimport numpy as np\nimport pickle\nimport os\n\nfrom sklearn.metrics import accuracy_score\nfrom deap import gp\nfrom deap import base\nfrom deap import creator\n\ndef get_args():\n str = \"\\n************************************************************\\n\"\n str += \"* Welcome to Copy Task champion arena *\\n\"\n str += \"* Please provide the following arguments comma delimited *\\n\"\n str += \"* Type of test to run options are (required): *\\n\"\n str += \"* - std -> to run the standard champion *\\n\"\n str += \"* - mul -> to run the multiplication champion *\\n\"\n str += \"* - mod -> to run the modified champion *\\n\"\n str += \"* - log -> to run the logical champion *\\n\"\n str += \"* Depth of sequence i.e. number of 1/-1's (required): *\\n\"\n str += \"* - options are: 4, 5, 6, 15, 21 *\\n\"\n str += \"* Range of noise to use (required): *\\n\"\n str += \"* - options are: 0, 0.5, 0.25, 0.125 *\\n\"\n str += \"* Which champion to load (optional): *\\n\"\n str += \"* - example 'champion_1' .... 
'champion_50' *\\n\"\n    str += \"* Number of tests to run (optional): *\\n\"\n    str += \"* - integer represents the number of tests *\\n\"\n    str += \"* Length of Noise in sequence (optional): *\\n\"\n    str += \"* - integer represents the length of noise *\\n\"\n    str += \"************************************************************\\n\"\n    print(str)\n    \n    options = (\"std\", \"mul\",\"mod\",\"log\")\n    while True:\n        try:\n            input_args = input(\"Choose your champion:\\n\").strip().lower().split(\",\")\n\n            if len(input_args) < 2:\n                raise ValueError\n\n            if len(input_args)>0 and input_args[0].strip() not in options:\n                raise ValueError\n\n            if len(input_args)>1 and int(input_args[1].strip()) not in (4, 5, 6, 15, 21):\n                raise ValueError\n\n            # Everything is fine \n            break\n\n        except ValueError:\n            print(\"Sorry your entry is wrong, try again!\")\n\n    # Reading Type and Depth Values\n    type = input_args[0]\n    depth = int(input_args[1])\n\n    # Default Range Value if not passed\n    range_val = 0\n    if type in ('mod'):\n        if len(input_args) > 2:\n            range_val = float(input_args[2])\n        else:\n            range_val = 0.5\n\n    # Default Champion if not passed\n    champion = \"champion_1\"\n    if len(input_args) > 3:\n        champion = input_args[3]\n    \n    # Default Number of tests if not passed\n    num_test = 50\n    if len(input_args) > 4:\n        num_test = int(input_args[4])\n\n    # Default Noise and generalize\n    noise, generalize = 10, True\n    if len(input_args) > 5:\n        noise = int(input_args[5])\n        generalize = False\n\n    return type, depth, range_val, champion, num_test, generalize, noise\n\n'''\nProblem setup\n'''\n\n# Generate Random Data\ndef generate_data(noise, depth, range_val, num_tests, generalize):\n    retval = []\n    for _ in range(num_tests):\n        sequence = []\n        sequence.append(random.choice((-1.0, 1.0)))\n        # keep the user-supplied noise length unless generalizing, in which\n        # case a random length is drawn per test\n        noise = noise if not generalize else random.randint(10, 20)\n        for _ in range(depth - 1):\n            sequence.extend([random.uniform(-range_val,range_val) for _ in range(noise)])\n            sequence.append(random.choice((-1.0, 1.0)))\n        retval.append(sequence)\n    return retval\n\n# Generate Classification based on dataset\ndef generate_output(dataset, type):\n    retval = []\n    for i in range(num_tests):\n        data = dataset[i]\n        sequence = []\n        counter = 0\n        for el in data:\n            if type == 'mod':\n                if el == 1 or el == -1:\n                    counter += el\n            else:\n                counter += el\n            sequence.append(-1 if counter < 0 else 1)\n        retval.append(sequence)\n    return retval\n\n# Generate expected GP Action based on Dataset\ndef generate_action(dataset, type):\n    retval = []\n    for i in range(num_tests):\n        data = dataset[i]\n        sequence = []\n        MEMORY = []\n        if type == 'mod':\n            for el in data:\n                if el != 1 and el != -1:\n                    sequence.append(2)\n                else:\n                    if len(MEMORY) == 0 or MEMORY[len(MEMORY)-1] == el:\n                        sequence.append(0)\n                        MEMORY.append(el)\n                    else:\n                        sequence.append(1)\n                        MEMORY.pop()\n        else:\n            for el in data:\n                if el == 0:\n                    sequence.append(2)\n                else:\n                    if len(MEMORY) == 0 or MEMORY[len(MEMORY)-1] == el:\n                        sequence.append(0)\n                        MEMORY.append(el)\n                    else:\n                        sequence.append(1)\n                        MEMORY.pop()\n        retval.append(sequence)\n    return retval\n\n'''\n Beginning of DEAP Structure\n'''\n\n# Define a protected division function\ndef protected_div(left, right):\n    try:\n        return left / right\n    except ZeroDivisionError:\n        return 1\n\n# Define a new if-then-else function\ndef if_then_else(input, output1, output2):\n    if input:\n        return output1\n    else:\n        return output2\n\ndef create_gp(type):\n    # define a new primitive set for strongly typed GP\n    pset = gp.PrimitiveSetTyped(\"MAIN\", itertools.repeat(float, 2), float)\n\n    if type in 
(\"std\", \"vec\", \"mod\"):\n pset.addPrimitive(operator.add, [float, float], float)\n pset.addPrimitive(operator.sub, [float, float], float)\n pset.addPrimitive(protected_div, [float, float], float)\n\n if type == \"mul\":\n pset.addPrimitive(operator.add, [float, float], float)\n pset.addPrimitive(operator.sub, [float, float], float)\n pset.addPrimitive(operator.mul, [float, float], float)\n\n if type == \"log\":\n # boolean operators\n pset.addPrimitive(operator.and_, [bool, bool], bool)\n pset.addPrimitive(operator.or_, [bool, bool], bool)\n pset.addPrimitive(operator.not_, [bool], bool)\n pset.addPrimitive(operator.mul, [float, float], float)\n pset.addPrimitive(operator.lt, [float, float], bool)\n pset.addPrimitive(operator.eq, [float, float], bool)\n pset.addPrimitive(protected_div, [float, float], float)\n pset.addPrimitive(if_then_else, [bool, float, float], float)\n\n # terminals\n pset.addEphemeralConstant(\"rand100\", lambda: random.random() * 100, float)\n pset.addTerminal(False, bool)\n pset.addTerminal(True, bool)\n\n creator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\n creator.create(\"Individual\", gp.PrimitiveTree, fitness=creator.FitnessMax)\n\n toolbox = base.Toolbox()\n toolbox.register(\"expr\", gp.genHalfAndHalf, pset=pset, min_=1, max_=2)\n toolbox.register(\"compile\", gp.compile, pset=pset)\n return toolbox\n\nif __name__ == \"__main__\":\n\n # Const variables\n local_dir = os.path.dirname(__file__)\n champ_path = os.path.join(local_dir, 'champions/')\n\n # Get input from terminal\n type, depth, range_val, champion, num_tests, generalize, noise = get_args()\n\n # Generate Data\n data_validation = generate_data(noise, depth, range_val, num_tests, generalize)\n labels_validation = generate_output(data_validation, type)\n actions_validation = generate_action(data_validation, type)\n \n # Create GP\n toolbox = create_gp(type)\n \n # Load Champion\n champ_name = champ_path + str(depth) + '_champions_' + type\n with open(champ_name, 'rb') as f:\n champions = pickle.load(f)\n print(\"loaded champions\")\n\n hof1, hof2, hof3, hof4 = champions[champion]\n\n \n # Running Test on unseen data and checking results\n print(\"\\n==================\")\n print(\"Begin Testing ....\")\n print(\"==================\\n\")\n\n # Transform the tree expression in a callable function\n tree1 = toolbox.compile(expr=hof1)\n tree2 = toolbox.compile(expr=hof2)\n tree3 = toolbox.compile(expr=hof3)\n tree4 = toolbox.compile(expr=hof4)\n\n # Evaluate the sum of correctly identified\n predictions, predict_actions = [],[]\n # Evaluate the sum of correctly identified\n for i in range(num_tests):\n data = data_validation[i]\n MEMORY, classification, actions = [], [], []\n counter = 0\n length = len(data)\n for j in range(length):\n # If stack is empty then 0, else the value on top of stack\n stack_output = MEMORY[counter - 1] if counter > 0 else 0\n\n arg1 = tree1(data[j],stack_output)\n arg2 = tree2(data[j],stack_output)\n arg3 = tree3(data[j],stack_output)\n arg4 = tree4(data[j],stack_output)\n pos = np.argmax([arg1, arg2, arg3, arg4])\n\n # Action has been decided\n temp = 1 if stack_output >= 0 else -1\n actions.append(pos)\n if pos == 0:\n MEMORY.append(data[j])\n temp = data[j]\n counter += 1\n elif pos == 1:\n MEMORY.pop()\n counter -= 1\n stack_output = MEMORY[counter - 1] if counter > 0 else 0\n temp = 1 if stack_output >= 0 else -1\n else:\n temp = 1 if stack_output >= 0 else -1\n \n # Add to classification\n classification.append(temp)\n\n predictions.append(classification)\n 
predict_actions.append(actions)\n\n    # Evaluate predictions\n    total_accuracy = 0\n    for i in range(num_tests):\n        accuracy = accuracy_score(labels_validation[i], predictions[i])\n        print(\"Test #{} Accuracy: {}\".format(i, accuracy))\n        total_accuracy += accuracy\n    \n    print(\"------------------------\")\n    print(\"Total Accuracy: {}\".format(total_accuracy/num_tests))","repo_name":"Mihyar-30614/Genetic-Programming-Benchmarking-Deep-Memory-Tasks","sub_path":"DEAP/Sequence Classification/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":10079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"11584406338","text":"'''\nMembers\nEnrique Emanuel Rezende Tavares da Silva - 11796090\nGuilherme Dias Jimenes - 11911021\nRonald Cosmo de Sousa - 11909783\n'''\n\nimport csv\nimport re\nimport random\nfrom copy import deepcopy\nfrom math import sqrt\n\n\ndef knn(training_data:list[dict], query_point:dict, num_of_neighbors:int):\n\t'''\n\tClassifies query_point by majority vote among its num_of_neighbors nearest\n\tneighbors in training_data and returns the predicted class (\"+\" or \"-\")\n\t'''\n\t# Returns a tuple where the first element is the Euclidean distance between the query point\n\t# and a training point and the second element is the index of the training point in the dataset\n\tdef make_distance_tuple(y, ind_y) : return (euclidian_dist(query_point, y), ind_y)\n\n\tdistances_from_query_point = list(\n\t\tmap(\n\t\t\tmake_distance_tuple,\n\t\t\ttraining_data,\n\t\t\trange(0, len(training_data))\n\t\t)\n\t)\n\n\tdef get_distance(dist_tuple) : return dist_tuple[0]\n\tdef get_point_index(dist_tuple) : return dist_tuple[1]\n\tdef get_point_from_dataset(point_index) : return training_data[point_index]\n\tdef get_class_of_point(point) : return point[\"a16\"]\n\n\tsorted_distances = sorted(distances_from_query_point, key=get_distance)\n\tk_nearest_neighbors_index = map(get_point_index, sorted_distances[:num_of_neighbors])\n\tk_nearest_neighbors = map(get_point_from_dataset, k_nearest_neighbors_index)\n\n\tknn_classes = list(map(get_class_of_point, k_nearest_neighbors))\n\tplus_class_occurrences = knn_classes.count(\"+\")\n\tminus_class_occurrences = knn_classes.count(\"-\")\n\treturn \"+\" if plus_class_occurrences > minus_class_occurrences else \"-\"\n\n\n'''\nCalculates the Euclidean distance between two vectors (`a` and `b`)\n'''\ndef euclidian_dist(a:dict, b:dict) -> float:\n\tcols = list(a)\n\t# Removing class column because its value is a string\n\tcols.remove(\"a16\")\n\tsum_of_squared_diffs = 0\n\tfor col in cols:\n\t\tcomp_a = a[col]\n\t\tcomp_b = b[col]\n\t\tsum_of_squared_diffs = sum_of_squared_diffs + ( (comp_a - comp_b) ** 2 )\n\treturn sqrt(sum_of_squared_diffs)\n\n\n'''\nReturns a list of dicts corresponding to the dataset.\n\nFor example, from the following .csv:\n\n\t\tfirst_name,last_name\n\t\tJohn, Cleese\n\t\tTerry, Gilliam\n\nthe first row of the dataset would look like this:\n\n\t{'first_name': 'John', 'last_name': 'Cleese'}\n\nAnd the whole dataset would look like this:\n\n\t[\n\t\t{'first_name': 'John', 'last_name': 'Cleese'} ,\n\t\t{'first_name': 'Terry', 'last_name': 'Gilliam'}\n\t]\n\n'''\ndef read_dataset():\n\tprint(\"Reading dataset `Credit Approval`\")\n\twith open('data/crx.data', 'r') as file:\n\t\treader = csv.DictReader(file)\n\t\tdata = []\n\t\tfor row in reader:\n\t\t\tdata.append(row)\n\treturn data\n\n'''\nRemove the dataset's NA (missing) values by checking which values are equal to `?`.\n'''\ndef remove_null(dataset):\n    dado_limpo = []\n    for row in dataset:\n        # Check if the line contains any value with \"?\"\n        if re.search(r'\\?', 
str(row.values())):\n continue\n dado_limpo.append(row)\n return dado_limpo\n\n\"\"\"\nConvert categorical attributes into dummy variables (one-hot encoding)\n\"\"\"\ndef one_hot_encoding(dataset: dict, column: str):\n\n\tcategories = set()\n\n\t# Discover categories\n\tfor row in dataset:\n\t\tif row[column] not in categories:\n\t\t\tcategories = categories | { row[column] }\n\n\t# create new column for each category discovered\n\tfor row in dataset:\n\t\tfor category in categories:\n\t\t\tnew_col_name = f\"{column}_{category}\"\n\t\t\tvalue = row[column]\n\t\t\trow[new_col_name] = int(value == category)\n\t\trow.pop(column)\n\n\treturn dataset\n\n\ndef one_hot_encode_all_columns(dataset) :\n\tprint(\"One-hot encoding columns a1, a4, a5, a6, a7, a10, a12, a13\")\n\tto_encode = deepcopy(dataset)\n\tone_hot_encoding(to_encode, \"a1\")\n\tone_hot_encoding(to_encode, \"a4\")\n\tone_hot_encoding(to_encode, \"a5\")\n\tone_hot_encoding(to_encode, \"a6\")\n\tone_hot_encoding(to_encode, \"a7\")\n\tone_hot_encoding(to_encode, \"a9\")\n\tone_hot_encoding(to_encode, \"a10\")\n\tone_hot_encoding(to_encode, \"a12\")\n\tone_hot_encoding(to_encode, \"a13\")\n\treturn to_encode\n\n\n\n\"\"\"\nDivides the data into a set of training data (70%) and a set of query data (30%).\nReturns a tuple (training_data, query_data)\n\"\"\"\ndef divide_data(dataset):\n\tprint(\"Dividing data intro training and query sets\")\n\tdataset_size = len(dataset)\n\ttarget_training_quantity = int( 0.7 * dataset_size )\n\ttraining_data = random.choices(dataset, k=target_training_quantity)\n\tquery_data = [row for row in dataset if row not in training_data]\n\treturn training_data, query_data\n\n\n\"\"\"\nNormalizes the dataset by diving each value on a column by the maximum value of\nthat column found in the dataset\n\"\"\"\ndef normalize_dataset(dataset):\n\tprint(\"Normalizing scale of columns with continuous numbers\")\n\tdataset = remove_null(dataset)\n\tmax_values = {}\n\tfor row in dataset:\n\t\tfor key, value in row.items():\n\t\t\ttry:\n\t\t\t\tvalue = float(value) #converts\n\t\t\texcept ValueError: #in case of conversion failure\n\t\t\t\tcontinue\n\t\t\tif key not in max_values or value > max_values[key]:\n\t\t\t\tmax_values[key] = value\n\n\tfor row in dataset:\n\t\tfor key, value in row.items():\n\t\t\ttry:\n\t\t\t\tvalue = float(value)\n\t\t\texcept ValueError:\n\t\t\t\tcontinue\n\t\t\trow[key] = value / max_values[key] #division\n\n\treturn dataset\n\n\"\"\"\nCalculates the accuracy of running k-NN\n\"\"\"\ndef accuracy(points, predicted_values) :\n\tprint(\"Calculating accuracy of k-NN implemented\")\n\tnum_of_points = len(points)\n\ttrue_predictions = 0\n\n\tfor point, prediction in zip(points, predicted_values):\n\t\t# a16 is the column name which contain the classes categories.\n\t\tif point[\"a16\"] == prediction:\n\t\t\ttrue_predictions = true_predictions + 1\n\n\treturn true_predictions / num_of_points\n\n\"\"\"\nRuns k-NN on the `Credit Approval` dataset, making sure that before running:\n\t1 - All null data is removed;\n\t2 - Categorical data is one-hot encoded\n\t3 - All numeric values are normalized\n\t4 - k-NN is trained on 70% of the full dataset\n\nAfter running the algorithm, it outputs to STDOUT the accuracy obtained from querying 30% of the data against the\ntraining data and comparing expected classes X predicted classes.\n\"\"\"\ndef main() :\n\tneighbors = 100\n\tdata = read_dataset()\n\tnormalized_data = normalize_dataset(data)\n\tencoded_data = 
one_hot_encode_all_columns(normalized_data)\n\ttraining_data, query_data = divide_data(encoded_data)\n\n\tpredictions = []\n\tfor query_point in query_data:\n\t\tpredicted_class = knn(training_data, query_point, neighbors)\n\t\tpredictions.append(predicted_class)\n\taccuracy_knn = accuracy(query_data, predictions)\n\n\tprint(f\"Accuracy of KNN with k={neighbors} is {accuracy_knn}\")\n\n\nif (__name__ == \"__main__\") : main()\n","repo_name":"Oiapokxui/tarefas-ia","sub_path":"tarefa1/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"11948637723","text":"import PyPDF2\nimport sys\n# combine the 3 pdfs\n# pdf.py dummy.pdf twopage.pdf tilt.pdf\n\n# inputs = sys.argv[1:]\n\n# def pdf_combiner(pdf_list):\n#     for pdf in pdf_list:\n#         print(pdf)\n\n# pdf_combiner(inputs)\n\n# PS C:\\\\Users\\\\Mohamed Bee\\\\Desktop\\\\Python_w_Udemy\\\\Section17_Scripting with Python\\\\PDF> python Exo.py dummy.pdf twopage.pdf tilt.pdf\n# output\n# dummy.pdf\n# twopage.pdf\n# tilt.pdf\n\n\n# that is because there is the merger obj.\n\n# inputs = sys.argv[1:]\n\n# def pdf_combiner(pdf_list):\n#     merger=PyPDF2.PdfFileMerger()\n#     for pdf in pdf_list:\n#         print(pdf)\n#         merger.append(pdf)\n#     merger.write('super.pdf')\n    \n# pdf_combiner(inputs)\n\n# type all that then enter\n# PS C:\\\\Users\\\\Mohamed Bee\\\\Desktop\\\\Python_w_Udemy\\\\Section17_Scripting with Python\\\\PDF> python Exo.py dummy.pdf twopage.pdf tilt.pdf\n\n# output\n# dummy.pdf\n# twopage.pdf\n# tilt.pdf\n# and then run the program\n\n\ntemplate = PyPDF2.PdfFileReader(open('super.pdf', 'rb'))\nwatermark = PyPDF2.PdfFileReader(open('wtr.pdf', 'rb'))\noutput= PyPDF2.PdfFileWriter()\n\nfor i in range(template.getNumPages()):\n    page= template.getPage(i)\n    page.mergePage(watermark.getPage(0))\n    output.addPage(page)\n\n# write the file once, after all pages have been merged\nwith open('watermarked_output.pdf', 'wb') as file:\n    output.write(file)\n    \n    \n    \n# output pages are watermarked\n ","repo_name":"MBee05/Section17_Scripting-with-Python","sub_path":"PDF/Exo.py","file_name":"Exo.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"32373363900","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.spatial.distance as scidist\nimport tqdm\nimport cwrap\nimport PDBloader\nimport eigen\n\n\ndef get_dmat(coords):\n    dmat = scidist.pdist(coords)\n    dmat = scidist.squareform(dmat)\n    return dmat\n\n\ndef get_cmap(dmat, thr=8., sep_cut=2):\n    \"\"\"\n    >>> cmd.reinitialize()\n    >>> cmd.load('data/3u97_A.pdb', 'A_')\n    >>> coords = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n    >>> cmap = get_cmap(get_dmat(coords[:8]), sep_cut=0)\n    >>> cmap\n    array([[False,  True,  True,  True, False, False, False, False],\n           [ True, False,  True,  True,  True, False, False, False],\n           [ True,  True, False,  True,  True,  True, False, False],\n           [ True,  True,  True, False,  True,  True, False, False],\n           [False,  True,  True,  True, False,  True,  True, False],\n           [False, False,  True,  True,  True, False,  True,  True],\n           [False, False, False, False,  True,  True, False,  True],\n           [False, False, False, False, False,  True,  True, False]])\n    >>> cmap = get_cmap(get_dmat(coords[:8]), sep_cut=2)\n    >>> cmap\n    array([[False, False, False,  True, False, False, False, False],\n           [False, False, False, False,  True, False, False, False],\n           [False, False, False, False, False,  True, False, False],\n           [ True, False, False, False, False, False, False, 
False],\n [False, True, False, False, False, False, False, False],\n [False, False, True, False, False, False, False, False],\n [False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False]])\n \"\"\"\n n, n = dmat.shape\n cmap = dmat <= thr\n for i in range(sep_cut + 1):\n mask = ~(np.logical_or(np.diag(np.ones(n - i, dtype=bool), k=i), np.diag(np.ones(n - i, dtype=bool), k=-i)))\n cmap = np.logical_and(cmap, mask)\n return cmap\n\n\ndef mapalign(cmap_a,\n cmap_b,\n sep_x_list=[0, 1, 2],\n sep_y_list=[1, 2, 3, 8, 16, 32],\n gap_e_list=[-0.2, -0.1, -0.01, -0.001],\n niter=20,\n progress=True,\n eigen_init=False,\n eigen_aln=False):\n \"\"\"\n >>> cmd.reinitialize()\n >>> cmd.load('data/3u97_A.pdb', 'A_')\n >>> cmd.load('data/2pd0_A.pdb', 'B_')\n >>> coords_a = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n >>> coords_b = cmd.get_coords('B_ and polymer.protein and chain A and name CA')\n >>> dmat_a = get_dmat(coords_a)\n >>> dmat_b = get_dmat(coords_b)\n >>> cmap_a = get_cmap(dmat_a)\n >>> cmap_b = get_cmap(dmat_b)\n >>> cmap_a.shape, cmap_b.shape\n ((88, 88), (215, 215))\n\n # Few minutes to run. Uncomment the following to test it!\n >>> aln, score, sep_x_best, sep_y_best, gap_e_best = mapalign(cmap_a, cmap_b)\n >>> aln\n array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,\n 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,\n 26, 27, 28, 29, 30, 31, 32, 33, 34, 44, 45, 46, 47,\n 48, 49, 51, 52, 53, 54, 55, 56, 57, 59, 60, 61, 103,\n 104, 105, 106, 107, 108, 109, 110, 111, 112, 119, 120, 121, 122,\n 123, 124, 125, 126, 127, 152, 153, 154, 155, 156, 157, 158, 159,\n 160, 161, 162, 163, 164, 165, 166, 167, 168, 169], dtype=int32)\n >>> aln.shape\n (88,)\n >>> score\n 407.2732985813753\n >>> sep_x_best, sep_y_best, gap_e_best\n (1, 16, -0.001)\n \"\"\"\n if eigen_aln:\n aln, score, gap_e_best = eigen.get_alignment(cmap_a,\n cmap_b,\n gap_extension_list=gap_e_list,\n niter=niter,\n progress=progress)\n sep_x_best, sep_y_best = None, None\n else:\n aln, score, sep_x_best, sep_y_best, gap_e_best = cwrap.get_alignment(cmap_a,\n cmap_b,\n sep_x_list=sep_x_list,\n sep_y_list=sep_y_list,\n gap_extension_list=gap_e_list,\n niter=niter,\n progress=progress,\n eigen_init=eigen_init)\n return aln, score, sep_x_best, sep_y_best, gap_e_best\n\n\ndef get_aln_b(aln_a, nb):\n \"\"\"\n >>> aln_a = np.asarray([ -1, -1, 0, 1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19, 20, 21, 22, 23, 31, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 116, 117, 118, 119, 120, 121, 122, 123, 124, 145, 146, 147, 148, 149, 150, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, -1, -1, -1])\n >>> aln_a.shape\n (88,)\n >>> aln_b = get_aln_b(aln_a, 215)\n >>> aln_b\n array([ 2., 3., 4., 5., 8., 9., 12., 13., 14., 15., 16., 17., 18.,\n 19., 20., -1., -1., -1., -1., 21., 22., 23., 24., 25., -1., -1.,\n -1., -1., -1., -1., -1., 26., -1., -1., -1., -1., 27., 28., 29.,\n 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., -1., -1.,\n -1., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52.,\n 53., 54., 55., 56., 57., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., 58.,\n 59., 60., 61., 62., 63., 64., 65., 66., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., 
-1., -1., -1., -1., -1., -1., -1.,\n -1., -1., 67., 68., 69., 70., 71., 72., -1., -1., -1., 73., 74.,\n 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.,\n -1., -1., -1., -1., -1., -1., -1.])\n \"\"\"\n aln_b = -np.ones(nb)\n ai_aln = np.where(aln_a != -1)[0]\n bi_aln = aln_a[ai_aln]\n aln_b[bi_aln] = ai_aln\n return aln_b\n\n\ndef get_aligned_maps(cmap_a, cmap_b, aln, full=False):\n \"\"\"\n >>> cmd.reinitialize()\n >>> cmd.load('data/3u97_A.pdb', 'A_')\n >>> cmd.load('data/2pd0_A.pdb', 'B_')\n >>> coords_a = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n >>> coords_b = cmd.get_coords('B_ and polymer.protein and chain A and name CA')\n >>> dmat_a = get_dmat(coords_a)\n >>> dmat_b = get_dmat(coords_b)\n >>> cmap_a = get_cmap(dmat_a)\n >>> cmap_b = get_cmap(dmat_b)\n >>> cmap_a.shape, cmap_b.shape\n ((88, 88), (215, 215))\n >>> aln = np.asarray([ -1, -1, 0, 1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19, 20, 21, 22, 23, 31, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 116, 117, 118, 119, 120, 121, 122, 123, 124, 145, 146, 147, 148, 149, 150, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, -1, -1, -1])\n >>> aln.shape\n (88,)\n\n Returns the maps aligned in the frame of cmap_a\n >>> cmap_a_aln, cmap_b_aln = get_aligned_maps(cmap_a, cmap_b, aln)\n >>> cmap_a_aln.shape\n (79, 79)\n >>> cmap_a_aln.shape\n (79, 79)\n\n Returns the maps aligned in the frame of cmap_b\n >>> cmap_a_aln, cmap_b_aln = get_aligned_maps(cmap_a, cmap_b, aln, full=True)\n >>> cmap_a_aln.shape\n (215, 215)\n >>> cmap_b_aln.shape\n (215, 215)\n \"\"\"\n na, na = cmap_a.shape\n nb, nb = cmap_b.shape\n ai_aln = np.where(aln != -1)[0]\n bi_aln = aln[ai_aln]\n if not full: # Only get the aligned parts\n cmap_a_aln = cmap_a[ai_aln, :][:, ai_aln]\n cmap_b_aln = cmap_b[bi_aln, :][:, bi_aln]\n else: # get the FULL matrices with zeros in insertion regions\n if na <= nb:\n cmap_a_aln = np.zeros_like(cmap_b)\n cmap_a_aln[:na, :na] = cmap_a\n cmap_a_aln[bi_aln, :] = cmap_a_aln[ai_aln, :]\n cmap_a_aln[:, bi_aln] = cmap_a_aln[:, ai_aln]\n cmap_b_aln = cmap_b\n else:\n cmap_a_aln = cmap_a\n cmap_b_aln = np.zeros_like(cmap_a)\n cmap_b_aln[:nb, :nb] = cmap_b\n cmap_b_aln[ai_aln, :] = cmap_b_aln[bi_aln, :]\n cmap_b_aln[:, ai_aln] = cmap_b_aln[:, bi_aln]\n return cmap_a_aln, cmap_b_aln\n\n\ndef get_score(cmap_a, cmap_b, aln):\n \"\"\"\n The score is the number of contacts common in the two maps aligned over the total number of contacts for cmap_a\n >>> cmd.reinitialize()\n >>> cmd.load('data/3u97_A.pdb', 'A_')\n >>> cmd.load('data/2pd0_A.pdb', 'B_')\n >>> coords_a = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n >>> coords_b = cmd.get_coords('B_ and polymer.protein and chain A and name CA')\n >>> dmat_a = get_dmat(coords_a)\n >>> dmat_b = get_dmat(coords_b)\n >>> cmap_a = get_cmap(dmat_a)\n >>> cmap_b = get_cmap(dmat_b)\n >>> cmap_a.shape, cmap_b.shape\n ((88, 88), (215, 215))\n >>> aln = np.asarray([ -1, -1, 0, 1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19, 20, 21, 22, 23, 31, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 116, 117, 118, 119, 120, 121, 122, 123, 124, 145, 146, 147, 148, 
149, 150, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, -1, -1, -1])\n >>> aln.shape\n (88,)\n >>> score = get_score(cmap_a, cmap_b, aln)\n >>> score\n 0.5838926174496645\n \"\"\"\n cmap_a_aln, cmap_b_aln = get_aligned_maps(cmap_a, cmap_b, aln, full=False)\n comm = np.logical_and(cmap_a_aln, cmap_b_aln)\n score = comm.sum() / cmap_a.sum() # min(cmap_a.sum(), cmap_b.sum())\n return score\n\n\ndef plot_aln(cmap_a, cmap_b, aln, full=False, outfilename=None):\n \"\"\"\n >>> cmd.reinitialize()\n >>> cmd.load('data/3u97_A.pdb', 'A_')\n >>> cmd.load('data/2pd0_A.pdb', 'B_')\n >>> coords_a = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n >>> coords_b = cmd.get_coords('B_ and polymer.protein and chain A and name CA')\n >>> dmat_a = get_dmat(coords_a)\n >>> dmat_b = get_dmat(coords_b)\n >>> cmap_a = get_cmap(dmat_a)\n >>> cmap_b = get_cmap(dmat_b)\n >>> cmap_a.shape, cmap_b.shape\n ((88, 88), (215, 215))\n >>> aln = np.asarray([ -1, -1, 0, 1, 2, 3, -1, -1, 4, 5, -1, -1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19, 20, 21, 22, 23, 31, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 116, 117, 118, 119, 120, 121, 122, 123, 124, 145, 146, 147, 148, 149, 150, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, -1, -1, -1])\n >>> aln.shape\n (88,)\n\n # >>> plot_aln(cmap_a, cmap_b, aln)\n # >>> plot_aln(cmap_a, cmap_b, aln, full=True)\n \"\"\"\n cmap_a_aln, cmap_b_aln = get_aligned_maps(cmap_a, cmap_b, aln, full=full)\n ai, aj = np.where(cmap_a_aln > 0)\n bi, bj = np.where(cmap_b_aln > 0)\n plt.scatter(bi, bj, s=16., c='gray', alpha=.5, label='cmap_b')\n plt.scatter(ai, aj, s=1., c='blue', label='cmap_a')\n plt.xticks([])\n plt.yticks([])\n plt.gca().set_aspect('equal', adjustable='box')\n plt.legend()\n if outfilename is not None:\n plt.savefig(outfilename)\n else:\n plt.show()\n\n\ndef batch_mapalign(cmap_a,\n logfilename,\n pdblist=[],\n pdbpath=None,\n num_workers=None,\n sep_x_list=[1],\n sep_y_list=[16],\n gap_e_list=[-0.001],\n eigen_init=False):\n \"\"\"\n >>> cmd.reinitialize()\n >>> cmd.load('data/3u97_A.pdb', 'A_')\n >>> coords_a = cmd.get_coords('A_ and polymer.protein and chain A and name CA')\n >>> dmat_a = get_dmat(coords_a)\n >>> cmap_a = get_cmap(dmat_a)\n >>> batch_mapalign(cmap_a, 'mapalign_batch.log', pdblist=['data/2pd0_A.pdb', 'data/3u97_A.pdb'])\n \"\"\"\n import torch\n import logging\n logging.basicConfig(filename=logfilename, level=logging.INFO, format='%(asctime)s: %(message)s')\n logging.info(f\"################ Starting {__file__} ################\")\n if num_workers is None:\n num_workers = os.cpu_count()\n logging.info(f\"num_workers: {num_workers}\")\n dataset = PDBloader.PDBdataset(pdbpath=pdbpath,\n pdblist=pdblist,\n cmap_a=cmap_a,\n sep_x_list=sep_x_list,\n sep_y_list=sep_y_list,\n gap_e_list=gap_e_list,\n logfilename=logfilename,\n eigen_init=eigen_init)\n dataloader = torch.utils.data.DataLoader(dataset,\n batch_size=1,\n shuffle=False,\n num_workers=num_workers,\n collate_fn=PDBloader.collate_fn,\n prefetch_factor=8)\n iterator = iter(dataloader)\n pbar = tqdm.tqdm(total=dataset.__len__())\n # for i, batch in enumerate(dataloader):\n for i in range(dataset.__len__()):\n try:\n batch = next(iterator)\n except RuntimeError:\n batch = [[(None, None, None, None, None)]]\n for b in batch:\n for chain_data in b:\n index, pdb, chain, score, native_contact = chain_data\n if index is not None:\n logging.info(f'{index} {pdb} {chain} {score:.4f} {native_contact:.4f}')\n 
pbar.update(1)\n pbar.close()\n\n\ndef log(msg):\n try:\n logging.info(msg)\n except NameError:\n pass\n\n\nif __name__ == '__main__':\n import sys\n import doctest\n import argparse\n from pymol import cmd\n # ### UNCOMMENT FOR LOGGING ####\n import os\n import PDBloader\n # ### ##################### ####\n # argparse.ArgumentParser(prog=None, usage=None, description=None, epilog=None, parents=[], formatter_class=argparse.HelpFormatter, prefix_chars='-', fromfile_prefix_chars=None, argument_default=None, conflict_handler='error', add_help=True, allow_abbrev=True, exit_on_error=True)\n parser = argparse.ArgumentParser(description='')\n # parser.add_argument(name or flags...[, action][, nargs][, const][, default][, type][, choices][, required][, help][, metavar][, dest])\n parser.add_argument('-p1', '--pdb1', help='First structure file to align on pdb2')\n parser.add_argument('-p2', '--pdb2', help='Second pdb file. Can give multiple pdbs', nargs='+')\n parser.add_argument(\n '-db',\n '--pdbpath',\n help=\n 'Path to the pdb database. See: https://github.com/bougui505/misc/blob/master/shell/updatePDB.sh to download the PDB'\n )\n parser.add_argument('-s1', '--sel1', required=False, default='all')\n parser.add_argument('-s2', '--sel2', required=False, default='all')\n parser.add_argument(\n '--sep_x',\n type=int,\n default=1,\n help=\n 'Parameter to compute the STD of the gaussian: s_std=sep_y*(1+(s_min-2)**sep_x), with s_min the min sequence separation for cmap_a and cmap_b of the considered contacts. (default=1)'\n )\n parser.add_argument(\n '--sep_y',\n type=int,\n default=16,\n help=\n 'Parameter to compute the STD of the gaussian: s_std=sep_y*(1+(s_min-2)**sep_x), with s_min the min sequence separation for cmap_a and cmap_b of the considered contacts. (default=16)'\n )\n parser.add_argument('--gap_e',\n type=float,\n default=-0.001,\n help='Gap extension penalty. MUST BE negative (default=-0.001).')\n parser.add_argument('--niter', help='Number of iterations (default 20)', default=20, type=int)\n parser.add_argument('--show', action='store_true', help='Show the contact map alignment')\n parser.add_argument('--save', help='Save the contact map alignment in the given filename')\n parser.add_argument('--full',\n action='store_true',\n help='Display the full contact map alignemnt. Not only the aligned contacts')\n parser.add_argument('--hpo', help='Hyperparameter optimization for sep_x, sep_y and gap_e', action='store_true')\n parser.add_argument(\n '--eigen_init',\n help=\n 'Initialize the scoring alignment matrix using eigenvector decomposition. Faster but less accurate (see: https://doi.org/10.1093/bioinformatics/btq402)',\n action='store_true')\n parser.add_argument(\n '--eigen_aln',\n help=\n 'Contact map alignment using alignment of eigen vectors. 
Even faster but less accurate (see: https://doi.org/10.1093/bioinformatics/btq402)',\n action='store_true')\n parser.add_argument('--test', help='Test the code', action='store_true')\n args = parser.parse_args()\n\n if args.test:\n doctest.testmod(optionflags=doctest.ELLIPSIS) # | doctest.REPORT_ONLY_FIRST_FAILURE)\n sys.exit()\n\n cmd.load(args.pdb1, 'A_')\n coords_a = cmd.get_coords(f'A_ and polymer.protein and name CA and {args.sel1}')\n dmat_a = get_dmat(coords_a)\n cmap_a = get_cmap(dmat_a)\n if args.hpo:\n sep_x_list = [0, 1, 2]\n sep_y_list = [1, 2, 3, 8, 16, 32]\n gap_e_list = [-0.2, -0.1, -0.01, -0.001]\n else:\n sep_x_list = [args.sep_x]\n sep_y_list = [args.sep_y]\n gap_e_list = [args.gap_e]\n if args.pdb2 is not None:\n if len(args.pdb2) == 1:\n import logging\n logfilename = os.path.splitext(os.path.basename(__file__))[0] + '.log'\n logging.basicConfig(filename=logfilename, level=logging.INFO, format='%(asctime)s: %(message)s')\n logging.info(f\"################ Starting {__file__} ################\")\n log(args.pdb1)\n log(args.pdb2)\n cmd.load(args.pdb2[0], 'B_')\n coords_b = cmd.get_coords(f'B_ and polymer.protein and name CA and {args.sel2}')\n dmat_b = get_dmat(coords_b)\n cmap_b = get_cmap(dmat_b)\n log(f'cmap_a.shape: {cmap_a.shape}')\n log(f'cmap_b.shape: {cmap_b.shape}')\n aln, score, sep_x_best, sep_y_best, gap_e_best = mapalign(cmap_a,\n cmap_b,\n sep_x_list=sep_x_list,\n sep_y_list=sep_y_list,\n gap_e_list=gap_e_list,\n progress=args.hpo,\n eigen_init=args.eigen_init,\n eigen_aln=args.eigen_aln,\n niter=args.niter)\n if args.hpo:\n log(f'sep_x: {sep_x_best}')\n log(f'sep_y: {sep_y_best}')\n log(f'gap_e: {gap_e_best}')\n print(f'sep_x: {sep_x_best}')\n print(f'sep_y: {sep_y_best}')\n print(f'gap_e: {gap_e_best}')\n log(f'score: {score:.4f}')\n print(f'score: {score:.4f}')\n native_contacts_score = get_score(cmap_a, cmap_b, aln)\n log(f'native_contacts_score: {native_contacts_score:.4f}')\n print(f'native_contacts_score: {native_contacts_score:.4f}')\n if args.show or args.save is not None:\n plot_aln(cmap_a, cmap_b, aln, full=args.full, outfilename=args.save)\n # >>> sep_x_best, sep_y_best, gap_e_best\n # (2, 16, -0.001)\n elif args.pdb2 is not None:\n batch_mapalign(cmap_a,\n f'mapalign_{os.path.basename(os.path.splitext(args.pdb1)[0])}.log',\n pdblist=args.pdb2,\n sep_x_list=sep_x_list,\n sep_y_list=sep_y_list,\n gap_e_list=gap_e_list,\n eigen_init=args.eigen_init)\n elif args.pdbpath is not None:\n batch_mapalign(cmap_a,\n f'mapalign_{os.path.basename(os.path.splitext(args.pdb1)[0])}.log',\n pdbpath=args.pdbpath,\n eigen_init=args.eigen_init)\n","repo_name":"bougui505/misc","sub_path":"python/mapalign/mapalign.py","file_name":"mapalign.py","file_ext":"py","file_size_in_byte":21453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"74942696894","text":"# Standard library imports\nimport os\nfrom tempfile import NamedTemporaryFile\nfrom uuid import uuid4\nfrom itertools import islice\n\n# Third party imports\nimport pandas as pd\n\n\ndef df_to_table(df,\n table,\n write_disposition='WRITE_EMPTY',\n blocking=True):\n \"\"\"Upload a Pandas DataFrame to Google BigQuery\n\n Args:\n df (DataFrame): The Pandas DataFrame to be uploaded.\n table (google.cloud.bigquery.Table): BigQuery table object.\n write_disposition (str): Either 'WRITE_EMPTY', 'WRITE_TRUNCATE', or\n 'WRITE_APPEND'; the default is 'WRITE_EMPTY'.\n blocking (bool): Set to False if you don't want to block until the job\n is complete.\n\n 
Returns:\n google.cloud.bigquery.Job: The file upload job object. If you have set\n blocking=False, this can be used to check for job completion.\n \"\"\"\n # Two annoyances here:\n # 1) df.to_csv() requires a non binary mode file handle, whereas\n # table.upload_from_file() requires a binary mode file handle, so\n # we can't reuse the same file handle in read/write mode.\n # 2) Windows won't allow reading from a temporary file whilst it's\n # still open (see robfraz/gbq-pandas issue #2), so we can't use\n # context handlers to auto-close (and therefore delete) the temporary\n # file that we write to.\n\n writebuf = NamedTemporaryFile(mode='w',\n encoding='UTF-8',\n prefix=\"df_to_table_\",\n suffix=\".csv\",\n delete=False) # robfraz/gbq-pandas issue #2\n\n try:\n df.to_csv(writebuf, index=False, encoding='UTF-8')\n writebuf.flush()\n writebuf.close()\n\n with open(writebuf.name, mode='rb') as readbuf:\n job = table.upload_from_file(readbuf,\n encoding='UTF-8',\n source_format='CSV',\n skip_leading_rows=1,\n create_disposition='CREATE_IF_NEEDED',\n write_disposition=write_disposition)\n finally:\n os.remove(writebuf.name)\n\n if blocking:\n job.result()\n\n return job\n\n\ndef query_to_df(sql, client):\n \"\"\"Run a Google BigQuery query, and return the result in a Pandas Dataframe\n\n The query must be a single SQL statement\n\n Args:\n sql (str): A string containing a single SQL statement.\n client (google.cloud.bigquery.Client): BigQuery client object.\n\n Returns\n DataFrame: A Pandas DataFrame containing the result of the query.\n \"\"\"\n job = client.run_async_query(str(uuid4()), sql)\n job.use_legacy_sql = False\n result = job.result()\n return table_to_df(result.destination)\n\n\ndef table_to_df(table, limit=None):\n \"\"\"Download a table from Google BigQuery into a dataframe, with optional row limit\n\n Args:\n table (google.cloud.bigquery.Table): BigQuery table object.\n limit (None|int): The default is limit=None (i.e. 
all rows in table); set to\n zero to get an empty DataFrame with the column names set, or a positive\n number to limit the maximum number of rows fetched into the DataFrame.\n\n Returns:\n DataFrame: A Pandas DataFrame containing the table data.\n \"\"\"\n if limit and limit < 0:\n limit = None\n\n table.reload()\n return pd.DataFrame(data=list(islice(table.fetch_data(), 0, limit)),\n columns=[column.name for column in table.schema])\n","repo_name":"robfraz/gbq-pandas","sub_path":"gbq_pandas.py","file_name":"gbq_pandas.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"3306275687","text":"from maths import norm, length\nimport numpy as np\n\n\nclass CollisionInfo:\n def __init__(self, did_hit, location, normal):\n self.did_hit = did_hit\n self.location = location\n self.normal = normal\n\n\nclass Ray:\n def __init__(self, origin, direction, emitted_brightness=0.0, gen=0):\n self.origin = origin\n self.direction = norm(direction)\n self.colour = np.ones(3)\n self.emitted_brightness = emitted_brightness\n self.gen = gen\n self.MAX_BOUNCE = 100\n\n def trace(self, scene):\n if self.gen > self.MAX_BOUNCE:\n return self.colour * self.emitted_brightness\n\n min_collision_dist = np.inf\n closest_collision = None\n for object in scene:\n collision_info = object.collision(self)\n if collision_info.did_hit:\n dist_of_collision = length(collision_info.location - self.origin)\n if dist_of_collision < min_collision_dist:\n closest_collision = collision_info\n min_collision_dist = dist_of_collision\n closest_collision_material = object.material\n\n if closest_collision is not None:\n self.colour *= closest_collision_material.colour\n\n new_ray_dir = closest_collision_material.reflect(\n self.direction, closest_collision.normal\n )\n\n reflected_ray = Ray(\n closest_collision.location,\n new_ray_dir,\n emitted_brightness=closest_collision_material.emissivity,\n gen=self.gen + 1,\n )\n self.colour *= reflected_ray.trace(scene)\n self.emitted_brightness = reflected_ray.emitted_brightness\n\n return self.colour * self.emitted_brightness\n","repo_name":"franklinscudder/RayTracer","sub_path":"rays.py","file_name":"rays.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14144170021","text":"import os\nfrom setuptools import setup, find_packages\n\n# get long_description from README.md\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n# get install requirements\nwith open('requirements.txt') as fh:\n install_requires = fh.read().splitlines()\n\n# get version\nwith open('version.txt') as fh:\n version = fh.read().strip()[1:]\n\n# list of all utility scripts to be included with package\nscripts=[os.path.join('utils',f) for f in os.listdir('utils') if f.endswith('.py')]\n\nsetup(\n name='sliderule',\n author='SlideRule Developers',\n description='Python client for interacting with sliderule server',\n long_description_content_type=\"text/markdown\",\n url='https://github.com/ICESat2-SlideRule/sliderule/',\n license='BSD 3-Clause',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Physics',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n ],\n packages=find_packages(),\n version=version,\n 
install_requires=install_requires,\n scripts=scripts,\n)\n","repo_name":"ICESat2-SlideRule/sliderule","sub_path":"clients/python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"79"} +{"seq_id":"35292065010","text":"class VendingMachine:\n def __init__(self):\n self.state = 'Idle'\n self.juices = {'PEPS': 30, 'MOUN': 30, 'DPEP': 50, 'COKE': 20, 'GATO': 20, 'DCOK': 30, 'MINM': 25, 'TROP': 30}\n self.stock = {juice: 1 for juice in self.juices.keys()}\n\n def run(self):\n while True:\n if self.state == 'Idle':\n self.idle_state()\n elif self.state == 'Dispensing':\n self.dispensing_state()\n elif self.state == 'InsufficientFunds':\n self.insufficient_funds_state()\n elif self.state == 'OutOfStock':\n self.out_of_stock_state()\n elif self.state == 'RefillPrompt':\n self.refill_prompt_state()\n elif self.state == 'Refill':\n self.refill_state()\n\n def idle_state(self):\n print(\"Welcome to the vending machine!\")\n print(\"List of drinks:\")\n for juice, price in self.juices.items():\n print(f\"{juice} - ${price}\")\n\n user_input = input(\"Enter the four-letter code for your drink: \")\n if user_input.lower()=='refill':\n self.state = 'Refill'\n\n elif user_input in self.juices:\n if self.stock[user_input] > 0:\n cost = self.juices[user_input]\n amount = float(input(\"Enter the amount of money you will feed: \"))\n if amount == cost:\n print(\"Dispensing drink...\")\n self.stock[user_input] -= 1\n self.state = 'Dispensing'\n elif amount < cost:\n self.state = 'InsufficientFunds'\n else:\n change = amount - cost\n print(f\"Dispensing drink and returning ${change} in change.\")\n self.stock[user_input] -= 1\n self.state = 'Dispensing'\n elif sum(self.stock.values())==0:\n self.state = 'RefillPrompt'\n else:\n self.state = 'OutOfStock' \n else: \n print(\"Invalid input. Please try again.\")\n\n def dispensing_state(self):\n print(\"Enjoy your drink!\")\n self.state = 'Idle'\n\n def insufficient_funds_state(self):\n print(\"The entered amount is less than the cost. Please enter a sufficient amount.\")\n self.state = 'Idle'\n\n def out_of_stock_state(self):\n print(\"Selected juice is out of stock. 
Please choose another drink.\")\n self.state = 'Idle'\n\n def refill_prompt_state(self):\n print(\"Please refill all the juices.\")\n self.state = 'Idle'\n\n def refill_state(self):\n print(\"Vending Machine has been refilled...\")\n self.stock = {juice: 1 for juice in self.juices.keys()}\n self.state = 'Idle'\n\n# Run the vending machine\nmachine = VendingMachine()\nmachine.run()\n","repo_name":"sanchitgarg2204/sanchitgarg2204.github.io","sub_path":"fsm.py","file_name":"fsm.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29412090844","text":"class Solution:\n def intToRoman(self, num: int) -> str:\n values = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]\n strs = [\"M\", \"CM\", \"D\", \"CD\", \"C\", \"XC\", \"L\", \"XL\", \"X\", \"IX\", \"V\", \"IV\", \"I\"]\n sb = \"\"\n for i in range(len(values)):\n while num >= values[i]:\n num -= values[i]\n sb += strs[i]\n return sb","repo_name":"chandlerche/dailyLeetCode","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18895842674","text":"import numpy as np\nfrom utils import *\nfrom file_option_name_memo import *\n \n\ndef create_word_sequence(file_name_option,valid_list,grammar):\n _,_,_,w,_,_,_,_,_= create_dataloader(500, file_name_option, valid_list)\n truth_T0, truth_T = grammar_list[grammar].values()\n D,N_max = w.to('cpu').detach().numpy().copy().shape\n truth_F = np.zeros_like(w,dtype=np.int8)\n N = np.zeros(D,dtype=np.int8)\n total_w_num = 0\n for d in range(D):\n truth_F[d][0] = np.random.choice(N_max,p=truth_T0)\n w[d][0] = w[d][truth_F[d][0]]\n N[d] += 1\n total_w_num += 1\n for n in range(1,N_max):\n truth_F[d][n] = np.random.choice(N_max+1,p=truth_T[truth_F[d][n-1]])\n if truth_F[d][n] == N_max:\n w[d][n] = -1\n else:\n w[d][n] = w[d][truth_F[d][n]]\n N[d] += 1\n total_w_num += 1\n return D, N, w, truth_F","repo_name":"YutaMatsui-1122/CSL-VAE","sub_path":"create_word_sequence.py","file_name":"create_word_sequence.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11400197862","text":"import os\r\n\r\n\r\n# function to make a new database\r\ndef make_db(name):\r\n # lists local folder content\r\n local_folder_content = os.listdir()\r\n # looks if the database you are looking for exists\r\n if str(name) in local_folder_content:\r\n # if your db wasn't found this will be printed out\r\n return print(\"Database with this name already exists!\")\r\n else:\r\n # makes a new db file\r\n db_name = (str(name) + \".py\")\r\n with open(db_name, \"w+\") as db:\r\n db.close()\r\n return print(\"New database made!\")\r\n\r\n\r\n# read info from database\r\ndef read_db(name, print_content=False):\r\n try:\r\n db_name = (str(name) + \".py\")\r\n # opens your desired database\r\n with open(str(db_name), \"r\") as db:\r\n\r\n # reads db content\r\n db_content = db.read()\r\n\r\n if print_content:\r\n print(str(db_content))\r\n # closes the db\r\n db.close()\r\n return db_content\r\n except:\r\n print(\"Failed to get db content\")\r\n\r\n\r\ndef write_entry(name, user_name, user_id, user_age, user_bio, user_adinfo):\r\n\r\n\r\n # looks if the database you are looking for exists\r\n\r\n\r\n try:\r\n db_name = (str(name) + \".py\")\r\n\r\n # opens your desired database\r\n\r\n with 
open(str(db_name), \"r\") as db:\r\n # reads db content\r\n\r\n db_content = db.read()\r\n # makes a new dictionary for the user\r\n\r\n db.close()\r\n\r\n db = open(str(db_name), \"w\")\r\n user_dict = {\r\n \"Username\": str(user_name),\r\n \"UID\": int(user_id),\r\n \"Age\": int(user_age),\r\n \"Biography\": str(user_bio),\r\n \"Other\": str(user_adinfo)\r\n }\r\n\r\n db_to_write = str(db_content) + \"\\n\" + str(user_name) + \" = \" + str(user_dict)\r\n # writes user's data to your db\r\n db.write(str(db_to_write))\r\n print(\"new entry written to db\")\r\n db.close()\r\n except:\r\n print(\"Failed to write to db\")\r\n\r\n#read desired user's info\r\n\r\ndef read_user_info(name, username):\r\n try:\r\n db_name = (str(name) + \".py\")\r\n db = open(str(db_name), \"r\")\r\n db_content = db.read()\r\n db.close()\r\n info1 = str(db_content.split(f\"{username} = \"))\r\n info2 = info1.split(\"}\")\r\n info3 = info2[0].split(\"{\")\r\n data = info3[1]\r\n\r\n\r\n\r\n return data\r\n except:\r\n return print(\"Failed to get desired user's info\")","repo_name":"yourdarl1ng/mw-database","sub_path":"mw_database.py","file_name":"mw_database.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"15241905309","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport models.blocks as blocks\r\nfrom tf_p_inv import p_inv\r\nfrom constants import *\r\n\r\n\r\ndef hex_proj(a, g, params):\r\n with tf.variable_scope(\"hex_proj\", reuse=tf.AUTO_REUSE):\r\n if params['hex_final_dim'] < params['batch_size']:\r\n l = a - tf.matmul(tf.matmul(tf.matmul(g, p_inv(tf.matmul(g, g, transpose_a=True))),g, transpose_b=True), a) \r\n else:\r\n small_identity = params['small_id'] * tf.eye(params['hex_final_dim'])\r\n l = a - tf.matmul(tf.matmul(tf.matmul(g, p_inv(tf.matmul(g, g, transpose_a=True) + small_identity)),g, transpose_b=True), a) \r\n\r\n return l\r\n\r\n\r\ndef hex_classifier(h, g, phs, params):\r\n \"\"\"Input: [h,g] or [h,0] or [0,g], Output: the layer before the linear layer of softmax\"\"\"\r\n with tf.variable_scope(\"hex_classifier\", reuse=tf.AUTO_REUSE):\r\n keep_rate, stop_grad, _ = phs\r\n inp = tf.concat([h,g], -1)\r\n h_mlp = tf.layers.dense(inp, params['nli_mlp_dim'], tf.nn.relu)\r\n if params['hex_dropout']:\r\n h_drop = tf.nn.dropout(h_mlp, keep_rate)\r\n else:\r\n h_drop = h_mlp\r\n h_drop = tf.layers.dense(h_drop, params['hex_final_dim'])\r\n return h_drop\r\n\r\n\r\ndef hex_softmax(f, params):\r\n if params['final_linear']:\r\n with tf.variable_scope(\"hex_softmax\", reuse=tf.AUTO_REUSE):\r\n logits = tf.layers.dense(f, 3)\r\n return logits\r\n else:\r\n return f\r\n\r\nclass HEX(object):\r\n def __init__(self, params):\r\n if params['hex_share_emb'] == False:\r\n with tf.variable_scope(\"hex_embed\", reuse=tf.AUTO_REUSE):\r\n self.embeddings = tf.Variable(params['embeddings'], trainable=params['emb_train'], name='E')\r\n if params['self_att']:\r\n self.construct_hex_vec = self.construct_hex_vec_selfatt\r\n else:\r\n self.construct_hex_vec = self.construct_hex_vec_simple\r\n \r\n\r\n\r\n def share_emb(self, embeddings):\r\n self.embeddings = embeddings\r\n\r\n\r\n def construct_hex_vec_simple(self, inputs, params, phs):\r\n keep_rate, stop_grad, _ = phs\r\n\r\n premise_x, hypothesis_x = inputs\r\n\r\n with tf.variable_scope(\"hex_superficial\", reuse=tf.AUTO_REUSE):\r\n\r\n ## Calculate representaitons by CBOW method\r\n emb_premise = tf.nn.embedding_lookup(self.embeddings, premise_x) 
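# added note: lookup gives shape [batch, seq_len, emb_dim]; the reduce_sum over the token axis below forms the CBOW sentence vector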
\r\n emb_premise_drop = tf.nn.dropout(emb_premise, keep_rate)\r\n\r\n emb_hypothesis = tf.nn.embedding_lookup(self.embeddings, hypothesis_x)\r\n emb_hypothesis_drop = tf.nn.dropout(emb_hypothesis, keep_rate)\r\n\r\n premise_rep = tf.reduce_sum(emb_premise_drop, 1)\r\n hypothesis_rep = tf.reduce_sum(emb_hypothesis_drop, 1)\r\n\r\n ## Combinations\r\n h_diff = premise_rep - hypothesis_rep\r\n h_mul = premise_rep * hypothesis_rep\r\n\r\n ### MLP\r\n mlp_input = tf.concat([premise_rep, hypothesis_rep, h_diff, h_mul], 1)\r\n\r\n superficial_output = tf.layers.dense(mlp_input, 100)\r\n return premise_rep, hypothesis_rep, mlp_input\r\n\r\n def construct_hex_vec_selfatt(self, inputs, params, phs):\r\n keep_rate, stop_grad, _ = phs\r\n\r\n premise_x, hypothesis_x = inputs\r\n\r\n with tf.variable_scope(\"hex_superficial_selfatt\", reuse=tf.AUTO_REUSE):\r\n\r\n emb_premise = tf.nn.embedding_lookup(self.embeddings, premise_x) \r\n emb_premise_drop = tf.nn.dropout(emb_premise, keep_rate)\r\n\r\n emb_hypothesis = tf.nn.embedding_lookup(self.embeddings, hypothesis_x)\r\n emb_hypothesis_drop = tf.nn.dropout(emb_hypothesis, keep_rate)\r\n\r\n prem_seq_lengths, prem_mask = blocks.length(premise_x)\r\n hyp_seq_lengths, hyp_mask = blocks.length(hypothesis_x)\r\n\r\n prem_self_att= blocks.simple_self_attention_block(emb_premise_drop, params['dim_emb'], prem_seq_lengths, prem_mask, scope = 'superficial_prem_self_att')\r\n hypo_self_att= blocks.simple_self_attention_block(emb_hypothesis_drop, params['dim_emb'], hyp_seq_lengths, hyp_mask, scope = 'superficial_hypo_self_att')\r\n\r\n\r\n premise_rep = tf.reduce_sum(prem_self_att, 1)\r\n hypothesis_rep = tf.reduce_sum(hypo_self_att, 1)\r\n\r\n ## Combinations\r\n h_diff = premise_rep - hypothesis_rep\r\n h_mul = premise_rep * hypothesis_rep\r\n\r\n ### MLP\r\n mlp_input = tf.concat([premise_rep, hypothesis_rep, h_diff, h_mul], 1)\r\n return premise_rep, hypothesis_rep, mlp_input\r\n","repo_name":"owenzx/LexicalDebias-ACL2020","sub_path":"models/hex.py","file_name":"hex.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"79"} +{"seq_id":"30245716562","text":"from discord.ext import commands\nimport traceback\nimport aiotrello\nimport datetime\nimport discord\n\n\nclass Suggest(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.trello = aiotrello.Trello(\n key=self.bot.config['trellokey'], token=self.bot.config['trellotoken'])\n\n @commands.command(name='suggest', description='Suggest a feature')\n @commands.cooldown(1, 600, commands.BucketType.user)\n async def suggestcmd(self, ctx, *, suggestion: str):\n if suggestion is None:\n await ctx.error('You can\\'t suggest nothing!')\n else:\n board = await self.trello.get_board(lambda b: b.name == 'Fire')\n suggestions = await board.get_list(lambda l: l.name == 'Suggestions')\n card = await suggestions.create_card(suggestion, f'Suggested by {ctx.author.name} ({ctx.author.id})')\n now = datetime.datetime.now(datetime.timezone.utc).strftime(\n '%d/%m/%Y @ %I:%M:%S %p')\n await card.add_comment(f'Suggested in channel {ctx.channel.name} ({ctx.channel.id}) in guild {ctx.guild.name} ({ctx.guild.id}) at {now} UTC')\n await ctx.success(f'Thanks! Your suggestion was added to the Trello @ <{card.url}>. 
Make sure to check it every now and then for a response.')\n\n\ndef setup(bot):\n try:\n bot.add_cog(Suggest(bot))\n bot.logger.info(f'$GREENLoaded $CYAN\"suggest\" $GREENcommand!')\n except Exception as e:\n bot.logger.error(\n f'$REDError while adding command $CYAN\"suggest\"', exc_info=e)\n","repo_name":"0xacn/bot","sub_path":"commands/suggest.py","file_name":"suggest.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"2831431504","text":"if __name__ == '__main__':\n n = int(input())\n \n listOfNumber = []\n for i in range(1, n+1):\n listOfNumber.append(i)\n \n result = ''.join(map(str, listOfNumber))\n print(result)\n\n\n# Hacker Rank Task\n# The included code stub will read an integer, n , from STDIN.\n# Without using any string methods, try to print the following:\n# 1234...n\n# Note that \"...\" represents the consecutive values in between.\n# Example:\n# n = 5\n\n# Print the string 12345","repo_name":"raihan-tajdid007/hackerRank-prob-solving","sub_path":"printFunction.py","file_name":"printFunction.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"27424267018","text":"import cv2\nimport numpy as np\nfrom line import Line\nfrom abc import ABCMeta, abstractmethod\n\ndef getArea(line):\n\treturn line.area\n\nclass ILineDetector(metaclass=ABCMeta):\n\t\"\"\"\n\t\tClass for line detection and filtering\n\t\"\"\"\n\t@abstractmethod\n\tdef __init__(self, algorithm=None, filtering_criteria=None, quantity=None):\n\t\t\"\"\"\n\t\t\tConstructor identifies the detection specifications\n\t\t\t\n\t\t\tParameters\n\t\t\t——————————\n\t\t\talgorithm ———> algorithm to find all lines in the canny frame (default=HOUGH_LINES)\n\t\t\tfiltering_criteria ———> Array of filtering Constants\n\t\t\tquantity ———> filtering by area as an optional excess filtering step\n\t\t\"\"\"\n\t\tpass\n\n\t@abstractmethod\n\tdef xExtremes(self, lines):\n\t\t\"\"\"\n\t\t\tFunction returns the leftmost and rightmost vertical lines\n\t\t\t\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tlines ———> list of all found lines (list of line objects)\n\t\t\t\n\t\t\t@return : list of two line objects\n\t\t\"\"\"\n\t\tpass\n\n\t@abstractmethod\n\tdef yExtremes(self, lines):\n\t\t\"\"\"\n\t\t\tFunction returns the top and bottom horizontal lines\n\t\t\t\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tlines ———> list of all found lines (list of line objects)\n\t\t\t\n\t\t\t@return : list of two line objects\n\t\t\"\"\"\n\t\tpass\n\n\t@abstractmethod\n\tdef run(self, frame):\n\t\t\"\"\"\n\t\t\tThis function does the following:-\n\t\t\t1- Creates the canny version of the frame\n\t\t\t2- Extracts all lines according to the specified algorithm\n\t\t\t3- Applies the desired filtering criterion\n\t\t\t\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tframe ———> Workpiece frame\n\t\t\"\"\"\n\t\tpass\n\nclass LineDetector(ILineDetector):\n\t\"\"\"\n\t\tClass builder for extracting lines from a frame\n\t\tDependencies\n ————————————\n\t\t- ImageManipulator\n\n\t\tAll Dynamic Variables\n\t\t————————————————————\n\t\tself.__algorithm ———> Hough detection or contours\n\t\tself.__minLength ———> The minimum length of a line\n\t\tself.__quantity ———> The minimum length of a line\n\t\tself.__filtering ———> Filtering Criteria\n\t\tself._horizontals ———> Horizontal Lines after eliminating redundancies\n\t\tself._verticals ———> Vertical Lines after eliminating 
redundancies\n\n\t\tAll Static Variables\n\t\t————————————————————\n\t\t—) For Algorithms\n\t\t\t1. CONTOURS\n\t\t\t2. HOUGH\n\n\t\t—) For Filtering\n\t\t\t1. XEXTREMES\n\t\t\t2. YEXTREMES\n\t\t\t3. ANGLE\n\t\t\t4. HORIZONTALS\n\t\t\t5. VERTICALS\n\t\"\"\"\n\tCONTOURS = 1\n\tHOUGH = 2\n\t\n\tXEXTREMES=1\n\tYEXTREMES=2\n\tANGLE=3 #TODO\n\tHORIZONTALS=4\n\tVERTICALS=5\n\n\n\tdef __init__(self, algorithm=None, filtering_criteria=None, quantity=None):\n\t\t\"\"\"\n\t\t\tConstructor identifies the detection specifications\n\t\t\t\n\t\t\tParameters\n\t\t\t——————————\n\t\t\talgorithm ———> algorithm to find all lines in the canny frame (default=HOUGH_LINES)\n\t\t\tfiltering_criteria ———> list of filtering sequences\n\t\t\tquantity ———> filtering by area as an optional excess filtering step\n\t\t\"\"\"\n\t\tself.__algorithm = algorithm\n\t\tself.minLength = 1\n\t\tself.minLineDistance = 20\n\n\t\tself.__quantity = quantity\n\t\tself.__filtering = filtering_criteria\n\n\tdef _toCanny(frame):\n\t\t\"\"\"\n\t\t\t#TODO : use salama's class\n\t\t\tfunction constructs the canny version of a frame\n\t\t\t:param frame: workpiece frame \n\t\t\t:return: canny version\n\t\t\"\"\"\n\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\terosion = cv2.erode(gray, (5, 5), iterations=1)\n\t\tcanny = cv2.Canny(erosion, 120, 80)\n\t\tcv2.imshow('canny', canny)\n\t\treturn canny\t\t\n\n\tdef _houghAlgorithm(self, canny):\n\t\tlines = cv2.HoughLinesP(canny, rho=1, theta=np.pi/180.0, threshold=5,minLineLength=self.minLength, maxLineGap=5)\n\t\tresult = []\n\t\ttry:\n\t\t\tfor line in lines:\n\t\t\t\tresult.append(Line(line))\n\t\texcept:\n\t\t\tpass\n\t\treturn result\n\n\tdef _contoursAlgorithm(self, contours, frame):\n\t\t\"\"\"\n\t\t\tMethod filters the found contours to return only those representing lines\n\t\t\"\"\"\n\t\tresult = []\n\t\tfor contour in contours:\n\t\t\t[vx,vy,x,y] = cv2.fitLine(contour, cv2.DIST_L2,0,0.01,0.01)\n\t\t\tthis_line = Line([(int(x), int(y)), (int(vx*2), int(vy*2))], cv2.contourArea(contour)).draw(frame)\n\t\t\tresult.append(this_line)\n\t\t\t# print(contour)\n\t\t\t# temp = contour.tolist()\n\t\t\t# list_version = []\n\t\t\t# for cnt in temp:\n\t\t\t# \tlist_version.append(cnt[0])\n\t\t\t# pts = [(list_version[0][0], list_version[0][1]), (list_version[-1][0], list_version[-1][1])]\n\t\t\t# this_line = Line(pts, cv2.contourArea(contour))\n\t\t\t\n\t\t\t# if this_line.length() > self.minLength:\n\t\t\t# \tresult.append(this_line)\n\t\treturn result\n\n\tdef _eliminateRedundancies(self, lines):\n\t\t\"\"\"\n\t\t\tAyman Optimized gedan here\n\t\t\"\"\"\n\t\toriginalTolerances = (Line.horizontalTolerance, Line.verticalTolerance)\n\t\tLine.horizontalTolerance = 1\n\t\tLine.verticalTolerance = 1\n\n\t\tself._verticals = LineDetector.__filterVerticals(lines)\n\t\tself._horizontals = LineDetector.__filterHorizontals(lines)\n\t\t\n\t\tlength = len(self._verticals)\n\t\ti = 0\n\t\twhile i < length:\n\t\t\tj = i+1\n\t\t\twhile j < length:\n\t\t\t\tif abs(self._verticals[i].perpDistance(self._verticals[j])) < self.minLineDistance:\n\t\t\t\t\tself._verticals.remove(self._verticals[j])\n\t\t\t\t\tlength -= 1\t\n\t\t\t\tj += 1\n\t\t\ti += 1\t\t\n\n\t\tlength = len(self._horizontals)\n\t\ti = 0\n\t\twhile i < length:\n\t\t\tj = i+1\n\t\t\twhile j < length:\n\t\t\t\tif abs(self._horizontals[i].perpDistance(self._horizontals[j])) < self.minLineDistance:\n\t\t\t\t\tself._horizontals.remove(self._horizontals[j])\n\t\t\t\t\tlength -= 1\t\n\t\t\t\tj += 1\n\t\t\ti += 1\n\n\t\tLine.horizontalTolerance = 
originalTolerances[0]\n\t\tLine.verticalTolerance = originalTolerances[1]\n\n\tdef xExtremes(self, lines):\n\t\t\"\"\"\n\t\t\tFunction returns the leftmost and rightmost vertical lines\n\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tlines ———> list of all found lines (list of line objects)\n\n\t\t\t@return : list of two line objects\n\t\t\"\"\"\n\t\tleftmost = None\n\t\trightmost = None\n\t\tfor line in lines:\n\t\t\tif line.isVertical():\n\t\t\t\tif not leftmost or line.pts[0][0] < leftmost.pts[0][0] - 10:\n\t\t\t\t\tleftmost = line\n\n\t\t\t\tif not rightmost or line.pts[0][0] > rightmost.pts[0][0] + 10:\n\t\t\t\t\trightmost = line\n\n\t\treturn [leftmost, rightmost]\t\n\n\tdef yExtremes(self, lines):\n\t\t\"\"\"\n\t\t\tFunction returns the top and bottom horizontal lines\n\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tlines ———> list of all found lines (list of line objects)\n\n\t\t\t@return : list of two line objects\n\t\t\"\"\"\n\t\ttopmost = None\n\t\tbottommost = None\n\t\tfor line in lines:\n\t\t\tif line.isHorizontal():\n\t\t\t\tif not topmost or line.pts[0][1] < topmost.pts[0][1] - 10:\n\t\t\t\t\ttopmost = line\n\n\t\t\t\tif not bottommost or line.pts[0][1] > bottommost.pts[0][1] + 10:\n\t\t\t\t\tbottommost = line\n\n\t\treturn [topmost, bottommost]\t\t\t\n\n\tdef run(self, frame):\n\t\t\"\"\"\n\t\t\tThis function does the following:-\n\t\t\t\t1- Creates the canny version of the frame\n\t\t\t\t2- Extracts all lines according to the specified algorithm\n\t\t\t\t3- Applies the desired filtering criterion\n\n\t\t\tParameters\n\t\t\t——————————\n\t\t\tframe ———> Workpiece frame\n\t\t\"\"\"\n\t\tcanny = LineDetector._toCanny(frame)\n\t\t# Detection\n\t\tif self.__algorithm == LineDetector.HOUGH:\n\t\t\tresult = self._houghAlgorithm(canny)\n\t\telif self.__algorithm == LineDetector.CONTOURS:\n\t\t\tcontours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\t\t\tresult = self._contoursAlgorithm(contours, frame)\n\t\tprint(result)\n\t\tself._eliminateRedundancies(result)\n\t\tprint(result)\n\t\t# Now you have the lines stored in self._horizontals and self._verticals\n\t\t# Filtering\n\t\t\n\t\tif self.__filtering != None:\n\t\t\tresult = []\n\t\t\tfor sequence in self.__filtering:\n\t\t\t\tsequence_result = None\n\t\t\t\tfor criterion in sequence:\n\t\t\t\t\tif sequence_result is None:\n\t\t\t\t\t\tsequence_result = []\n\t\t\t\t\t\tsequence_result.extend(self._horizontals)\n\t\t\t\t\t\tsequence_result.extend(self._verticals)\n\t\t\t\t\n\t\t\t\t\tif criterion == LineDetector.XEXTREMES:\n\t\t\t\t\t\tsequence_result = self.xExtremes(sequence_result)\n\t\t\t\t\n\t\t\t\t\telif criterion == LineDetector.YEXTREMES:\n\t\t\t\t\t\tsequence_result = self.yExtremes(sequence_result)\n\n\t\t\t\t\telif criterion == LineDetector.VERTICALS:\n\t\t\t\t\t\tsequence_result = LineDetector.__filterVerticals(sequence_result)\n\n\t\t\t\t\telif criterion == LineDetector.HORIZONTALS:\n\t\t\t\t\t\tsequence_result = LineDetector.__filterHorizontals(sequence_result)\n\t\t\t\tresult.extend(sequence_result)\n\t\t\n\t\tif self.__quantity:\n\t\t\tresult = self.__filterByArea(result)\n\t\treturn result\n\n\tdef __filterVerticals(lines):\n\t\t\"\"\"\n\t\t\tFilter vertical lines\n\t\t\"\"\"\n\t\tif not lines:\n\t\t\treturn []\n\n\t\tresult = []\n\t\tfor line in lines:\n\t\t\tif line and line.isVertical():\n\t\t\t\tresult.append(line)\n\t\treturn result\n\n\tdef __filterHorizontals(lines):\n\t\t\"\"\"\n\t\t\tFilter horizontal lines\n\t\t\"\"\"\n\t\tif not lines:\n\t\t\treturn []\n\n\t\tresult 
= []\n\t\tfor line in lines:\n\t\t\tif line and line.isHorizontal():\n\t\t\t\tresult.append(line)\n\t\treturn result\n\n\tdef __filterByArea(self, lines):\n\t\t\"\"\"\n\t\t\tFilter lines by area\n\t\t\"\"\"\n\t\tlines.sort(key=getArea, reverse=True)\n\t\treturn lines[:self.__quantity]\n\nif __name__ == \"__main__\":\n\tcap = cv2.VideoCapture(\"http://localhost:8070/stream?topic=/robotech/robotech/cameraright/camera_image\")\n\n\twhile cap.isOpened():\n\t\t_, img = cap.read()\n\n\t\tDetector = LineDetector(LineDetector.HOUGH, [[LineDetector.XEXTREMES], [LineDetector.YEXTREMES]])\n\t\tlines = Detector.run(img)\n\t\tfor line in lines:\n\t\t\tif line:\n\t\t\t\tif line.isVertical():\n\t\t\t\t\tline.draw(img)\n\t\t\t\telse:\n\t\t\t\t\tline.draw(img)\n\n\t\tcv2.imshow('lol', img)\t\n\t\tkey = cv2.waitKey(20)\n\t\tif key == 27:\n\t\t\tbreak;","repo_name":"lawaty/CV-Libraries","sub_path":"line_detection.py","file_name":"line_detection.py","file_ext":"py","file_size_in_byte":9343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74941036414","text":"try:\n import cv2\n import numpy as np\nexcept ImportError as e:\n from pip._internal import main as install\n packages = [\"numpy\", \"opencv-python\"]\n for package in packages:\n install([\"install\", package])\nfinally:\n pass\n\ndef warpPerspectiveImage():\n image = cv2.imread(\"cards.jpg\")\n width, height = 250,350\n pts1 = np.float32([[111,219],[287,188],[154,482],[352,440]])\n pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n image_wrap = cv2.warpPerspective(image, matrix, (width, height))\n cv2.imshow(\"Phones\", image_wrap)\n cv2.waitKey(0)\n return cv2.destroyAllWindows()\nwarpPerspectiveImage()","repo_name":"CrispenGari/opencv-python","sub_path":"beginner/Open-Computer-Version-Chapter-5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"35939082637","text":"import numpy as np\r\nfrom skimage import color\r\nfrom skimage import measure\r\nimport os, jpype\r\n\r\nEPS = 1e-15\r\n\r\n# The refinement stage of iteratively refined structural entropy.\r\ndef refinement_SE(adj, y=None):\r\n adj -= np.diag(np.diag(adj))\r\n tol = 1e-10\r\n max_iter = 300\r\n if y is None:\r\n n, k = adj.shape[0], 3\r\n y = np.random.randint(k, size=n)\r\n else:\r\n n, k = adj.shape[0], np.amax(y) + 1\r\n\r\n W = np.array(adj.copy(), dtype=np.float64)\r\n D = np.diag(np.sum(W, axis=-1, keepdims=False))\r\n S = np.eye(k)[y.reshape(-1)].astype(np.float64)\r\n volW = np.sum(W, dtype=np.float64)\r\n links = np.diagonal(np.matmul(np.matmul(S.T, W), S)).copy()\r\n degree = np.diagonal(np.clip(np.matmul(np.matmul(S.T, D), S), a_min=EPS, a_max=None)).copy()\r\n ses = (-links / volW) * np.log2(np.clip(degree, a_min=1e-10, a_max=None) / volW)\r\n z = y.copy()\r\n se = np.sum(ses)\r\n for iter_num in range(max_iter):\r\n for i in range(n):\r\n zi = z[i]\r\n links[zi] -= np.matmul(W[i,:], S[:,zi]) + np.matmul(S[:,zi].T, W[:,i])\r\n degree[zi] -= D[i,i]\r\n ses[zi] = (-links[zi]/volW) * np.log2(np.clip(degree[zi], a_min=1e-10, a_max=None)/volW)\r\n S[i,zi] = 0\r\n z[i] = -1\r\n\r\n links_new = links.copy()\r\n degree_new = degree.copy()\r\n links_new += np.matmul(W[i,:], S) + np.matmul(W[:, i].T, S)\r\n degree_new += D[i,i]\r\n ses_new = (-links_new/volW) * np.log2(np.clip(degree_new, a_min=1e-10, a_max=None)/volW)\r\n 
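# added note: delta_ses[c] is the change in community c's entropy term if node i is assigned to it; the argmax below picks the best move\r\n            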
delta_ses = ses_new - ses\r\n\r\n            opt_i = np.argmax(delta_ses)\r\n\r\n            zi = opt_i\r\n            z[i] = zi\r\n            S[i,zi] = 1\r\n            links[zi] = float(links_new[zi])\r\n            degree[zi] = float(degree_new[zi])\r\n            ses[zi] = float(ses_new[zi])\r\n        if np.sum(ses) - se < tol:\r\n            break\r\n        se = np.sum(ses)\r\n    return z\r\n\r\n# The merging stage of iteratively refined structural entropy.\r\ndef merging(adj, img_name, sp_scale=None):\r\n    img_name = img_name.split('.')[0]\r\n    if sp_scale == None:\r\n        adj_path = f\"./{img_name}_adj.txt\"\r\n        partition_path = f\"./{img_name}_partition.txt\"\r\n    else:\r\n        adj_path = f\"./{img_name}_{sp_scale}_adj.txt\"\r\n        partition_path = f\"./{img_name}_{sp_scale}_partition.txt\"\r\n    adj_path = os.path.abspath(adj_path)\r\n    partition_path = os.path.abspath(partition_path)\r\n    with open(adj_path, 'w') as f:\r\n        f.write('{}\\n'.format(int(adj.shape[0])))\r\n        for i in range(adj.shape[0]):\r\n            for j in range(i + 1, adj.shape[1]):\r\n                if adj[i, j] > 0:\r\n                    f.write('{}\\t{}\\t{}\\n'.format(int(i + 1), int(j + 1), adj[i, j]))\r\n    Merging = jpype.JClass(\"algo.Merging\")\r\n    Merging.main([adj_path, partition_path])\r\n    if os.path.exists(adj_path):\r\n        os.remove(adj_path)\r\n    # read partition file\r\n    y = np.zeros(adj.shape[0], dtype=int)\r\n    with open(partition_path, 'r') as f:\r\n        for comid, line in enumerate(f.readlines()):\r\n            line = line.strip().split('\\t')\r\n            for node in line:\r\n                y[int(node) - 1] = comid\r\n    if os.path.exists(partition_path):\r\n        os.remove(partition_path)\r\n    return y","repo_name":"zengguangjie/SLED","sub_path":"algo/iterative_refinement_SE.py","file_name":"iterative_refinement_SE.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7958162624","text":"import sys\r\nimport heapq\r\nsys.stdin = open('input.txt')\r\n\r\n# Dijkstra: algorithm for finding the shortest path when edges carry weights\r\n# It can be implemented simply by importing and using heapq (a min-heap)\r\n# The code structure is similar to BFS\r\n\r\nT = int(input())\r\n\r\nfor k in range(1, T + 1):\r\n    N, E = map(int, input().split())\r\n    temp = [list(map(int, input().split())) for i in range(E)] # [[s, e, w], ...]\r\n    dist = [9999 for i in range(N + 1)] # initialize every node's distance to infinity\r\n    v = [[] for i in range(N + 1)]\r\n    for i in temp:\r\n        v[i[0]].append([i[1], i[2]]) # the adjacency list is directed; store the weight alongside\r\n\r\n    # set the start node's distance to 0 and begin\r\n    que = []\r\n    heapq.heappush(que, [0, 0]) # weight, idx\r\n    dist[0] = 0\r\n\r\n    while que:\r\n        d, cur = heapq.heappop(que) # pop the smallest entry; d is the accumulated weight from start to here, cur is the current node\r\n\r\n        if cur == N:\r\n            print('#{} {}'.format(k, d))\r\n            break\r\n\r\n        if d > dist[cur]: # replaces a visited check\r\n            continue\r\n\r\n        # look at every node reachable from the current position\r\n        # if the weight accumulated so far plus the edge weight to the next node is smaller than the best known distance to that node, update it\r\n        for i in v[cur]:\r\n            nd = dist[cur] + i[1]\r\n            if dist[i[0]] > nd:\r\n                dist[i[0]] = nd\r\n                heapq.heappush(que, [nd, i[0]])\r\n","repo_name":"ggpp0909/problem_solving","sub_path":"Python/SWEA/1014/5251_최소이동거리/5251_최소이동거리.py","file_name":"5251_최소이동거리.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"35448593582","text":"import os\r\nimport urllib.request as req\r\nfrom urllib.parse import urlparse\r\n\r\n\r\ndef download(url, to=None):\r\n    if to:\r\n        localfile = to\r\n    else:\r\n        fname = os.path.basename(urlparse(url).path)\r\n        localfile = os.path.join('.', fname)\r\n    print(\"Downloading {}\".format(localfile))\r\n\r\n    if not os.path.isfile(localfile):\r\n        req.urlretrieve(url, localfile)\r\n\r\n    return 
localfile\r\n","repo_name":"minimekill/BloodyTelevision","sub_path":"getter.py","file_name":"getter.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21901734751","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nimport requests\nimport socket\nfrom PyQt5 import uic\nfrom delete import Delete\nfrom new import Add\nfrom search import Search\nfrom update import Update \n\nimport sys\nimport time\n\nclass Menu(QMainWindow):\n\t\"\"\"docstring for tipo\"\"\"\n\tdef __init__(self,delete,search,add,update,ip,name):\n\t\t\n\t\tQMainWindow.__init__(self)\n\t\tuic.loadUi(\"Menu.ui\",self)\n\t\tself.setObjectName(\"window\")\n\t\tself.delete=delete\n\t\tself.search=search\n\t\tself.add=add\n\t\tself.update=update\n\t\tself.labelip.setText(name+\" estas conectado en \"+ip)\n\t\tself.botonbuscar.clicked.connect(self.opensearch)\n\t\tself.botonnuevo.clicked.connect(self.openadd)\n\t\tself.botonactual.clicked.connect(self.openupdate)\n\t\tself.botoneliminar.clicked.connect(self.opendelete)\n\t\t\n\t\twith open(\"style.css\") as f:\n\t\t\tself.setStyleSheet(f.read())\n\t\n\tdef opensearch(self):\n\t\tself.search.show()\n\tdef openadd(self):\n\t\tself.add.show()\n\tdef openupdate(self):\n\t\tself.update.show()\n\tdef opendelete(self):\n\t\tself.delete.show()\n\t\t\n\n\nname = socket.gethostname()\nr = requests.get('http://127.0.0.1:3000/get_my_ip', params={'hostname':str(name) })\napp=QApplication(sys.argv)\n_delete=Delete()\n_search=Search()\n_new=Add()\n_update=Update()\n_menu=Menu(_delete,_search,_new,_update,str(r.json()['ip']),name)\n_menu.show()\napp.exec_()","repo_name":"toodaniels/System-PyMovies","sub_path":"Clientes/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"35606378152","text":"#!/usr/bin/env python #\r\n# -*- coding: utf-8 -*- #\r\n# @Time : 2018-03-29 8:53 #\r\n# @author : xuejf #\r\n# @email :171521952@qq.com #\r\n# -------------------------- #\r\nfrom configparser import *\r\n\r\n\r\nclass ConfigFile ():\r\n #_in_data_dir = r'E:\\work\\auto_test2\\in_data'\r\n #_out_data_dir = r'E:\\work\\auto_test2\\out_data'\r\n _in_data_dir=\"\"\r\n _out_data_dir=\"\"\r\n def __init__(self):\r\n #print(\"enter __init__()\")\r\n cf = ConfigParser()\r\n cf.read(\"init.conf\", encoding=\"utf-8\")\r\n #secs = cf.sections()\r\n #print(secs)\r\n #opts = cf.options(\"base\")\r\n #kvs = cf.items(\"db\")\r\n # read by type\r\n if(self._in_data_dir.strip()==\"\"):\r\n self._in_data_dir = cf.get(\"base\", \"in_data_dir\")\r\n if(self._out_data_dir.strip()==\"\"):\r\n self._out_data_dir = cf.get(\"base\", \"out_data_dir\")\r\n #print(self._in_data_dir)\r\n #print(self._out_data_dir)\r\n\r\ncf=ConfigFile()\r\n\r\n","repo_name":"xuejf/auto-test","sub_path":"config/config_g.py","file_name":"config_g.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25050078304","text":"from utils import token_required\nimport db\nfrom flask import request\nfrom sqlalchemy import Table, MetaData\nfrom sqlalchemy.exc import OperationalError, DataError, IntegrityError\nfrom flask_cors import cross_origin\nfrom . 
import handler\n\n\n@handler.route(\"/create\", methods=[\"POST\"])\n@cross_origin()\n@token_required\ndef create_table_data():\n    data = request.get_json()\n    table = data.get(\"table\")\n    if table not in db.get_tables_in_db():\n        return {\"error\": \"Table does not exist\"}, 400\n    row = data.get(\"row\")\n    db.clean_data(row, table)\n    current_table = Table(table, MetaData(), autoload_with=db.engine)\n    try:\n        db.engine.execute(current_table.insert(), row)\n        db.session.commit()\n        return {\"message\": \"Successfully Created\"}, 200\n    except (OperationalError, DataError, IntegrityError) as e:\n        return {\"error\": \"Failed to create row, {0}\".format(e.orig)}, 400\n","repo_name":"agzuniverse/Chathuram","sub_path":"src/server/handlers/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"36360541238","text":"# Minimum rectangle\n# Compare each w and h, put the larger of the two into one list and the rest into another list. Taking the largest value from each of the two lists and multiplying them gives the answer.\n\n# Build the w and h lists.\n# While iterating with a for loop, put the larger of w, h into the w list and the smaller into the h list.\n# The product of the largest values from the two lists is the answer.\n\n#1\ndef solution(sizes):\n    return max(max(x) for x in sizes) * max(min(x) for x in sizes)\n#2\nsolution = lambda sizes: max(sum(sizes, [])) * max(min(size) for size in sizes)\n#3\ndef solution(sizes):\n    answer = 0\n    \n    sizes = [sorted(size, reverse=True) for size in sizes]\n    \n    widths = [size[0] for size in sizes]\n    heights = [size[1] for size in sizes]\n    \n    width, height = max(widths), max(heights)\n    \n    answer = width * height\n    \n    return answer\n","repo_name":"BBstudyFighting/algorithm","sub_path":"18주차/SUYEON/SQL/programmers_coding test9.PY","file_name":"programmers_coding test9.PY","file_ext":"PY","file_size_in_byte":868,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"732868355","text":"#!/usr/bin/env python\nimport pandas as pd\nimport numpy as np\n\nimport stats_feature as sf\nimport cross_feature as cf\n\ndef itera(dcols):\n    for key, val in dcols.items():\n        print(key, val)\n\n##### load the train file into a dataframe ##### \ndf = pd.read_csv('./LoanStats3b.csv', header=1, low_memory=False) \n# delete last two rows\nnlines = len(df)\ndf = df.drop(df.index[[nlines-2, nlines-1]])\n\n##### feature visualization #####\n\ncols = df.columns.tolist()\ndict_cols = {}\nfor icol in range(len(cols)):\n    dict_cols[icol] = cols[icol] \n\nitera(dict_cols)\nscol = int(input('Feature to Visualize [1-51], [-1]->Exit: '))\nwhile (scol != -1):\n    sf.vis_feature(df[cols[scol]])\n    scol = int(input('Feature to Visualize [1-51], [-1]->Exit: ')) \n\n\nindex_train = (df['loan_status'] == 'Fully Paid') | (df['loan_status'] == 'Charged Off')\ntrain_set = df[index_train]\n\nscol = int(input('Feature to Couple with Loan Status [1-51], [-1]->Exit: '))\nwhile (scol != -1):\n    cf.cross_hist(train_set[cols[scol]], train_set[cols[16]])\n    scol = int(input('Feature to Visualize [1-51], [-1]->Exit: ')) \n\n\n","repo_name":"jaurora/MachineLearning","sub_path":"LendingClub/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"13983536663","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# AUTHOR: Ti Bai\n# EMAIL: tibaiw@gmail.com\n# AFFILIATION: MAIA Lab | UT Southwestern Medical Center\n# DATETIME: 9/22/2022\n\n# sys\nimport os\nimport shutil\n\n# monai\nfrom monai.apps.auto3dseg import (\n    DataAnalyzer,\n    BundleGen,\n    
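# added note: ensemble and bundle-history helpers; the AlgoEnsemble* imports are only exercised when is_ensemble is True\n    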
AlgoEnsembleBestN,\n AlgoEnsembleBuilder,\n export_bundle_algo_history,\n import_bundle_algo_history,\n)\nfrom monai.auto3dseg import algo_to_pickle\nfrom monai.bundle.config_parser import ConfigParser\n\n\nif __name__ == '__main__':\n ### setup the experiement parameters\n is_data_analysis = False \n need_customized_train_params = False\n\n data_root = r'./data'\n datalist_file = r'./data/task1_AMOS.json'\n result_dir = r'result'\n dataset_name = 'MONAI'\n\n num_fold = 5\n model_name = ['segresnet'] # choose from [\"segresnet_small\", \"segresnet\", \"segresnet2d\", \"dints\", \"swinunetr\"]\n template_path = r'assets/algorithm_templates'\n task = 'segmentation'\n modality = 'CT'\n is_ensemble = False ##### ALWAYS SET IT AS FALSE UNLESS YOU REVISE THIS SCRIPT!!!\n\n train_param = {}\n if need_customized_train_params:\n train_data_size = 100\n num_iterations = 100000\n num_images_per_batch = 1\n num_iterations_per_validation = 1000\n train_param = {\n \"num_iterations\": num_iterations,\n \"num_iterations_per_validation\": num_iterations_per_validation,\n \"num_images_per_batch\": num_images_per_batch,\n \"num_epochs\": num_iterations // (train_data_size // num_images_per_batch),\n \"num_warmup_iterations\": int(0.01 * num_iterations),\n }\n\n # step 0: prepare the environment\n if not os.path.isdir(result_dir):\n os.makedirs(result_dir)\n\n data_src_cfg = {\n \"name\": dataset_name,\n \"task\": task,\n \"modality\": modality,\n \"datalist\": datalist_file,\n \"dataroot\": data_root,\n }\n input = os.path.join(result_dir, 'input.yaml')\n ConfigParser.export_config_file(data_src_cfg, input)\n\n datastats_file = os.path.join(result_dir, 'data_stats.yaml')\n\n # step 1: Data Analysis\n print('Step 1: Analyzing the dataset and saving the results to {} ...'.format(datastats_file))\n if is_data_analysis:\n analyser = DataAnalyzer(datalist_file, data_root, output_path=datastats_file)\n datastat = analyser.get_all_case_stats()\n\n # step 2: Algorithm Generation (algo_gen)\n print('Step 2: Generating the algorithm based on template from {} and saving the results to {} ...'.format(template_path, result_dir))\n if not os.path.exists(os.path.join(result_dir, 'algorithm_templates')):\n shutil.copytree(template_path, os.path.join(result_dir, 'algorithm_templates'))\n default_algos = {\n \"segresnet_small\": dict(_target_=\"segresnet_small.scripts.algo.SegresnetAlgo\",\n template_path=os.path.join(result_dir, \"algorithm_templates\", \"segresnet_small\")),\n \"segresnet\": dict(_target_=\"segresnet.scripts.algo.SegresnetAlgo\",\n template_path=os.path.join(result_dir, \"algorithm_templates\", \"segresnet\")),\n \"segresnet2d\": dict(_target_=\"segresnet2d.scripts.algo.Segresnet2dAlgo\",\n template_path=os.path.join(result_dir, \"algorithm_templates\", \"segresnet2d\")),\n \"dints\": dict(_target_=\"dints.scripts.algo.DintsAlgo\",\n template_path=os.path.join(result_dir, \"algorithm_templates\", 'dints')),\n \"swinunetr\": dict(_target_=\"swinunetr.scripts.algo.SwinunetrAlgo\",\n template_path=os.path.join(result_dir, \"algorithm_templates\", 'swinunetr'))\n }\n\n used_algorithms = {x: default_algos[x] for x in model_name if x in default_algos}\n\n bundle_generator = BundleGen(\n algo_path=result_dir,\n algos=used_algorithms,\n data_stats_filename=datastats_file,\n data_src_cfg_name=input,\n )\n\n bundle_generator.generate(result_dir, num_fold=num_fold)\n\n # Getting and Saving the history to hard drive\n history = bundle_generator.get_history()\n export_bundle_algo_history(history)\n\n # step 3: generate 
the train command\n print('Step 3: Generating the training command ...')\n #history = import_bundle_algo_history(result_dir, only_trained=False)\n for task in history:\n current_command = 'python '\n for current_algorithm_name, _ in task.items():\n current_algorithm_folder = os.path.join(result_dir, current_algorithm_name)\n current_train_script = os.path.join(current_algorithm_folder, 'scripts', 'train.py')\n current_command += current_train_script + ' run --config_file='\n\n all_config_files = []\n for current_config_file in os.listdir(os.path.join(current_algorithm_folder, 'configs')):\n current_config_file = os.path.join(current_algorithm_folder, 'configs', current_config_file)\n all_config_files.append(f\"'{current_config_file}'\")\n\n current_command += '\"[' + ','.join(all_config_files) + ']\"'\n\n for k, v in train_param.items():\n current_command += f\" --{k}={v}\"\n\n with open(f'{current_algorithm_name}.sh', 'w') as f:\n f.write('export CUDA_VISIBLE_DEVICES=your_device_id' + '\\n')\n f.write(current_command)\n\n # step 4: run the command\n print('Step 4: Please set the GPU device id (if necessary) and run the training script ...')\n\n # step 5: ensemble\n if is_ensemble:\n print('Step 5: Ensembling the result ...')\n history = import_bundle_algo_history(result_dir, only_trained=True)\n builder = AlgoEnsembleBuilder(history, input)\n builder.set_ensemble_method(AlgoEnsembleBestN(n_best=5))\n ensembler = builder.get_ensemble()\n preds = ensembler()\n\n print('Congrats! May the force be with you ...')\n","repo_name":"baiti01/Auto3DSeg-monai","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"41980222932","text":"from flask import Flask, render_template\nimport sqlalchemy\n\napp = Flask(__name__)\n\nengine = sqlalchemy.create_engine('mysql+pymysql://@127.0.0.1/game_recommendation?charset=utf8mb4')\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return \"Hello, World !\\n\\nAppend /recommendation/ to the current \" \\\n \"url\\n\\nSome available userids 76561197960355015, 76561197960385706\"\n\n\n@app.route('/recommendation/')\ndef recommendation(user_id):\n # retrieve recommendation for 'user_id'\n results = engine.execute('''\n SELECT g0, g1, g2, g3, g4, g5, g6, g7, g8, g9 FROM tbl_recommendation_games WHERE user_id=%s;\n ''' % user_id).first()\n\n lst_recommend_games = []\n for app_id in list(results):\n app_data = engine.execute('''\n SELECT name, initial_price, header_image FROM tbl_steam_app WHERE steam_appid=%s;\n ''' % app_id).first()\n if app_data != None:\n lst_recommend_games.append(app_data)\n\n return render_template('recomendation.html', user_id=user_id, lst_recommend_games=lst_recommend_games)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"jianleisun/recommendation_system_project","sub_path":"rs_flask_web_application.py","file_name":"rs_flask_web_application.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"6614698614","text":"import tensorflow as tf\nfrom keras import layers\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nX = np.array([[0, 0],\n [0, 1],\n [1, 0],\n [1, 1]], dtype=np.float32)\ny_and = np.array([[0], [0], [0], [1]], dtype=np.float32)\ny_or = np.array([[0], [1], [1], [1]], dtype=np.float32)\n\nx_and = layers.Input(shape = (2,))\nout_and = layers.Dense(units = 1, 
activation = 'sigmoid', name = 'and')(x_and)\n\nx_or = layers.Input(shape = (2,))\nout_or = layers.Dense(units = 1, activation = 'sigmoid', name = 'or')(x_or) # output unit is 1\n\nmodel = tf.keras.Model(inputs = [x_and, x_or], outputs = [out_and, out_or])\nmodel.summary()\n\nopt = tf.keras.optimizers.RMSprop(learning_rate=0.1)\nmodel.compile(optimizer=opt, loss='mse', metrics=['accuracy'])\n\n\nret = model.fit(x = [X, X], y = [y_and, y_or], epochs=100, batch_size=4, verbose=0)\ntest = model.evaluate(x = [X, X], y = [y_and, y_or], verbose=0)\n\nprint('total loss = ', test[0])\nprint('AND : loss = {}, acc = {}'.format(test[1], test[3]))\nprint('OR : loss = {}, acc = {}'.format(test[2], test[4]))\n\nplt.plot(ret.history['loss'], 'r--', label = 'loss')\nplt.plot(ret.history['and_loss'], 'g--', label = 'and_loss')\nplt.plot(ret.history['or_loss'], 'b--', label = 'or_loss')\nplt.plot(ret.history['and_accuracy'], 'g-', label = 'and_accuracy')\nplt.plot(ret.history['or_accuracy'], 'b-', label = 'or_accuracy')\nplt.xlabel('epochs')\nplt.ylabel('loss and accuracy')\nplt.legend(loc='best')\nplt.show()","repo_name":"YeDongVibe/Tensorflow_Class","sub_path":"P.Song/FunctionalAPI/FunctionalAPI(AND,OR).py","file_name":"FunctionalAPI(AND,OR).py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29778955905","text":"import os\nimport argparse\nimport cv2\nimport numpy as np\nimport sys\nimport time\nfrom threading import Thread\nimport importlib.util\nimport pytesseract\npytesseract.pytesseract.tesseract_cmd = r\"C:\\Users\\emielyn\\AppData\\Local\\Programs\\Tesseract-OCR\\tesseract.exe\"\nimport pyrebase\nfrom datetime import date\nfrom datetime import datetime\nimport imutils\nimport Levenshtein\n\nfrom mmocr.apis import TextRecInferencer\ninferencer = TextRecInferencer(model='SATRN', weights=r'C:\\Users\\emielyn\\mmocr\\best_IC15_recog_word_acc_epoch_77.pth')\n\n# Initialize the Firebase app with your service account credentials\n\nfirebaseConfig = {\n    \"apiKey\": \"AIzaSyB_4cNoh3klH4mKPSd7dhJzr5QUGoLihy8\",\n    \"authDomain\": \"scanmemaster-9da58.firebaseapp.com\",\n    \"projectId\": \"scanmemaster-9da58\",\n    \"databaseURL\" : \"https://scanmemaster-9da58-default-rtdb.firebaseio.com/\",\n    \"storageBucket\": \"scanmemaster-9da58.appspot.com\",\n    \"messagingSenderId\": \"270970295536\",\n    \"appId\": \"1:270970295536:web:02ecd24ee665578e6d9e35\",\n    \"measurementId\": \"G-27WEKS22GB\"\n}\n\nfirebase = pyrebase.initialize_app(firebaseConfig)\ndb = firebase.database()\n\nclass VideoStream:\n    \"\"\"Camera object that controls video streaming from the Picamera\"\"\"\n    def __init__(self, resolution=(420, 480), framerate=30):\n        self.stream = cv2.VideoCapture(\"newCamVid1.mp4\")\n        ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))\n        ret = self.stream.set(3, resolution[0])\n        ret = self.stream.set(4, resolution[1])\n\n        # Get the first frame to determine its shape\n        _, self.frame = self.stream.read()\n        self.original_frame = self.frame.copy() # create a copy of the original frame\n        self.output_width = 650\n        self.output_height = int(self.frame.shape[0] / (self.frame.shape[1] / self.output_width))\n\n        self.stopped = False\n\n    def start(self):\n        Thread(target=self.update, args=()).start()\n        return self\n\n    def update(self):\n        while True:\n            if self.stopped:\n                self.stream.release()\n                return\n\n            # Read the next frame from the video stream\n            (self.grabbed, frame) = self.stream.read()\n\n            # Store the 
original frame\n self.original_frame = frame\n\n # Resize the original frame to the desired output resolution\n resized_frame = cv2.resize(self.original_frame, (self.output_width, self.output_height))\n\n # Store the resized frame\n self.frame = resized_frame\n\n def read(self):\n return self.frame\n\n def read_original(self):\n return self.original_frame\n\n def stop(self):\n self.stopped = True\n\n\n\n# class VideoStream:\n# \"\"\"Camera object that controls video streaming from the Picamera\"\"\"\n# # def __init__(self,resolution=(640,480),framerate=30): :820\n# def __init__(self,resolution=(420,480),framerate=30):\n# # self.stream = cv2.VideoCapture(0)\n\n# self.stream = cv2.VideoCapture(\"newCamVid1.mp4\")\n# # Read the first frame to get its shape\n# _, self.frame = self.stream.read()\n# self.frame = imutils.resize(self.frame, width=50)\n\n# #self.stream = cv2.VideoCapture(\"rtsp://thesis:thesisisit@10.0.254.12/stream2\")\n\n# ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))\n# ret = self.stream.set(3,resolution[0])\n# ret = self.stream.set(4,resolution[1])\n \n# (self.grabbed, self.frame) = self.stream.read()\n\n# self.stopped = False\n\n# def start(self):\n# Thread(target=self.update,args=()).start()\n# return self\n\n# def update(self):\n# while True:\n# if self.stopped:\n# self.stream.release()\n# return\n\n# (self.grabbed, self.frame) = self.stream.read()\n\n# def read(self):\n# return self.frame\n\n# def stop(self):\n# self.stopped = True\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--modeldir', help='Folder the .tflite file is located in',\n required=True)\nparser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',\n default='detect.tflite')\nparser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',\n default='labelmap.txt')\nparser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',\n default=0.5)\nparser.add_argument('--resolution', help='Desired webcam resolution in WxH. 
If the webcam does not support the resolution entered, errors may occur.',\n default='640x480')\nparser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',\n action='store_true')\n\nargs = parser.parse_args()\n\nMODEL_NAME = args.modeldir\nGRAPH_NAME = args.graph\nLABELMAP_NAME = args.labels\nmin_conf_threshold = float(args.threshold)\nresW, resH = args.resolution.split('x')\nimW, imH = int(resW), int(resH)\nuse_TPU = args.edgetpu\n\n\npkg = importlib.util.find_spec('tflite_runtime')\nif pkg:\n from tflite_runtime.interpreter import Interpreter\n if use_TPU:\n from tflite_runtime.interpreter import load_delegate\nelse:\n from tensorflow.lite.python.interpreter import Interpreter\n if use_TPU:\n from tensorflow.lite.python.interpreter import load_delegate\n\nif use_TPU:\n if (GRAPH_NAME == 'detect.tflite'):\n GRAPH_NAME = 'edgetpu.tflite' \n\nCWD_PATH = os.getcwd()\n\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)\n\nPATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)\n\nwith open(PATH_TO_LABELS, 'r') as f:\n labels = [line.strip() for line in f.readlines()]\n\nif labels[0] == '???':\n del(labels[0])\n\nif use_TPU:\n interpreter = Interpreter(model_path=PATH_TO_CKPT,\n experimental_delegates=[load_delegate('libedgetpu.so.1.0')])\n print(PATH_TO_CKPT)\nelse:\n interpreter = Interpreter(model_path=PATH_TO_CKPT)\n\ninterpreter.allocate_tensors()\n\n# Get model details\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\nheight = input_details[0]['shape'][1]\nwidth = input_details[0]['shape'][2]\n\nfloating_model = (input_details[0]['dtype'] == np.float32)\n\ninput_mean = 127.5\ninput_std = 127.5\n\n\noutname = output_details[0]['name']\n\nif ('StatefulPartitionedCall' in outname): \n boxes_idx, classes_idx, scores_idx = 1, 3, 0\nelse: \n boxes_idx, classes_idx, scores_idx = 0, 1, 2\n\nframe_rate_calc = 1\nfreq = cv2.getTickFrequency()\n\nvideostream = VideoStream(resolution=(imW,imH),framerate=30).start()\ncount = 0\nexit = 0\ndetected = False\nimage_output = \"iMAGE.jpg\"\n\n\ndef checkExist():\n global exit\n global prev_txt\n while True:\n if exit == 0:\n filename = \"scanned_platenumbers.txt\"\n first_line = \"\"\n # Open the file for reading and writing\n with open(filename, \"r+\") as file:\n # Read the first line of the file\n first_line = file.readline().strip()\n # Read the remaining lines of the file\n remaining_lines = file.readlines()\n # Overwrite the file with the remaining lines\n file.seek(0)\n file.writelines(remaining_lines)\n file.truncate()\n # Close the file\n file.close()\n plateNum = first_line\n\n # print('check '+plateNum)\n\n try:\n if len(plateNum)>0:\n # Get all plate numbers in \"Vehicle_with_criminal_offense\" node\n plate_nums = db.child(\"Vehicle_with_criminal_offense\").shallow().get().val()\n \n # Find closest match to input\n global closest_match\n closest_match = None\n min_distance = float('inf')\n for num in plate_nums:\n distance = Levenshtein.distance(plateNum, num)\n if distance < min_distance:\n closest_match = num\n min_distance = distance\n \n confidence = round((1 - (min_distance / len(plateNum))) * 100, 2)\n if confidence >= 60 and closest_match not in prev_txt:\n exist = db.child(\"Vehicle_with_criminal_offense\").child(closest_match).child(\"plateNumber\").get()\n #print(exist.val())\n if exist.val() != None:\n isApprehended = db.child(\"Vehicle_with_criminal_offense\").child(closest_match).child(\"apprehended\").get()\n 
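# added note: only raise a notification when the matched vehicle is not already flagged as apprehended in Firebase\r\n                            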
#print(\"isApprehended \"+isApprehended.val())\n if isApprehended.val() != 'yes':\n print('Notify '+plateNum)\n # Create Data\n nowD = datetime.now()\n dateToday = str(date.today())\n timeToday = nowD.strftime(\"%H:%M:%S\")\n crimeScanned = db.child(\"Vehicle_with_criminal_offense\").child(closest_match).child(\"criminalOffense\").get()\n\n color = ''\n if confidence >= 60 and confidence <= 75:\n color='yellow'\n elif confidence > 75 and confidence <= 100:\n color='red'\n\n data = {\"PlateNumber\":closest_match, \"Location\": \"Lapasan Zone 4\", \"Date\": dateToday, \"Time\": timeToday, \"Notification\": \"on\", \"Apprehended\": \"no\", \"CriminalOffense\": crimeScanned.val(), 'Color': color, 'DetectedPN': plateNum}\n db.child(\"Scanned\").child((dateToday+\" \"+timeToday)).set(data)\n dataPlateNumber = {\"PlateNumber\":closest_match, \"Apprehended\": \"no\",\"CriminalOffense\": crimeScanned.val()}\n db.child(\"ScannedPlateNumber\").child(closest_match).set(dataPlateNumber)\n\n #For Notification\n db.child(\"ScannedNotification\").set(data)\n db.child(\"ScannedPlateNumberNotification\").set(dataPlateNumber)\n prev_txt.append(closest_match)\n else:\n print(\" \")\n #print(\"Plate Number dont't exist\")\n except Exception as e:\n print(\"err \"+str(e))\n #print(\"Plate Number dont't exist \"+ str(e))\n #print()\n #print('checkDatabase')\n #print('Latest data:', plateNum)\n #print()\n #time.sleep(1)\n else:\n break\n\ndef saveForQuery():\n global exit\n filename = \"scanned_platenumbers.txt\"\n prevPN = ''\n # Create the file if it doesn't exist\n if not os.path.isfile(filename):\n open(filename, \"w\").close()\n\n while True:\n if exit == 0:\n\n #Read the latest scanned on the database\n plateNum = db.child(\"ScannedQuery\").child(\"PlateNumber\").get()\n if plateNum.val() != prevPN:\n # Open the file in append mode\n with open(filename, \"a\") as file:\n # Get the text to append from the user\n plateNum = plateNum.val()\n # Append the text to the end of the file\n file.write(plateNum+ \"\\n\")\n # Close the file\n file.close()\n #print('checkdatabase')\n prevPN = plateNum\n #time.sleep(1)\n else:\n break\n\nprev_txt = []\n\ndef clear_list():\n global exit\n while True:\n if exit == 0:\n time.sleep(30)\n prev_txt.clear()\n print(\"--------------------------\")\n else:\n break\n\n\ndef ocr():\n global detected\n global exit\n global prev_txt\n while True: \n if exit == 0: \n if os.path.exists(image_output):\n try:\n img_ocr = cv2.imread(image_output)\n img_ocr = cv2.resize(img_ocr,None, fx=0.5 , fy =0.5)\n if detected == True:\n # txt =pytesseract.image_to_string(img_ocr, config='-c tessedit_char_whitelist=0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ --psm 8 --oem 3')\n # print(txt) \n # Pass preprocessed image to OCR model\n result = inferencer(img_ocr, print_result=True)\n text = result['predictions'][0]['text']\n\n # Print OCR results\n print('Prediction: ',text)\n data = {\"PlateNumber\":text}\n db.child(\"ScannedQuery\").set(data)\n try:\n os.remove(image_output)\n except OSError as e:\n print(f\"Error: {image_output} path could not be delete. 
{e}\")\n except Exception as e:\n print(\"\")\n #print(\"An error occured:\", str(e))\n else:\n \n \n continue\n \n else:\n break\n\ndef detection():\n global frame_rate_calc\n global detected\n global exit\n # Set the target frame rate in frames per second\n target_fps = 10\n\n # Calculate the delay between frames in seconds\n frame_delay = 1.0 / target_fps\n while True:\n start_time = time.monotonic()\n t1 = cv2.getTickCount()\n frame1 = videostream.read()\n\n frame = frame1.copy()\n # frame = imutils.resize(frame1, width=820)\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame_resized = cv2.resize(frame_rgb, (width, height))\n input_data = np.expand_dims(frame_resized, axis=0)\n\n if floating_model:\n input_data = (np.float32(input_data) - input_mean) / input_std\n\n interpreter.set_tensor(input_details[0]['index'],input_data)\n interpreter.invoke()\n\n boxes = interpreter.get_tensor(output_details[boxes_idx]['index'])[0]\n classes = interpreter.get_tensor(output_details[classes_idx]['index'])[0] \n scores = interpreter.get_tensor(output_details[scores_idx]['index'])[0]\n\n area = [(1,160),(647,160),(647,360),(1,360)] #Bahog ug video\n\n # area = [(1,257),(639,257),(639,480),(1,480)] #sa laptop cam\n # area = [(2,243),(637,243),(637,360),(2,360)] #sa CCTV\n\n for i in range(len(scores)):\n if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):\n\n ymin = int(max(1,(boxes[i][0] * imH)))\n xmin = int(max(1,(boxes[i][1] * imW)))\n ymax = int(min(imH,(boxes[i][2] * imH)))\n xmax = int(min(imW,(boxes[i][3] * imW)))\n \n cx = int((xmin + xmax)/2)\n cy = int((ymin + ymax)/2)\n result = cv2.pointPolygonTest(np.array(area, np.int32), (int(cx), int(cy)), False)\n if result >= 0:\n detected = True\n # cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)\n\n object_name = labels[int(classes[i])] \n label = '%s: %d%%' % (object_name, int(scores[i]*100)) \n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) \n label_ymin = max(ymin, labelSize[1] + 10) \n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)\n cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) \n # cv2.circle(frame,(cx,cy),5,(10, 255, 0),-1)\n imgRoi = frame[ymin:ymax, xmin:xmax]\n cv2.imwrite(\"iMAGE.jpg\", imgRoi)\n \n else:\n detected = False\n for i in area:\n cv2.polylines(frame,[np.array(area, np.int32)], True, (15,220,10),6)\n\n cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)\n \n # frame1 = imutils.resize(frame, width=650)\n cv2.imshow('Object detector', frame)\n\n \n t2 = cv2.getTickCount()\n time1 = (t2-t1)/freq\n frame_rate_calc= 1/time1\n \n\n if cv2.waitKey(1) == ord('q'):\n exit =1\n break\n elapsed_time = time.monotonic() - start_time\n time.sleep(max(0, frame_delay - elapsed_time))\n videostream.stop()\n cv2.destroyAllWindows()\n\ntask1 = Thread(target=detection)\ntask2 = Thread(target=ocr)\ntask3 = Thread(target=saveForQuery)\ntask4 = Thread(target=checkExist)\ntask5 = Thread(target=clear_list)\n\nwhile True:\n task1.start()\n task2.start()\n task3.start()\n task4.start()\n task5.start()\n\n\n task1.join()\n task2.join()\n task3.join()\n task4.join()\n task5.join()\n if exit ==1:\n print(\"Done executing\")\n 
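# added note: the detection loop has set the exit flag and every worker thread has joined\n        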
break","repo_name":"Millborne-g/MMOCR-codes","sub_path":"camLatest_polylines.py","file_name":"camLatest_polylines.py","file_ext":"py","file_size_in_byte":17846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38019262331","text":"from typing import Any, List\nfrom pytorch_lightning import LightningModule\nfrom src.models.fcvae_model_v1 import FCVAEModelV1\nfrom src.models.fcvae_model_v2 import FCVAEModelV2\nfrom src.models.fcae_model import FCAEModel\nfrom torch import nn\nimport torch\nfrom torchmetrics.classification.accuracy import Accuracy\n\n\nclass ExtractorFCMLPModel(LightningModule):\n \"\"\"\n A LightningModule organizes your PyTorch code into 5 sections:\n - Computations (init).\n - Train loop (training_step)\n - Validation loop (validation_step)\n - Test loop (test_step)\n - Optimizers (configure_optimizers)\n\n Read the docs:\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html\n \"\"\"\n\n def __init__(\n self,\n extractor_path: str = \"\",\n task: str = \"regression\",\n n_output: int = 1,\n topology: List[int] = None,\n dropout: float = 0.1,\n num_unfreeze_epochs = 10,\n loss_type: str = \"MSE\",\n extractor_type: str = \"FCVAEModelV2\",\n lr: float = 0.001,\n weight_decay: float = 0.0005,\n **kwargs\n ):\n super().__init__()\n self.save_hyperparameters()\n\n self.extractor_type = extractor_type\n if self.extractor_type == \"FCVAEModelV1\":\n self.feature_extractor = FCVAEModelV1.load_from_checkpoint(extractor_path)\n elif self.extractor_type == \"FCVAEModelV2\":\n self.feature_extractor = FCVAEModelV2.load_from_checkpoint(extractor_path)\n elif self.extractor_type == \"FCAEModel\":\n self.feature_extractor = FCAEModel.load_from_checkpoint(extractor_path)\n else:\n raise ValueError(\"Unsupported extractor_type\")\n\n self.feature_extractor.freeze()\n\n self.task = task\n self.n_output = n_output\n self.topology = [self.feature_extractor.model.n_latent] + list(topology)\n\n self.num_unfreeze_epochs = num_unfreeze_epochs\n\n self.mlp_layers = []\n for i in range(len(self.topology) - 1):\n layer = nn.Linear(self.topology[i], self.topology[i + 1])\n self.mlp_layers.append(nn.Sequential(layer, nn.ReLU(), nn.BatchNorm1d(self.topology[i + 1]), nn.Dropout(dropout)))\n self.mlp_layers.append(nn.Linear(self.topology[-1], self.n_output))\n\n if task == \"classification\":\n self.loss_fn = torch.nn.CrossEntropyLoss(reduction='mean')\n if n_output < 2:\n raise ValueError(f\"Classification with {n_output} classes\")\n elif task == \"regression\":\n if self.hparams.loss_type == \"MSE\":\n self.loss_fn = torch.nn.MSELoss(reduction='mean')\n elif self.hparams.loss_type == \"L1Loss\":\n self.loss_fn = torch.nn.L1Loss(reduction='mean')\n else:\n raise ValueError(\"Unsupported loss_type\")\n\n self.mlp = nn.Sequential(*self.mlp_layers)\n\n self.accuracy = Accuracy()\n\n def on_epoch_end(self):\n # a hook is cleaner (but a callback is much better)\n if self.trainer.current_epoch == self.num_unfreeze_epochs:\n self.feature_extractor.unfreeze()\n\n def forward(self, x: torch.Tensor):\n z = self.feature_extractor.get_latent(x)\n return self.mlp(z)\n\n def get_probabilities(self, x: torch.Tensor):\n x = self.feature_extractor.get_latent(x)\n x = self.mlp(x)\n return torch.softmax(x, dim=1)\n\n def step(self, batch: Any):\n x, y, ind = batch\n out = self.forward(x)\n batch_size = x.size(0)\n y = y.view(batch_size, -1)\n loss = self.loss_fn(out, y)\n\n logs = {\"loss\": loss}\n if self.task == 
\"classification\":\n out_tag = torch.argmax(out, dim=1)\n acc = self.accuracy(out_tag, y)\n logs[\"acc\"] = acc\n\n return loss, logs\n\n def training_step(self, batch: Any, batch_idx: int):\n loss, logs = self.step(batch)\n d = {f\"train/{k}\": v for k, v in logs.items()}\n self.log_dict(d, on_step=False, on_epoch=True, logger=True)\n return logs\n\n def training_epoch_end(self, outputs: List[Any]):\n pass\n\n def validation_step(self, batch: Any, batch_idx: int):\n loss, logs = self.step(batch)\n d = {f\"val/{k}\": v for k, v in logs.items()}\n self.log_dict(d, on_step=False, on_epoch=True, logger=True)\n return logs\n\n def validation_epoch_end(self, outputs: List[Any]):\n pass\n\n def test_step(self, batch: Any, batch_idx: int):\n loss, logs = self.step(batch)\n d = {f\"test_{k}\": v for k, v in logs.items()}\n self.log_dict(d, on_step=False, on_epoch=True, logger=True)\n return logs\n\n def test_epoch_end(self, outputs: List[Any]):\n pass\n\n def configure_optimizers(self):\n \"\"\"Choose what optimizers and learning-rate schedulers to use in your optimization.\n Normally you'd need one. But in the case of GANs or similar you might have multiple.\n\n See examples here:\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers\n \"\"\"\n return torch.optim.Adam(\n params=self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay\n )\n","repo_name":"GillianGrayson/dnamvae","sub_path":"src/models/extractor_mlp_model.py","file_name":"extractor_mlp_model.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"14567317943","text":"from sys import argv\r\nimport copy\r\nfrom operator import itemgetter\r\nimport time\r\ndef shortPath(towns,city,target,path,i):\r\n\tif(city==[]):\r\n\t\treturn\r\n\telse:\t\r\n\t\tglobal maxWeight,flag,paths\r\n\t\tcity=sorted(city,key=itemgetter(1))[::-1]\r\n\t\tfor p in city:\t\t\t\r\n\t\t\tif(p[0]==target):\r\n\t\t\t\tprevMaxWeight=maxWeight\t\t\r\n\t\t\t\tpath[i]=p\r\n\t\t\t\tmaxWeight=p[1]\t\t\t\t\t\t\r\n\t\t\t\tfor n in range(i,-1,-1):\r\n\t\t\t\t\tif(path[n][1]prevMaxWeight):\r\n\t\t\t\t\tpaths=copy.copy(path)\t\t\t\t\r\n\t\t\t\t\tcontinue\t\t\t\r\n\t\t\t\telse:\t\t\r\n\t\t\t\t\tmaxWeight=prevMaxWeight\t\r\n\t\t\t\t\tcontinue\r\n\t\t\tif p[1] \"+str(n[0])\r\n\t\tif(n[1] 0 :\n for file in each_file:\n date_stamp = datetime.strptime(file, '%Y%m%d%H%M%S.html')\n unix_time = time.mktime(date_stamp.timetuple())\n #print(date_stamp, unix_time)\n full_file_path = each_dir+'/'+file\n #print(full_file_path)\n source = open(full_file_path, 'r').read()\n #print(source)\n try:\n value = float(source.split(gather+':')[1].split('')[0])\n #print(ticker+\":\",value)\n df = df.append({'Date':date_stamp, 'Unix':unix_time, 'Ticker':ticker, 'De Ratio':value,}, ignore_index = True)\n except Exception as e:\n pass\n\n #time.sleep(15)\n save = gather.replace(' ', '').replace(')', '').replace('(','').replace('/', '')+ ('.csv')\n print(save)\n df.to_csv(save)\n\n\nKey_Stats()\n\n\n#note \n#1\n#The df variable is used to store the creation of a new \"DataFrame\" object from Pandas, where we specify the columns to be date, unix, ticker, and DE ratio\n\n#2\n#The Try here identifies the value as usual, then we're re-defining our DataFrame object as the previous DataFrame object with the new data appended to it\n\n#3\n#specifying a custom name for the csv file, then using pandas to_csv capability to output the Data Frame 
to an actual CSV file\n#Running this then saves the dataframe as a CSV spreadsheet for us. We want to save the data since we really just need to access and store the data once","repo_name":"TakahiroSuzukiqq/python-machineleaning-wk1","sub_path":"structuring_data.py","file_name":"structuring_data.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"14395725370","text":"import math\nfrom collections import defaultdict, Counter\n\n\nclass DSU:\n    def __init__(self, N):\n        self.p = list(range(N))\n\n    def find(self, x):\n        if self.p[x] != x:\n            self.p[x] = self.find(self.p[x])\n        return self.p[x]\n\n    def union(self, x, y):\n        xr, yr = self.find(x), self.find(y)\n        self.p[xr] = yr\n\n\nclass Solution:\n    def primes_set(self, n):\n        for i in range(2, int(math.sqrt(n))+1):\n            if n % i == 0:\n                return self.primes_set(n//i) | set([i])\n        return set([n])\n\n    def largest_component_size(self, A):\n        \"\"\"\n        Time O(n * log(2m) * log(m)) where n is the number of elements\n        and m is the max value in list\n        Space: O(n + m)\n        \"\"\"\n        n = len(A)\n        UF = DSU(n)\n        primes = defaultdict(list)\n        for i, num in enumerate(A):\n            pr_set = self.primes_set(num)\n            for q in pr_set:\n                primes[q].append(i)\n        for _, indexes in primes.items():\n            for i in range(len(indexes)-1):\n                UF.union(indexes[i], indexes[i+1])\n        return max(Counter([UF.find(i) for i in range(n)]).values())\n","repo_name":"tuvo1106/1337code","sub_path":"0952_largest_component/largest.py","file_name":"largest.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"}
+{"seq_id":"23830703999","text":"# 01_fruits.py\n# Image classification with a CNN\n# Dataset: crawled from Baidu image search results\n# Content: 1036 fruit images in total\n# 5 classes (apple 288, banana 275, grape 216,\n#       orange 276, pear 251)\n\n################## Data preprocessing ##################\nimport os\n\nname_dict = {\"apple\": 0, \"banana\": 1, \"grape\": 2,\n             \"orange\": 3, \"pear\": 4}\ndata_root_path = \"data/fruits/\"  # dataset root directory\n# paths of the test / train list files\ntest_file_path = data_root_path + \"test.txt\"\ntrain_file_path = data_root_path + \"train.txt\"\nname_data_list = {}  # records which images belong to each class\n\n\ndef save_name_data_list(path,  # image path\n                        name):  # class name\n    if name not in name_data_list:  # class not in the dict yet\n        img_list = []  # create an empty list\n        img_list.append(path)  # store the image in the list\n        name_data_list[name] = img_list  # store it into the dict\n    else:  # class already in the dict\n        name_data_list[name].append(path)\n\n\n# walk every sub-directory of the dataset and collect the\n# sample paths into the name_data_list dict\ndirs = os.listdir(data_root_path)\nfor d in dirs:\n    full_path = data_root_path + d  # full sub-directory path\n    # print(full_path)\n    if os.path.isdir(full_path):  # it is a directory\n        imgs = os.listdir(full_path)  # list all files\n        for img in imgs:\n            img_full_path = full_path + \"/\" + img\n            save_name_data_list(img_full_path,\n                                d)  # the directory name is the class name\n    else:  # a plain file\n        pass\n\n# iterate over name_data_list and split into test / training sets\nwith open(test_file_path, \"w\") as f:\n    pass\n\nwith open(train_file_path, \"w\") as f:\n    pass\n\n# iterate over the dict\nfor name, img_list in name_data_list.items():\n    i = 0\n    num = len(img_list)  # number of images in each class\n    print(\"%s: %d images\" % (name, num))\n\n    for img in img_list:\n        line = \"%s\\t%d\\n\" % (img, name_dict[name])\n        if i % 10 == 0:  # goes to the test set\n            with open(test_file_path, \"a\") as f:\n                f.write(line)\n        else:  # goes to the training set\n            with open(train_file_path, \"a\") as f:\n                f.write(line)\n        i += 1\nprint(\"Data preprocessing finished.\")\n\n############### Model building / training ##################\nimport paddle\nimport paddle.fluid as fluid\nimport numpy\nimport sys\nimport os\nfrom multiprocessing import cpu_count\nimport time\nimport matplotlib.pyplot as plt\n\n\ndef train_mapper(sample):\n    \"\"\"\n    Read the image data for a sample given its path and label\n    :param sample: one line of text, a tuple (file path, label)\n    :return: the image data and its label\n    \"\"\"\n    img, label = sample  # img is the path, label is the class\n    if not os.path.exists(img):\n        print(img, \"file does not exist\")\n\n    # read the file content\n    img = paddle.dataset.image.load_image(img)\n    # bring the image to a fixed size\n    img = paddle.dataset.image.simple_transform(\n        im=img,  # original image\n        resize_size=100,  # size to resize the image to\n        crop_size=100,  # size of the cropped image\n        is_color=True,  # color image\n        is_train=True)  # training mode (applies a random crop)\n    # normalization: map every pixel value into the range 0~1\n    img = img.astype(\"float32\") / 255.0\n    return img, label\n\n\n# read the data from the training set\ndef train_r(train_list, buffered_size=1024):\n    def reader():\n        with open(train_list, \"r\") as f:\n            lines = f.readlines()\n            for line in lines:\n                # strip spaces and newline characters\n                line = line.strip().replace(\"\\n\", \"\")\n                img_path, lab = line.split(\"\\t\")\n\n                yield img_path, int(lab)\n\n    return paddle.reader.xmap_readers(\n        train_mapper,  # post-processes the samples produced by reader\n        reader,  # the raw reader\n        cpu_count(),  # number of worker threads\n        buffered_size)  # buffer size\n\n# define the reader\nBATCH_SIZE = 32  # batch size\n\ntrainer_reader = train_r(train_list=train_file_path)\nrandom_train_reader = paddle.reader.shuffle(\n    reader=trainer_reader,\n    buf_size=1300)  # shuffled reader\nbatch_train_reader = paddle.batch(\n    random_train_reader,\n    batch_size=BATCH_SIZE)\n\n# placeholders\nimage = fluid.layers.data(name=\"image\",\n                          shape=[3, 100, 100],\n                          dtype=\"float32\")\nlabel = fluid.layers.data(name=\"label\",\n                          shape=[1],\n                          dtype=\"int64\")\n\ndef create_CNN(image, type_size):\n    \"\"\"\n    Build the convolutional neural network\n    :param image: image data (already normalized)\n    :param type_size: number of classes\n    :return: a set of class probabilities\n    \"\"\"\n    # first conv/pool/dropout group\n    conv_pool_1 = fluid.nets.simple_img_conv_pool(\n        input=image,  # input image data\n        filter_size=3,  # kernel size\n        num_filters=32,  # number of kernels\n        pool_size=2,  # pooling over 2*2 regions\n        pool_stride=2,  # pooling stride\n        act=\"relu\")  # activation function\n    drop = fluid.layers.dropout(x=conv_pool_1,\n                                dropout_prob=0.5)\n\n    # second conv/pool/dropout group\n    conv_pool_2 = fluid.nets.simple_img_conv_pool(\n        input=drop,  # output of the previous dropout as input\n        filter_size=3,  # kernel size\n        num_filters=64,  # number of kernels\n        pool_size=2,  # pooling over 2*2 regions\n        pool_stride=2,  # pooling stride\n        act=\"relu\")  # activation function\n    drop = fluid.layers.dropout(x=conv_pool_2,\n                                dropout_prob=0.5)\n\n    # third conv/pool/dropout group\n    conv_pool_3 = fluid.nets.simple_img_conv_pool(\n        input=drop,  # output of the previous dropout as input\n        filter_size=3,  # kernel size\n        num_filters=64,  # number of kernels\n        pool_size=2,  # pooling over 2*2 regions\n        pool_stride=2,  # pooling stride\n        act=\"relu\")  # activation function\n    drop = fluid.layers.dropout(x=conv_pool_3,\n                                dropout_prob=0.5)\n\n    # fc\n    fc = fluid.layers.fc(input=drop,\n                         size=512,  # number of neurons\n                         act=\"relu\")\n    # dropout\n    drop = fluid.layers.dropout(x=fc,\n                                dropout_prob=0.5)\n    # output layer (an fc layer with softmax as activation)\n    predict = fluid.layers.fc(input=drop,\n                              size=type_size,\n                              act=\"softmax\")\n    return predict\n\n# create the VGG model\ndef vgg_bn_drop(image, type_size):\n    def conv_block(ipt, num_filter, groups, dropouts):\n        return fluid.nets.img_conv_group(\n            input=ipt,  # input image, layout [N,C,H,W]\n            pool_stride=2,  # pooling stride\n            pool_size=2,  # pooling window size\n            conv_num_filter=[num_filter] * groups,\n            conv_filter_size=3,  # kernel size\n            conv_act=\"relu\",  # activation function\n            conv_with_batchnorm=True,  # whether to apply BN\n            pool_type=\"max\")  # pooling type\n\n    conv1 = conv_block(image, 64, 2, [0.0, 0.0])\n    conv2 = conv_block(conv1, 128, 2, [0.0, 0.0])\n    conv3 = conv_block(conv2, 256, 3, [0.0, 0.0, 0.0])\n    conv4 = conv_block(conv3, 512, 3, [0.0, 0.0, 0.0])\n    conv5 = conv_block(conv4, 512, 3, [0.0, 0.0, 0.0])\n\n    drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)\n    fc1 = fluid.layers.fc(input=drop,\n                          size=512,\n                          act=None)\n    bn = fluid.layers.batch_norm(input=fc1,\n                                 act=\"relu\")  # batch normalization\n    drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.0)\n    fc2 = fluid.layers.fc(input=drop2,\n                          size=512,\n                          act=None)\n    predict = fluid.layers.fc(input=fc2,\n                              size=type_size,\n                              act=\"softmax\")\n    return predict\n\n\n# call the function to create the model\n# predict = create_CNN(image=image, type_size=5)\npredict = vgg_bn_drop(image=image, type_size=5)\n# loss function\ncost = fluid.layers.cross_entropy(\n    input=predict,\n    label=label)\navg_cost = fluid.layers.mean(cost)\n# accuracy\naccuracy = fluid.layers.accuracy(input=predict,\n                                 label=label)\n# optimizer\noptimizer = fluid.optimizer.Adam(\n    learning_rate=0.001)\noptimizer.minimize(avg_cost)  # objective function to optimize\n\n# executor\nplace = fluid.CUDAPlace(0)  # train on the GPU\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n# feeder\nfeeder = fluid.DataFeeder(\n    feed_list=[image, label],\n    place=place)\n\ncosts = []  # records the loss values\naccs = []  # records the accuracy values\ntimes = 0\nbatchs = []  # iteration counter\n\n# start training\nfor pass_id in range(5):\n    train_cost = 0  # temporary variable that records the loss\n    train_acc = 0\n    times += 1\n    for batch_id, data in enumerate(batch_train_reader()):\n        train_cost, train_acc = exe.run(\n            program=fluid.default_main_program(),\n            feed=feeder.feed(data),  # feed in the data\n            fetch_list=[avg_cost, accuracy])\n        # print loss value and accuracy\n        if batch_id % 20 == 0:\n            print(\"pass_id:%d, batch_id:%d, cost:%f, acc:%f\"\n                  % (pass_id, batch_id,\n                     train_cost[0], train_acc[0]))\n            accs.append(train_acc[0])\n            costs.append(train_cost[0])\n            batchs.append(times)\n# save the model\nmodel_save_dir = \"./model/fruits/\"\nif not os.path.exists(model_save_dir):\n    os.makedirs(model_save_dir)\nfluid.io.save_inference_model(\n    dirname=model_save_dir,  # save path\n    feeded_var_names=[\"image\"],  # input names passed at inference time\n    target_vars=[predict],  # prediction outputs\n    executor=exe)  # executor\n\nprint(\"Model saved to:\", model_save_dir)\n\n# visualize the training process\nplt.title(\"training\", fontsize=24)\nplt.xlabel(\"iter\", fontsize=20)\nplt.ylabel(\"cost/acc\", fontsize=20)\nplt.plot(batchs, costs, color='red', label=\"Training Cost\")\nplt.plot(batchs, accs, color='green', label=\"Training Acc\")\nplt.legend()\nplt.grid()\nplt.savefig(\"train.png\")\nplt.show()\n\n\n#################### Inference #####################\nfrom PIL import Image\n\n# load the image data\ndef load_img(path):  # path is the image path\n    img = paddle.dataset.image.load_and_transform(\n        path, 100, 100, False).astype(\"float32\")\n    img = img / 255.0  # normalize\n\n    return img\n\n# define the executor\nplace = fluid.CPUPlace()\ninfer_exe = fluid.Executor(place)  # executor used for inference\n\ninfer_imgs = []  # holds the images waiting for inference\ntest_img = \"apple_1.png\"  # image to test\ninfer_imgs.append(load_img(test_img))  # append the image data to the inference list\n\ninfer_imgs = numpy.array(infer_imgs)  # convert the list to an array\n\n# load the model\ninfer_program, feed_target_names, fetch_targets = \\\n    fluid.io.load_inference_model(model_save_dir,\n                                  infer_exe)\n# run inference\nresults = infer_exe.run(infer_program,\n                        feed={feed_target_names[0]:infer_imgs},\n                        fetch_list=fetch_targets)\n# print(results)\n\nresult = numpy.argmax(results[0][0])\nfor k, v in name_dict.items():\n    if result == v:\n        print(\"Predicted class:\", k)\n\n# show the image that was tested\nimg = Image.open(test_img)\nplt.imshow(img)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"wangjiancheng-123/datascience","sub_path":"深度学习/01_fruits.py","file_name":"01_fruits.py","file_ext":"py","file_size_in_byte":11206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"74214809216","text":"class graph(object):\n    def __init__(self, size):\n        self.adjacency_list = {}\n        self.maxSize = 0\n        for x in range(1, size + 1):\n            self.adjacency_list[x] = []\n        self.size = size\n\n    def add_node(self, start, end, weight):\n\n        self.maxSize += weight\n\n        
self.adjacency_list[start].append([end, weight])\n        self.adjacency_list[end].append([start, weight])\n\n    def print_graph(self):\n        for x in range(1, self.size + 1):\n            print(x, \" : \", self.adjacency_list[x])\n\n\ndef minIndex(g, visited, distance, minDis):\n\n    minIndex = -1\n\n    for count in range(1, g.size + 1):\n        if distance[count] <= minDis and (not visited[count]):\n            minIndex = count\n            minDis = distance[count]\n\n    return minIndex\n\n\ndef dijkstra_short(g, start, end):\n    visited = [False] * (g.size + 1)\n    distance = [g.maxSize] * (g.size + 1)\n\n    distance[start] = 0\n\n    for _ in range(g.size):\n\n        minIndex1 = minIndex(g, visited, distance, g.maxSize)\n\n        visited[minIndex1] = True\n\n        for x in g.adjacency_list[minIndex1]:\n\n            if not visited[x[0]]:\n                if distance[x[0]] > distance[minIndex1] + x[1]:\n                    distance[x[0]] = distance[minIndex1] + x[1]\n\n    return distance[end]\n\n\ng = graph(5)\ng.add_node(1, 2, 10)\ng.add_node(2, 3, 15)\ng.add_node(1, 3, 70)\ng.add_node(2, 4, 15)\ng.add_node(4, 5, 20)\ng.add_node(1, 5, 100)\n\ng.print_graph()\n\nprint(dijkstra_short(g, 1, 5))\n","repo_name":"ArtistBanda/Algorithms-and-Basic-Programmes","sub_path":"Python/Algorithms/dijkstra_algorithm.py","file_name":"dijkstra_algorithm.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"}
+{"seq_id":"5630863559","text":"from esys.escript import *\nimport numpy as np\nfrom math import floor\nfrom scipy.interpolate import RegularGridInterpolator\nfrom .datamapping import mapToDomain\nfrom esys.escript.linearPDEs import LinearSinglePDE, SolverOptions\nfrom esys.escript.pdetools import Locator\n\ndef setupERTPDE(domain, poisson=True):\n    \"\"\"\n    used to set up all ERT PDEs\n    \"\"\"\n    pde=LinearSinglePDE(domain, isComplex=False)\n    pde.setSymmetryOn()\n    optionsG=pde.getSolverOptions()\n    #optionsG.setSolverMethod(SolverOptions.DIRECT)\n\n    optionsG.setSolverMethod(SolverOptions.PCG)\n    optionsG.setTolerance(1e-8)\n    if True and hasFeature('trilinos'):\n        #print(\"trilinos solver used.\")\n        optionsG.setPackage(SolverOptions.TRILINOS)\n        optionsG.setPreconditioner(SolverOptions.AMG)\n        if poisson:\n            optionsG.setTrilinosParameter(\"problem:type\", \"Poisson-3D\")\n        optionsG.setTrilinosParameter(\"verbosity\", \"none\")\n        optionsG.setTrilinosParameter(\"number of equations\", 1)\n        #optionsG.setTrilinosParameter(\"max levels\", 3) # 10 is default 3 seems to be a good number\n        #optionsG.setTrilinosParameter(\"cycle type\", \"V\")\n        optionsG.setTrilinosParameter(\"problem: symmetric\", True)\n        #optionsG.setTrilinosParameter(\"smoother: pre or post\", \"both\")\n        #optionsG.setTrilinosParameter(\"Convergence Tolerance\", 1e-12)\n    return pde\n\n\nclass IPModel(object):\n    \"\"\"\n    \"\"\"\n    def __init__(self, domain, survey, locations=[], field_resolution=1., field_origin=(0.,0.,0), sigma_background=0.1, gamma_background=0.0001, padding_tags=[], stationsFMT=None):\n        self.domain=domain\n        self.survey=survey\n        self.locations=locations\n        self.stationsFMT=stationsFMT\n        self.pde=setupERTPDE(domain)\n        x=self.pde.getDomain().getX()[0]\n        y=self.pde.getDomain().getX()[1]\n        z=self.pde.getDomain().getX()[2]\n        self.pde.setValue(q=whereZero(x-inf(x))+whereZero(x-sup(x))+ whereZero(y-inf(y))+whereZero(y-sup(y))+whereZero(z-inf(z)))\n\n        self.locations=locations\n        self.observation_locator=Locator(Solution(domain), [ self.survey.getStationLocation(s) for s in self.survey.getObservationElectrodes()])\n        self.source_locator=Locator(ContinuousFunction(domain), [ 
self.survey.getStationLocation(ip) for ip in self.survey.getInjectionStations() ])\n\n self.field_resolution=field_resolution\n self.field_origin=field_origin\n self.sigma_background=sigma_background\n self.gamma_background=gamma_background\n self.padding_tags=padding_tags\n\n self.injections= [ i for i in self.survey.injectionIterator()]\n self.injectionMap=[ k for k in range(len(self.injections)) ]\n \n self.setUpDataMaps()\n self.setPrimaryPotential()\n \n\n \n def getAllInjections(self):\n return self.injectionMap\n \n def getInjection(self, k):\n return self.injections[self.injectionMap[k]]\n \n def getNumberOfInjections(self):\n return len(self.injections)\n \n def setUpDataMaps(self):\n \"\"\"\n This sets up the mapping of the DC self.dataDCMaps[self.numSrc] and IP self.dataIPMaps[self.numSrc] predictions to an array d[self.numDataMax, self.numSrc]\n \"\"\"\n self.numSrc=self.getNumberOfInjections()\n self.dataDCMaps={}\n self.dataIPMaps={}\n self.numData={}\n for k, i in enumerate(self.getAllInjections()):\n self.dataDCMaps[i] = { s: j for j,s in enumerate(self.survey.getObservations(self.getInjection(i)))}\n self.dataIPMaps[i] = { s: j+len(self.dataDCMaps[i]) for j,s in enumerate(self.survey.getObservations(self.getInjection(i)))}\n self.numData[i]=len(self.dataDCMaps[i])+len(self.dataIPMaps[i])\n self.numDataMax=max(self.numData.values())\n\n self.use=np.zeros((self.numDataMax, self.numSrc), dtype=bool)\n for k, i in enumerate(self.getAllInjections()):\n for j in self.dataDCMaps[i].values():\n self.use[j,k]=True \n for i in self.dataIPMaps[i].values():\n self.use[j,k]=True\n \n def makeDataSet(self, sources):\n \"\"\"\n \n \"\"\"\n responses=np.zeros((self.numDataMax, len(sources)), dtype=float)\n if self.survey.hasDipoleInjections():\n for k, ip in enumerate(sources):\n for s,i in self.dataDCMaps[ip].items():\n responses[i,k]=self.survey.getDataRecord(self.getInjection(ip)+ s, datatype='R')\n for s,i in self.dataIPMaps[ip].items():\n d=self.survey.getDataRecord( self.getInjection(ip) + s, datatype='R')\n e=self.survey.getDataRecord(self.getInjection(ip) + s, datatype='ETA')\n responses[i,k]=e/(1-e)*d\n else:\n for k, ip in enumerate(sources):\n for s,i in self.dataDCMaps[ip].items():\n responses[i,k]=self.survey.getDataRecord( (self.getInjection(ip),) + s, datatype='R')\n for s,i in self.dataIPMaps[ip].items():\n d=self.survey.getDataRecord( (self.getInjection(ip),) + s , datatype='R')\n e=self.survey.getDataRecord( (self.getInjection(ip),) + s, datatype='ETA')\n responses[i,k]=e/(1-e)*d \n return responses\n \n def setPrimaryPotential(self):\n \"\"\"\n this sets the primary potential assuming sigma=1 and I=1\n \"\"\"\n self.primary_potential={}\n self.primary_potential_at_stations = {}\n self.pde.setValue(A=kronecker(3), X=Data()) \n for i, ip in enumerate(self.survey.getListOfInjectionStations()):\n s=Scalar(0.,DiracDeltaFunctions(self.domain))\n if self.stationsFMT is None:\n s.setTaggedValue(ip,1.)\n else: \n s.setTaggedValue(self.stationsFMT%ip,1.)\n self.pde.setValue(y_dirac=s)\n self.primary_potential[ip]=self.pde.getSolution()\n self.primary_potential_at_stations[ip]=np.array(self.observation_locator(self.primary_potential[ip]))\n print(\"Primary potential for %s: %s\"%(ip,str(self.primary_potential[ip])))\n\n def runSurvey(self, sources, sigma_field, gamma_field):\n # sources point into \n # array to return data: \n responses=np.zeros((self.numDataMax, len(sources)), dtype=float)\n \n # extend the fields to the domain and grep values at source locations: \n sigma, 
sigma_p=mapToDomain(self.domain, sigma_field, self.field_resolution, origin=self.field_origin, data0=self.sigma_background, tags0=self.padding_tags, locators=self.source_locator )\n gamma, gamma_p=mapToDomain(self.domain, gamma_field, self.field_resolution, origin=self.field_origin, data0=self.gamma_background, tags0=self.padding_tags, locators=self.source_locator )\n \n self.pde.setValue(A=sigma*kronecker(3), y_dirac=Data())\n secondary_potential_at_stations={}\n u_at_stations={}\n # DC .... \n for k, j in enumerate(sources):\n if self.survey.hasDipoleInjections():\n ips=self.getInjection(j)\n for ip in ips:\n if not ip in secondary_potential_at_stations:\n idx=self.survey.getInjectionStationIndex(ip)\n sigma0=sigma_p[idx]\n print(\"DC injection %s at %s, sigma_p=%e\"%(ip, idx, sigma0))\n\n self.pde.setValue(X=(1-sigma/sigma0)*grad(self.primary_potential[ip])) \n u_s=self.pde.getSolution()\n secondary_potential_at_stations[ip]=np.array(self.observation_locator(u_s))\n\n u_at_stations[ip]=secondary_potential_at_stations[ip]+self.primary_potential_at_stations[ip]/sigma0 \n for s,i in self.dataDCMaps[j].items():\n Midx, Nidx=self.survey.getObservationElectrodeIndex(s[0]), self.survey.getObservationElectrodeIndex(s[1]) \n responses[i,k]=u_at_stations[ips[0]][Midx]-u_at_stations[ips[0]][Nidx]- u_at_stations[ips[1]][Midx]+u_at_stations[ips[1]][Nidx] \n else:\n ip=self.getInjection(j)\n idx=self.survey.getInjectionStationIndex(ip)\n sigma0=sigma_p[idx]\n print(\"DC injection %s at %s, sigma_p=%e\"%(ip, idx, sigma0))\n\n self.pde.setValue(X=(1-sigma/sigma0)*grad(self.primary_potential[ip])) \n u_s=self.pde.getSolution()\n secondary_potential_at_stations[ip]=np.array(self.observation_locator(u_s))\n\n u_at_stations=secondary_potential_at_stations[ip]+self.primary_potential_at_stations[ip]/sigma0 \n for s,i in self.dataDCMaps[j].items():\n Midx, Nidx=self.survey.getObservationElectrodeIndex(s[0]), self.survey.getObservationElectrodeIndex(s[1]) \n responses[i,k]=u_at_stations[Midx]-u_at_stations[Nidx]\n \n #.. 
IP\n sigma2=sigma/(1+gamma)\n du_at_stations={}\n u_s={}\n self.pde.setValue(A=sigma2*kronecker(3), y_dirac=Data())\n for k, j in enumerate(sources):\n \n if self.survey.hasDipoleInjections():\n ips=self.getInjection(j)\n for ip in ips:\n if not ip in u_s:\n idx=self.survey.getInjectionStationIndex(ip)\n sigma20=sigma_p[idx]/(1+gamma_p[idx])\n sigma0=sigma_p[idx]\n print(\"IP injection %s at %s, sigma2_p, gamma_p = %e, %e\"%(ip, idx, sigma20, gamma_p[idx]))\n self.pde.setValue(X=(1-sigma2/sigma20)*grad(self.primary_potential[ip])) \n \n u_s[ip]=self.pde.getSolution()\n du_at_stations[ip]=np.array(self.observation_locator(u_s[ip]))-secondary_potential_at_stations[ip]+self.primary_potential_at_stations[ip]*(gamma_p[idx]/sigma0)\n for s,i in self.dataIPMaps[j].items():\n Midx, Nidx=self.survey.getObservationElectrodeIndex(s[0]), self.survey.getObservationElectrodeIndex(s[1]) \n responses[i,k]=du_at_stations[ips[0]][Midx]-du_at_stations[ips[0]][Nidx]-du_at_stations[ips[1]][Midx]+du_at_stations[ips[1]][Nidx]\n else:\n ip=self.getInjection(j)\n idx=self.survey.getInjectionStationIndex(ip)\n sigma20=sigma_p[idx]/(1+gamma_p[idx])\n sigma0=sigma_p[idx]\n print(\"IP injection %s at %s, sigma2_p, gamma_p = %e, %e\"%(ip, idx, sigma20, gamma_p[idx]))\n self.pde.setValue(X=(1-sigma2/sigma20)*grad(self.primary_potential[ip])) \n \n u_s=self.pde.getSolution()\n du_at_stations=np.array(self.observation_locator(u_s))-secondary_potential_at_stations[ip]+self.primary_potential_at_stations[ip]*(gamma_p[idx]/sigma0)\n for s,i in self.dataIPMaps[j].items():\n Midx, Nidx=self.survey.getObservationElectrodeIndex(s[0]), self.survey.getObservationElectrodeIndex(s[1]) \n responses[i,k]=du_at_stations[Midx]-du_at_stations[Nidx]\n \n self.sigma=sigma\n self.gamma=gamma\n \n return responses # [self.numDataMax, len(sources)]\n \n","repo_name":"LutzGross/fingal","sub_path":"bin/fingal/ipmodel.py","file_name":"ipmodel.py","file_ext":"py","file_size_in_byte":11465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"39922243810","text":"import sys\r\ninput=sys.stdin.readline\r\nn=int(input())\r\na=list(map(int, input().split()))\r\n\r\nd=[1]*n\r\nfor i in range(1,n):\r\n s=[]\r\n for j in range(i):\r\n if a[i]\n\nimport numpy as np\nfrom scipy.optimize import fmin_l_bfgs_b\nfrom scipy.linalg import norm\nfrom itertools import cycle, izip\nfrom sklearn.utils import atleast2d_or_csr, check_random_state\nfrom sklearn.utils import gen_even_slices\nfrom sklearn.utils import shuffle\nfrom sklearn.utils.extmath import safe_sparse_dot\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\ndef _binary_KL_divergence(p, p_hat):\n \"\"\"\n Computes the a real, KL divergence of two binomial distributions with\n probabilities p and p_hat respectively.\n \"\"\"\n return (p * np.log(p / p_hat)) + ((1 - p) * np.log((1 - p) / (1 - p_hat)))\n\n\ndef _logistic(X):\n \"\"\"\n Implements the logistic function.\n\n Parameters\n ----------\n x: array-like, shape (M, N)\n\n Returns\n -------\n x_new: array-like, shape (M, N)\n \"\"\"\n return 1. / (1. 
+ np.exp(np.clip(-X, -30, 30)))\n\n\ndef _d_logistic(X):\n    \"\"\"\n    Implements the derivative of the logistic function.\n\n    Parameters\n    ----------\n    x: array-like, shape (M, N)\n\n    Returns\n    -------\n    x_new: array-like, shape (M, N)\n    \"\"\"\n    return X * (1 - X)\n\n\ndef _tanh(X):\n    \"\"\"\n    Computes the hyperbolic tan function\n\n    Parameters\n    ----------\n    x: array-like, shape (M, N)\n\n    Returns\n    -------\n    x_new: array-like, shape (M, N)\n    \"\"\"\n    return np.tanh(X, X)\n\n\ndef _d_tanh(X):\n    \"\"\"\n    Computes the derivative of the hyperbolic tan function\n\n    Parameters\n    ----------\n    x: array-like, shape (M, N)\n\n    Returns\n    -------\n    x_new: array-like, shape (M, N)\n    \"\"\"\n    X *= -X\n    X += 1\n    return X\n\n\nclass Autoencoder(BaseEstimator, TransformerMixin):\n\n    \"\"\"\n    Sparse Autoencoder (SAE)\n\n    A Sparse Autoencoder with one hidden layer.\n    Parameters\n    ----------\n    n_hidden : int\n        Number of hidden neurons\n    activation: string, optional\n        Activation function for the hidden layer; either \"logistic\" for\n        1 / (1 + exp(-x)), or \"tanh\" for the hyperbolic tangent.\n    algorithm : string, optional\n        Optimization function for training the weights; could be \"l-bfgs-b\", \"cg\",\n        \"newton-cg\", or \"bfgs\"\n    learning_rate : float, optional\n        Learning rate to use during learning. It is *highly* recommended\n        to tune this hyper-parameter. Possible values are 10**[0., -3.].\n    beta : float, optional\n        Weight of sparsity penalty term\n    sparsity_param : float, optional\n        Desired average activation of the hidden units\n    batch_size : int, optional\n        Number of examples per minibatch.\n    max_iter : int, optional\n        Number of iterations/sweeps over the training dataset to perform\n        during training.\n    tol : float, optional\n        Tolerance for the optimization. When the loss at iteration i+1 differs\n        less than this amount from that at iteration i, convergence is\n        considered to be reached.\n    verbose: bool, optional\n        When True (False by default) the method outputs the progress\n        of learning after each iteration.\n    random_state : integer or numpy.RandomState, optional\n        A random number generator instance to define the state of the\n        random permutations generator. If an integer is given, it fixes the\n        seed. Defaults to the global numpy random number generator.\n\n    Attributes\n    ----------\n    self.coef_hidden_ : array-like, shape (n_features, n_hidden)\n        Weight matrix, where n_features in the number of visible\n        units and n_hidden is the number of hidden units.\n    self.coef_output_ : array-like, shape (n_hidden, n_features)\n        Weight matrix, where n_features in the number of visible\n        units and n_hidden is the number of hidden units.\n    intercept_hidden_ : array-like, shape (n_hidden,), optional\n        Biases of the hidden units\n    intercept_visible_ : array-like, shape (n_features,), optional\n        Biases of the visible units\n\n    Examples\n    --------\n\n    >>> import numpy as np\n    >>> from autoencoder import Autoencoder\n    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])\n    >>> model = Autoencoder(n_hidden=10)\n    >>> model.fit(X)\n    Autoencoder(activation_func='logistic', alpha=0.0001, batch_size=1000, beta=3,\n          learning_rate=0.0001, max_iter=20, n_hidden=10,\n          algorithm='l-bfgs', random_state=None, sparsity_param=0.01,\n          tol=1e-05, verbose=False)\n\n    References\n    ----------\n\n    [1] Ngiam, Jiquan, et al. \"On optimization methods for deep learning.\"\n    Proceedings of the 28th International Conference on Machine Learning (ICML-11). 
2011.\n http://ai.stanford.edu/~quocle/LeNgiCoaLahProNg11.pdf\n \"\"\"\n activation_functions = {\n 'tanh': _tanh,\n 'logistic': _logistic\n }\n derivative_functions = {\n 'tanh': _d_tanh,\n 'logistic': _d_logistic\n }\n def __init__(\n self, n_hidden=25, activation='logistic', algorithm='l-bfgs',\n decoder = 'non_linear', learning_rate=0.3, alpha=3e-3, beta=3, sparsity_param=0.1,\n batch_size=500, shuffle_data=False, max_iter=200, tol=1e-5, verbose=False, random_state=None):\n self.activation = activation\n self.algorithm = algorithm\n self.decoder = decoder\n self.n_hidden = n_hidden\n self.alpha = alpha\n self.learning_rate = learning_rate\n self.beta = beta\n self.sparsity_param = sparsity_param\n self.batch_size = batch_size\n self.shuffle_data = shuffle_data\n self.max_iter = max_iter\n self.tol = tol\n self.verbose = verbose\n self.random_state = random_state\n\n def _init_fit(self, n_features):\n \"\"\"\n Initialize weight and bias parameters\n\n Parameters\n ----------\n n_features: int\n Number of features (visible nodes).\n\n Returns\n -------\n theta: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2), 1)\n \"\"\"\n rng = check_random_state(self.random_state)\n self.coef_hidden_ = rng.uniform(-1, 1, (n_features, self.n_hidden))\n self.coef_output_ = rng.uniform(-1, 1, (self.n_hidden, n_features))\n self.intercept_hidden_ = rng.uniform(-1, 1, self.n_hidden)\n self.intercept_output_ = rng.uniform(-1, 1, n_features)\n\n def _init_param(self):\n \"\"\"\n Sets the activation, derivative and the output functions\n \"\"\"\n self.activation_func = self.activation_functions[self.activation]\n self.derivative_func = self.derivative_functions[self.activation]\n \n def _unpack(self, theta, n_features):\n \"\"\"\n Extract the coefficients and intercepts (W1,W2,b1,b2) from theta\n\n Parameters\n ----------\n theta: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2), 1)\n Contains concatenated flattened weights that represent the parameters \"W1, W2, b1, b2\"\n n_features: int\n Number of features (visible nodes).\n \"\"\"\n N = self.n_hidden * n_features\n self.coef_hidden_ = np.reshape(theta[:N],\n (n_features, self.n_hidden))\n self.coef_output_ = np.reshape(theta[N:2 * N],\n (self.n_hidden, n_features))\n self.intercept_hidden_ = theta[2 * N:2 * N + self.n_hidden]\n self.intercept_output_ = theta[2 * N + self.n_hidden:]\n\n def _pack(self, W1, W2, b1, b2):\n \"\"\"\n Pack the coefficients and intercepts (W1,W2,b1,b2) from theta\n\n Parameters\n ----------\n theta: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2), 1)\n Contains concatenated flattened weights that represent the parameters \"W1, W2, b1, b2\"\n n_features: int\n Number of features\n n_classes: int\n Number of target classes\n \"\"\"\n return np.hstack((W1.ravel(), W2.ravel(),\n b1.ravel(), b2.ravel()))\n\n def transform(self, X):\n \"\"\"\n Computes the extracted features.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n\n Returns\n -------\n h: array-like, shape (n_samples, n_components)\n \"\"\"\n return self.activation_func(safe_sparse_dot(X, self.coef_hidden_) + self.intercept_hidden_)\n\n def fit_transform(self, X, y=None):\n \"\"\"\n Fit the model to the data X and transform it.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n \"\"\"\n self.fit(X)\n return self.transform(X)\n\n def fit(self, X, y=None):\n \"\"\"\n Fit the model to the data X.\n\n 
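A minimal usage sketch (illustrative, using only names defined in this module): model = Autoencoder(n_hidden=10).fit(X) learns the weights, and model.transform(X) then returns the hidden-layer encoding of X.\n\n        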
Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n self\n \"\"\"\n X = atleast2d_or_csr(X, dtype=np.float64, order=\"C\")\n n_samples, n_features = X.shape\n self._init_fit(n_features)\n self._init_param()\n if self.shuffle_data:\n X, y = shuffle(X, y, random_state=self.random_state)\n # generate batch slices\n self.batch_size = np.clip(self.batch_size, 0, n_samples)\n n_batches = n_samples / self.batch_size\n batch_slices = list(\n gen_even_slices(\n n_batches *\n self.batch_size,\n n_batches))\n #l-bfgs does not work well with minibatches\n if self.algorithm == 'l-bfgs':\n self.batch_size = n_samples\n # preallocate memory\n a_hidden = np.empty((self.batch_size, self.n_hidden))\n a_output = np.empty((self.batch_size, n_features))\n delta_o = np.empty((self.batch_size, n_features))\n if self.algorithm == 'sgd':\n for i in xrange(self.max_iter):\n for batch_slice in batch_slices:\n cost = self.backprop_sgd(\n X[batch_slice],\n n_features, self.batch_size,\n delta_o, a_hidden, a_output)\n if self.verbose:\n print(\"Iteration %d, cost = %.2f\"\n % (i, cost))\n elif self.algorithm == 'l-bfgs':\n self._backprop_lbfgs(\n X, n_features,\n a_hidden, a_output, \n delta_o, n_samples)\n return self\n\n def backprop(self, X, n_features, n_samples,\n delta_o, a_hidden, a_output):\n \"\"\"\n Computes the sparse autoencoder cost function ``Jsparse(W,b)``\n and the corresponding derivatives of Jsparse with respect to the\n different parameters given in the initialization [1]\n\n Parameters\n ----------\n theta: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2))\n Contains concatenated flattened weights that represent the parameters \"W1, W2, b1, b2\"\n X: array-like, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n n_features: int\n Number of features (visible nodes).\n n_samples: int\n Number of samples\n\n Returns\n -------\n cost: float\n grad: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2))\n\n References\n -------\n [1] http://ufldl.stanford.edu/wiki/index.php/Autoencoders_and_Sparsity\n \"\"\"\n # Forward propagate\n a_hidden[:] = self.activation_func(safe_sparse_dot(X, self.coef_hidden_)\n + self.intercept_hidden_)\n if self.decoder=='non_linear':\n a_output[:] = self.activation_func(safe_sparse_dot(a_hidden, self.coef_output_)\n + self.intercept_output_)\n elif self.decoder=='linear':\n a_output[:] = safe_sparse_dot(a_hidden, self.coef_output_) + self.intercept_output_\n # Get average activation of hidden neurons\n sparsity_param_hat = np.sum(a_hidden, 0) / n_samples\n sparsity_delta = self.beta * \\\n ((1 - self.sparsity_param) / (1 - sparsity_param_hat)\n - self.sparsity_param / sparsity_param_hat)\n # Backward propagate\n diff = X - a_output\n #Linear decoder\n if self.decoder=='non_linear':\n delta_o[:] = -diff * self.derivative_func(a_output)\n elif self.decoder=='linear':\n delta_o[:] = -diff\n delta_h = (\n (safe_sparse_dot(delta_o, self.coef_output_.T) +\n sparsity_delta)) *\\\n self.derivative_func(a_hidden)\n # Get cost \n cost = np.sum(diff ** 2) / (2 * n_samples)\n # Add regularization term to cost \n cost += (0.5 * self.alpha) * (\n np.sum(self.coef_hidden_ ** 2) + np.sum(\n self.coef_output_ ** 2))\n # Add sparsity term to the cost\n cost += self.beta * np.sum(\n _binary_KL_divergence(\n self.sparsity_param,\n sparsity_param_hat))\n #Get 
gradients\n W1grad = safe_sparse_dot(X.T, delta_h) / n_samples \n W2grad = safe_sparse_dot(a_hidden.T, delta_o) / n_samples\n b1grad = np.sum(delta_h, 0) / n_samples\n b2grad = np.sum(delta_o, 0) / n_samples\n # Add regularization term to gradients \n W1grad += self.alpha * self.coef_hidden_\n W2grad += self.alpha * self.coef_output_\n return cost, W1grad, W2grad, b1grad, b2grad\n\n def reconstruct(self, a_hidden):\n if self.decoder=='non_linear':\n a_output = self.activation_func(safe_sparse_dot(a_hidden, self.coef_output_)\n + self.intercept_output_)\n elif self.decoder=='linear':\n a_output = safe_sparse_dot(a_hidden, self.coef_output_) + self.intercept_output_\n return a_output[:]\n \n \n def backprop_sgd(\n self, X, n_features, n_samples, delta_o, a_hidden, a_output):\n \"\"\"\n Updates the weights using the computed gradients\n\n Parameters\n ----------\n X: {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Y : numpy array of shape [n_samples]\n Subset of the target values.\n\n n_features: int\n Number of features\n\n n_classes: int\n Number of target classes\n\n n_samples: int\n Number of samples\n\n \"\"\"\n cost, W1grad, W2grad, b1grad, b2grad = self.backprop(\n X, n_features, n_samples, delta_o, a_hidden, a_output)\n # Update weights\n self.coef_hidden_ -= (self.learning_rate * W1grad)\n self.coef_output_ -= (self.learning_rate * W2grad)\n self.intercept_hidden_ -= (self.learning_rate * b1grad)\n self.intercept_output_ -= (self.learning_rate * b2grad)\n # TODO: dynamically update learning rate\n return cost\n \n def _backprop_lbfgs(\n self, X, n_features, a_hidden, a_output, delta_o, n_samples):\n \"\"\"\n Applies the one of the optimization methods (l-bfgs-b, bfgs, newton-cg, cg)\n to train the weights\n\n Parameters\n ----------\n X: {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n\n Y : numpy array of shape [n_samples]\n Subset of the target values.\n\n n_features: int\n Number of features\n\n n_classes: int\n Number of target classes\n\n n_samples: int\n Number of samples\n\n \"\"\"\n initial_theta = self._pack(\n self.coef_hidden_,\n self.coef_output_,\n self.intercept_hidden_,\n self.intercept_output_)\n optTheta, _, _ = fmin_l_bfgs_b(\n func=self._cost_grad,\n x0=initial_theta,\n maxfun=self.max_iter,\n disp=self.verbose,\n args=(\n X,\n n_features,\n n_samples,\n delta_o,\n a_hidden,\n a_output))\n self._unpack(optTheta, n_features)\n\n def _cost_grad(self, theta, X, n_features,\n n_samples, delta_o, a_hidden, a_output):\n \"\"\"\n Computes the MLP cost function ``J(W,b)``\n and the corresponding derivatives of J(W,b) with respect to the\n different parameters given in the initialization\n\n Parameters\n ----------\n theta: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2))\n Contains concatenated flattened weights that represent the parameters \"W1, W2, b1, b2\"\n X: {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data, where n_samples in the number of samples\n and n_features is the number of features.\n n_features: int\n Number of features\n n_classes: int\n Number of target classes\n n_samples: int\n Number of samples\n\n Returns\n -------\n cost: float\n grad: array-like, shape (size(W1)*size(W2)*size(b1)*size(b2))\n\n \"\"\"\n self._unpack(theta, n_features)\n cost, W1grad, W2grad, b1grad, b2grad = self.backprop(\n 
X, n_features, n_samples, delta_o, a_hidden, a_output)\n return cost, self._pack(W1grad, W2grad, b1grad, b2grad)\n","repo_name":"IssamLaradji/randomized_neural_networks","sub_path":"autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":17854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"73475333694","text":"import sys\n\nclass FileIO:\n def __init__(self, input_file, output_file = None):\n self.lines = [line for line in open(input_file, 'r')][::-1]\n self.output_file = output_file\n self.clear_file()\n\n def clear_file(self):\n if self.output_file:\n with open(self.output_file, 'w') as f:\n f.close()\n\n def get_input(self, from_file=1):\n \"\"\"Get input from file or from stdin.\"\"\"\n return self.lines.pop() if from_file else sys.stdin.readline()\n\n def write_output(self, *content, to_file=1, sep=\" \"):\n \"\"\"Write output to file or to stdout.\"\"\"\n content = sep.join(str(k) for k in content) + \"\\n\"\n if self.output_file and to_file:\n with open(self.output_file, 'a') as f:\n f.write(content)\n f.close()\n else:\n sys.stdout.write(content)","repo_name":"iammanish17/FileIO","sub_path":"FileIO.py","file_name":"FileIO.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"36728831831","text":"import argparse\nimport sys\nimport socket\nimport threading\nimport types\nimport csv\nimport os\nimport broadcast_reciever\nimport broadcast_sender\nimport sensor1\nimport time\n\n\ncache = {\n '/NewYork/Temperature':'80'\n}\ninformationBase= {\n '/NewYork/Sensor':'0'\n}\npendingInterestTable = {}\n\n\n#Tried implementing this to create a global object that can be accessed by the listener.\nclass Unit:\n\n def __init__(self,city, port):\n self.city=city\n self.port=port\n \n def __str__(self):\n return self.city + self.port\n\nthisUnit=Unit(city=\"\",port=0)\nsensorPort=33333\n\nclass Package:\n\n def __init__(self, type, name,sender):\n self.type=type\n self.name=name\n self.sender=sender\n \n def __str__(self):\n return self.type\n \nclass Interest(Package):\n pass\n\n \n\nclass Data(Package):\n\n def __init__(self, content):\n self.content = content\n\n\ndef inputHandler(package,city):\n if str(package.type) == \"interest\":\n forwardingInformationBase(package=package)\n checkSensors(interest=package,city=city)\n checkContentStore(package=package)\n elif str(package.type) ==\"data\":\n contentStore(package)\n\n \ndef checkSensors(interest,city):\n print(\"Sending to sensors\",interest.name)\n print(interest.name)\n splitWords = interest.name.split(\"/\")\n print(splitWords[1])\n if city==splitWords[1]:\n sensor=splitWords[2]\n sensorvalue = sensor1.Sensor.get_sensor(sensor)\n print(sensorvalue)\n dataPackage = Data(content=sensorvalue)\n dataPackage.name = interest.name\n dataPackage.sender = interest.sender\n print(dataPackage.content)\n contentStore(dataPackage=dataPackage)\n\n\ndef forwardData(dataPackage, destination):\n print(destination)\n print(destination, \"for data packet\")\n forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n forward.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n networks = csv.reader(open(\"networks.csv\",\"r\"),delimiter=\",\")\n for row in networks:\n if row[2]==destination:\n target=row[0]\n port=int(row[1])\n print(target,port)\n print(\"Forwarding data to requested destination\")\n forward.connect((target,port))\n message = 
f'{dataPackage.name},{dataPackage.type},{dataPackage.sender},{dataPackage.content}'.encode('utf-8')\n    forward.send(message)\n    forward.close()\n\n\ndef forwardInterest(package):\n    words= package.name.split(\"/\")\n    networkName= words[1]\n    print(networkName)\n    forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    forward.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n    #forward.setblocking(False)\n    networks = csv.reader(open(\"networks.csv\",\"r\"),delimiter=\",\")\n    for row in networks:\n        if row[2]==networkName:\n            target=row[0]\n            port=int(row[1])\n            print(\"Forwarding interest to requested destination\")\n            forward.connect((target,port))\n            message = f'{package.name},{package.type},{package.sender}'.encode('utf-8')\n            print(message)\n            forward.send(message)\n    forward.close()\n\n\ndef contentStore(dataPackage):\n    print(\"Storing in content store\")\n    name = dataPackage.name\n    data = dataPackage.content \n    newContent = {name:data}\n    print(newContent)\n    cache.update(newContent)\n    print(\"Content saved\")\n\ndef checkContentStore(package):\n    print(\"Checking content store\")\n    for name, data in list(cache.items()):\n        if package.name == name:\n            print(\"Found in contentstore\")\n            dataPackage= Data(content=data)\n            dataPackage.name=name\n            dataPackage.type=\"data\"\n            dataPackage.sender=package.sender\n            print(package.sender)\n            forwardData(dataPackage, package.sender)\n    \n\n\ndef checkInterestTable(prefix, sender, content):\n    # iterate over (query, author) pairs; iterating over the dict itself would only yield keys\n    for query, author in pendingInterestTable.items():\n        if prefix == query and author==sender:\n            forwardData(content, author)\n\ndef forwardingInformationBase(package):\n    print(\"Checking informationbase\")\n    exists=False\n    for interest, value in list(informationBase.items()):\n        if package.name == interest:\n            exists=True\n            if value=='0':\n                forwardInterest(package)\n                informationBase[interest]='1'\n            elif value=='1':\n                print(interest, \"Already forwarded\")\n        \n    if exists== False:\n        name = package.name\n        newInterest={name:'1'}\n        print(newInterest)\n        informationBase.update(newInterest)\n        forwardInterest(package)\n\ndef createInterest(input,city):\n    #host = socket.gethostbyname(socket.gethostname())\n    interest = Interest(type=\"interest\",name=input,sender=city)\n    print(\"Created interest\")\n    inputHandler(interest,city)\n\n\ndef ClientConsole(city):\n\n    #listener()\n    print('==================================================')\n    print('Your device is now running')\n    print('==================================================')\n    print('Welcome to the NDN network(input help for help)')\n    while True:\n        operation = input(\">>>\")\n        if operation=='/Local/Sensors':\n            print(\"Sensors\")\n        elif operation=='/Local/Sensors/SensorWeather':\n            createInterest(operation, city)\n        elif operation=='/Local/Sensors/WindSpeed':\n            print(\"Windspeed\")\n        elif operation == 'Broadcast/Recieve':\n            broadcast_reciever.broadcastReceiver()\n        elif operation == 'Broadcast/Send':\n            broadcast_sender.broadcast(thisUnit.port, city)\n        elif operation=='quit':\n            break\n        elif operation=='listen':\n            print(\"Listening\")\n        elif operation=='help':\n            print('/Sensors: Get list of sensors.')\n            print('getf: get file from the server.')\n            print('quit: close the connection and quit.')\n        elif operation =='data':\n            package = Package(type=\"interest\",name=\"/NewYork/Temp\", sender=\"Bob\")\n            checkSensors(interest=package, city=city)\n        else:\n            createInterest(operation,city) \n    print('The client has been logged out.')\n\n    \ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-c\", \"--city\", required=True)\n    parser.add_argument(\"-p\", 
\"--port\", required=True)\n args = parser.parse_args()\n city = args.city\n thisUnit.port = int(args.port)\n os.system('python3 listen.py %d &'%thisUnit.port)\n os.system('python3 sensor1.py &')\n console = threading.Thread(target=ClientConsole(city))\n console.start()\n\nif __name__ == '__main__':\n main()\n","repo_name":"PerAndresen/Project3","sub_path":"forward_engine.py","file_name":"forward_engine.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38034764800","text":"# from django.conf import settings\r\nfrom contextlib import nullcontext\r\nfrom django.contrib import messages\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth.mixins import LoginRequiredMixin\r\nfrom django.shortcuts import render, get_object_or_404\r\nfrom django.views.generic import ListView, DetailView, View\r\nfrom django.shortcuts import redirect\r\nfrom django.utils import timezone\r\nfrom hamcrest import none\r\nfrom .forms import CheckoutForm, RefundForm\r\nfrom .models import Item, OrderItem, Order, BillingAddress, Refund, Category, sizeItems\r\n\r\n\r\n\r\n# Create your views here.\r\nimport random\r\nimport string\r\n# import stripe\r\n# stripe.api_key = settings.STRIPE_SECRET_KEY\r\n\r\n\r\ndef create_ref_code():\r\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))\r\n\r\n\r\n# class PaymentView(View):\r\n# def get(self, *args, **kwargs):\r\n# # order\r\n# order = Order.objects.get(user=self.request.user, ordered=False)\r\n# if order.billing_address:\r\n# context = {\r\n# 'order': order,\r\n# 'DISPLAY_COUPON_FORM': False\r\n# }\r\n# return render(self.request, \"payment.html\", context)\r\n# else:\r\n# messages.warning(\r\n# self.request, \"لم تقم بإضافة عنوان إرسال الفواتير\")\r\n# return redirect(\"core:checkout\")\r\n\r\n# def post(self, *args, **kwargs):\r\n# order = Order.objects.get(user=self.request.user, ordered=False)\r\n# # token = self.request.POST.get('stripeToken')\r\n# amount = int(order.get_total() * 100)\r\n# try:\r\n# # charge = stripe.Charge.create(\r\n# # amount=amount, # cents\r\n# # currency=\"usd\",\r\n# # source=token\r\n# # )\r\n# # create the payment\r\n# payment = Payment()\r\n# # payment.stripe_charge_id = charge['id']\r\n# payment.user = self.request.user\r\n# payment.amount = order.get_total()\r\n# payment.save()\r\n\r\n# # assign the payment to the order\r\n# order.ordered = True\r\n# order.payment = payment\r\n# # TODO : assign ref code\r\n# order.ref_code = create_ref_code()\r\n# order.save()\r\n\r\n# messages.success(self.request, \"تمت إضافة الطلب بنجاح\")\r\n# return redirect(\"/\")\r\n\r\n# # except stripe.error.CardError as e:\r\n# # # Since it's a decline, stripe.error.CardError will be caught\r\n# # body = e.json_body\r\n# # err = body.get('error', {})\r\n# # messages.error(self.request, f\"{err.get('message')}\")\r\n# # return redirect(\"/\")\r\n\r\n# # except stripe.error.RateLimitError as e:\r\n# # # Too many requests made to the API too quickly\r\n# # messages.error(self.request, \"RateLimitError\")\r\n# # return redirect(\"/\")\r\n\r\n# # except stripe.error.InvalidRequestError as e:\r\n# # # معلومات غير صالحة were supplied to Stripe's API\r\n# # messages.error(self.request, \"معلومات غير صالحة\")\r\n# # return redirect(\"/\")\r\n\r\n# # except stripe.error.AuthenticationError as e:\r\n# # # Authentication with Stripe's API failed\r\n# # 
# (maybe you changed API keys recently)\r\n# # messages.error(self.request, \"ليس لديك أذن الدخول\")\r\n# # return redirect(\"/\")\r\n\r\n# # except stripe.error.APIConnectionError as e:\r\n# # # Network communication with Stripe failed\r\n# # messages.error(self.request, \"خطأ في الشبكة\")\r\n# # return redirect(\"/\")\r\n\r\n# # except stripe.error.StripeError as e:\r\n# # # Display a very generic error to the user, and maybe send\r\n# # # yourself an email\r\n# # messages.error(self.request, \"هناك خطأ ما\")\r\n# # return redirect(\"/\")\r\n\r\n# except Exception as e:\r\n# # send an email to ourselves\r\n# messages.error(self.request, \"حدث خطأ جسيم\")\r\n# return redirect(\"/\")\r\n\r\n\r\nclass HomeView(ListView):\r\n template_name = \"index.html\"\r\n queryset = Item.objects.filter(is_active=True)\r\n context_object_name = 'items'\r\n\r\n\r\nclass OrderSummaryView(LoginRequiredMixin, View):\r\n def get(self, *args, **kwargs):\r\n try:\r\n order = Order.objects.get(user=self.request.user, ordered=False)\r\n \r\n # sizeItemList = sizeItems.objects.filter( is_active=True)\r\n context = {\r\n 'object': order,\r\n # 'sizeItemList': sizeItemList\r\n }\r\n return render(self.request, 'order_summary.html', context)\r\n except ObjectDoesNotExist:\r\n messages.error(self.request, \"ليس لديك طلب نشط\")\r\n return redirect(\"/\")\r\n\r\n\r\nclass ShopView(ListView):\r\n model = Item\r\n paginate_by = 6\r\n template_name = \"shop.html\"\r\n\r\n\r\nclass ItemDetailView(DetailView):\r\n model = Item\r\n template_name = \"product-detail.html\"\r\n # context = {\r\n # 'sizeItems': order\r\n # }\r\n\r\n# class CategoryView(DetailView):\r\n# model = Category\r\n# template_name = \"category.html\"\r\n\r\nclass CategoryView(View):\r\n def get(self, *args, **kwargs):\r\n category = Category.objects.get(slug=self.kwargs['slug'])\r\n item = Item.objects.filter(category=category, is_active=True)\r\n context = {\r\n 'object_list': item,\r\n 'category_title': category,\r\n 'category_description': category.description,\r\n 'category_image': category.image\r\n }\r\n return render(self.request, \"category.html\", context)\r\n\r\n\r\nclass CheckoutView(View):\r\n def get(self, *args, **kwargs):\r\n try:\r\n order = Order.objects.get(user=self.request.user, ordered=False)\r\n form = CheckoutForm()\r\n context = {\r\n 'form': form,\r\n 'order': order\r\n }\r\n # 'couponform': CouponForm(),\r\n # 'DISPLAY_COUPON_FORM': False\r\n return render(self.request, \"checkout.html\", context)\r\n\r\n except ObjectDoesNotExist:\r\n messages.info(self.request, \"ليس لديك طلب نشط\")\r\n return redirect(\"core:checkout\")\r\n\r\n def post(self, *args, **kwargs):\r\n form = CheckoutForm(self.request.POST or None)\r\n try:\r\n order = Order.objects.get(user=self.request.user, ordered=False)\r\n print(self.request.POST)\r\n if form.is_valid():\r\n street_address = form.cleaned_data.get('street_address')\r\n apartment_address = form.cleaned_data.get('apartment_address')\r\n country = form.cleaned_data.get('country')\r\n city = form.cleaned_data.get('city')\r\n phone = form.cleaned_data.get('phone')\r\n gps = form.cleaned_data.get('gps')\r\n # add functionality for these fields\r\n # same_shipping_address = form.cleaned_data.get(\r\n # 'same_shipping_address')\r\n save_info = form.cleaned_data.get('save_info')\r\n # print('yasser : ')\r\n # address_type = form.cleaned_data.get('address_type')\r\n billing_address = BillingAddress(\r\n user=self.request.user,\r\n street_address=street_address,\r\n 
apartment_address=apartment_address,\r\n country=country,\r\n city=city,\r\n phone=phone,\r\n save_info=save_info,\r\n gps=gps\r\n )\r\n # address_type=address_type,\r\n billing_address.save()\r\n # assign to the order\r\n order.billing_address = billing_address\r\n if billing_address.save_info== True:\r\n order.shipping_address = billing_address\r\n order.ordered = True\r\n order.save()\r\n OrderItem.objects.filter(order__pk=order.pk).update(ordered=True,ordered_date = timezone.now())\r\n\r\n # orderItems = OrderItem.objects.filter(order__pk=order.pk)\r\n # for order_item in orderItems:\r\n # print(order_item.ordered)\r\n # order_item.ordered = True\r\n # order_item.save()\r\n\r\n messages.success(self.request, \"تمت إضافة الطلب بنجاح\")\r\n return redirect(\"/\")\r\n # add redirect to the selected payment option\r\n # if address_type == 'B':\r\n # return redirect('core:payment', address_type='الدفع فاتورة/نقداً')\r\n # elif address_type == 'S':\r\n # return redirect('core:payment', address_type='الدفع عند التوصيل')\r\n # else:\r\n # messages.warning(\r\n # self.request, \" خيار دفع غير صالح\")\r\n # return redirect('core:checkout')\r\n except ObjectDoesNotExist:\r\n messages.error(self.request, \"ليس لديك طلب نشط\")\r\n return redirect(\"core:order-summary\")\r\n\r\n\r\n# def home(request):\r\n# context = {\r\n# 'items': Item.objects.all()\r\n# }\r\n# return render(request, \"index.html\", context)\r\n#\r\n#\r\n# def products(request):\r\n# context = {\r\n# 'items': Item.objects.all()\r\n# }\r\n# return render(request, \"product-detail.html\", context)\r\n#\r\n#\r\n# def shop(request):\r\n# context = {\r\n# 'items': Item.objects.all()\r\n# }\r\n# return render(request, \"shop.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/login/\")\r\ndef add_to_cart(request, slug ):\r\n item = get_object_or_404(Item, slug=slug)\r\n if request.method =='GET':\r\n print('wwwwwwwww')\r\n if request.method =='POST':\r\n print('GGGGGGGGGGGG')\r\n if 'sizeItemss' in request.GET:\r\n id = request.GET.get(\"sizeItemss\")\r\n if int (id) > 0 :\r\n sizeItem= get_object_or_404(sizeItems,item = item ,pk = id)\r\n order_item, created = OrderItem.objects.get_or_create(\r\n item=item,\r\n user=request.user,\r\n ordered=False,\r\n sizeItem= sizeItem\r\n )\r\n else:\r\n print('44sssssssss4')\r\n order_item, created = OrderItem.objects.get_or_create(\r\n item=item,\r\n user=request.user,\r\n ordered=False,\r\n )\r\n else:\r\n print('4444444ggggg')\r\n order_item, created = OrderItem.objects.get_or_create(\r\n item=item,\r\n user=request.user,\r\n ordered=False,\r\n )\r\n # order_item, created = OrderItem.objects.get_or_create(\r\n # item=item,\r\n # user=request.user,\r\n # ordered=False,\r\n # )\r\n\r\n order_qs = Order.objects.filter(user=request.user, ordered=False)\r\n if order_qs.exists():\r\n order = order_qs[0]\r\n if order.items.filter(item__slug=item.slug).exists():\r\n order_item.quantity += 1\r\n order_item.save()\r\n messages.info(request, \"تم تحديث كمية العنصر.\")\r\n return redirect(\"core:order-summary\")\r\n else:\r\n order.items.add(order_item)\r\n messages.info(request, \"تمت إضافة بند إلى عربة التسوق.\")\r\n return redirect(\"core:order-summary\")\r\n else:\r\n ordered_date = timezone.now()\r\n order = Order.objects.create(\r\n user=request.user, ordered_date=ordered_date)\r\n order.items.add(order_item)\r\n messages.info(request, \"تمت إضافة بند إلى عربة التسوق.\")\r\n return redirect(\"core:order-summary\")\r\n\r\n\r\n@login_required(login_url=\"/login/\")\r\ndef remove_from_cart(request, 
slug):\r\n item = get_object_or_404(Item, slug=slug)\r\n order_qs = Order.objects.filter(\r\n user=request.user,\r\n ordered=False)\r\n if order_qs.exists():\r\n order = order_qs[0]\r\n # check if the order item is in the order\r\n if order.items.filter(item__slug=item.slug).exists():\r\n order_item = OrderItem.objects.filter(\r\n item=item,\r\n user=request.user,\r\n ordered=False\r\n )[0]\r\n order_item.delete()\r\n order.items.remove(order_item)\r\n messages.info(request, \"تمت إزالة العنصر من عربة التسوق الخاصة بك.\")\r\n return redirect(\"core:order-summary\")\r\n else:\r\n # add a message saying the user dosent have an order\r\n messages.info(request, \"العنصر لم يكن في عربة التسوق الخاصة بك.\")\r\n return redirect(\"core:product\", slug=slug)\r\n else:\r\n # add a message saying the user dosent have an order\r\n messages.info(request, \"ليس لديك طلب نشط.\")\r\n return redirect(\"core:product\", slug=slug)\r\n return redirect(\"core:product\", slug=slug)\r\n\r\n\r\n@login_required(login_url=\"/login/\")\r\ndef remove_single_item_from_cart(request, slug):\r\n item = get_object_or_404(Item, slug=slug)\r\n order_qs = Order.objects.filter(\r\n user=request.user,\r\n ordered=False)\r\n if order_qs.exists():\r\n order = order_qs[0]\r\n # check if the order item is in the order\r\n if order.items.filter(item__slug=item.slug).exists():\r\n order_item = OrderItem.objects.filter(\r\n item=item,\r\n user=request.user,\r\n ordered=False\r\n )[0]\r\n if order_item.quantity > 1:\r\n order_item.quantity -= 1\r\n order_item.save()\r\n else:\r\n order_item.delete()\r\n order.items.remove(order_item)\r\n messages.info(request, \" تم تحديث كمية العنصر هذا.\")\r\n return redirect(\"core:order-summary\")\r\n else:\r\n # add a message saying the user dosent have an order\r\n messages.info(request, \"العنصر لم يكن في عربة التسوق الخاصة بك.\")\r\n return redirect(\"core:product\", slug=slug)\r\n else:\r\n # add a message saying the user dosent have an order\r\n messages.info(request, \"ليس لديك طلب نشط.\")\r\n return redirect(\"core:product\", slug=slug)\r\n return redirect(\"core:product\", slug=slug)\r\n\r\n\r\n# def get_coupon(request, code):\r\n# try:\r\n# coupon = Coupon.objects.get(code=code)\r\n# return coupon\r\n# except ObjectDoesNotExist:\r\n# messages.info(request, \"هذه القسيمة غير موجودة\")\r\n# return redirect(\"core:checkout\")\r\n\r\n\r\n# class AddCouponView(View):\r\n# def post(self, *args, **kwargs):\r\n# form = CouponForm(self.request.POST or None)\r\n# if form.is_valid():\r\n# try:\r\n# code = form.cleaned_data.get('code')\r\n# order = Order.objects.get(\r\n# user=self.request.user, ordered=False)\r\n# order.coupon = get_coupon(self.request, code)\r\n# order.save()\r\n# messages.success(self.request, \"تمت إضافة القسيمة بنجاح\")\r\n# return redirect(\"core:checkout\")\r\n\r\n# except ObjectDoesNotExist:\r\n# messages.info(self.request, \"ليس لديك طلب نشط\")\r\n# return redirect(\"core:checkout\")\r\n\r\n\r\nclass RequestRefundView(View):\r\n def get(self, *args, **kwargs):\r\n form = RefundForm()\r\n context = {\r\n 'form': form\r\n }\r\n return render(self.request, \"request_refund.html\", context)\r\n\r\n def post(self, *args, **kwargs):\r\n form = RefundForm(self.request.POST)\r\n if form.is_valid():\r\n ref_code = form.cleaned_data.get('ref_code')\r\n message = form.cleaned_data.get('message')\r\n email = form.cleaned_data.get('email')\r\n # edit the order\r\n try:\r\n order = Order.objects.get(ref_code=ref_code)\r\n order.refund_requested = True\r\n order.save()\r\n\r\n # store the 
refund\r\n            refund = Refund()\r\n            refund.order = order\r\n            refund.reason = message\r\n            refund.email = email\r\n            refund.save()\r\n\r\n            messages.info(self.request, \"تم استلام طلبك\")\r\n            return redirect(\"core:request-refund\")\r\n\r\n        except ObjectDoesNotExist:\r\n            messages.info(self.request, \"هذا الطلب غير موجود\")\r\n            return redirect(\"core:request-refund\")\r\n","repo_name":"fxamar/OceanWind","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"72180091776","text":"from django.core.exceptions import EmptyResultSet\nfrom haystack.inputs import Raw\n\nfrom haystack.query import SearchQuerySet\nfrom django.db.models import Q\n\nfrom ads.models import Ad\nfrom comohay import settings\n\nimport logging\n\n\ndef double_clean(query_fragment, backend):\n    \"\"\"\n    Provides a mechanism for sanitizing user input before presenting the\n    value to the backend.\n\n    A basic (override-able) implementation is provided.\n    \"\"\"\n    if not isinstance(query_fragment, str):\n        return query_fragment\n\n    words = query_fragment.split()\n    cleaned_words = []\n\n    for word in words:\n        if word in backend.RESERVED_WORDS:\n            word = word.replace(word, word.lower())\n\n        for char in backend.RESERVED_CHARACTERS:\n            word = word.replace(char, \"\\\\\\\\%s\" % char)\n\n        cleaned_words.append(word)\n\n    return \" \".join(cleaned_words)\n\n\ndef has_duplicates(ad, verbose=False, title_mm=None, description_mm=None):\n    \"\"\"\n    Returns True if the passed ad has a duplicate in the database according to the Solr index, otherwise returns False\n\n    Arguments\n    ad (`Ad`):\n        The ad to check for duplicates\n    verbose (`bool`):\n        Whether to print information about the process\n    title_mm (`string`):\n        minimum should match for the ad title, see https://solr.apache.org/guide/6_6/the-dismax-query-parser.html#TheDisMaxQueryParser-Themm_MinimumShouldMatch_Parameter\n    description_mm (`string`):\n        minimum should match for the ad description, see https://solr.apache.org/guide/6_6/the-dismax-query-parser.html#TheDisMaxQueryParser-Themm_MinimumShouldMatch_Parameter\n    \"\"\"\n\n    sqs = SearchQuerySet()\n\n    if title_mm is None:\n        title_mm = '{}<{}%'.format(settings.TITLE_MIN_WORDS, settings.TITLE_SIMILARITY)\n\n    if description_mm is None:\n        description_mm = '{}<{}%'.format(settings.DESCRIPTION_MIN_WORDS, settings.DESCRIPTION_SIMILARITY)\n\n    clean_desc = double_clean(ad.description, sqs.query.backend)\n    clean_desc = clean_desc.replace(\"'\", \"\\\\'\")\n    max_desc_len = len(ad.description) + int(len(ad.description) * settings.DESCRIPTION_LENGTH_DIFF)\n\n    clean_title = double_clean(ad.title, sqs.query.backend)\n    clean_title = clean_title.replace(\"'\", \"\\\\'\")\n    max_title_len = len(ad.title) + int(len(ad.title) * settings.TITLE_LENGTH_DIFF)\n\n    # Pair each mm value with its matching field (description_mm with qf=description, title_mm with qf=title).\n    ids_values = sqs.filter(\n        content=Raw(\n            \"description_length:[0 TO {}] AND {{!dismax qf=description mm={} v='{}'}} AND title_length:[0 TO {}] AND {{!dismax qf=title mm={} v='{}'}}\".format(\n                max_desc_len, description_mm, clean_desc, max_title_len, title_mm, clean_title))\n    ).values_list('id')\n\n    ids = list(map(lambda x: x[0].split('.')[-1], ids_values))\n\n    # TODO: think about adding a date comparison. 
It can be possible that the ad content is similar but corresponds\n # to other intent of selling another stock of the same product\n\n a = Q(id__in=ids)\n b = Q()\n\n has_contact_info = False\n\n if ad.contact_phone:\n b |= Q(contact_phone=ad.contact_phone)\n has_contact_info = True\n\n if ad.contact_email:\n b |= Q(contact_email=ad.contact_email)\n has_contact_info = True\n\n if ad.external_contact_id and ad.external_source:\n b |= (Q(external_contact_id=ad.external_contact_id) & Q(external_source=ad.external_source))\n has_contact_info = True\n\n if ad.contact_tg:\n b |= Q(contact_tg=ad.contact_tg)\n has_contact_info = True\n\n if has_contact_info:\n # Looking for duplicated ads from the same contact\n duplicates = Ad.objects.filter(a & (b))\n else:\n # Looking for duplicate ads from the same source that don't have contact information\n duplicates = Ad.objects.filter(\n Q(id__in=ids) &\n Q(external_source=ad.external_source) &\n (Q(contact_phone=None) | Q(contact_phone='')) &\n (Q(contact_email=None) | Q(contact_email='')) &\n (Q(external_contact_id=None) | Q(external_contact_id='')) &\n (Q(contact_tg=None) | Q(contact_tg=''))\n )\n\n if duplicates.count() > 0:\n if verbose:\n print('Found {} duplicates ({}) of ad:\"{}\"'.format(duplicates.count(), ','.join(ids), ad.title))\n for ad in duplicates.all():\n print('Title: {}'.format(ad.title))\n print('------------------------------------------------------------------')\n return True\n\n return False\n\n\ndef remove_duplicates(ad, verbose=False):\n \"\"\"\n Ad :param ad:\n \"\"\"\n\n sqs = SearchQuerySet()\n similarity = int(settings.DESCRIPTION_SIMILARITY * 100)\n\n # If the query has less than 4 clauses then it has to match at 100%, otherwise the number computed in similarity\n similarity = '3<{}'.format(similarity)\n\n clean_desc = double_clean(ad.description, sqs.query.backend)\n clean_desc = clean_desc.replace(\"'\", \"\\\\'\")\n max_desc_len = len(ad.description) + int(len(ad.description) * settings.DESCRIPTION_LENGTH_DIFF)\n\n clean_title = double_clean(ad.title, sqs.query.backend)\n clean_title = clean_title.replace(\"'\", \"\\\\'\")\n max_title_len = len(ad.title) + int(len(ad.title) * settings.TITLE_LENGTH_DIFF)\n\n ids_values = sqs.filter(\n content=Raw(\n \"description_length:[0 TO {}] AND {{!dismax qf=description mm={}% v='{}'}} AND title_length:[0 TO {}] AND {{!dismax qf=title mm={}% v='{}'}}\".format(\n max_desc_len, similarity, clean_desc, max_title_len, similarity, clean_title))\n ).values_list('id')\n\n ids = list(map(lambda x: x[0].split('.')[-1], ids_values))\n\n if (ad.contact_phone is not None and ad.contact_phone != '') or (\n ad.contact_email is not None and ad.contact_email != '') or (\n ad.external_contact_id is not None and ad.external_contact_id != ''):\n try:\n # Remove duplicated ads from same contact\n a = Q(id__in=ids)\n b = Q(contact_email=ad.contact_email)\n c = Q(contact_phone=ad.contact_phone)\n d = Q(external_contact_id=ad.external_contact_id) & Q(external_source=ad.external_source)\n\n to_delete = Ad.objects.filter(a & (b | c | d)).exclude(\n external_source=ad.external_source,\n external_id=ad.external_id\n )\n\n if verbose and to_delete.count() > 0:\n print('Removing {} duplicates ({}) of ad:\"{}\"'.format(to_delete.count(), ','.join(ids), ad.title))\n for ad in to_delete.all():\n print('Title: {}'.format(ad.title))\n # print('Description: {}'.format(ad.description))\n\n print('------------------------------------------------------------------')\n\n to_delete.delete()\n\n except Exception as e:\n 
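# Swallow deletion errors here: log them rather than letting one bad query abort the caller.\n            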
logging.error(\"Error removing duplicated items: \" + str(e))\n\n else:\n try:\n # Remove duplicated ads from same source\n a = Q(id__in=ids)\n b = Q(external_source=ad.external_source)\n\n to_delete = Ad.objects.filter(a & b).exclude(\n external_source=ad.external_source,\n external_id=ad.external_id\n )\n\n if verbose and to_delete.count() > 0:\n print('Removing {} duplicates ({}) of ad:\"{}\"'.format(to_delete.count(), ','.join(ids), ad.title))\n for ad in to_delete.all():\n print('Title: {}'.format(ad.title))\n # print('Description: {}'.format(ad.description))\n\n print('------------------------------------------------------------------')\n\n to_delete.delete()\n\n except Exception as e:\n logging.error(\"Error removing duplicated items: \" + str(e))\n","repo_name":"daxslab/comohay","sub_path":"ads/services/ad_service.py","file_name":"ad_service.py","file_ext":"py","file_size_in_byte":7797,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"70055763137","text":"from __future__ import absolute_import, unicode_literals\n\nimport os\n\nfrom django.core.checks import Error, Warning, register\n\n\n@register()\ndef css_install_check(app_configs, **kwargs):\n errors = []\n\n css_path = os.path.join(\n os.path.dirname(__file__), 'static', 'wagtailadmin', 'css', 'normalize.css'\n )\n\n if not os.path.isfile(css_path):\n error_hint = \"\"\"\n Most likely you are running a development (non-packaged) copy of\n Wagtail and have not built the static assets -\n see http://docs.wagtail.io/en/latest/contributing/developing.html\n\n File not found: %s\n \"\"\" % css_path\n\n errors.append(\n Warning(\n \"CSS for the Wagtail admin is missing\",\n hint=error_hint,\n id='wagtailadmin.W001',\n )\n )\n return errors\n\n\n@register()\ndef base_form_class_check(app_configs, **kwargs):\n from wagtail.wagtailadmin.forms import WagtailAdminPageForm\n from wagtail.wagtailcore.models import get_page_models\n\n errors = []\n\n for cls in get_page_models():\n if not issubclass(cls.base_form_class, WagtailAdminPageForm):\n errors.append(Error(\n \"{}.base_form_class does not extend WagtailAdminPageForm\".format(\n cls.__name__),\n hint=\"Ensure that {}.{} extends WagtailAdminPageForm\".format(\n cls.base_form_class.__module__,\n cls.base_form_class.__name__),\n obj=cls,\n id='wagtailadmin.E001'))\n\n return errors\n\n\n@register()\ndef get_form_class_check(app_configs, **kwargs):\n from wagtail.wagtailadmin.forms import WagtailAdminPageForm\n from wagtail.wagtailcore.models import get_page_models\n\n errors = []\n\n for cls in get_page_models():\n edit_handler = cls.get_edit_handler()\n if not issubclass(edit_handler.get_form_class(cls), WagtailAdminPageForm):\n errors.append(Error(\n \"{cls}.get_edit_handler().get_form_class({cls}) does not extend WagtailAdminPageForm\".format(\n cls=cls.__name__),\n hint=\"Ensure that the EditHandler for {cls} creates a subclass of WagtailAdminPageForm\".format(\n cls=cls.__name__),\n obj=cls,\n id='wagtailadmin.E002'))\n\n return errors\n","repo_name":"zhl2008/awd-platform","sub_path":"web_hxb2/lib/python3.5/site-packages/wagtail_bak/wagtailadmin/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":574,"dataset":"github-code","pt":"79"} +{"seq_id":"32408387862","text":"from flask import Flask, request, redirect\nfrom espeak import espeak\nimport twilio.twiml\nimport urllib, pycurl, os\nimport collections\nimport re\nimport subprocess\n\ndef 
getPhrase(phrase):\n\ttextPhrase = \"\"\n\tparameters = {\"\": phrase}\n\tdata = urllib.urlencode(parameters)\n\ttextPhrase = \"%s%s\" % (textPhrase,data)\n\treturn textPhrase\n\ndef speakSpeechFromText(phrase):\n\tphrase = getPhrase(phrase)\n\tespeak.synth(phrase)\n\tprint(\"Espeak on\")\napp = Flask(__name__)\n@app.route(\"/\", methods=['GET', 'POST'])\ndef hello_monkey():\n \"\"\"Respond to incoming calls with a simple text message.\"\"\"\n sms = request.args.get('Body')\n\t\n if not sms == \"\":\n speakSpeechFromText(sms)\n resp = twilio.twiml.Response()\n return str(resp)\n\nif __name__ == \"__main__\":\n\tprint (\"Hello twilio\")\n\tapp.run( host='0.0.0.0', debug=True, port = 80)\n","repo_name":"ferzeuz/SMStoSpeech","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2011110721","text":"from django.urls import path, include\nfrom django.contrib import admin\nfrom django.conf.urls.static import static\nfrom django.conf.urls.i18n import i18n_patterns\n\n\nfrom rest_framework_swagger.views import get_swagger_view\nfrom rest_framework_simplejwt.views import (\n TokenVerifyView,\n TokenObtainPairView,\n TokenRefreshView,\n)\n\nfrom main import settings\nfrom main.yasg import urlpatterns as doc_urls\n\n\nschema_view = get_swagger_view(title='Pastebin API')\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('restframework/', include('rest_framework.urls')),\n path('api/token/access/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('api/token/verify/', TokenVerifyView.as_view(), name='token_verify'),\n path('i18n/', include('django.conf.urls.i18n')),\n # APPS\n path('user/', include('apps.users.urls')),\n path('main_page/', include('apps.main_page.urls')),\n path('investor/', include('apps.investor.urls')),\n path('feedback/', include('apps.feedback.urls')),\n path('other/', include('apps.other.urls')),\n path('trade_zone/', include('apps.trade_zone.urls')),\n path('invest_zone/', include('apps.invest_zone.urls')),\n path('food_zone/', include('apps.food_zone.urls')),\n path('fashion_zone/', include('apps.fashion_zone.urls')),\n path('b2b_meeting/', include('apps.b2b_meeting.urls')),\n path('tickets/', include('apps.ticket.urls')),\n path('profile_visit/', include('apps.profile_visit.urls')),\n path('chat/', include('apps.chat.urls')),\n]\n\nurlpatterns += i18n_patterns(\n path('user/', include('apps.users.urls')),\n path('main_page/', include('apps.main_page.urls')),\n path('investor/', include('apps.investor.urls')),\n path('feedback/', include('apps.feedback.urls')),\n path('other/', include('apps.other.urls')),\n path('trade_zone/', include('apps.trade_zone.urls')),\n path('invest_zone/', include('apps.invest_zone.urls')),\n path('fashion_zone/', include('apps.fashion_zone.urls')),\n path('b2b_meeting/', include('apps.b2b_meeting.urls')),\n)\n\nurlpatterns += doc_urls\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"Bilalchik/hit_expo","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71905215936","text":"from odoo import _, api, fields, models\nfrom odoo.exceptions import UserError\n\n\nclass AccountMove(models.Model):\n 
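# Extends the core journal entry with asset and rent-schedule bookkeeping fields.\n    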
_inherit = \"account.move\"\n _description = \"Account Entry\"\n\n asset_id = fields.Many2one(\n comodel_name='account.asset',\n help='Asset')\n schedule_date = fields.Date(\n string='Schedule Date',\n help='Rent Schedule Date.')\n source = fields.Char(\n string='Account Source',\n help='Source from where account move created.')\n\n def assert_balanced(self):\n prec = self.env['decimal.precision'].precision_get('Account')\n if self.ids:\n self._cr.execute(\"\"\"\n SELECT move_id FROM account_move_line WHERE move_id in %s\n GROUP BY move_id HAVING abs(sum(debit) - sum(credit)) > %s\n \"\"\", (tuple(self.ids), 10 ** (-max(5, prec))))\n if self._cr.fetchall():\n raise UserError(_(\"Cannot create unbalanced journal entry.\"))\n return True\n\n\nclass AccountMoveLine(models.Model):\n _inherit = \"account.move.line\"\n\n property_id = fields.Many2one(\n comodel_name='account.asset',\n string='Property',\n help='Property Name.')\n\n\nclass AccountPaymentRegister(models.TransientModel):\n _inherit = 'account.payment.register'\n\n tenancy_id = fields.Many2one(\n comodel_name='account.analytic.account',\n string='Tenancy',\n help='Tenancy Name.')\n property_id = fields.Many2one(\n comodel_name='account.asset',\n string='Property',\n help='Property Name.')\n\n @api.model\n def default_get(self, fields_list):\n # OVERRIDE\n res = super().default_get(fields_list)\n context = dict(self._context) or {}\n active_id = self.env[context.get('active_model')].browse(\n context.get('active_id'))\n if active_id:\n res['property_id'] = active_id.property_id.id or False\n res['tenancy_id'] = active_id.new_tenancy_id.id or False\n return res\n\n def action_create_payments(self):\n res = super(AccountPaymentRegister, self).action_create_payments()\n context = dict(self._context) or {}\n if self._context.get('asset') or self._context.get('openinvoice'):\n schedule_obj = self.env['tenancy.rent.schedule']\n invoice_id = context.get('active_id')\n for schedule in schedule_obj.search([('invc_id', '=', invoice_id)]):\n amount = 0.0\n if schedule.invc_id.state == 'paid':\n schedule.paid = True\n schedule.move_check = True\n if schedule.invc_id:\n amount = schedule.invc_id.amount_residual\n schedule.write({'pen_amt': amount})\n return res\n\n def _create_payment_vals_from_wizard(self):\n res = super()._create_payment_vals_from_wizard()\n res.update({'asset_id': self.property_id.id,\n 'property_id': self.property_id.id, 'tenancy_id': self.tenancy_id.id})\n return res\n\n\nclass AccountPayment(models.Model):\n _inherit = 'account.payment'\n\n tenancy_id = fields.Many2one(\n comodel_name='account.analytic.account',\n string='Tenancy',\n help='Tenancy Name.')\n property_id = fields.Many2one(\n comodel_name='account.asset',\n string='Property',\n help='Property Name.')\n amount_due = fields.Monetary(\n comodel_name='res.partner',\n related='partner_id.credit',\n readonly=True,\n default=0.0,\n help='Display Due amount of Customer')\n\n def action_post(self):\n res = super(AccountPayment, self).action_post()\n invoice_obj = self.env['account.move']\n context = dict(self._context or {})\n for rec in self:\n if context.get('return'):\n invoice_browse = invoice_obj.browse(\n context.get('active_id')).new_tenancy_id\n invoice_browse.write({'amount_return': rec.amount})\n if context.get('deposite_received'):\n tenancy_active_id = self.env[\n 'account.analytic.account'].browse(context.get('active_id'))\n tenancy_active_id.write({'amount_return': rec.amount})\n return res\n\n @api.model\n def create(self, vals):\n res = 
super(AccountPayment, self).create(vals)\n if res and res.id and res.tenancy_id and res.tenancy_id.id:\n if res.payment_type == 'inbound':\n res.tenancy_id.write({'acc_pay_dep_rec_id': res.id})\n if res.payment_type == 'outbound':\n res.tenancy_id.write({'acc_pay_dep_ret_id': res.id})\n return res\n\n def _prepare_move_line_default_vals(self, write_off_line_vals):\n result = super()._prepare_move_line_default_vals(write_off_line_vals)\n context = dict(self._context) or {}\n for line in result:\n if not self.move_id.asset_id:\n self.move_id.asset_id = self.property_id.id or False\n if context.get('account_deposit_received') and line.get('debit') > 0 and self.tenancy_id.id:\n if self.payment_type in ('inbound', 'outbound'):\n line.update({\n 'analytic_account_id': self.tenancy_id.id,\n 'property_id': self.property_id.id\n })\n return result\n\n def _seek_for_lines(self):\n rec = super(AccountPayment, self)._seek_for_lines()\n if rec and rec[0] and self.tenancy_id and self.tenancy_id.id:\n if self.payment_type in ('inbound', 'outbound'):\n rec[0].update({'analytic_account_id': self.tenancy_id.id, 'property_id': self.property_id.id})\n return rec\n\n\nclass AccountInvoice(models.Model):\n _inherit = \"account.move\"\n\n property_id = fields.Many2one(\n comodel_name='account.asset',\n string='Property',\n help='Property Name.')\n new_tenancy_id = fields.Many2one(\n comodel_name='account.analytic.account',\n string='Tenancy ')\n","repo_name":"hassanshah9586/Mishhin","sub_path":"Mishhin-production/property_management_ee/models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":6062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36413776343","text":"from models.updown import UpDown\nfrom models.xlan import XLAN\nfrom models.xtransformer import XTransformer\nfrom models.transformer import Transformer\nfrom models.btoformer import Btoformer, Objformer\n\n__factory = {\n 'UpDown': UpDown,\n 'XLAN': XLAN,\n 'XTransformer': XTransformer,\n 'Transformer': Transformer,\n 'Btoformer': Btoformer,\n 'Objformer': Objformer\n}\n\ndef names():\n return sorted(__factory.keys())\n\ndef create(name, *args, **kwargs):\n if name not in __factory:\n raise KeyError(\"Unknown caption model:\", name)\n return __factory[name](*args, **kwargs)","repo_name":"YehLi/BTO-Net","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"37371233254","text":"#!/usr/bin/python3\n\n# Example of receiving and processing data using textfsm\n\nimport yaml\nimport textfsm\nimport myworkfuncs\nfrom tabulate import tabulate\n\nif __name__ == '__main__':\n\n devices = yaml.safe_load(open('mydevices.yaml'))\n all_done = myworkfuncs.threads_conn('connect_ssh', devices['routers'], command='sh ver')\n\n with open(\"cisco_ios_sh_ver_custom.textfsm\") as f:\n re_table = textfsm.TextFSM(f)\n header = re_table.header\n\n for item in all_done:\n for crouter in item:\n print(item[crouter])\n result = re_table.ParseText(item[crouter])\n print(tabulate(result, headers=header))\n print()\n","repo_name":"DmitriyPanteleev/my-network-automation","sub_path":"some_netinfo_parsers/parse_w_textfsm.py","file_name":"parse_w_textfsm.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42141133281","text":"from copy import 
deepcopy\nimport sudoku\n\n\n# single solution\ninput1 = [\n [0, 0, 6, 1, 0, 0, 0, 0, 8], \n [0, 8, 0, 0, 9, 0, 0, 3, 0], \n [2, 0, 0, 0, 0, 5, 4, 0, 0], \n [4, 0, 0, 0, 0, 1, 8, 0, 0], \n [0, 3, 0, 0, 7, 0, 0, 4, 0], \n [0, 0, 7, 9, 0, 0, 0, 0, 3], \n [0, 0, 8, 4, 0, 0, 0, 0, 6], \n [0, 2, 0, 0, 5, 0, 0, 8, 0], \n [1, 0, 0, 0, 0, 2, 5, 0, 0],\n]\n\n# multiple solutions\ninput2 = [\n [9, 0, 3, 0, 0, 0, 0, 5, 0],\n [0, 0, 8, 0, 0, 0, 3, 0, 1],\n [0, 0, 0, 1, 0, 0, 0, 0, 0],\n [2, 0, 7, 0, 0, 0, 1, 4, 8],\n [0, 6, 1, 0, 4, 0, 9, 0, 0],\n [0, 9, 4, 2, 7, 0, 0, 6, 0],\n [4, 2, 5, 3, 0, 6, 8, 7, 0],\n [0, 0, 6, 9, 5, 0, 4, 3, 0],\n [0, 0, 9, 0, 0, 0, 0, 1, 5],\n]\n\n\n\ndef is_single_solution(iterations:int=50, board:list[list[int]]=None, difficulty:str='easy'):\n\n solutions = set()\n if board is None:\n sb = sudoku.SudokuBoard(difficulty=difficulty)\n else: \n sb = board\n for i in range(iterations):\n sb_copy = deepcopy(sb)\n print(f'solving #{i}....')\n sb_copy.solve_board()\n solutions.add(''.join([ str(num) for row in sb_copy.board for num in row]))\n\n for solution_str in solutions:\n solution = [list(solution_str[i*9 : i*9 + 9]) for i in range(0, 9)]\n for row in solution:\n print([int(num) for num in row])\n print()\n print()\n\n print('Original puzzle:')\n for row in sb.board:\n print(row)\n\n print()\n print('Unique solutions:', len(list(solutions)))\n print()\n\nis_single_solution()\n\n# sb = sudoku.SudokuBoard(board=input2)\n# sb.solve_board()\n# for row in sb.board:\n# print(row)","repo_name":"FirstFlush/sudoku","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"1790961190","text":"#!/usr/bin/env python\n#\n# windyworld.py\n#\n\nimport os\nimport shutil\nimport random\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.rcParams['backend'] = 'TkAgg'\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport copy\nfrom datetime import datetime as dt\n\n\nclass Env:\n\n def __init__(self, action_size=4, stochastic_wind=False):\n\n self.action_size = action_size # 4 for four move; 8 for king's move\n self.stochastic_wind = stochastic_wind\n random.seed(0)\n self.dim = (10, 7)\n self.start = [0, 3]\n self.goal = [7, 3]\n self.wind = [0,0,0,1,1,1,2,2,1,0]\n self.FOURMOVE = {\n 0: (0, 1), # NORTH\n 1: (1, 0), # EAST\n 2: (0, -1), # SOUTH\n 3: (-1, 0)} # WEST\n self.KINGSMOVE = {\n 0: (0, 1), # NORTH\n 1: (1, 1), # NORTHEAST\n 2: (1, 0), # EAST\n 3: (1, -1), # SOUTHEAST\n 4: (0, -1), # SOUTH\n 5: (-1, -1), # SOUTHWEST\n 6: (-1, 0), # WEST\n 7: (-1, 1), # NORTHWEST\n 8: (0, 0)} # STAY\n\n def reset(self):\n\n self.state = self.start.copy()\n self.map = np.full(self.dim, 9)\n\n \"\"\"\nmove 0:north, 1:east, 2:south, 3:west \n \"\"\"\n def act(self, move):\n\n ### set action ID to map ###\n state0 = self.state.copy()\n self.map[state0[0], state0[1]] = move\n\n if self.action_size == 4:\n x, y = self.FOURMOVE[move]\n elif self.action_size in [8, 9]: # King's move w/o a nith move\n x, y = self.KINGSMOVE[move]\n\n self.state[0] += x\n self.state[1] += y + self.wind[state0[0]] # plus wind\n\n ### STOCHASTIC WIND IF THERE IS WIND ###\n # above in 1/3, below in 1/3, and no effect in 1/3\n if self.stochastic_wind and self.wind[state0[0]] >= 1:\n self.state[1] += random.randint(0, 2) - 1\n\n r = -1\n is_goal = False\n if self.state == self.goal:\n is_goal = True\n r = 0\n if self.state[0] < 0:\n self.state[0] = 0\n elif 
self.state[0] >= self.dim[0]:\n            self.state[0] = self.dim[0] - 1\n        if self.state[1] < 0:\n            self.state[1] = 0\n        elif self.state[1] >= self.dim[1]:\n            self.state[1] = self.dim[1] - 1\n        return r, is_goal\n\n    def show_map(self):\n\n        print (np.flipud(np.transpose(env.map)))\n\n\nclass AbstractAgent:\n\n    def __init__(self, dim, epsilon, initializer='random'):\n\n        self.dim = dim\n        self.epsilon = epsilon\n        self.initializer = initializer\n        self.initialize()\n\n    def initialize(self):\n\n        if self.initializer == 'zero':\n            self.q = np.zeros((self.dim[0], self.dim[1], self.action_size))\n        elif self.initializer == 'random':\n            self.q = np.random.rand(self.dim[0], self.dim[1], self.action_size)\n\n    def e_greedy(self, state):\n\n        if random.random() < self.epsilon: # RANDOM\n            return random.randint(0, self.action_size - 1)\n        else:\n            return np.argmax(self.q[state[0], state[1], :])\n\n    def max_q(self, state):\n\n        return max(self.q[state[0], state[1], :])\n\n    def __getitem__(self, s, a):\n\n        return self.q[s[0], s[1], a]\n\n    def get_q(self, s, a):\n\n        return self.q[s[0], s[1], a]\n\n    '''\n    def get_prob(self, s):\n\n        print (self.q[s[0], s[1], :])\n        return None\n    '''\n\n    def show_value(self, png_file):\n\n        m = np.max(self.q, axis=2)\n        sns.heatmap(m.transpose())\n        plt.savefig(png_file)\n        plt.close('all')\n\n    def show_arrow(self):\n\n        m = np.argmax(self.q, axis=2)\n        arrow = list(map(lambda x: ' '.join([self.ARROW[x] for x in x]), m.transpose()))\n        for a in reversed(arrow):\n            print (a)\n\n    def get_action_str(self, a_list):\n\n        if self.action_size == 4:\n            delimiter = ''\n        elif self.action_size in [8,9]:\n            delimiter = ' '\n        return delimiter.join([self.DIRECTION[a] for a in a_list])\n\n    def find_policy(self):\n\n        m = ([[np.argmax(self.q[i,j,:]) for i in range(self.dim[0])] for j in range(self.dim[1])])\n        print (np.flipud(np.array(m)))\n\n\ndef softmax(x):\n\n    return np.exp(x) / np.sum(np.exp(x))\n\n\nclass FourMoveAgent(AbstractAgent):\n\n    def __init__(self, dim, epsilon, initializer='random'):\n\n        self.dim = dim\n        self.action_size = 4\n        self.epsilon = epsilon\n        self.initializer = initializer\n        self.AGENTTYPE = 4\n        self.DIRECTION = {0: 'U', 1:'R', 2:'D', 3:'L'}\n        self.ARROW = {0: '^', 1:'>', 2:'v', 3:'<'}\n        self.initialize()\n    \n\nclass KingsMoveAgent(AbstractAgent):\n\n    def __init__(self, epsilon, action_size=8, initializer='random'):\n\n        self.dim = (10, 7)\n        self.action_size = action_size\n        self.epsilon = epsilon\n        self.initializer = initializer\n        #self.DIRECTION = {0: 'U', 1:'r', 2:'R', 3:'e', 4: 'D', 5:'w', 6:'L', 7:'l'}\n        self.DIRECTION = {0: '⬆️', 1:'↗️', 2:'➡️', 3:'↘️', 4: '⬇️', 5:'↙️', 6:'⬅️', 7:'↖️', 8:'🔄'}\n        self.ARROW = {0: '⬆️', 1:'↗️', 2:'➡️', 3:'↘️', 4: '⬇️', 5:'↙️', 6:'⬅️', 7:'↖️', 8:'🔄'}\n        self.initialize()\n    \n    \nclass ActorCriticAgent:\n\n    def __init__(self, dim, epsilon):\n\n        self.epsilon = epsilon\n        self.value = np.zeros(dim)\n        self.policy = np.zeros((dim[0], dim[1], 4))\n\n    def e_greedy(self, state):\n\n        if random.random() < self.epsilon: # RANDOM\n            return random.randint(0, 3)\n        else:\n            return np.argmax(self.policy[state[0], state[1], :])\n\n\ndef sarsa(env, agent, alpha, gamma):\n\n    ### SARSA ###\n    env.reset()\n    a = agent.e_greedy(env.state)\n    a_list = []\n    r = -1\n    R = 0\n    i = 0\n    is_goal = False\n    while not is_goal: # AN EPISODE\n        a_list.append(a)\n        i += 1\n        s0 = env.state.copy()\n        r, is_goal = env.act(a)\n        R += r\n        a1 = agent.e_greedy(env.state)\n        value = agent.get_q(s0, a)\n        agent.q[s0[0], s0[1], a] = agent.get_q(s0, a) \\\n            + alpha * (r + gamma * agent.get_q(env.state, a1) - agent.get_q(s0, a))\n        a = copy.copy(a1)\n        a_list.append(a)\n    return i, R, a_list\n\n\ndef q_learn(env, agent, alpha, gamma):\n\n    ### Q-LEARNING ###\n    env.reset()\n    R = 0\n    i = 0\n    r = -1\n    is_goal = False\n    while not is_goal:\n        i += 1\n        s0 = env.state.copy()\n        a = agent.e_greedy(env.state)\n        r, is_goal = env.act(a)\n        R += r\n        value = agent.q[s0[0], s0[1], a]\n        agent.q[s0[0], s0[1], a] = agent.get_q(s0, a) + alpha * (r + gamma * agent.max_q(env.state) - agent.get_q(s0, a))\n    return i, R\n    \n\ndef actor_critic(env, agent, alpha, gamma):\n\n    ### ACTOR-CRITIC ###\n    env.reset()\n    R = 0\n    i = 0\n    r = -1\n    is_goal = False\n    while not is_goal:\n        i += 1\n        s0 = env.state.copy()\n        a = agent.e_greedy(env.state)\n        r, is_goal = env.act(a)\n        R += r\n        value = agent.value[s0[0], s0[1]]\n        policy = agent.policy[s0[0], s0[1], a]\n        delta = r + gamma * agent.value[env.state[0], env.state[1]] - value\n        agent.value[s0[0], s0[1]] += delta\n        agent.policy[s0[0], s0[1], a] += delta\n        if is_goal:\n            break\n    return i, R\n\n\ndef show_step_graph(step_list, std_list, png_file):\n\n    plt.plot(step_list, label='#steps')\n    plt.plot(std_list, label='SD')\n    plt.yscale('log')\n    plt.savefig(png_file)\n    plt.close('all')\n    return\n\n\n###\nif __name__ == '__main__':\n\n    epsilon = 0.1\n    alpha = 0.5\n    #alpha = 0.1\n    #alpha = 0.01\n    gamma = 1.0\n    dim = (10, 7)\n    num = 1000\n    slide = 20\n    stochastic_wind = True\n\n    now = dt.now()\n\n    #agent = FourMoveAgent(epsilon)\n    #ql_agent = FourMoveAgent(epsilon)\n\n    #agent = KingsMoveAgent(epsilon)\n    #ql_agent = KingsMoveAgent(epsilon)\n    agent = KingsMoveAgent(epsilon, 8)\n    ql_agent = KingsMoveAgent(epsilon, 8)\n    #agent = KingsMoveAgent(epsilon, 9)\n    #ql_agent = KingsMoveAgent(epsilon, 9)\n    ac_agent = ActorCriticAgent(dim, epsilon)\n\n    if stochastic_wind:\n        sw_tag = '-sw'\n    else:\n        sw_tag = ''\n    png_dir = '%s-%s%s' % (now.strftime('png-%y%m%d-%H%M%S'), agent.action_size, sw_tag)\n    env = Env(agent.action_size, stochastic_wind)\n    w = []\n    s_step_list, step_std_list = [], []\n\n    if os.path.isdir(png_dir):\n        shutil.rmtree(png_dir)\n    os.mkdir(png_dir)\n    step_graph_file = '%s/step_list.png' % png_dir\n\n    for n in range(num):\n        s_step, s_r, s_a = sarsa(env, agent, alpha, gamma)\n        #ql_step, ql_r = q_learn(env, ql_agent, alpha, gamma)\n        #ac_step, ac_r = actor_critic(env, ac_agent, alpha, gamma)\n        #w.append([n + 1, s_step, s_r, ql_step, ql_r, ac_step, ac_r])\n        w.append([n + 1, s_step, s_r])\n        s_step_list.append(s_step)\n        step_slide = np.array(s_step_list[-slide:])\n        step_std_list.append(step_slide.mean())\n        s_a_str = agent.get_action_str(s_a)\n        print ('%3d %3d %2.2f %2.2f' % (n+1, s_step, step_slide.mean(), step_slide.std()), s_a_str)\n        if (n+1) % 10 == 0:\n            png_file = '%s/value-%03d.png' % (png_dir, n+1)\n            agent.show_value(png_file)\n            agent.show_arrow()\n    show_step_graph(s_step_list, step_std_list, step_graph_file)\n","repo_name":"kawagashira/sutton","sub_path":"windyworld/windyworld.py","file_name":"windyworld.py","file_ext":"py","file_size_in_byte":9626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"10056494531","text":"\"\"\"\nVector2 that handles point screen coordinates\nTransformations related to the game position & game size happen here\n\"\"\"\n\n\nclass Vec2:\n    \"Vector 2 class that has methods to scale screen coordinates\"\n\n    screen_x_offset: int = 0\n    screen_y_offset: int = 0\n    screen_x_scale: int = 1\n    screen_y_scale: int = 1\n\n    def __init__(self, x_pos, y_pos, use_screen_offset: bool = True) -> None:\n        self.x_pos = x_pos\n        
self.y_pos = y_pos\n self.use_screen_offset: bool = use_screen_offset\n\n def get_coords(self) -> tuple:\n \"\"\"Returns screen coordinates with transformations\"\"\"\n x_pos = self.x_pos * Vec2.screen_x_scale\n y_pos = self.y_pos * Vec2.screen_y_scale\n\n if self.use_screen_offset:\n return (round(x_pos + Vec2.screen_x_offset),\n round(y_pos + Vec2.screen_y_offset))\n\n return (round(x_pos), round(y_pos))\n\n @classmethod\n def setup_screen(cls, x_pos: int, y_pos: int, width: int, height: int) -> None:\n \"\"\"Setup for screen coordinate offset and scale\"\"\"\n Vec2.screen_x_offset = x_pos\n Vec2.screen_y_offset = y_pos\n Vec2.screen_x_scale = width / 1920\n Vec2.screen_y_scale = height / 1080\n","repo_name":"jfd02/TFT-OCR-BOT","sub_path":"vec2.py","file_name":"vec2.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":276,"dataset":"github-code","pt":"79"} +{"seq_id":"23865079507","text":"import logging\nimport os\nfrom random import choice\nfrom argparse import ArgumentParser\nfrom urllib.parse import urlparse\n\nfrom notion.client import NotionClient\nfrom notion.block import Block, PageBlock, CollectionViewBlock\nfrom emoji import EMOJI_UNICODE\nimport frontmatter\n\nfrom .markdown import convert\n\ntry:\n from dotenv import load_dotenv\n load_dotenv()\nexcept:\n pass\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef random_emoji():\n # Don't allow people, hands, or fingers.\n forbidden_emoji_patterns = ['child', 'skin_tone', 'person', 'hand', 'finger']\n\n emoji_key = None\n while not emoji_key:\n emoji_key = choice(list(EMOJI_UNICODE.keys()))\n\n for pattern in forbidden_emoji_patterns:\n if pattern in emoji_key:\n emoji_key = None\n break\n\n return EMOJI_UNICODE[emoji_key]\n\n\ndef infer_block(root_block, path) -> Block:\n name, ext = os.path.splitext(path)\n\n if name == 'index':\n return root_block\n\n if ext != '.md' and ext != '':\n return None\n\n title = name.replace('-', ' ').replace('_', ' ').capitalize()\n\n for block in root_block.children:\n if block.type != 'page':\n continue\n\n if block.title != title:\n continue\n\n return block\n\n # Create a new page block\n\n return root_block.children.add_new(PageBlock, title=title)\n\n\ndef move_pages_to_end(block):\n # Move pages to the end of the document if they aren't already\n pages_to_move = []\n pages_seen = []\n\n for c in block.children:\n if c.type == 'page':\n pages_seen.append(c)\n else:\n pages_to_move.extend(pages_seen)\n pages_seen.clear()\n\n for page in pages_to_move:\n logger.info(f\"Moving page {page.id} to end of {block.id}\")\n page.move_to(block, 'last-child')\n\n\ndef block_matches_markdown_block(block, markdown_block_type, **markdown_block):\n if markdown_block_type != type(block):\n return False\n\n for key, value in markdown_block.items():\n if key in ['type', 'schema', 'rows']:\n continue\n\n block_attr = getattr(block, key)\n\n if block_attr != value:\n return False\n\n return True\n\n\ndef sync_collection_schema(collection, expected_schema):\n existing_schema = collection.get('schema')\n\n # The schemas must match!\n if existing_schema == expected_schema:\n return\n\n logger.info(f\"Updating schema of {collection.id}\")\n\n # If they don't, try to make them match.\n collection.set('schema', expected_schema)\n\n\ndef sync_collection_rows(block, collection_schema, collection_rows):\n if block.collection is None:\n logger.info(f\"Creating a new collection for {block.id}\")\n # We should have generated a schema and rows for this one\n client = 
block._client # Hacky internals stuff...\n block.collection = client.get_collection(\n # Low-level use of the API\n # TODO: Update when notion-py provides a better interface for this\n client.create_record(\"collection\", parent=block, schema={\"title\": {\"text\": \"_\", \"type\": \"text\"}})\n )\n\n block.views.add_new(view_type=\"table\")\n\n collection_schema_ids = ['title']\n\n for i in range(len(collection_schema) - 1):\n collection_schema_ids.append('x' + format(i, '0>4x'))\n\n sync_collection_schema(block.collection, dict(zip(collection_schema_ids, collection_schema)))\n\n existing_rows = block.collection.get_rows()\n\n for extra_row in existing_rows[len(collection_rows):]:\n extra_row.remove()\n\n existing_rows_iter = iter(existing_rows)\n\n for row in collection_rows:\n try:\n row_block = next(existing_rows_iter)\n except StopIteration:\n row_block = block.collection.add_row()\n\n if len(row) > len(collection_schema_ids):\n row = row[:len(collection_schema_ids)]\n\n row = zip(collection_schema_ids, row)\n\n for schema_id, prop_value in row:\n if row_block.get_property(schema_id) != prop_value:\n row_block.set_property(schema_id, prop_value)\n\n\ndef sync_markdown_blocks_to_block(markdown_blocks, block):\n touched_blocks = set()\n children_iter = iter(block.children)\n\n for markdown_block in markdown_blocks:\n markdown_block_class = markdown_block[\"type\"]\n del markdown_block[\"type\"]\n\n markdown_contents = markdown_block.pop(\"title\", None)\n collection_schema = markdown_block.pop(\"schema\", None)\n collection_rows = markdown_block.pop(\"rows\", None)\n block_children = markdown_block.pop(\"children\", None)\n\n try:\n child_block = next(children_iter)\n while not block_matches_markdown_block(child_block, markdown_block_class, **markdown_block):\n child_block = next(children_iter)\n logger.info(f\"Using existing markdown block {child_block.id} in {block.id}\")\n except StopIteration:\n # If we've hit the end of the children create a new child.\n child_block = block.children.add_new(markdown_block_class, **markdown_block)\n logger.info(f\"Creating new markdown block {child_block.id} in {block.id}\")\n\n if markdown_contents is not None:\n # Manually set the title property to bypass the `markdown_to_notion` in `notion-py`\n # This is because it chokes up on URLs and really we just don't need this 'cause\n # we're parsing the markdown ourselves.\n if child_block.get([\"properties\", \"title\"]) != markdown_contents:\n child_block.set([\"properties\", \"title\"], markdown_contents)\n\n touched_blocks.add(child_block.id)\n\n if isinstance(child_block, CollectionViewBlock):\n sync_collection_rows(child_block, collection_schema, collection_rows)\n\n if block_children:\n sync_markdown_blocks_to_block(block_children, child_block)\n elif len(child_block.get(child_block.child_list_key, [])) > 0:\n # If no children should exist but there are children attached to this block\n # (a list, etc) we should remove them as they're no longer needed!\n for c in child_block.children:\n c.remove()\n\n\n for c in block.children:\n if c.type != 'page' and c.id not in touched_blocks:\n logger.info(f\"Removing child block {c.id} from {block.id}\")\n c.remove()\n\n\ndef sync_file_to_block(filename, block, links : dict={}):\n logger.info(f\"Syncing {filename} to block {block.id}\")\n\n with open(filename) as markdown_fd:\n contents = markdown_fd.read()\n\n post = frontmatter.loads(contents)\n\n def resolve_link(target):\n try:\n parsed = urlparse(target)\n\n if parsed.scheme:\n return target\n 
except:\n pass\n\n target_path = os.path.realpath(os.path.join(os.path.dirname(filename), target))\n\n block = links.get(target_path)\n\n if not block:\n return target\n\n return block.get_browseable_url()\n\n markdown_blocks = convert(str(post), link_resolver=resolve_link)\n\n sync_markdown_blocks_to_block(markdown_blocks, block)\n\n\ndef create_page_structure(directory, root_block):\n touched_pages = set()\n\n files_to_pages = dict()\n\n index_path = os.path.realpath(os.path.join(directory, \"index.md\"))\n readme_path = os.path.realpath(os.path.join(directory, \"README.md\"))\n readme_lower_path = os.path.realpath(os.path.join(directory, \"README.md\"))\n\n # Do the index/readme first to ensure the correct sort order.\n if os.path.isfile(index_path):\n files_to_pages[index_path] = root_block\n elif os.path.isfile(readme_path):\n files_to_pages[readme_path] = root_block\n elif os.path.isfile(readme_lower_path):\n files_to_pages[readme_lower_path] = root_block\n\n for path in os.listdir(directory):\n if path.startswith('.'):\n # Skip any \"private\" files / directories\n continue\n\n if path.lower() == 'index.md' or path.lower() == 'readme.md':\n # Skip because we had a special case for this above.\n continue\n\n block = infer_block(root_block, path)\n\n if not block:\n continue\n\n full_path = os.path.realpath(os.path.join(directory, path))\n\n touched_pages.add(block.id)\n\n if os.path.isdir(full_path):\n files_to_pages.update(create_page_structure(full_path, block))\n elif os.path.splitext(full_path)[1].lower() == '.md':\n files_to_pages[full_path] = block\n\n return files_to_pages\n\n\ndef sync_directory_to_block(directory, root_block):\n # Do Two Passes: First, create blocks for all files that need them\n # Keep track of absolute file path -> block\n logger.info(\"Creating page structure..\")\n files_to_pages = create_page_structure(os.path.realpath(directory), root_block)\n\n touched_pages = set(block.id for block in files_to_pages.values())\n\n # Then, for iterate through every single page block created and:\n for full_path, block in files_to_pages.items():\n # Lock it\n if not block.get(['format', 'block_locked'], default=False):\n block.set(['format', 'block_locked'], True)\n\n if block.icon is None:\n block.icon = random_emoji()\n\n # Sync it.\n sync_file_to_block(full_path, block, links=files_to_pages)\n\n # Sort it.\n move_pages_to_end(block)\n\n # Clean it.\n for child in block.children:\n # Any children that are pages under block but aren't in touched_pages should be pruned\n if child.type == 'page' and child.id not in touched_pages:\n child.remove()\n\n # Technologic.\n\ndef main():\n import sys\n logger.addHandler(logging.StreamHandler(sys.stdout))\n logger.setLevel(logging.INFO)\n\n parser = ArgumentParser()\n\n parser.add_argument('--notion-token', type=str, default=os.environ.get('NOTION_TOKEN'))\n parser.add_argument('docs_path', type=str)\n parser.add_argument('notion_url', type=str)\n\n args = parser.parse_args()\n\n token = args.notion_token\n root_url = args.notion_url\n docs_path = args.docs_path\n\n # add row to notion collection and add a text block with link to the new card\n client = NotionClient(token_v2=token)\n root_block = client.get_block(root_url)\n\n sync_directory_to_block(docs_path, root_block)\n","repo_name":"imnotjames/notion-docs-sync","sub_path":"notion_docs_sync/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10424,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"79"} 
+{"seq_id":"15201200233","text":"import sys\nfrom pathlib import Path\nfrom typing import Dict, Any, List\n\nimport numpy as np\nimport pyrallis\nimport torch\nfrom tqdm import tqdm\n\nsys.path.append(\".\")\nsys.path.append(\"..\")\n\nfrom inversion.options.train_options import TrainOptions\nfrom inversion.video.generate_videos import generate_reconstruction_videos\nfrom prepare_data.landmarks_handler import LandmarksHandler\nfrom inversion.video.post_processing import postprocess_and_smooth_inversions\nfrom inversion.video.video_config import VideoConfig\nfrom inversion.video.video_editor import InterFaceGANVideoEditor, StyleCLIPVideoEditor\nfrom inversion.video.video_handler import VideoHandler\nfrom utils.common import tensor2im\nfrom utils.inference_utils import get_average_image, run_on_batch, load_encoder, IMAGE_TRANSFORMS\n\n\n@pyrallis.wrap()\ndef run_inference_on_video(video_opts: VideoConfig):\n # prepare all the output paths\n video_opts.output_path.mkdir(exist_ok=True, parents=True)\n\n # parse video\n video_handler = VideoHandler(video_path=video_opts.video_path,\n output_path=video_opts.output_path,\n raw_frames_path=video_opts.raw_frames_path,\n aligned_frames_path=video_opts.aligned_frames_path,\n cropped_frames_path=video_opts.cropped_frames_path)\n video_handler.parse_video()\n\n aligned_paths, cropped_paths = video_handler.get_input_paths()\n input_images = video_handler.load_images(aligned_paths)\n cropped_images = video_handler.load_images(cropped_paths)\n if video_opts.max_images is not None:\n aligned_paths = aligned_paths[:video_opts.max_images]\n input_images = input_images[:video_opts.max_images]\n cropped_images = cropped_images[:video_opts.max_images]\n\n # load pretrained encoder\n net, opts = load_encoder(video_opts.checkpoint_path, test_opts=video_opts, generator_path=video_opts.generator_path)\n\n # loads/computes landmarks transforms for the video frames\n landmarks_handler = LandmarksHandler(output_path=video_opts.output_path,\n landmarks_transforms_path=video_opts.landmarks_transforms_path)\n video_opts.landmarks_transforms_path = landmarks_handler.landmarks_transforms_path\n landmarks_transforms = landmarks_handler.get_landmarks_transforms(input_paths=aligned_paths,\n cropped_frames_path=video_handler.cropped_frames_path,\n aligned_frames_path=video_handler.aligned_frames_path)\n\n # run inference\n results = run_inference(input_paths=aligned_paths,\n input_images=input_images,\n landmarks_transforms=landmarks_transforms,\n net=net,\n opts=opts)\n\n # save inverted latents (can be used for editing, pti, etc)\n results_latents_path = opts.output_path / \"latents.npy\"\n np.save(results_latents_path, np.array(results[\"result_latents\"]))\n\n result_images = [np.array(tensor2im(im)) for im in results[\"result_images\"]]\n result_latents = np.array(list(results[\"result_latents\"].values()))\n landmarks_transforms = np.array(list(results[\"landmarks_transforms\"]))\n\n result_images_smoothed = postprocess_and_smooth_inversions(results, net, video_opts)\n\n # get video reconstruction\n generate_reconstruction_videos(input_images=cropped_images,\n result_images=result_images,\n result_images_smoothed=result_images_smoothed,\n video_handler=video_handler,\n opts=video_opts)\n\n if opts.interfacegan_directions is not None:\n editor = InterFaceGANVideoEditor(generator=net.decoder, opts=video_opts)\n for interfacegan_edit in video_opts.interfacegan_edits:\n edit_images_start, edit_images_end, edit_latents_start, edit_latents_end = editor.edit(\n 
edit_direction=interfacegan_edit.direction,\n start=interfacegan_edit.start,\n end=interfacegan_edit.end,\n result_latents=result_latents,\n landmarks_transforms=landmarks_transforms\n )\n edited_images_start_smoothed = editor.postprocess_and_smooth_edits(results, edit_latents_start, video_opts)\n edited_images_end_smoothed = editor.postprocess_and_smooth_edits(results, edit_latents_end, video_opts)\n editor.generate_edited_video(input_images=cropped_images,\n result_images_smoothed=result_images_smoothed,\n edited_images_smoothed=edited_images_start_smoothed,\n video_handler=video_handler,\n save_name=f\"edited_video_{interfacegan_edit.direction}_start\")\n editor.generate_edited_video(input_images=cropped_images,\n result_images_smoothed=result_images_smoothed,\n edited_images_smoothed=edited_images_end_smoothed,\n video_handler=video_handler,\n save_name=f\"edited_video_{interfacegan_edit.direction}_end\")\n\n if opts.styleclip_directions is not None:\n editor = StyleCLIPVideoEditor(generator=net.decoder, opts=video_opts)\n for styleclip_edit in video_opts.styleclip_edits:\n edited_images, edited_latents = editor.edit(edit_direction=styleclip_edit.target_text,\n alpha=styleclip_edit.alpha,\n beta=styleclip_edit.beta,\n result_latents=result_latents,\n landmarks_transforms=landmarks_transforms)\n edited_images_smoothed = editor.postprocess_and_smooth_edits(results, edited_latents, video_opts)\n editor.generate_edited_video(input_images=cropped_images,\n result_images_smoothed=result_images_smoothed,\n edited_images_smoothed=edited_images_smoothed,\n video_handler=video_handler,\n save_name=styleclip_edit.save_name)\n\n\ndef run_inference(input_paths: List[Path], input_images: List, landmarks_transforms: Dict[str, Any], net,\n opts: TrainOptions):\n results = {\"source_images\": [], \"result_images\": [], \"result_latents\": {}, \"landmarks_transforms\": []}\n with torch.no_grad():\n avg_image = get_average_image(net)\n # run inference one frame at a time (technically can be run in batches, but done for simplicity)\n for input_image, input_path in tqdm(zip(input_images, input_paths)):\n results[\"source_images\"].append(input_image)\n image_name = input_path.name\n if landmarks_transforms is not None:\n if image_name not in landmarks_transforms:\n continue\n image_landmarks_transform = torch.from_numpy(landmarks_transforms[image_name][-1]).cuda()\n else:\n image_landmarks_transform = None\n with torch.no_grad():\n transformed_image = IMAGE_TRANSFORMS(input_image)\n result_batch, latents = run_on_batch(inputs=transformed_image.unsqueeze(0).cuda(),\n net=net,\n opts=opts,\n avg_image=avg_image,\n landmarks_transform=image_landmarks_transform)\n # we'll save the last inversion and latent code\n results[\"result_images\"].append(result_batch[0][-1])\n results[\"result_latents\"][image_name] = latents[0][-1]\n results[\"landmarks_transforms\"].append(image_landmarks_transform)\n return results\n\n\nif __name__ == '__main__':\n run_inference_on_video()\n","repo_name":"yuval-alaluf/stylegan3-editing","sub_path":"inversion/video/inference_on_video.py","file_name":"inference_on_video.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","stars":622,"dataset":"github-code","pt":"79"} +{"seq_id":"28683065973","text":"class Dog:\n\n # Class object attribute\n species = 'mammal'\n\n def __init__(self, breed, name, has_spots):\n self.breed = breed\n self.name = name\n self.has_spots = has_spots\n\n def bark(self):\n print(\"WOOF !!!\")\n\n\nmy_dog = 
Dog(breed='Lab', name='Dock', has_spots=True)\nprint(\"type(my_object): \", type(my_dog))\nprint(\"type(my_object): \", my_dog.breed)\nmy_dog.bark()\n\n\nclass Circle:\n\n pi = 3.14\n\n def __init__(self, radius=10):\n self.radius = radius\n self.area = Circle.pi * radius ** 2\n\n def circumference(self):\n return 2 * Circle.pi * self.radius\n\n\ncircle = Circle(5)\nprint(\"circle.area: \", circle.area)\nprint(\"circle.circumference: \", circle.circumference())\n\n\n","repo_name":"thbaymet/python-intro","sub_path":"alphabet/aap_classes.py","file_name":"aap_classes.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"40825276743","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 21 23:12:48 2022\n\n@author: gyzdm\n\"\"\"\n\nimport yfinance as yf\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport os\n\n\nclass StockRadar:\n def __init__(self, watch_list, backtrack_output, data_input, start_date):\n self.watch_list = watch_list\n self.data = None\n self.sma = None\n self.backtrack_list = []\n self.transactions = []\n self.backtrack_output = backtrack_output\n self.data_input = data_input\n self.start_date = start_date\n self.__load_data()\n print(os.getcwd())\n \n def __load_data(self):\n watch_list_string = \" \".join(self.watch_list)\n if self.data_input:\n if os.path.exists(self.data_input):\n self.data = pd.read_pickle(self.data_input)\n else:\n self.data = yf.download(watch_list_string, start=self.start_date)\n self.data.to_pickle(self.data_input)\n else:\n self.data = yf.download(watch_list_string, start=self.start_date)\n #self.data.to_csv(self.backtrack_output+'data.csv') \n return\n \n def getMovingAverage(self):\n self.sma_window_sizes = [5,10,20,30,50,100,200]\n self.sma_tokens = [\"SMA{}\".format(window_size) for window_size in self.sma_window_sizes]\n #self.sma = self.data.loc[:,([\"Close\"],self.watch_list)]\n columns = pd.MultiIndex.from_product([self.sma_tokens, self.watch_list], names=['sma_type','token'])\n self.sma = pd.DataFrame(columns = columns)\n for window_size in self.sma_window_sizes:\n sma_token = \"SMA{}\".format(window_size)\n for stock_token in self.watch_list:\n stock_close_prices = self.data[\"Close\"][stock_token].to_frame()\n sma_df = stock_close_prices[stock_token].rolling(window_size).mean()\n self.sma.loc[:,(sma_token,stock_token)] = sma_df\n #self.sma.dropna(inplace=True)\n #self.sma.loc[:,(slice(None),['SPY'])].plot()\n #plt.show()\n print(\"{0} Moving Average Calculation Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return self.sma\n \n def checkSMACrossing(self):\n if not self.sma:\n self.getMovingAverage()\n stock_crossing_tag = []\n data_365day = self.data[\"Close\"]\n data_30day = data_365day[-30:-1]\n for stock_token in self.watch_list:\n close_today = self.data[\"Close\"][stock_token][-1]\n close_yesterday = self.data[\"Close\"][stock_token][-2]\n high_today = self.data[\"High\"][stock_token][-1]\n low_today = self.data[\"Low\"][stock_token][-1]\n change = close_today/close_yesterday-1\n data_365day = self.data[\"Close\"][stock_token]\n data_30day = data_365day[-30:-1]\n rank365 = data_365day.rank(pct=True)\n rank30 = data_30day.rank(pct=True)\n for sma_token in self.sma_tokens:\n sma_today = self.sma[sma_token][stock_token][-1]\n sma_yesterday = self.sma[sma_token][stock_token][-2]\n if close_today > sma_today and close_yesterday < sma_yesterday:\n 
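# Close moved from below this SMA to above it since yesterday: an upward cross.\n                    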
stock_crossing_tag.append(\"{0} Up Crossing {1} change:{2:+.1%} rank30:{3:.1%} rank365:{4:.1%}\".format(stock_token,sma_token, change,rank30[-1],rank365[-1])) \n elif close_today < sma_today and close_yesterday > sma_yesterday:\n stock_crossing_tag.append(\"{0} Down Crossing {1} change:{2:+.1%} rank30:{3:.1%} rank365:{4:.1%}\".format(stock_token,sma_token, change,rank30[-1],rank365[-1]))\n elif high_today > sma_today and close_yesterday < sma_yesterday:\n stock_crossing_tag.append(\"{} Failed Up Crossing {}\".format(stock_token,sma_token))\n elif low_today < sma_today and close_yesterday > sma_yesterday:\n stock_crossing_tag.append(\"{} Failed Down Crossing {}\".format(stock_token,sma_token))\n print(\"{0} SMA Crossing Checking Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return stock_crossing_tag\n \n def backtrack_sma(self):\n if self.sma is None:\n self.getMovingAverage()\n print(\"{0} Start SMA Backtracking...\".format(datetime.now().strftime(\"%H:%M:%S\")))\n for stock_token in self.watch_list:\n close_prices = self.data[\"Close\"][stock_token]\n initial_balance = 10000\n # SMA strategy\n for window_size in self.sma_window_sizes:\n sma_token = \"SMA{}\".format(window_size)\n print(\"Working on {1} Backtracking {0}\".format(stock_token,sma_token))\n shares = 0\n balance = 0\n next_year = True\n for row in range(close_prices.shape[0]):\n sma_today = self.sma[sma_token][stock_token][row]\n sma_yesterday = self.sma[sma_token][stock_token][row-1]\n if pd.isna(sma_today) or pd.isna(sma_yesterday):\n continue\n close_today = close_prices[row]\n close_yesterday = close_prices[row-1]\n if next_year:\n year = close_prices.index[row].year\n next_year = False\n if window_size>=100 and close_today > sma_today:\n shares_to_buy = initial_balance/close_prices[row]\n shares += shares_to_buy\n balance = 0\n total_asset = shares*close_prices[row]+balance\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'buy',shares_to_buy,close_today,\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")]) \n else:\n balance += initial_balance\n year_start_asset = shares*close_prices[row] + balance\n if close_today > sma_today and close_yesterday < sma_yesterday:\n if balance > 0:\n shares_to_buy = balance/close_today\n shares += shares_to_buy\n total_asset = shares*close_today\n balance = 0\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'buy',shares_to_buy,close_today,\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")]) \n elif close_today < sma_today and close_yesterday > sma_yesterday:\n if shares>0:\n shares_to_sell = shares\n balance_credits = shares*close_today\n shares =0\n balance+=balance_credits\n total_asset = balance\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'sell',shares_to_sell,close_today,\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")]) \n if row == close_prices.shape[0]-1 or close_prices.index[row+1].year>year:\n total_asset = shares*close_prices[row]+balance\n performance = total_asset/year_start_asset - 1\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'hold',shares,close_prices[row],\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")])\n self.backtrack_list.append([stock_token,sma_token,year,performance]) \n next_year = True\n print(\"{0} Moving Average Backtrack Calculation Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return\n \n def backtrack_all_in(self):\n 
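# All-in baseline: invest the whole stake at the first close of each year and record that year's return.\n        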
sma_token = 'All_In'\n initial_balance = 10000\n for stock_token in self.watch_list:\n print(\"Working on {1} Backtracking {0}\".format(stock_token,sma_token))\n close_prices = self.data[\"Close\"][stock_token]\n shares = 0\n balance = initial_balance\n for row in range(close_prices.shape[0]):\n if pd.isna(close_prices[row]):\n continue\n if shares == 0:\n shares = balance/close_prices[row]\n year = close_prices.index[row].year\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'buy',shares,close_prices[row],\n 0,balance,close_prices.index[row].strftime(\"%Y-%m-%d\")])\n balance = 0\n if row == close_prices.shape[0]-1 or close_prices.index[row+1].year>year:\n total_asset = shares*close_prices[row]+balance\n performance = total_asset/initial_balance - 1\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'hold',shares,close_prices[row],\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")])\n self.backtrack_list.append([stock_token,sma_token,year,performance]) \n shares = 0\n balance = initial_balance\n print(\"{0} All In Backtrack Calculation Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return\n \n def backtrack_automatic(self):\n sma_tokens = [('Automatic_Daily',0),('Automatic_Monthly',12),('Automatic_Biweekly',24)]\n initial_balance = 10000\n for sma_token,frequency in sma_tokens:\n for stock_token in self.watch_list:\n print(\"Working on {1} Backtracking {0}\".format(stock_token,sma_token))\n close_prices = self.data[\"Close\"][stock_token]\n shares = 0\n next_year = True\n for row in range(close_prices.shape[0]):\n if pd.isna(close_prices[row]):\n continue\n if next_year:\n year = close_prices.index[row].year\n next_year = False\n ndays = len(close_prices[close_prices.index.year == year])\n n_interval = ndays if frequency == 0 else frequency\n period = ndays//n_interval\n balance = initial_balance\n periodic_invest_fund = initial_balance/n_interval\n year_start_asset = shares*close_prices[row] + balance\n if row % period == 0 and balance*1.1>=periodic_invest_fund:\n new_shares=periodic_invest_fund/close_prices[row]\n shares+=new_shares\n balance-=periodic_invest_fund\n total_asset = shares*close_prices[row] + balance\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'buy',new_shares,close_prices[row],\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")])\n if row == close_prices.shape[0]-1 or close_prices.index[row+1].year>year:\n total_asset = shares*close_prices[row] + balance\n performance = total_asset/year_start_asset - 1\n self.transactions.append([sma_token,stock_token,close_prices.index[row].year,'hold',new_shares,close_prices[row],\n balance,total_asset,close_prices.index[row].strftime(\"%Y-%m-%d\")])\n self.backtrack_list.append([stock_token,sma_token,year,performance]) \n next_year = True\n print(\"{0} Automatic Backtrack Calculation Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return\n \n def backtrack(self):\n # Automatic Strategy\n self.backtrack_automatic()\n # SMA strategy\n self.backtrack_sma()\n # All in strategy\n self.backtrack_all_in()\n backtrack_df = pd.DataFrame(data=self.backtrack_list,columns = ['Stock','Strategy','Year','Performance'])\n backtrack_df.to_csv(self.backtrack_output+'performance.csv')\n transaction_df = pd.DataFrame(data=self.transactions,columns = ['Strategy','Stock','Year','Transaction','Shares','Price',\n 'Balance','Total Asset','Date'])\n 
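# persist the per-year performance table and the full transaction log as CSV files\n 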
transaction_df.to_csv(self.backtrack_output+'transaction.csv')\n print(\"{0} Backtrack results writing Completed\".format(datetime.now().strftime(\"%H:%M:%S\")))\n return\n\ndef main_old():\n data = yf.download(\"SPY AAPL\", start=\"2017-01-01\", end=\"2017-04-30\")\n apple = data[\"Close\"][\"AAPL\"]\n msft = yf.Ticker(\"MSFT\")\n # get stock info\n info = msft.info\n \n # get historical market data\n hist = msft.history(period=\"max\")\n \n # show actions (dividends, splits)\n actions = msft.actions\n \n # show sustainability\n sustainability = msft.sustainability\n \n # show analysts recommendations\n recommendations = msft.recommendations\n \n # show news\n news = msft.news\n \n a = 0\n return\n\ndef main():\n #watch_list=[\"SPY\",\"AAPL\"]\n watch_list = [\"AAPL\",\"ADBE\",\"AMD\",\"AMZN\",\"ARKK\",\"ATVI\",\"BABA\",\"BIDU\",\"BILI\",\n \"CRM\",\"DIDIY\",\"DIS\",\"DOCU\",\"EA\",\"EDU\",\"ENPH\",\"FDX\",\"GILD\",\n \"GOOG\",\"HUYA\",\"IAU\",\"JD\",\"JNJ\",\"MA\",\"META\",\"MSFT\",\"MU\",\"NFLX\",\n \"NIO\",\"NTES\",\"NVDA\",\"PARA\",\"PDD\",\"PFSI\",\"PINS\",\"PYPL\",\"QQQ\",\n \"SNAP\",\"SPY\",\"T\",\"TAL\",\"TCEHY\",\"TME\",\"TSLA\",\"TWLO\",\"U\",\"UBER\",\n \"V\",\"VRTX\",\"VXX\",\"VZ\",\"WMT\",\"ZM\"]\n sr = StockRadar(watch_list,r\"C:\\\\Dropbox\\\\Share for Gary\\\\Investment\\\\\",\".\\\\data\\\\data2000.pkl\",\"2000-01-01\")\n sr.backtrack()\n #sma = sr.checkSMACrossing()\n \n \n \n return\n\n\nif __name__ == '__main__':\n main()","repo_name":"gyzdmgqy/stock","sub_path":"Stock.py","file_name":"Stock.py","file_ext":"py","file_size_in_byte":14445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"29000286814","text":"import sys\nimport numpy as np\n\ndef parse_data():\n \"\"\"Parse data into a 2-D numpy array for vector calculations\"\"\"\n return np.array([_make_array(line.strip()) for line in sys.stdin.readlines()])\n\n\ndef gamma_rate(vector_data):\n \"\"\"Return gamma rate as binary string (rounding up 0.5 to 1)\"\"\"\n length, _ = vector_data.shape\n gamma_vector = vector_data.sum(axis=0) / length\n\n return ''.join([str(int(i)) for i in np.rint(np.nextafter(gamma_vector, gamma_vector + 1))])\n\n\ndef epsilon_rate(binary_string):\n \"\"\"Return corresponding epsilon rate as binary string (bit-wise complement)\"\"\"\n return ''.join([str(int(not int(i))) for i in binary_string])\n\n\ndef multiply_gamma_epsilon(gamma, epsilon):\n \"\"\"Return decimal product of binary strings gamma and epsilon\"\"\"\n return int(gamma, 2) * int(epsilon, 2)\n\n\ndef oxygen_rate(vector_data):\n position = 0\n while vector_data.shape[0] > 1:\n criterion = gamma_rate(vector_data)[position]\n\n # delete rows from data where bit is not matching criterion\n rows_to_delete = np.where(vector_data[:, position] != int(criterion))[0]\n vector_data = np.delete(vector_data, rows_to_delete, axis=0)\n position += 1\n return ''.join([str(int(i)) for i in vector_data[0]])\n\n\ndef co2_rate(vector_data):\n position = 0\n while vector_data.shape[0] > 1:\n criterion = gamma_rate(vector_data)[position]\n\n # delete rows from data where bit is not matching co2 criterion\n # i.e. 
where it is matching ox criterion\n rows_to_delete = np.where(vector_data[:, position] == int(criterion))[0]\n vector_data = np.delete(vector_data, rows_to_delete, axis=0)\n position += 1\n return ''.join([str(int(i)) for i in vector_data[0]])\n\n\ndef life_support_rating(vector_data):\n \"\"\"Return decimal product of oxygen_rate and co2_rate binary strings\"\"\"\n ox = oxygen_rate(vector_data)\n co2 = co2_rate(vector_data)\n return int(ox, 2) * int(co2, 2)\n\n\ndef _make_array(string_input):\n return np.array([int(i) for i in string_input])\n\n\nif __name__ == '__main__':\n\n data = parse_data()\n\n # Part 1\n gamma = gamma_rate(data)\n epsilon = epsilon_rate(gamma)\n\n solution = multiply_gamma_epsilon(gamma, epsilon)\n print(solution)\n\n # Part 2\n life_support = life_support_rating(data)\n print(life_support)\n","repo_name":"annplaube/aoc_2021","sub_path":"3/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23966842299","text":"from django import forms\nfrom filer.models.filemodels import File, Folder\n\n\nclass FileForm(forms.ModelForm):\n class Meta:\n fields = ('name', 'file')\n model = File\n\n def __init__(self, *args, **kwargs):\n self.folder_name = kwargs.pop(\"folder_name\", \"Temp\")\n super(FileForm, self).__init__(*args, **kwargs)\n self.fields['name'].required = True\n self.fields['file'].required = True\n\n def save(self, commit=True):\n object = super(FileForm, self).save(commit=False)\n folder, created = Folder.objects.get_or_create(name=self.folder_name)\n object.folder = folder\n object.save()\n return object\n","repo_name":"SmallsLIVE/smallslive","sub_path":"smallslive/oscar_apps/dashboard/files/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"16972444090","text":"from typing import Dict, List, Union\n\nfrom cachetools import cached, TTLCache\n\nfrom app.chain import ChainBase\nfrom app.core.config import settings\nfrom app.core.context import TorrentInfo, Context, MediaInfo\nfrom app.core.metainfo import MetaInfo\nfrom app.db import SessionFactory\nfrom app.db.systemconfig_oper import SystemConfigOper\nfrom app.helper.sites import SitesHelper\nfrom app.log import logger\nfrom app.schemas import Notification\nfrom app.schemas.types import SystemConfigKey, MessageChannel\nfrom app.utils.singleton import Singleton\nfrom app.utils.string import StringUtils\n\n\nclass TorrentsChain(ChainBase, metaclass=Singleton):\n \"\"\"\n 站点首页种子处理链,服务于订阅、刷流等\n \"\"\"\n\n _cache_file = \"__torrents_cache__\"\n\n def __init__(self):\n self._db = SessionFactory()\n super().__init__(self._db)\n self.siteshelper = SitesHelper()\n self.systemconfig = SystemConfigOper()\n\n def remote_refresh(self, channel: MessageChannel, userid: Union[str, int] = None):\n \"\"\"\n 远程刷新订阅,发送消息\n \"\"\"\n self.post_message(Notification(channel=channel,\n title=f\"开始刷新种子 ...\", userid=userid))\n self.refresh()\n self.post_message(Notification(channel=channel,\n title=f\"种子刷新完成!\", userid=userid))\n\n def get_torrents(self) -> Dict[str, List[Context]]:\n \"\"\"\n 获取当前缓存的种子\n \"\"\"\n # 读取缓存\n return self.load_cache(self._cache_file) or {}\n\n @cached(cache=TTLCache(maxsize=128, ttl=600))\n def browse(self, domain: str) -> List[TorrentInfo]:\n \"\"\"\n 浏览站点首页内容,返回种子清单,TTL缓存10分钟\n :param domain: 站点域名\n \"\"\"\n logger.info(f'开始获取站点 {domain} 最新种子 
...')\n site = self.siteshelper.get_indexer(domain)\n if not site:\n logger.error(f'站点 {domain} 不存在!')\n return []\n return self.refresh_torrents(site=site)\n\n def refresh(self) -> Dict[str, List[Context]]:\n \"\"\"\n 刷新站点最新资源,识别并缓存起来\n \"\"\"\n\n # 读取缓存\n torrents_cache = self.get_torrents()\n\n # 所有站点索引\n indexers = self.siteshelper.get_indexers()\n # 配置的Rss站点\n config_indexers = [str(sid) for sid in self.systemconfig.get(SystemConfigKey.RssSites) or []]\n # 遍历站点缓存资源\n for indexer in indexers:\n # 未开启的站点不搜索\n if config_indexers and str(indexer.get(\"id\")) not in config_indexers:\n continue\n domain = StringUtils.get_url_domain(indexer.get(\"domain\"))\n torrents: List[TorrentInfo] = self.browse(domain=domain)\n # 按pubdate降序排列\n torrents.sort(key=lambda x: x.pubdate or '', reverse=True)\n # 取前N条\n torrents = torrents[:settings.CACHE_CONF.get('refresh')]\n if torrents:\n # 过滤出没有处理过的种子\n torrents = [torrent for torrent in torrents\n if f'{torrent.title}{torrent.description}'\n not in [f'{t.torrent_info.title}{t.torrent_info.description}'\n for t in torrents_cache.get(domain) or []]]\n if torrents:\n logger.info(f'{indexer.get(\"name\")} 有 {len(torrents)} 个新种子')\n else:\n logger.info(f'{indexer.get(\"name\")} 没有新种子')\n continue\n for torrent in torrents:\n logger.info(f'处理资源:{torrent.title} ...')\n # 识别\n meta = MetaInfo(title=torrent.title, subtitle=torrent.description)\n # 识别媒体信息\n mediainfo: MediaInfo = self.recognize_media(meta=meta)\n if not mediainfo:\n logger.warn(f'未识别到媒体信息,标题:{torrent.title}')\n # 存储空的媒体信息\n mediainfo = MediaInfo()\n # 清理多余数据\n mediainfo.clear()\n # 上下文\n context = Context(meta_info=meta, media_info=mediainfo, torrent_info=torrent)\n # 添加到缓存\n if not torrents_cache.get(domain):\n torrents_cache[domain] = [context]\n else:\n torrents_cache[domain].append(context)\n # 如果超过了限制条数则移除掉前面的\n if len(torrents_cache[domain]) > settings.CACHE_CONF.get('torrents'):\n torrents_cache[domain] = torrents_cache[domain][-settings.CACHE_CONF.get('torrents'):]\n # 回收资源\n del torrents\n else:\n logger.info(f'{indexer.get(\"name\")} 没有获取到种子')\n # 保存缓存到本地\n self.save_cache(torrents_cache, self._cache_file)\n # 返回\n return torrents_cache\n","repo_name":"2xx8/MoviePilot","sub_path":"app/chain/torrents.py","file_name":"torrents.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"24430622414","text":"import textwrap\n\nempty_char = '_'\nx_char = 'X'\no_char = 'O'\nnumber_of_spaces = 9\nwin_count_dict = {x_char: 0, o_char: 0}\ngrid_string = number_of_spaces * empty_char\nmove_count = 0\n\n\ndef print_grid():\n grid = [list(row) for row in textwrap.wrap(grid_string, 3)]\n\n print('---------')\n for row in grid:\n row_string = ' '.join(row)\n print(f\"| {row_string} |\")\n print('---------')\n\n\ndef grid_filled():\n return True if grid_string.count(empty_char) == 0 else False\n\n\ndef number_of_turns(player_char):\n return grid_string.count(player_char)\n\n\ndef count_wins():\n top = grid_string[0:3]\n middle = grid_string[3:6]\n bottom = grid_string[6:9]\n left = grid_string[0::3]\n center = grid_string[1::3]\n right = grid_string[2::3]\n diagonal_1_to_9 = grid_string[0::4]\n diagonal_7_to_3 = grid_string[2:8:2]\n\n for char in list(win_count_dict):\n win_count_dict[char] = [top, middle, bottom, left, center, right,\n diagonal_1_to_9, diagonal_7_to_3\n ].count(char * 3)\n\n\ndef should_the_game_continue():\n count_wins()\n if (win_count_dict[x_char] > 0 and win_count_dict[o_char] > 0) or\\\n 
(abs(number_of_turns(x_char) - number_of_turns(o_char)) >= 2):\n state = 'Impossible'\n elif grid_filled() and win_count_dict[x_char] == 0 and win_count_dict[o_char] == 0:\n state = 'Draw'\n elif win_count_dict[x_char] > 0:\n state = 'X wins'\n elif win_count_dict[o_char] > 0:\n state = 'O wins'\n else:\n # No End State has been triggered, the game should continue\n return True\n\n # An End State has been triggered, the game should NOT continue\n print(state)\n return False\n\n\ndef make_move(char):\n index = None\n\n while True:\n try:\n # Attempts to get the input and convert the string into integers\n coordinates = [int(string_input) for string_input in input().split(' ')]\n except ValueError:\n print('You should enter numbers!')\n continue\n\n # Validates Input is in the correct range\n if coordinates[0] < 1 or coordinates[0] > 3 or coordinates[1] < 1 or coordinates[1] > 3:\n print('Coordinates should be from 1 to 3!')\n continue\n\n # Converts the pass 2 integer input into the index of the grid_string\n index = (3 * (coordinates[0] - 1)) + (coordinates[1] - 1)\n\n if grid_string[index] != empty_char:\n print('This cell is occupied! Choose another one!')\n continue\n else:\n break\n\n grid_list = list(grid_string)\n grid_list[index] = char\n return ''.join(grid_list)\n\n\n# Print the empty grid and start the Game\nprint_grid()\n\nwhile should_the_game_continue():\n if move_count % 2 == 0:\n grid_string = make_move(x_char)\n else:\n grid_string = make_move(o_char)\n move_count += 1\n print_grid()\n\n","repo_name":"notdevinclark/Simple-Tic-Tac-Toe-Python","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23110908582","text":"from five import grok\n\nfrom zope.component import getUtility\nfrom zope import schema\nfrom zope.schema.interfaces import IContextSourceBinder\nfrom zope.schema.vocabulary import SimpleTerm, SimpleVocabulary\n\nfrom Products.SilvaMetadata.interfaces import IMetadataService\n\nfrom silva.core.interfaces import IAutoTOC\nfrom silva.core.views import views as silvaviews\nfrom silva.core.conf.interfaces import ITitledContent\nfrom silva.core.interfaces import IAddableContents, IPublishable\nfrom silva.translations import translate as _\nfrom zeam.form import silva as silvaforms\n\n\n@apply\ndef sort_order_source():\n orders = []\n for key, title in [\n ('silva', _(u'Silva folder order')),\n ('alpha', _(u'Alphabetically')),\n ('reversealpha', _(u'Reverse alphabetically')),\n ('chronmod', _(u'Chronologically by modification date')),\n ('rchronmod', _(u'Reverse chronologically by modification date'))]:\n orders.append(SimpleTerm(value=key, token=key, title=title))\n return SimpleVocabulary(orders)\n\n\n@grok.provider(IContextSourceBinder)\ndef silva_content_types(context):\n contents = []\n container = context.get_container()\n addables = IAddableContents(container)\n for addable in addables.get_container_addables(IPublishable):\n contents.append(SimpleTerm(\n value=addable,\n token=addable,\n title=addable))\n return SimpleVocabulary(contents)\n\n\nclass IAutoTOCSchema(ITitledContent):\n _local_types = schema.Set(\n title=_(u\"Types to list\"),\n description=_(\n u\"Select here the content types you wish to see in \"\n u\"the table of content. You need to selected container types \"\n u\"(e.g. 
Folder and Publication) in order for the TOC to \"\n u\"display their contents.\"),\n value_type=schema.Choice(source=silva_content_types),\n default=set(['Silva Document', 'Silva Folder', 'Silva Publication']),\n required=True)\n _toc_depth = schema.Int(\n title=_(u\"Depth\"),\n description=_(\n u\"The depth to which the Table of Contents will be rendered \"\n u\"(-1 means unlimited depth.)\"),\n default=-1,\n min=-1,\n max=99,\n required=True)\n _display_desc_flag = schema.Bool(\n title=_(u\"Display description\"),\n description=_(\n u\"If selected, each item displayed will include its title \"\n u\"and metadata description, if available. \"),\n default=False,\n required=True)\n _show_icon = schema.Bool(\n title=_(\"Show icon\"),\n description=_(\n u\"If selected, each item displayed will include its icon. \"),\n default=False,\n required=True)\n _show_container_link = schema.Bool(\n title=_(\"Show container link\"),\n description=_(\n u\"If selected, there will be a link to the container \"\n u\"(as an H3) before the TOC list.\"),\n default=False,\n required=True)\n _sort_order = schema.Choice(\n title=_(u\"Sort order\"),\n description=_(u\"The order items in a container will be sorted\"),\n source=sort_order_source,\n default='silva',\n required=True)\n\n\n@silvaforms.customize(name='_toc_depth', schema=IAutoTOCSchema)\ndef customize_toc_depth(field):\n field.htmlAttributes['style'] = 'width: 4em;'\n\n\nclass AutoTOCAddForm(silvaforms.SMIAddForm):\n \"\"\"Add an Auto TOC.\n \"\"\"\n grok.context(IAutoTOC)\n grok.name(u'Silva AutoTOC')\n\n fields = silvaforms.Fields(IAutoTOCSchema)\n\n\nclass AutoTOCEditForm(silvaforms.SMIEditForm):\n \"\"\"Add an Auto TOC.\n \"\"\"\n grok.context(IAutoTOC)\n\n fields = silvaforms.Fields(IAutoTOCSchema).omit('id')\n\n\nclass AutoTOCView(silvaviews.View):\n grok.context(IAutoTOC)\n\n def update(self):\n metadata = getUtility(IMetadataService)\n self.description = metadata.getMetadataValue(\n self.context, 'silva-extra', 'content_description', acquire=0)\n","repo_name":"silvacms/Products.Silva","sub_path":"Products/Silva/AutoTOC/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"16494065337","text":"import argparse\nimport yaml\nfrom typing import Dict, List\nimport numpy as np\nimport json\n\nfrom sklearn.gaussian_process import GaussianProcessRegressor, kernels\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.pipeline import Pipeline\n\nfrom proxystore.store import get_store\n\nimport lifecycle\n\n\ndef reprioritize_queue(training_data: List[List],\n pred_data: List[np.array],\n gpr: GaussianProcessRegressor,\n opt_delay: float = 0.5) -> np.ndarray:\n \"\"\"Determine an optimal order in which to excecute a task queue\n\n Args:\n database: Inputs and outputs of completed simulations\n gpr: Gaussian-process regression model\n queue: Existing task queue\n opt_delay: Minimum run time of this function\n Returns:\n Re-ordered priorities of queue\n \"\"\"\n # can be called via funcx so imports\n import time\n import numpy as np\n import scipy\n import datetime\n\n start = datetime.datetime.now(tz=datetime.timezone.utc).timestamp()\n time.sleep(opt_delay)\n\n # Update the GPR with the available training data\n train_X, train_y = zip(*training_data)\n gpr.fit(np.vstack(train_X), train_y)\n\n # Run GPR on the existing task queue\n pred_y, pred_std = gpr.predict(pred_data, return_std=True)\n best_so_far = 
np.min(train_y)\n # MB: FIXED\n # ei = (best_so_far - pred_y) / pred_std\n ei = (best_so_far - pred_y) * scipy.stats.norm(0, 1).cdf((best_so_far - pred_y) / pred_std) + pred_std * scipy.stats.norm(0, 1).pdf((best_so_far - pred_y) / pred_std)\n\n # Argument sort the EI score, ordered with largest tasks first\n end = datetime.datetime.now(tz=datetime.timezone.utc).timestamp()\n return start, end, np.argsort(-1 * ei)\n\n\ndef reprioritize_fx(fx, completed, pred_data, gpr):\n store = get_store('globus')\n gpr_proxy = store.proxy(gpr)\n ft = fx.submit(reprioritize_queue, completed, pred_data, gpr_proxy)\n return ft.result()\n\n\ndef reprioritize(task_queue, fx, database: Dict[int, List], output_file=None):\n completed = [x[1:] for x in filter(lambda x: x[2] is not None, database.values())]\n uncompleted = [x[:2] for x in filter(lambda x: x[2] is None, database.values())]\n if len(uncompleted) > 0:\n gpr = Pipeline([('scale', MinMaxScaler(feature_range=(-1, 1))),\n ('gpr', GaussianProcessRegressor(normalize_y=True, kernel=kernels.RBF() * kernels.ConstantKernel()))\n ])\n # x[1] is input array\n # start_t, end_t, new_order = reprioritize_queue(completed, [x[1] for x in uncompleted], gpr=gpr)\n start_t, end_t, new_order = reprioritize_fx(fx, completed, [x[1] for x in uncompleted], gpr=gpr)\n\n fts = []\n priorities = []\n max_priority = len(uncompleted)\n for i, idx in enumerate(new_order):\n ft = uncompleted[idx][0]\n priority = max_priority - i\n fts.append(ft)\n priorities.append(priority)\n\n if output_file is not None:\n with open(output_file, 'a') as f_out:\n f_out.write(f'R START: {start_t}\\n')\n f_out.write(f'R END: {end_t}\\n')\n for i, ft in enumerate(fts):\n f_out.write(f'P UPDATE: {ft.eq_task_id} {ft.priority} {priorities[i]}\\n')\n\n task_queue.update_priorities(fts, priorities)\n\n\ndef submit_initial_tasks(task_queue, exp_id, params: Dict):\n search_space_size = params['search_space_size']\n dim = params['sample_dimensions']\n sampled_space = np.random.uniform(size=(search_space_size, dim), low=-32.768, high=32.768)\n\n task_type = 0\n mean_rt = params['runtime']\n std_rt = params['runtime_var']\n\n payloads = []\n for sample in sampled_space:\n payload = json.dumps({'x': list(sample), 'mean_rt': mean_rt, 'std_rt': std_rt})\n payloads.append(payload)\n fts = task_queue.submit_tasks(exp_id, eq_type=task_type, payload=payloads)\n\n database = {}\n for i, ft in enumerate(fts):\n database[ft.eq_task_id] = [ft, sampled_space[i], None]\n\n return database\n\n\ndef run(exp_id, params: Dict):\n output_file = f'./output/{exp_id}_output.txt'\n # To avoid errors in finally\n task_queues = pools = dbs = fx_executors = {}\n try:\n fx_endpoints, db_names, pool_names = lifecycle.find_active_elements(params)\n repro_endpoint = params['reprioritize_endpoint']\n if repro_endpoint not in fx_endpoints:\n fx_endpoints.append(repro_endpoint)\n\n fx_executors = lifecycle.initialize_fx_endpoints(fx_endpoints, params)\n dbs = lifecycle.initialize_dbs(db_names, fx_executors, params)\n task_queues = lifecycle.initialize_task_queues(fx_executors, dbs, params)\n task_queue = task_queues['sim']\n database = submit_initial_tasks(task_queue, exp_id, params)\n # launch after submitting so pool has full data\n pools = lifecycle.initialize_worker_pools(exp_id, pool_names, fx_executors,\n dbs, params)\n lifecycle.initialize_proxystore(params)\n\n num_guesses = params['num_guesses']\n retrain_after = params['retrain_after']\n # next_retrain = retrain_after\n tasks_completed = 0\n fts = [v[0] for _, v in 
database.items()]\n print(f'NUM GUESSES: {num_guesses}')\n print(f'RETRAIN AFTER: {retrain_after}')\n print(f'FTS: {len(fts)}')\n num_repro = 0\n while tasks_completed < num_guesses:\n completed_fts = task_queue.pop_completed(fts, n=retrain_after)\n for ft in completed_fts:\n _, result = ft.result()\n database[ft.eq_task_id][2] = float(result)\n tasks_completed += 1\n\n print(f\"tasks completed: {tasks_completed}\")\n reprioritize(task_queue, fx_executors[repro_endpoint], database, output_file=output_file)\n num_repro += 1\n if num_repro == 2:\n # pool_names = 'bebop2', add 'bebop2' to params with params['tasks'][0]['pools'].append()\n params['tasks'][0]['pools'].append('bebop2')\n p = lifecycle.initialize_worker_pools(exp_id, ['bebop2'], fx_executors,\n dbs, params)\n pools.update(p)\n print(pools)\n elif num_repro == 4:\n params['tasks'][0]['pools'].append('bebop3')\n p = lifecycle.initialize_worker_pools(exp_id, ['bebop3'], fx_executors,\n dbs, params)\n pools.update(p)\n\n finally:\n for task_queue in task_queues.values():\n task_queue.shutdown()\n for db in dbs.values():\n db.shutdown()\n for pool in pools.values():\n pool.shutdown()\n for fx in fx_executors.values():\n fx.shutdown()\n\n\ndef create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('exp_id', help='experiment id')\n parser.add_argument('config_file', help=\"yaml format configuration file\")\n return parser\n\n\nif __name__ == '__main__':\n parser = create_parser()\n args = parser.parse_args()\n with open(args.config_file) as fin:\n params = yaml.safe_load(fin)\n\n # launch.launch_dbs(params)\n # launch.launch_worker_pools(args.exp_id, params)\n # launch.stop_dbs(params)\n\n run(args.exp_id, params)\n","repo_name":"NSF-RESUME/2023_ParSocial_OSPREY_example","sub_path":"python/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73177888254","text":"import os\r\nfrom views import designDrawSchemes, styles_and_animation\r\nfrom helpers import validate\r\nfrom creators import draw_schemes\r\nfrom os.path import isfile, join\r\nfrom PyQt5 import QtWidgets\r\nfrom PyQt5.QtGui import QIcon, QTextCursor\r\nfrom PyQt5.QtCore import QSize, QTimer, QThread, pyqtSignal\r\n\r\nclass DrawOne(QThread):\r\n change_value = pyqtSignal(str)\r\n def __init__(self, draw_params, gost_frame_params, many_schemes):\r\n super().__init__()\r\n self.draw_params = draw_params\r\n self.many_schemes = many_schemes\r\n self.gost_frame_params = gost_frame_params\r\n self.modules = 0\r\n self.chains = 0\r\n\r\n def run(self):\r\n fp_invertor = 'Data/Schemes/Invertor/'\r\n files_in_invertor = [f for f in os.listdir(fp_invertor) if isfile(join(fp_invertor, f))]\r\n try:\r\n if len(files_in_invertor) != 0:\r\n for file in files_in_invertor:\r\n os.remove(fp_invertor + f\"/{file}\")\r\n except PermissionError:\r\n self.statusBar.showMessage('Открыт pdf файл, закройте его и повторите попытку', 4000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n QTimer.singleShot(4000, lambda: self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n return 1 \r\n\r\n config_keys = [] \r\n for key in self.draw_params.keys():\r\n if 'inv_' in key:\r\n config_keys.append(key)\r\n\r\n numbr = 0\r\n for config in config_keys:\r\n counts = int(self.draw_params[config]['count'])\r\n if self.many_schemes == True:\r\n for num in range(counts):\r\n numbr += 1\r\n self.num_error = 
draw_schemes.draw(self.draw_params, numbr, self.gost_frame_params, config)\r\n if self.num_error['error'] != 0: return \r\n self.modules += self.num_error['modules']\r\n self.chains += self.num_error['chains']\r\n self.change_value.emit(f\"{numbr} из {self.draw_params['count_invertor']}\")\r\n else:\r\n start_num = numbr\r\n numbr += counts\r\n if counts > 1:\r\n if start_num == 0:\r\n nums = f\"{1}-{numbr}\"\r\n else:\r\n nums = f\"{start_num}-{numbr}\"\r\n else:\r\n nums = numbr\r\n \r\n self.num_error = draw_schemes.draw(self.draw_params, nums, self.gost_frame_params, config)\r\n if self.num_error['error'] != 0: return \r\n self.modules += self.num_error['modules'] * counts\r\n self.chains += self.num_error['chains'] * counts\r\n self.change_value.emit(f\"{numbr} из {self.draw_params['count_invertor']}\")\r\n\r\nclass WindowDraw(QtWidgets.QMainWindow, designDrawSchemes.Ui_WindowDrawSchemes):\r\n def __init__(self, instance_of_main_window):\r\n super().__init__()\r\n self.setupUi(self)\r\n self.input_data()\r\n validate.validate_number(self.fields_text)\r\n self.main_window = instance_of_main_window\r\n self.btnDraw.clicked.connect(self.draw)\r\n self.btnOpenScheme.clicked.connect(self.open_scheme)\r\n self.btnAddConfigInvertor.clicked.connect(self.add_invertor)\r\n self.btnDelConfigInvertor.clicked.connect(self.del_invertor)\r\n self.btnAddMPPT.clicked.connect(self.add_config)\r\n self.btnDelMPPT.clicked.connect(self.del_config)\r\n self.btnUpdateConsole.clicked.connect(self.update_console)\r\n self.btnSaveConfig.clicked.connect(self.save_config)\r\n self.checkUse_5or4_line.clicked.connect(self.show_and_hide_color_line_because_phase)\r\n self.inputCount_mppt.textChanged.connect(self.validate_input)\r\n self.inputAll_chain.textChanged.connect(self.validate_input)\r\n self.inputCount_input_mppt.textChanged.connect(self.validate_input)\r\n self.checkUse_three_phase.stateChanged.connect(self.show_and_hide_color_line_because_phase)\r\n self.checkUse_y_connector.stateChanged.connect(self.validate_input)\r\n self.checkUse_all_mppt.stateChanged.connect(self.validate_input)\r\n self.spinBox_maxY.valueChanged.connect(self.validate_input)\r\n self.spinBox_numInvertor.valueChanged.connect(self.up_down_invertor_selection)\r\n self.spinBoxConfigInvertor.valueChanged.connect(self.spin_config)\r\n self.spinBoxMPPT.valueChanged.connect(self.spin_config)\r\n self.checkDifferentMPPT.stateChanged.connect(self.show_and_hide_spinBox_mppt)\r\n\r\n def input_data(self):\r\n self.spinBox_numInvertor.setMinimum(1)\r\n self.spinBox_numInvertor.setEnabled(False)\r\n self.spinBoxConfigInvertor.setMinimum(1)\r\n self.spinBoxConfigInvertor.setMaximum(1)\r\n self.spinBoxMPPT.setMinimum(1)\r\n self.spinBoxMPPT.setMaximum(1)\r\n self.btnOpenScheme.hide()\r\n self.btnDelConfigInvertor.hide()\r\n self.btnDelMPPT.hide()\r\n self.progressBar.hide()\r\n self.spinBox_CloneInvertor.setMinimum(1)\r\n self.show_and_hide_color_line_because_phase()\r\n self.show_and_hide_spinBox_mppt()\r\n self.btnSaveConfig.setIcon(QIcon('Data/System/Icons/save.png'))\r\n self.btnSaveConfig.setIconSize(QSize(30, 30))\r\n self.draw_params = {}\r\n self.fields_text = [self.inputCount_mppt, self.inputCount_input_mppt, self.inputSolar_count_on_the_chain, self.inputAll_chain]\r\n\r\n def open_scheme(self):\r\n self.path_structural_schemes = [QtWidgets.QFileDialog.getOpenFileName(self, 'Выберите файл структурной схемы', \r\n 'Data/Schemes/Invertor', \"*.pdf\")[0]]\r\n if len(self.path_structural_schemes[0]) != 0:\r\n 
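# a non-empty path means a file was chosen; os.startfile opens it in the OS default PDF viewer (a Windows-only API)\r\n 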
os.startfile(self.path_structural_schemes[0])\r\n\r\n def reset(self):\r\n self.inputCount_mppt.clear()\r\n self.inputCount_input_mppt.clear()\r\n self.inputSolar_count_on_the_chain.clear()\r\n self.inputAll_chain.clear()\r\n self.checkUse_y_connector.setCheckState(0)\r\n self.checkUse_all_mppt.setCheckState(0)\r\n self.checkUse_three_phase.setCheckState(0)\r\n self.checkUse_5or4_line.setCheckState(0)\r\n self.checkUse_5or4_line.setEnabled(False)\r\n self.textConsoleDraw.clear()\r\n self.textConsoleCurrent.clear()\r\n self.spinBox_numInvertor.setValue(1)\r\n self.spinBox_numInvertor.setEnabled(False)\r\n self.spinBoxConfigInvertor.setMinimum(1)\r\n self.spinBoxConfigInvertor.setMaximum(1)\r\n self.spinBox_CloneInvertor.setValue(1)\r\n self.btnOpenScheme.hide()\r\n self.inputName_invertor.clear()\r\n self.inputNumber_invertor.clear()\r\n self.inputTitle_grid_line.clear()\r\n self.inputTitle_grid_line_length.clear()\r\n self.inputTitle_grid_top.clear()\r\n self.inputTitle_grid_switch.clear()\r\n self.inputCountAllInvertors.clear()\r\n\r\n def invertor_and_config_keys(self):\r\n invertors = self.main_window.invertors\r\n self.spinBox_numInvertor.setMaximum(len(invertors))\r\n self.spinBox_numInvertor.setEnabled(True)\r\n\r\n spinbox_val = self.spinBox_numInvertor.value() - 1\r\n self.invertor = invertors[f'found_invertor_{spinbox_val}']\r\n\r\n self.config_keys = []\r\n for key in self.invertor.keys():\r\n if 'inv_' in key:\r\n self.config_keys.append(key) \r\n self.spinBoxConfigInvertor.setMaximum(len(self.config_keys))\r\n\r\n def up_down_invertor_selection(self):\r\n self.invertor_and_config_keys()\r\n if self.invertor['broken_file'] != True:\r\n self.inputName_invertor.setText(f'{self.invertor[\"module\"]}')\r\n self.inputName_invertor.setCursorPosition(0)\r\n self.inputCount_mppt.setText(f'{self.invertor[\"mppt\"]}')\r\n self.inputCountMpptOnParams.setText(f'{self.invertor[\"mppt\"]}')\r\n self.inputCount_input_mppt.setText(f'{self.invertor[\"inputs\"]}')\r\n self.inputSolar_count_on_the_chain.setText(str(0))\r\n self.inputAll_chain.setText(str(0))\r\n self.spinBox_maxY.setMinimum(self.invertor['inputs'])\r\n self.spinBox_maxY.setMaximum(self.invertor['inputs'] * 2)\r\n self.spinBox_maxY.setValue(self.invertor['inputs'] * 2)\r\n if self.invertor['phase'] == 3:\r\n self.checkUse_three_phase.setCheckState(2)\r\n elif self.invertor['phase'] == 1:\r\n self.checkUse_three_phase.setCheckState(0)\r\n self.inputNumber_invertor.setText(f\"{self.invertor['type_inv']}\")\r\n self.inputTitle_grid_line.setText(f\"{self.invertor['title_grid_line']}\")\r\n self.inputTitle_grid_line_length.setText(f\"{self.invertor['title_grid_line_length']}\")\r\n self.inputTitle_grid_top.setText(f\"{self.invertor['title_grid_top']}\")\r\n self.inputTitle_grid_switch.setText(f\"{self.invertor['title_grid_switch']}\")\r\n self.checkUse_5or4_line.setCheckState(2 if self.invertor['use_5or4_line'] == True else 0) \r\n self.inputCountAllInvertors.setText(f\"{int(self.invertor['count_invertor'])}\")\r\n self.spin_config()\r\n self.draw_invertor_config_in_console()\r\n # self.show_and_hide_different_mppt(False)\r\n\r\n def spin_config(self):\r\n if len(self.config_keys) != 0:\r\n if len(self.config_keys) > 1:\r\n self.btnDelConfigInvertor.show()\r\n else:\r\n self.btnDelConfigInvertor.hide()\r\n self.spinBoxConfigInvertor.show()\r\n\r\n config_index = self.spinBoxConfigInvertor.value() - 1\r\n # print(invertor[config_keys[config_index]])\r\n count_params = len(self.invertor[self.config_keys[config_index]]['params'])\r\n 
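# default to the first MPPT parameter set; the MPPT spin box below selects between sets when several are configured\r\n 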
diff_index = 0\r\n self.checkDifferentMPPT.setCheckState(0)\r\n if count_params > 1:\r\n self.btnDelMPPT.show()\r\n self.spinBoxMPPT.show()\r\n self.spinBoxMPPT.setMaximum(count_params)\r\n diff_index = self.spinBoxMPPT.value() - 1\r\n self.checkDifferentMPPT.setCheckState(2)\r\n else:\r\n self.btnDelMPPT.hide()\r\n self.spinBoxMPPT.setMaximum(1)\r\n\r\n max_y = self.invertor[self.config_keys[config_index]]['params'][diff_index]['max_y']\r\n self.spinBox_maxY.setValue(max_y)\r\n self.spinBox_CloneInvertor.setValue(int(self.invertor[self.config_keys[config_index]]['count']))\r\n self.inputSolar_count_on_the_chain.setText(str(self.invertor[self.config_keys[config_index]]['params'][diff_index]['pvs']))\r\n self.inputCount_mppt.setText(str(self.invertor[self.config_keys[config_index]]['params'][diff_index]['mppts']))\r\n self.inputAll_chain.setText(str(int(self.invertor[self.config_keys[config_index]]['params'][diff_index]['chains'])))\r\n self.checkUse_y_connector.setCheckState(2 if self.invertor[self.config_keys[config_index]]['params'][diff_index]['y'] == True else 0)\r\n self.validate_input()\r\n \r\n def draw_invertor_config_in_console(self):\r\n self.textConsoleDraw.clear()\r\n self.textConsoleDraw.moveCursor(QTextCursor.Start)\r\n total_pvs = 0\r\n for index in range(len(self.config_keys)):\r\n count_inv = int(self.invertor[self.config_keys[index]]['count'])\r\n self.textConsoleDraw.append(f\" {index + 1} КОНФИГУРАЦИЯ {count_inv} ИНВ. \")\r\n count_params = len(self.invertor[self.config_keys[index]]['params'])\r\n pvs_on_conf = 0\r\n for i in range(count_params):\r\n pvs = self.invertor[self.config_keys[index]]['params'][i]['pvs']\r\n chains = self.invertor[self.config_keys[index]]['params'][i]['chains']\r\n y = '| Y' if self.invertor[self.config_keys[index]]['params'][i]['y'] == True else ''\r\n self.textConsoleDraw.append(f\" {self.invertor[self.config_keys[index]]['params'][i]['mppts']} MPPT | {chains} цеп. 
| {pvs} ФЭМ {y} \")\r\n pvs_on_conf += pvs * chains\r\n total_pvs += pvs_on_conf * count_inv\r\n self.textConsoleDraw.append(f\" ИТОГО\")\r\n self.textConsoleDraw.append(f\" {int(self.invertor['count_invertor'])} Инверторов\")\r\n self.textConsoleDraw.append(f\" {int(total_pvs)} ФЭМ\")\r\n\r\n def show_and_hide_color_line_because_phase(self):\r\n if self.checkUse_three_phase.isChecked():\r\n self.checkUse_5or4_line.setEnabled(True)\r\n else:\r\n self.checkUse_5or4_line.setEnabled(False)\r\n self.checkUse_5or4_line.setCheckState(0)\r\n\r\n def show_and_hide_spinBox_mppt(self):\r\n if self.checkDifferentMPPT.isChecked():\r\n self.spinBoxMPPT.setEnabled(True)\r\n self.btnAddMPPT.setEnabled(True)\r\n self.btnDelMPPT.setEnabled(True)\r\n else:\r\n self.spinBoxMPPT.setEnabled(False)\r\n self.btnAddMPPT.setEnabled(False)\r\n self.btnDelMPPT.setEnabled(False)\r\n\r\n def validate_input(self): #валидация вводимых данных\r\n false_value = ['Н/Д', '']\r\n self.opacity_effect = QtWidgets.QGraphicsOpacityEffect()\r\n self.opacity_effect.setOpacity(0.6)\r\n config_index = self.spinBoxConfigInvertor.value() - 1\r\n diff_index = self.spinBoxMPPT.value() - 1\r\n use_all_mppt = True if self.checkUse_all_mppt.isChecked() else False\r\n use_y_connector = True if self.checkUse_y_connector.isChecked() else False \r\n \r\n if not self.inputCount_mppt.text() in false_value and not self.inputCount_input_mppt.text() in false_value:\r\n count_input_mppt = int(self.inputCount_input_mppt.text())\r\n self.count_mppt = int(self.inputCount_mppt.text()) \r\n self.textConsoleCurrent.clear() \r\n max_y = self.spinBox_maxY.value() \r\n max_input = count_input_mppt * self.count_mppt\r\n max_input_y = max_y * self.count_mppt\r\n self.textConsoleCurrent.append(f\"Макс. кол-во входов без Y коннектора: {max_input}\")\r\n self.textConsoleCurrent.append(f\"Макс. 
кол-во входов c Y коннектором: {max_input_y}\")\r\n total_mppts = 0\r\n if len(self.config_keys) != 0:\r\n count_params = len(self.invertor[self.config_keys[config_index]]['params'])\r\n for i in range(count_params):\r\n total_mppts += self.invertor[self.config_keys[config_index]]['params'][i]['mppts']\r\n total_mppts -= self.invertor[self.config_keys[config_index]]['params'][diff_index]['mppts']\r\n total_mppts += self.count_mppt\r\n \r\n if not self.inputAll_chain.text() in false_value:\r\n self.all_chain = int(self.inputAll_chain.text())\r\n if self.all_chain < self.count_mppt and use_all_mppt == True:\r\n # self.textConsoleCurrent.append(\"\")\r\n self.textConsoleCurrent.append(\"ПРЕДУПРЕЖДЕНИЕ:\")\r\n self.textConsoleCurrent.append(\"Невозможно распределить по всем MPPT\")\r\n self.textConsoleCurrent.append(\"РЕШЕНИЕ: Увеличьте кол-во цепочек\")\r\n self.btnDraw.setEnabled(False)\r\n self.btnSaveConfig.setEnabled(False)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect)\r\n elif self.all_chain > max_input and use_y_connector == False:\r\n # self.textConsoleCurrent.append(\"\")\r\n self.textConsoleCurrent.append(\"ПРЕДУПРЕЖДЕНИЕ:\")\r\n self.textConsoleCurrent.append(\"Кол-во цепочек не вмещается\")\r\n self.textConsoleCurrent.append(\"РЕШЕНИЕ: примените Y коннекторы / уменьшите кол-во цепочек\")\r\n self.btnDraw.setEnabled(False)\r\n self.btnSaveConfig.setEnabled(False)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect)\r\n elif self.all_chain <= max_input and use_y_connector == True and use_all_mppt == True:\r\n # self.textConsoleCurrent.append(\"\")\r\n self.textConsoleCurrent.append(\"ПРЕДУПРЕЖДЕНИЕ:\")\r\n self.textConsoleCurrent.append(\"Кол-во цепочек слишком мало, чтобы распределить по всем MPPT с Y коннекторами\")\r\n self.textConsoleCurrent.append(\"РЕШЕНИЕ: уберите Y коннекторы\")\r\n self.btnDraw.setEnabled(False)\r\n self.btnSaveConfig.setEnabled(False)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect)\r\n elif self.all_chain > max_input_y:\r\n # self.textConsoleCurrent.append(\"\")\r\n self.textConsoleCurrent.append(\"ПРЕДУПРЕЖДЕНИЕ:\")\r\n self.textConsoleCurrent.append(\"Кол-во цепочек слишком большое для данной конфигурации\")\r\n self.textConsoleCurrent.append(\"РЕШЕНИЕ: уменьшите кол-во цепочек / измените конфигурацию\")\r\n self.btnDraw.setEnabled(False)\r\n self.btnSaveConfig.setEnabled(False)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect)\r\n elif total_mppts > self.invertor[\"mppt\"]:\r\n self.textConsoleCurrent.append(\"ПРЕДУПРЕЖДЕНИЕ:\")\r\n self.textConsoleCurrent.append(\"Кол-во MPPT выходит за рамки параметров инвертора\")\r\n self.btnDraw.setEnabled(False)\r\n self.btnSaveConfig.setEnabled(False)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect)\r\n else:\r\n self.btnDraw.setEnabled(True)\r\n self.btnSaveConfig.setEnabled(True)\r\n self.btnDraw.setGraphicsEffect(self.opacity_effect.setOpacity(1))\r\n return 0\r\n else:\r\n self.textConsoleCurrent.clear() \r\n \r\n def check_imput_params(self):\r\n self.set_style_default()\r\n if self.inputCount_mppt.text() == '':\r\n styles_and_animation.no_fill_field(self, self.inputCount_mppt)\r\n return 1\r\n elif self.inputCount_input_mppt.text() == '':\r\n styles_and_animation.no_fill_field(self, self.inputCount_input_mppt)\r\n return 1\r\n elif self.inputSolar_count_on_the_chain.text() == '':\r\n styles_and_animation.no_fill_field(self, self.inputSolar_count_on_the_chain)\r\n return 1\r\n elif self.inputAll_chain.text() == '':\r\n styles_and_animation.no_fill_field(self, 
self.inputAll_chain)\r\n return 1\r\n else:\r\n return 0\r\n\r\n def set_style_default(self):\r\n self.inputCount_mppt.setStyleSheet(styles_and_animation.default_style_input)\r\n self.inputCount_input_mppt.setStyleSheet(styles_and_animation.default_style_input)\r\n self.inputSolar_count_on_the_chain.setStyleSheet(styles_and_animation.default_style_input)\r\n self.inputAll_chain.setStyleSheet(styles_and_animation.default_style_input)\r\n\r\n self.statusBar.setStyleSheet(styles_and_animation.status_white)\r\n self.statusBar.showMessage('', 100)\r\n\r\n def show_and_hide_different_mppt(self, status):\r\n if status == True:\r\n self.spinBoxConfigInvertor.show()\r\n if self.spinBoxConfigInvertor.value() > 1:\r\n self.btnDelConfigInvertor.show()\r\n else:\r\n self.btnDelConfigInvertor.hide()\r\n else:\r\n self.spinBoxConfigInvertor.hide()\r\n self.btnDelConfigInvertor.hide()\r\n\r\n def update_console(self):\r\n self.textConsoleDraw.clear()\r\n\r\n def update_total_count_invertors(self):\r\n count_invertor = 0\r\n for key in self.config_keys:\r\n count_invertor += int(self.invertor[key]['count'])\r\n self.invertor['count_invertor'] = int(count_invertor)\r\n self.main_window.w4.up_down_invertor_selection()\r\n self.inputCountAllInvertors.setText(f\"{int(self.invertor['count_invertor'])}\")\r\n\r\n def save_config(self):\r\n if self.check_imput_params() != 0:\r\n return 1\r\n config_index = self.spinBoxConfigInvertor.value() - 1\r\n diff_index = self.spinBoxMPPT.value() - 1\r\n\r\n self.invertor['module'] = str(self.inputName_invertor.text())\r\n self.invertor['type_inv'] = str(self.inputNumber_invertor.text())\r\n self.invertor['title_grid_line'] = str(self.inputTitle_grid_line.text())\r\n self.invertor['title_grid_line_length'] = str(self.inputTitle_grid_line_length.text())\r\n self.invertor['title_grid_top'] = str(self.inputTitle_grid_top.text())\r\n self.invertor['title_grid_switch'] = str(self.inputTitle_grid_switch.text())\r\n self.invertor['phase'] = 3 if self.checkUse_three_phase.isChecked() else 1\r\n self.invertor['use_5or4_line'] = True if self.checkUse_5or4_line.isChecked() else False\r\n self.invertor['inputs'] = int(self.inputCount_input_mppt.text())\r\n \r\n if not self.config_keys:\r\n self.add_invertor()\r\n else:\r\n self.invertor[self.config_keys[config_index]]['count'] = self.spinBox_CloneInvertor.value()\r\n self.invertor[self.config_keys[config_index]]['params'][diff_index]['mppts'] = int(self.inputCount_mppt.text())\r\n self.invertor[self.config_keys[config_index]]['params'][diff_index]['chains'] = int(self.inputAll_chain.text())\r\n self.invertor[self.config_keys[config_index]]['params'][diff_index]['pvs'] = int(self.inputSolar_count_on_the_chain.text())\r\n self.invertor[self.config_keys[config_index]]['params'][diff_index]['y'] = True if self.checkUse_y_connector.isChecked() else False\r\n self.invertor[self.config_keys[config_index]]['params'][diff_index]['max_y'] = self.spinBox_maxY.value()\r\n \r\n total_chains = 0\r\n count_params = len(self.invertor[self.config_keys[config_index]]['params'])\r\n for i in range(count_params):\r\n total_chains += self.invertor[self.config_keys[config_index]]['params'][i]['chains']\r\n self.invertor[self.config_keys[config_index]]['total_chains'] = int(total_chains)\r\n self.update_total_count_invertors()\r\n \r\n self.main_window.w6.up_down_invertor_selection()\r\n self.up_down_invertor_selection()\r\n self.statusBar.showMessage('Параметры сохранены', 2000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_green)\r\n 
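# flash the green 'saved' status for 2 s, then restore the default white style\r\n 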
QTimer.singleShot(2000, lambda: self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n\r\n def add_invertor(self):\r\n if self.check_imput_params() != 0:\r\n return 1\r\n mppts = int(self.inputCount_mppt.text())\r\n count_inv = int(self.spinBox_CloneInvertor.value())\r\n pvs = int(self.inputSolar_count_on_the_chain.text())\r\n chains = int(self.inputAll_chain.text())\r\n y_connector = True if self.checkUse_y_connector.isChecked() else False\r\n max_y = self.spinBox_maxY.value()\r\n\r\n if not self.config_keys:\r\n name = 'inv_0'\r\n else:\r\n name = f'inv_{len(self.config_keys)}'\r\n\r\n self.invertor[name] = {'controller': False, 'commutator': False, 'left_yzip': False, 'right_yzip': False, \r\n 'title_other_device': 'УЗИП', 'count': count_inv, 'total_chains': chains, \r\n 'params': [{'mppts': mppts, 'chains': chains, 'pvs': pvs, 'count': 'piece', 'y': y_connector, 'max_y': max_y}]}\r\n\r\n self.invertor_and_config_keys()\r\n self.update_total_count_invertors()\r\n self.draw_invertor_config_in_console()\r\n self.spinBoxConfigInvertor.setValue(len(self.config_keys))\r\n\r\n def del_invertor(self):\r\n current_config_index = self.spinBoxConfigInvertor.value() - 1\r\n del self.invertor[self.config_keys[current_config_index]]\r\n self.invertor_and_config_keys()\r\n index = 0\r\n for key in self.config_keys:\r\n self.invertor[f'inv_{index}'] = self.invertor.pop(key)\r\n index += 1\r\n self.invertor_and_config_keys()\r\n self.update_total_count_invertors()\r\n self.draw_invertor_config_in_console()\r\n self.spin_config()\r\n\r\n def add_config(self):\r\n if self.check_imput_params() != 0:\r\n return 1\r\n current_config_index = self.spinBoxConfigInvertor.value() - 1\r\n mppts = int(self.inputCount_mppt.text())\r\n pvs = int(self.inputSolar_count_on_the_chain.text())\r\n chains = int(self.inputAll_chain.text())\r\n y_connector = True if self.checkUse_y_connector.isChecked() else False\r\n max_y = self.spinBox_maxY.value()\r\n\r\n current_params = self.invertor[self.config_keys[current_config_index]]['params']\r\n current_params.append({'mppts': mppts, 'chains': chains, 'pvs': pvs, 'count': 'piece', 'y': y_connector, 'max_y': max_y})\r\n self.draw_invertor_config_in_console()\r\n self.spin_config()\r\n self.spinBoxMPPT.setValue(len(current_params))\r\n\r\n def del_config(self):\r\n current_config_index = self.spinBoxConfigInvertor.value() - 1\r\n diff_index = self.spinBoxMPPT.value() - 1\r\n del self.invertor[self.config_keys[current_config_index]]['params'][diff_index]\r\n self.draw_invertor_config_in_console()\r\n self.spin_config()\r\n\r\n def out_params(self):\r\n title_project = self.main_window.inputTitleProject.text()\r\n code_project = self.main_window.inputCodeProject.text() \r\n self.many_schemes = True if self.checkManySchemes.isChecked() else False\r\n \r\n self.gost_frame_params = {'title_project': title_project, 'code_project': code_project}\r\n \r\n def draw(self):\r\n try:\r\n fp_invertors = \"Data/Schemes/Invertor\"\r\n files_invertors = [f for f in os.listdir(fp_invertors) if isfile(join(fp_invertors, f))]\r\n if len(files_invertors) != 0:\r\n for i in range(len(files_invertors)):\r\n with open(fp_invertors + f\"/{files_invertors[i]}\", 'w') as image_fd: \r\n pass\r\n except PermissionError:\r\n self.statusBar.showMessage('Открыт pdf файл схемы, перед построением закройте его и повторите попытку', 4000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n QTimer.singleShot(4000, lambda: 
self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n return\r\n \r\n if not self.config_keys:\r\n self.statusBar.showMessage('Сохраните параметры', 2000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_yellow)\r\n QTimer.singleShot(2000, lambda: self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n return \r\n \r\n\r\n for num in range(1, len(self.config_keys) + 1):\r\n self.spinBoxConfigInvertor.setValue(num)\r\n self.spin_config()\r\n if self.validate_input() != 0:\r\n self.statusBar.showMessage('Неверная конфигурация MPPT', 4000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_yellow)\r\n QTimer.singleShot(4000, lambda: self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n return\r\n \r\n self.out_params()\r\n\r\n self.btnOpenScheme.hide()\r\n self.btnDraw.setEnabled(False)\r\n self.btnDraw.setText(f\"Cоздано 0 из {self.invertor['count_invertor']}\")\r\n self.progressBar.show()\r\n self.progressBar.setMaximum(int(self.invertor['count_invertor']))\r\n self.progressBar.setValue(0)\r\n \r\n self.painter_draw_one = DrawOne(self.invertor, self.gost_frame_params, self.many_schemes)\r\n self.painter_draw_one.change_value.connect(self.setProgressVal)\r\n self.painter_draw_one.finished.connect(self.drawFinished)\r\n self.painter_draw_one.start()\r\n\r\n def setProgressVal(self, val):\r\n self.progressBar.setValue(int(val.split(' ')[0]))\r\n self.btnDraw.setText(f\"Cоздано {val}\")\r\n\r\n def drawFinished(self):\r\n if hasattr(self.painter_draw_one, 'num_error'):\r\n if self.painter_draw_one.num_error['error'] == 0:\r\n self.textConsoleDraw.append(\"----------------------------\")\r\n self.textConsoleDraw.append(\"РЕЗУЛЬТАТЫ:\")\r\n self.textConsoleDraw.append(f\" Всего цепочек: {self.painter_draw_one.chains}\")\r\n self.textConsoleDraw.append(f\" Всего модулей: {self.painter_draw_one.modules}\")\r\n self.statusBar.showMessage('Формирование схем завершено успешно', 6000)\r\n self.statusBar.setStyleSheet(styles_and_animation.status_green)\r\n QTimer.singleShot(6000, lambda: self.statusBar.setStyleSheet(styles_and_animation.status_white))\r\n self.btnOpenScheme.show()\r\n elif self.painter_draw_one.num_error['error'] == 1:\r\n self.textConsoleDraw.append(\"!!!\")\r\n self.textConsoleDraw.append(\"Кол-во цепочек меньше числа MPPT, невозможно заполнить все MPPT\")\r\n self.textConsoleDraw.append(\"---\")\r\n self.statusBar.showMessage(\"Внимание!\")\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n elif self.painter_draw_one.num_error['error'] == 3:\r\n self.textConsoleDraw.append(\"!!!\")\r\n self.textConsoleDraw.append(\"Данное количество цепочек не вмещается, примените Y коннекторы, либо измените конфигурацию MPPT\")\r\n self.textConsoleDraw.append(\"---\")\r\n self.statusBar.showMessage(\"Внимание!\")\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n elif self.painter_draw_one.num_error['error'] == 4:\r\n self.textConsoleDraw.append(\"!!!\")\r\n self.textConsoleDraw.append(\"Данное количество цепочек слишком мало, чтобы заполнить все MPPT, применяя Y коннекторы, уберите Y коннекторы или полное заполнение\")\r\n self.textConsoleDraw.append(\"---\")\r\n self.statusBar.showMessage(\"Внимание!\")\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n elif self.painter_draw_one.num_error['error'] == 5:\r\n self.textConsoleDraw.append(\"!!!\")\r\n self.textConsoleDraw.append(\"Слишком большое количество цепочек\")\r\n self.textConsoleDraw.append(\"---\")\r\n 
self.statusBar.showMessage(\"Внимание!\")\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n else:\r\n self.statusBar.showMessage(\"Внимание! При построении схемы возникла проблема\")\r\n self.statusBar.setStyleSheet(styles_and_animation.status_red)\r\n self.btnDraw.setEnabled(True)\r\n self.btnDraw.setText('Построить')\r\n self.progressBar.hide()\r\n del self.painter_draw_one\r\n \r\n ","repo_name":"Croud9/Larso","sub_path":"app/logic/logicUIOneScheme.py","file_name":"logicUIOneScheme.py","file_ext":"py","file_size_in_byte":32368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17611580182","text":"import pytest\n\n\nclass TestBackends(object):\n\n @pytest.fixture\n def simple_payload(self):\n return {'name': 'test.simple', 'metric': 'simple_payload', 'value': 1, 'host': 'test'}\n\n @pytest.fixture\n def structured_payload(self):\n return {'name': 'test.structured', 'metric': 'structured_payload', 'val0': 1, 'val2': 'str', 'val3': [1, 2],\n 'host': 'test', 'tags': ['tag1', 'tags2']}\n\n def test_base_backend_simple_payload(self, mocker, dummy_backend, simple_payload):\n mock_gethostname = mocker.patch('socket.gethostname')\n mock_gethostname.return_value = 'test'\n dummy_backend.report(name='test.simple', metric='simple_payload', value=1, tags=None)\n reported_data = dummy_backend.reported_data['test.simple']\n assert reported_data == simple_payload\n\n def test_base_backend_structured_payload(self, mocker, dummy_backend, structured_payload):\n mock_gethostname = mocker.patch('socket.gethostname')\n mock_gethostname.return_value = 'test'\n dummy_backend.report(name='test.structured', metric='structured_payload',\n value={'val0': 1, 'val2': 'str', 'val3': [1, 2]}, tags=['tag1', 'tags2'])\n reported_data = dummy_backend.reported_data['test.structured']\n assert reported_data == structured_payload\n","repo_name":"APSL/kaneda","sub_path":"tests/unit/test_backends.py","file_name":"test_backends.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"79"} +{"seq_id":"224641372","text":"\nfrom pyramid_beaker import session_factory_from_settings\nfrom pyramid.config import Configurator\n# from pyramid.session import UnencryptedCookieSessionFactoryConfig\n# my_session_factory = UnencryptedCookieSessionFactoryConfig('not-really-secret')\n\n\"\"\" The docs have a charming parallel to the way `apt-get remove perl` used to\n make you type out 'I know that what I am doing is wrong':\n\n > Note the very long, very explicit name for\n > UnencryptedCookieSessionFactoryConfig. It's trying to tell you that this\n > implementation is, by default, *unencrypted*. You should not use it when\n > you keep sensitive information in the session object, as the information\n > can be easily read by both users of your application and third parties\n > who have access to your users' network traffic. 
Use a different session\n > factory implementation (preferably one which keeps session data on the\n > server) for anything but the most basic of applications where \"session\n > security doesn't matter\".\n\"\"\"\n\nfrom sqlalchemy import engine_from_config\nfrom .models import DBSession\n\ndef main(global_config, **settings):\n \"\"\" This function returns a Pyramid WSGI application.\n \"\"\"\n engine = engine_from_config(settings, 'sqlalchemy.')\n DBSession.configure(bind=engine)\n session_factory = session_factory_from_settings(settings)\n config = Configurator(settings=settings)\n config.set_session_factory(session_factory)\n # config = Configurator(session_factory=my_session_factory, settings=settings)\n config.add_static_view('static', 'static', cache_max_age=3600)\n\n # \"Show me your deck list.\"\n config.add_route('give_deck', '/')\n # \"Did I parse your deck list correctly?\"\n config.add_route('check_deck', '/check')\n # \"Okay, I'm asking you questions about your deck.\"\n config.add_route('show_question', '/ask')\n # \"This is my answer to the question.\"\n config.add_route('check_answer', '/answer')\n # /answer should be POSTed to, and leads back to /ask with a flash message\n # telling you whether you were right or wrong.\n\n config.scan()\n return config.make_wsgi_app()\n","repo_name":"seanmcd/VexingArcanix","sub_path":"vexingarcanix/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"7957498194","text":"import sys\nsys.stdin = open('input.txt')\n\nn = int(input())\n\nfor k in range(1,n + 1):\n N = int(input())\n arr = list(map(int, input().split()))\n\n minv = maxv = arr[0]\n for i in range(N):\n if minv > arr[i]:\n minv = arr[i]\n if maxv < arr[i]:\n maxv = arr[i]\n\n result = maxv - minv\n print('#{} {}'.format(k, result))","repo_name":"ggpp0909/problem_solving","sub_path":"Python/SWEA/0810/4828_min_max/4828_min_max.py","file_name":"4828_min_max.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"26619464747","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.patches import Rectangle\r\nfrom matplotlib.widgets import Slider, Button\r\n\r\n# Width of rectangle:\r\nL = 2\r\n\r\ndef collision_data_nospin(N, x0, y0, alpha0):\r\n \r\n t = np.zeros(N) # times at collision\r\n x = np.zeros(N) # x-values at collision\r\n y = np.zeros(N) # y-values at collision\r\n alpha = np.zeros(N) # alpha values at collision\r\n theta1 = np.zeros(N) # theta1 values at collision\r\n theta2 = np.zeros(N) # theta2 values at collision\r\n theta3 = np.zeros(N) # theta3 values at collision\r\n theta4 = np.zeros(N) # theta4 values at collision\r\n \r\n # Initial values:\r\n t[0] = 0\r\n x[0] = x0\r\n y[0] = y0\r\n alpha[0] = alpha0\r\n theta1[0] = np.arctan2(1 - y[0], L - x[0])\r\n theta2[0] = np.arctan2(1 - y[0], -L - x[0])\r\n theta3[0] = np.arctan2(-1 - y[0], -L - x[0])\r\n theta4[0] = np.arctan2( -1 - y[0], L - x[0])\r\n\r\n # Update formula:\r\n for i in range(1, N):\r\n if (alpha[i - 1] - theta1[i - 1])%(2*np.pi) < (theta2[i - 1] - theta1[i - 1])%(2*np.pi):\r\n t[i] = (1 - y[i - 1])/np.sin(alpha[i - 1])\r\n x[i] = x[i - 1] + t[i]*np.cos(alpha[i - 1])\r\n y[i] = 1\r\n alpha[i] = -alpha[i - 1]\r\n if (alpha[i - 1] - theta2[i - 1])%(2*np.pi) < (theta3[i - 1] - theta2[i - 1])%(2*np.pi):\r\n t[i] = (-L - x[i - 1])/np.cos(alpha[i - 
1])\r\n x[i] = -L\r\n y[i] = y[i - 1] + t[i]*np.sin(alpha[i - 1])\r\n alpha[i] = np.pi - alpha[i - 1]\r\n if (alpha[i - 1] - theta3[i - 1])%(2*np.pi) < (theta4[i - 1] - theta3[i - 1])%(2*np.pi):\r\n t[i] = (-1 - y[i - 1])/np.sin(alpha[i - 1])\r\n x[i] = x[i - 1] + t[i]*np.cos(alpha[i - 1])\r\n y[i] = -1\r\n alpha[i] = -alpha[i - 1]\r\n if (alpha[i - 1] - theta4[i - 1])%(2*np.pi) < (theta1[i - 1] - theta4[i - 1])%(2*np.pi):\r\n t[i] = (L - x[i - 1])/np.cos(alpha[i - 1])\r\n x[i] = L\r\n y[i] = y[i - 1] + t[i]*np.sin(alpha[i - 1])\r\n alpha[i] = np.pi - alpha[i - 1]\r\n theta1[i] = np.arctan2(1 - y[i], L - x[i])\r\n theta2[i] = np.arctan2(1 - y[i], -L - x[i])\r\n theta3[i] = np.arctan2(-1 - y[i], -L - x[i])\r\n theta4[i] = np.arctan2( -1 - y[i], L - x[i])\r\n \r\n return x, y, alpha\r\n\r\ndef collision_data(N, x0, y0, alpha0, u0, MI_coeff):\r\n \r\n vx0 = np.cos(alpha0)\r\n vy0 = np.sin(alpha0)\r\n \r\n t = np.zeros(N) # times at collision\r\n x = np.zeros(N) # x-values at collision\r\n y = np.zeros(N) # y-values at collision\r\n vx = np.zeros(N) # x-component of veolcity at collision\r\n vy = np.zeros(N) # y-component of veolcity at collision\r\n u = np.zeros(N) # spin values at collision\r\n alpha = np.zeros(N) # alpha values at collision\r\n theta1 = np.zeros(N) # theta1 values at collision\r\n theta2 = np.zeros(N) # theta2 values at collision\r\n theta3 = np.zeros(N) # theta3 values at collision\r\n theta4 = np.zeros(N) # theta4 values at collision\r\n \r\n # Initial values:\r\n t[0] = 0\r\n x[0] = x0\r\n y[0] = y0\r\n vx[0] = vx0\r\n vy[0] = vy0\r\n u[0] = u0\r\n alpha[0] = alpha0\r\n theta1[0] = np.arctan2(1 - y[0], L - x[0])\r\n theta2[0] = np.arctan2(1 - y[0], -L - x[0])\r\n theta3[0] = np.arctan2(-1 - y[0], -L - x[0])\r\n theta4[0] = np.arctan2( -1 - y[0], L - x[0])\r\n\r\n # Update formula:\r\n for i in range(1, N):\r\n if (alpha[i - 1] - theta1[i - 1])%(2*np.pi) < (theta2[i - 1] - theta1[i - 1])%(2*np.pi):\r\n t[i] = (1 - y[i - 1])/vy[i - 1]\r\n x[i] = x[i - 1] + t[i]*vx[i - 1]\r\n y[i] = 1\r\n \r\n vT = -vx[i - 1]\r\n vn = -vy[i - 1]\r\n vparr = ((1 - MI_coeff)/(1 + MI_coeff))*vT - ((2*MI_coeff)/(1 + MI_coeff))*u[i - 1]\r\n vperp = -vn\r\n \r\n u[i] = -((1 - MI_coeff)/(1 + MI_coeff))*u[i - 1] - (2/(1 + MI_coeff))*vT\r\n vx[i] = -vparr\r\n vy[i] = -vperp\r\n alpha[i] = np.arctan2(vy[i],vx[i])\r\n \r\n if (alpha[i - 1] - theta2[i - 1])%(2*np.pi) < (theta3[i - 1] - theta2[i - 1])%(2*np.pi):\r\n t[i] = (-L - x[i - 1])/vx[i - 1]\r\n x[i] = -L\r\n y[i] = y[i - 1] + t[i]*vy[i - 1]\r\n \r\n vT = -vy[i - 1]\r\n vn = vx[i - 1]\r\n vparr = ((1 - MI_coeff)/(1 + MI_coeff))*vT - ((2*MI_coeff)/(1 + MI_coeff))*u[i - 1]\r\n vperp = -vn\r\n \r\n u[i] = -((1 - MI_coeff)/(1 + MI_coeff))*u[i - 1] - (2/(1 + MI_coeff))*vT\r\n vx[i] = vperp\r\n vy[i] = -vparr\r\n alpha[i] = np.arctan2(vy[i],vx[i])\r\n if (alpha[i - 1] - theta3[i - 1])%(2*np.pi) < (theta4[i - 1] - theta3[i - 1])%(2*np.pi):\r\n t[i] = (-1 - y[i - 1])/vy[i - 1]\r\n x[i] = x[i - 1] + t[i]*vx[i - 1]\r\n y[i] = -1\r\n \r\n vT = vx[i - 1]\r\n vn = vy[i - 1]\r\n vparr = ((1 - MI_coeff)/(1 + MI_coeff))*vT - ((2*MI_coeff)/(1 + MI_coeff))*u[i - 1]\r\n vperp = -vn\r\n \r\n u[i] = -((1 - MI_coeff)/(1 + MI_coeff))*u[i - 1] - (2/(1 + MI_coeff))*vT\r\n vx[i] = vparr\r\n vy[i] = vperp\r\n alpha[i] = np.arctan2(vy[i],vx[i])\r\n \r\n if (alpha[i - 1] - theta4[i - 1])%(2*np.pi) < (theta1[i - 1] - theta4[i - 1])%(2*np.pi):\r\n t[i] = (L - x[i - 1])/vx[i - 1]\r\n x[i] = L\r\n y[i] = y[i - 1] + t[i]*vy[i - 1]\r\n vT = vy[i - 1]\r\n vn = -vx[i - 1]\r\n vparr 
= ((1 - MI_coeff)/(1 + MI_coeff))*vT - ((2*MI_coeff)/(1 + MI_coeff))*u[i - 1]\r\n vperp = -vn\r\n \r\n u[i] = -((1 - MI_coeff)/(1 + MI_coeff))*u[i - 1] - (2/(1 + MI_coeff))*vT\r\n vx[i] = -vperp\r\n vy[i] = vparr\r\n alpha[i] = np.arctan2(vy[i],vx[i])\r\n theta1[i] = np.arctan2(1 - y[i], L - x[i])\r\n theta2[i] = np.arctan2(1 - y[i], -L - x[i])\r\n theta3[i] = np.arctan2(-1 - y[i], -L - x[i])\r\n theta4[i] = np.arctan2( -1 - y[i], L - x[i])\r\n \r\n return x, y, alpha, vx, vy, u\r\n\r\n# Define initial parameters\r\ninit_MI_coeff = 1/2\r\ninit_x = 0\r\ninit_y = 0.25\r\ninit_theta = np.pi/4\r\ninit_u = 0\r\ninit_N = 50\r\n\r\nx_spin, y_spin, alpha_spin, vx_spin, vy_spin, u_spin = collision_data(init_N, init_x, init_y, init_theta, init_u, init_MI_coeff)\r\n\r\nfig, ax = plt.subplots()\r\nline, = ax.plot(x_spin, y_spin, lw=2, c='red')\r\n\r\n# May be uncommented to save collision data:\r\n#np.savetxt('rectangle_edges.txt',np.transpose(np.array([x_spin,y_spin,vx_spin,vy_spin,u_spin])))\r\n\r\nplt.gca().add_patch(Rectangle((-L,-1),2*L,2,\r\n edgecolor='black',\r\n facecolor='none'))\r\nax = plt.gca()\r\nax.set_aspect('equal', adjustable='box')\r\n\r\n# adjust the main plot to make room for the sliders\r\nfig.subplots_adjust(left=0.25, bottom=0.25)\r\n\r\naxMI_coeff = fig.add_axes([0.25, 0.1, 0.65, 0.03])\r\nMI_coeff_slider = Slider(\r\n ax=axMI_coeff,\r\n label='alpha',\r\n valmin=0,\r\n valmax=1,\r\n valinit=init_MI_coeff,\r\n)\r\n\r\nax_x = fig.add_axes([0.25, 0.2, 0.65, 0.03])\r\nx_slider = Slider(\r\n ax=ax_x,\r\n label='x0',\r\n valmin=-L,\r\n valmax=L,\r\n valinit=init_x,\r\n)\r\n\r\nax_y = fig.add_axes([0.25, 0.15, 0.65, 0.03])\r\ny_slider = Slider(\r\n ax=ax_y,\r\n label='y0',\r\n valmin=-1,\r\n valmax=1,\r\n valinit=init_y,\r\n)\r\n\r\nax_theta = fig.add_axes([0.25, 0.25, 0.65, 0.03])\r\ntheta_slider = Slider(\r\n ax=ax_theta,\r\n label='theta0',\r\n valmin=0,\r\n valmax=2*np.pi,\r\n valinit=init_theta,\r\n)\r\n\r\nax_u = fig.add_axes([0.05, 0.25, 0.0225, 0.63])\r\nu_slider = Slider(\r\n ax=ax_u,\r\n label=\"u\",\r\n valmin=0,\r\n valmax=10,\r\n valinit=init_u,\r\n orientation=\"vertical\"\r\n)\r\n\r\nax_N = fig.add_axes([0.1, 0.25, 0.0225, 0.63])\r\nN_slider = Slider(\r\n ax=ax_N,\r\n label=\"N\",\r\n valmin=1,\r\n valmax=100,\r\n valinit=init_N,\r\n orientation=\"vertical\",\r\n valfmt='%0.0f'\r\n)\r\n\r\n\r\n# The function to be called anytime a slider's value changes\r\ndef update(val):\r\n x_spin, y_spin, alpha_spin, vx_spin, vy_spin, u_spin = collision_data(int(N_slider.val), x_slider.val, y_slider.val, theta_slider.val, u_slider.val, MI_coeff_slider.val)\r\n line.set_xdata(x_spin)\r\n line.set_ydata(y_spin)\r\n fig.canvas.draw_idle()\r\n \r\nMI_coeff_slider.on_changed(update)\r\nx_slider.on_changed(update)\r\ny_slider.on_changed(update)\r\ntheta_slider.on_changed(update)\r\nu_slider.on_changed(update)\r\nN_slider.on_changed(update)\r\n\r\nresetax = fig.add_axes([0.8, 0.025, 0.1, 0.04])\r\nbutton = Button(resetax, 'Reset', hovercolor='0.975')\r\n\r\n\r\ndef reset(event):\r\n MI_coeff_slider.reset()\r\n x_slider.reset()\r\n y_slider.reset()\r\n theta_slider.reset()\r\n u_slider.reset()\r\n N_slider.reset()\r\nbutton.on_clicked(reset)\r\n\r\nplt.show()","repo_name":"WarlicTheWizard/billiards","sub_path":"rectangle with sliders.py","file_name":"rectangle with sliders.py","file_ext":"py","file_size_in_byte":8792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6412488724","text":"from sqlalchemy import (\n BigInteger,\n 
Boolean,\n Column,\n LargeBinary,\n Numeric,\n String,\n Integer,\n UnicodeText,\n)\nfrom Db import SESSION, Base\nimport os\n\n\nclass AutoReply(Base):\n __tablename__ = \"AutoReply\"\n id = Column(Integer, autoincrement=True, primary_key=True)\n text = Column(String)\n file_id = Column(String)\n msg_type = Column(String)\n msg_content = Column(String)\n\n def __init__(self, text, msg_type, msg_content, file_id, id=None):\n self.id = id\n self.msg_type = msg_type\n self.file_id = file_id\n self.text = text\n self.msg_content = msg_content\n\n\nAutoReply.__table__.create(checkfirst=True)\n\n\ndef getAutoReply(text):\n try:\n return SESSION.query(AutoReply).filter(AutoReply.text == text).one()\n except Exception:\n return None\n finally:\n SESSION.close()\n\n\ndef getAllAutoReply():\n try:\n return SESSION.query(AutoReply).all()\n except Exception:\n return None\n finally:\n SESSION.close()\n\n\ndef addAutoReply(text, msg_type, msg_content=\"\", file_id=\"\"):\n try:\n addRep = SESSION.query(AutoReply).filter(AutoReply.text == text).one()\n except Exception as e:\n addRep = None\n print(\"error : addAutoReply : %s\" % (e))\n\n if addRep:\n addRep.msg_type = msg_type\n addRep.msg_content = msg_content\n try:\n os.remove(addRep.file_id)\n except Exception as e:\n print(\"addAutoReply : %s\" % (e))\n addRep.file_id = file_id\n else:\n addRep = AutoReply(text, msg_type, msg_content, file_id)\n SESSION.add(addRep)\n SESSION.commit()\n\n\ndef remAutoReplySetting(text):\n try:\n remrep = SESSION.query(AutoReply).filter(AutoReply.text == text).one()\n if remrep:\n SESSION.delete(remrep)\n SESSION.commit()\n return True\n except Exception:\n return False\n\n","repo_name":"micodev/botShell","sub_path":"Db/autoReply_sql.py","file_name":"autoReply_sql.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"79"}
{"seq_id":"18918018871","text":"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.4.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# +\n# # !pip install xlrd\n# -\n\nimport xlrd\nimport numpy as np\nimport matplotlib.pylab as plt\n\n# # RT2012 weather data\n#\n# documentation about the data: https://www.rt-batiment.fr/batiments-neufs/reglementation-thermique-2012/donnees-meteorologiques.html\n\nfilename = './FichiersMeteo_RT2012.xls'\nweather_data = xlrd.open_workbook(filename)\n\nprint('sheets:', ', '.join(weather_data.sheet_names()))\n\n# +\ndescriptif = weather_data.sheet_by_name('Descriptif')\n\nprint('Descriptif')\nprint('==========')\nfor row in descriptif.get_rows():\n r = [r.value for r in row]\n print('\\t'.join(r))\n# -\n\nvilles = {'H1a': 'Trappes',\n 'H1b': 'Nancy',\n 'H1c': 'Macon',\n 'H2a': 'Rennes',\n 'H2b': 'La Rochelle',\n 'H2c': 'Agen',\n 'H2d': 'Carpentras',\n 'H3' : 'Nice'}\n\nk = 0\nzc = list(villes.keys())\n\n# +\nzone_climatique = 'H1c'\n#zone_climatique = zc[k]\n#print(k, zone_climatique)\n#k += 1\n\n# Reads columns\nvariables = ['Htsmd', 'te0', 'we0', 'dirN', 'diff', 'Teciel', 'Vent', 'Teau', 'Gamma', 'Psi']\ndatazone = weather_data.sheet_by_name(zone_climatique)\ndata = {var:np.array([cell.value for cell in datazone.col(c, start_rowx=1)])\n for c, var in enumerate(variables)}\n\nfig = plt.figure(figsize=(12, 10))\nnbr_graph = 3\n\n# === Temperature ===\nax1 = plt.subplot(nbr_graph, 1, 
1)\n\nT_ext_grid = data['te0'].reshape(-1, 24).T\nT_ciel_grid = data['Teciel'].reshape(-1, 24).T\nT_eau_grid = data['Teau'].reshape(-1, 24).T\n\nplt.axhline(y=0, linewidth=1, color='black');\nplt.axhline(y=20, linewidth=1, linestyle=':', color='black');\n\n# T_ext\nplt.plot(T_ext_grid.mean(axis=0), color='r', label='T° ext.')\nx = np.arange(T_ext_grid.shape[1])\nplt.fill_between(x, T_ext_grid.min(axis=0), T_ext_grid.max(axis=0), color='red', alpha=0.1);\n\n# T_ciel\n#plt.plot(T_ciel_grid.max(axis=0), color='skyblue', label='T° eau (1m sol)')\nplt.plot(T_ciel_grid.mean(axis=0), color='darkblue', label='T° rayonnement ciel', alpha=0.2)\n#plt.plot(T_ciel_grid.min(axis=0), color='skyblue', label='T° eau (1m sol)')\n\n# T_eau\nplt.plot(T_eau_grid.mean(axis=0), color='skyblue', label='T° eau (1m sol)')\n\nplt.xlim(0, T_ext_grid.shape[1]); #plt.title(\"Température extérieure (°C)\");\nplt.ylabel(\"Température extérieure (°C)\");\nplt.legend(); plt.xlabel(\"jour de l'année\");\nplt.title(\"Température extérieure (°C)\");\nplt.ylim((-10, 35))\n\n# === Vent ===\nax1 = plt.subplot(nbr_graph, 1, 2, sharex=ax1)\nvent_grid = data['Vent'].reshape(-1, 24).T\n\nplt.plot(vent_grid.mean(axis=0), color='cadetblue', label='vitesse vent moy.')\nx = np.arange(T_ext_grid.shape[1])\nplt.fill_between(x, vent_grid.min(axis=0), vent_grid.max(axis=0), color='cadetblue', alpha=0.1);\n\nplt.xlim(0, T_ext_grid.shape[1]); plt.title(\"vitesse moyenne du vent (m/s)\");\nplt.ylabel(\"vitesse vent (m/s)\"); plt.xlabel(\"jour de l'année\");\nplt.legend();\nplt.ylim((0, 15))\n\n# === Soleil ===\nax2 = plt.subplot(nbr_graph, 1, 3, sharex=ax1)\nax2.set_title(f'{zone_climatique} {villes[zone_climatique]}')\n\ndirN_grid = data['dirN'].reshape(-1, 24).T\ndiff_grid = data['diff'].reshape(-1, 24).T\nplt.plot(dirN_grid.sum(axis=0), color='darkorange', label='directe')\nplt.fill_between(x, np.zeros_like(x), dirN_grid.sum(axis=0)/24, color='darkorange', alpha=0.1);\n\nplt.plot(diff_grid.sum(axis=0), color='lightslategray', label='diffus')\n#plt.fill_between(x, np.zeros_like(x), diff_grid.sum(axis=0)/24, color='lightslategray', alpha=0.1);\n\nplt.legend();\nplt.xlim(0, T_ext_grid.shape[1]); plt.title(\"Energie solaire directe par jour (Wh/m2)\");\nplt.ylabel(\"Energie solaire directe par jour (Wh/m2)\");\nplt.ylim((0, 400*24))\n\n\n#plt.fill_between(x, np.zeros_like(x), dirN_grid.sum(axis=0)/24, color='darkorange', alpha=0.1);\n\n\n\nplt.xlabel(\"jour de l'année\");\nfig.suptitle(f'zone {zone_climatique} - {villes[zone_climatique]}', fontsize=16)\n\nplt.tight_layout(rect=(0, 0, 1, 0.97))\nfilename = f'{zone_climatique}_{villes[zone_climatique]}.svg'\nplt.savefig(filename)\n\n# +\n# == Heat map == \nT_ext_grid = np.array([h.value for h in datazone.col(1, start_rowx=1)]).reshape(-1, 24).T\n\nplt.figure(figsize=(15, 4))\nplt.pcolormesh(T_ext_grid, shading='flat'); plt.colorbar();\nplt.title(\"Température extérieure (°C)\")\nplt.xlabel(\"jour de l'année\"); plt.ylabel('heure');\n# -\n\nplt.figure(figsize=(15, 4))\nplt.pcolormesh(dirN_grid, shading='flat'); plt.colorbar();\n\nprint( list(data.keys()) )\n\n# +\n# Export to csv\nzone_climatique = 'H1c'\n\ncolumns_to_export = ['Htsmd', 'te0', 'dirN']\ndataarray = np.stack([data[c] for c in columns_to_export], axis=-1)\n\nfilename = f'{zone_climatique}_{villes[zone_climatique]}.csv'\nnp.savetxt(filename, dataarray, fmt='%.18e', delimiter=';', header=';'.join(columns_to_export))\n# -\n\n# # Look at Correlations\n\nplt.plot(data['te0'], data['dirN'], '.')\n\nplt.plot(dirN_grid.max(axis=0), 
T_ciel_grid.mean(axis=0), '.')\n\nplt.plot(data['te0'], data['Teciel'], '.')\n\n# https://physics.stackexchange.com/a/153947/105894\n# https://github.com/xdze2/thermique_appart/blob/master/Model02_tuile.ipynb\n#\n# It's much closer to 273 K than 2.73 K. The answer depends on the surface temperature, the humidity, the temperature gradient through the atmosphere, and what exactly you mean by \"the temperature of the clear night sky\".\n#\n# The Swinbank formula provides an ad hoc expression for the power radiated by the night sky. A modified version of this formula from Goforth et al. is $$P_{\\text{thermal}} = (1+KC^2)8.78\\times 10^{-13}\\,T^{5.852}\\,{RH}^{0.07195}$$ where\n#\n# $K$ is a scale factor based on cloud height, ranging from 0.34 for very low clouds to 0.06 for very high clouds,\n# $C$ is the fraction of the sky covered by clouds,\n# $T$ is the surface temperature, in kelvins,\n# $RH$ is the surface relative humidity, as a percentage (e.g., $RH$ would be 25 in the case of 25% relative humidity), and\n# $P_{\\text{thermal}}$ is the night sky radiation, in watts per square meter.\n#\n# This can be converted to an effective temperature via the Stefan-Boltzmann law. Now the question arises as to whether you are asking about the effective black body temperature or effective gray body temperature of the night sky. In the first case the Stefan-Boltzmann law yields $T = (P/\\sigma)^{1/4}$. Taking emissivity into account yields $T = (P/(\\epsilon \\sigma))^{1/4}$, where $\\epsilon\\approx 0.74$ is the emissivity of the atmosphere.\n#\n# A couple of examples:\n#\n# A cool clear night in the desert, with a temperature of 5°C and a relative humidity of 5%. The modified Swinbank formula yields a flux of 198 w/m2, which in turn corresponds to a black body temperature of -29.9°C or a gray body temperature of -10.9°C.\n#\n# A warm clear night in the countryside, with a temperature of 15°C and a relative humidity of 25%. The modified Swinbank formula in this case yields a flux of 274 w/m2, which in turn corresponds to a black body temperature of -9.5°C or a gray body temperature of 11.1°C.\n#
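\n\n# An added sketch (not from the original notebook): checking the second\n# example above by inverting the Stefan-Boltzmann law, with the assumed\n# atmospheric emissivity of 0.74.\nsigma = 5.670374419e-8  # Stefan-Boltzmann constant (W m^-2 K^-4)\nT_s = 288.15  # surface temperature in kelvins (15°C)\nRH = 25  # relative humidity (%)\nP_thermal = 8.78e-13 * T_s**5.852 * RH**0.07195  # clear sky, so (1 + K*C**2) == 1\nprint(P_thermal)  # ~274 W/m2\nprint((P_thermal / sigma) ** 0.25 - 273.15)  # black body: ~-9.5°C\nprint((P_thermal / (0.74 * sigma)) ** 0.25 - 273.15)  # gray body: ~11.1°C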
\n\n\n","repo_name":"xdze2/simuthermique","sub_path":"weather_api/Fichiers_Meteo_RT2012/viz_yearly_weather_data.py","file_name":"viz_yearly_weather_data.py","file_ext":"py","file_size_in_byte":7232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"21688667754","text":"import unittest\r\nfrom Utils.logger import *\r\nfrom selenium import webdriver\r\n\r\nfrom Utils.utility import *\r\nfrom classes.DriverHelpers.DriverHelper import DriverHelper\r\nfrom Utils.Constants import *\r\nfrom Utils.SetUp import *\r\nfrom classes.Pages.NFPageClass import *\r\nfrom classes.Pages.QuickTrendsPageClass import *\r\nfrom classes.Pages.GenerateReportsPopClass import *\r\nfrom classes.Pages.ReportsModuleClass import *\r\nfrom classes.Pages.ConfigurationPageClass import *\r\n\r\nsetup = SetUp()\r\n\r\nlogin(setup, \"admin\", \"Admin@123\")\r\nexploreScreenInstance = ExplorePageClass(setup.d)\r\nexploreHandle = getHandle(setup,\"explore_Screen\")\r\n\r\n# exploreScreenInstance.exploreList.launchScreen(exploreHandle,\"exploreList\",\"nf_Screen\")\r\n\r\nexploreScreenInstance.exploreList.switchApp(exploreHandle,1)\r\n\r\ntime.sleep(4)\r\nsetup.d.switch_to.window(setup.d.window_handles[1])\r\nconfScreenInstance = ConfigurationPageClass(setup.d)\r\nconfScreenHandle = getHandle(setup,\"configuration_Screen\")\r\nconfScreenInstance.leftColumn.select(1,confScreenHandle)\r\nconfScreenHandle = 
getHandle(setup,\"configuration_Screen\")\n\n\nconfScreenInstance.dummyelement.click(confScreenHandle['buttons']['crudbuttons'][3])\nconfScreenInstance.dummyelement.click(confScreenHandle['buttons']['crudbuttons'][4])\nconfScreenHandle = getHandle(setup,\"configuration_Screen\")\n# Delete is not working now\nconfScreenInstance.table.setSelection1(3,confScreenHandle,\"table\")\nconfScreenInstance.dummyelement.click(confScreenHandle['buttons']['crudbuttons'][1])\n\n\nconfScreenHandle = getHandle(setup,\"configuration_Screen\")\nconfScreenInstance.table.setSelection1(1,confScreenHandle,\"table\")\nconfScreenInstance.dummyelement.click(confScreenHandle['buttons']['crudbuttons'][2])\ncreatePopInstance = GenerateReportsPopClass(setup.d)\ncreatePopHandle = getHandle(setup, \"config_popup\")\ncreatePopInstance.dropdown.customSendkeys(createPopHandle['createdialog']['nfName'],\"nfautomationHost1\")\ncreatePopInstance.dropdown.doSelection(createPopHandle,\"FA\",'createdialog','networkElement1')\ncreatePopInstance.dropdown.doSelection(createPopHandle,\"HA\",'createdialog','networkElement2')\ncreatePopInstance.dropdown.customSendkeys(createPopHandle['createdialog']['port'],\"12\")\ncreatePopInstance.dropdown.customSendkeys(createPopHandle['createdialog']['protocol'],\"23\")\ncreatePopInstance.dropdown.customClick(createPopHandle['createdialog']['submit'])\n\n# confScreenHandle = getHandle(setup,\"configuration_Screen\")\n\n\n\n\nsetup.d.close()","repo_name":"mayankmahajan/html5auto","sub_path":"suite_ibconfiguration/ibnetwork.py","file_name":"ibnetwork.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"14967167538","text":"from hamcrest import (\n assert_that,\n equal_to\n)\n\nfrom pynformatics.testutils import TestCase\nfrom pynformatics.utils.context import Context\n\n\nclass TestUtils__context_encode(TestCase):\n def test_simple(self):\n context = Context(\n user_id=1,\n problem_id=2,\n statement_id=None,\n )\n assert_that(\n context.encode(),\n equal_to({\n 'user_id': 1,\n 'problem_id': 2,\n 'statement_id': None,\n })\n )\n","repo_name":"riskingh/informatics-mccme-ru","sub_path":"pynformatics/tests/unit/utils/context/encode/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"73876346495","text":"\ndef get_pisano_period(m=10):\n prev, curr = 0, 1\n for i in range(0, m * m):\n prev, curr = curr, (prev + curr) % m\n\n # pisano number starts with 01\n if prev == 0 and curr == 1:\n return i+1\n return 60\n \ndef fib_sum(n):\n pp = get_pisano_period(10)\n n = n % pp\n\n if n <= 1:\n return n\n\n prev, cur, sum = 0, 1, 1\n for _ in range(2, n+1):\n prev, cur = cur, (prev + cur) % 10\n sum += cur\n return sum % 10\n\nif __name__ == '__main__':\n input_n = int(input())\n # input_n = 100\n # input_n = 240\n # input_n = 832564823476\n print(fib_sum(input_n))\n","repo_name":"sakshamsds/data-structures-and-algorithms","sub_path":"ucsd_specialization/01_Algorithmic_Toolbox/week2/2_6_fib_sum.py","file_name":"2_6_fib_sum.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74287780096","text":"\"\"\"\n Remember to update MAVLink dialect with:\n cp dialects/* .venv/lib/python3.7/site-packages/message_definitions/v1.0/\n\"\"\"\n\nimport time\nimport serial\nimport logging\n\nimport 
settings\n\nfrom pymavlink import mavutil\n\n\nlogging.basicConfig(**settings.LOGGING_KWARGS)\n\n\ndef connect_vehicle():\n while True:\n try:\n link = mavutil.mavlink_connection(**settings.MAVLINK_DAEMON)\n logging.info(\n f\"Vehicle connected at {settings.MAVLINK_DAEMON['device']}\")\n break\n except Exception as e:\n logging.error(f\"Vehicle connection error: {e}\")\n time.sleep(1)\n\n return link\n\n\nvehicle_link = connect_vehicle()\n\ntry:\n ground_link = mavutil.mavlink_connection(\n input=False,\n **settings.MAVLINK_GROUND\n )\n logging.info(f\"Ground at {settings.MAVLINK_GROUND['device']}\")\nexcept serial.SerialException:\n ground_link = None\n logging.warning(f\"NO GROUND LINK at {settings.MAVLINK_GROUND}\")\n\n\ntukano_link = mavutil.mavlink_connection(\n input=False,\n **settings.MAVLINK_TUKANO\n)\nlogging.info(f\"MAVLink tukano at {settings.MAVLINK_TUKANO['device']}\")\n\nlogging.info(\"Waiting for vehicle heartbeat\")\nvehicle_link.wait_heartbeat()\nlogging.info(\"Vehicle heartbeat received!\")\n\n\nwhile True:\n\n # From vehicle to ground/tukano\n try:\n vehicle_m = vehicle_link.recv()\n except ConnectionResetError as e:\n logging.error(f\"MAVLINK VEHICLE ERROR: {e}\")\n vehicle_link = connect_vehicle()\n continue\n\n vehicle_msgs = vehicle_link.mav.parse_buffer(vehicle_m)\n if vehicle_msgs:\n for vehicle_msg in vehicle_msgs:\n logging.debug(f\"(VEHICLE_MSG) {vehicle_msg}\")\n if ground_link:\n ground_link.write(vehicle_msg.get_msgbuf())\n\n if tukano_link:\n tukano_link.write(vehicle_msg.get_msgbuf())\n\n # From ground to vehicle\n if ground_link:\n ground_m = ground_link.recv()\n ground_msgs = ground_link.mav.parse_buffer(ground_m)\n if ground_msgs:\n for ground_msg in ground_msgs:\n logging.info(f\"(GROUND_MSG) {ground_msg}\")\n vehicle_link.write(ground_msg.get_msgbuf())\n\n # From tukano to vehicle\n tukano_m = tukano_link.recv()\n tukano_msgs = tukano_link.mav.parse_buffer(tukano_m)\n if tukano_msgs:\n for tukano_msg in tukano_msgs:\n logging.info(f\"(TUKANO_MSG) {tukano_msg}\")\n vehicle_link.write(tukano_msg.get_msgbuf())\n\n time.sleep(settings.SLEEPING_TIME)\n","repo_name":"josezy/tukano","sub_path":"src/deprecated_mavlink_daemon.py","file_name":"deprecated_mavlink_daemon.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"31414642519","text":"\nclass Solution:\n def is_prime(self,n):\n if n == 2 or n == 3: return True\n if n < 2 or n%2 == 0: return False\n if n < 9: return True\n if n%3 == 0: return False\n r = int(n**0.5)\n # since all primes > 3 are of the form 6n ± 1\n # start with f=5 (which is prime)\n # and test f, f+2 for being prime\n # then loop by 6. \n f = 5\n while f <= r:\n \n if n % f == 0: return False\n if n % (f+2) == 0: return False\n f += 6\n return True\n \n def isUgly(self, n: int) -> bool:\n # 0 and negative numbers are not ugly by convention (LeetCode 263)\n if n <= 0:\n return False\n if n == 1:\n return True\n # walk over every candidate factor of n; a prime factor larger than 5\n # proves that n is not ugly\n for i in range(7, n + 1):\n # if i is a factor of n and bigger than 5, check whether it is prime;\n # otherwise do nothing, so there is no else\n if n % i == 0:\n print(\"one of the factors is %s\" % i)\n # check if i is prime or not\n if self.is_prime(i):\n return False\n return True\n\nepsi = Solution()\nprint(epsi.isUgly(-2147483648))
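\n\n# An added alternative (illustrative sketch, not part of the original\n# solution): an ugly number's prime factors are limited to 2, 3 and 5,\n# so dividing those out and checking what remains avoids primality tests.\ndef is_ugly_by_division(n: int) -> bool:\n    if n <= 0:\n        return False\n    for p in (2, 3, 5):\n        while n % p == 0:\n            n //= p\n    return n == 1\n\nprint(is_ugly_by_division(6), is_ugly_by_division(14))  # True False\n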
","repo_name":"HuachenZH/Python_leet","sub_path":"Math/263. Ugly Number/263.py","file_name":"263.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"39759816065","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def pathSum(self, root: Optional[TreeNode], targetSum: int) -> int:\n prefix_sum = defaultdict(int)\n prefix_sum[0] = 1 # the empty prefix, so paths starting at the root are counted\n \n path_sum = 0\n def dfs(node, psum, prefix_dict):\n nonlocal path_sum\n if not node:\n return\n \n psum += node.val\n # every earlier prefix with sum psum - targetSum closes a path\n # that ends at the current node\n prefix_to_delete = psum - targetSum\n if prefix_to_delete in prefix_dict:\n path_sum += prefix_dict[prefix_to_delete]\n prefix_dict[psum] += 1\n dfs(node.left, psum, prefix_dict)\n dfs(node.right, psum, prefix_dict)\n \n prefix_dict[psum] -= 1 # backtrack: leave the current root-to-node path\n dfs(root, 0, prefix_sum)\n return path_sum\n","repo_name":"Natnael16/competitiveprogramming","sub_path":"0437-path-sum-iii/0437-path-sum-iii.py","file_name":"0437-path-sum-iii.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
{"seq_id":"25440677068","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 18 14:53:48 2023\n\n@author: edu_c\n\"\"\"\n\ndef isYearLeap(year):\n if (year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)):\n return True\n else:\n return False\n \n\ndef daysInMonth(year, month):\n meses_31 = [1,3,5,7,8,10,12]\n meses_30 = [4,6,9,11]\n if (month in meses_31):\n return 31 \n elif (month in meses_30):\n return 30\n elif (month == 2):\n if (isYearLeap(year)):\n return 29\n else:\n return 28 \n else:\n return None\n\ndef dias_del_anio(year, month, day):\n dias = 0\n if not((month > 0 and month <= 12) and (year > 0) and (day > 0 and day <= (daysInMonth(year, month)))):\n return None\n for i in range (1, month):\n dias += daysInMonth(year, i)\n dias += day\n return dias\n\n\n\nprint(dias_del_anio(2023,5,19)) # should print 139\nprint(dias_del_anio(2023,2,29)) # should print None
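\n\n# Cross-check (an added sketch, not part of the original exercise): the\n# standard library computes the same day-of-year value.\nimport datetime\nprint(datetime.date(2023, 5, 19).timetuple().tm_yday)  # 139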
","repo_name":"educeav/python_essentials","sub_path":"ejercicio4_dias_correspondientes_de__.py","file_name":"ejercicio4_dias_correspondientes_de__.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"5077011232","text":"# # Python example - Fourier transform using numpy.fft method\n# import numpy as np\n# import pandas as pd\n# import matplotlib.pyplot as plt\n\n# df = pd.read_csv('E:\\\\Django_proj\\\\mysite\\\\media\\\\Acc_time.csv')\n# length = 40960\n\n# # How many time points are needed i,e., Sampling Frequency\n# samplingFrequency = length;\n\n# # At what intervals time points are sampled\n# samplingInterval = 1 / samplingFrequency;\n\n# # # Create subplot\n# # figure, axis = plotter.subplots(4, 1)\n# # plotter.subplots_adjust(hspace=1)\n\n# # Time points\n# time = df['time']\n# amplitude = df['amplitude']\n\n# # Frequency domain representation\n# fourierTransform = np.fft.fft(amplitude)/len(amplitude) # Normalize amplitude\n# fourierTransform = fourierTransform[range(int(len(amplitude)/2))] # Exclude sampling frequency\n# tpCount = len(amplitude)\n# values = np.arange(int(tpCount/2))\n# timePeriod = tpCount/samplingFrequency\n# frequencies = values/timePeriod\n\n# # Frequency domain representation\n\n# plt.title('Fourier transform depicting the frequency components')\n# plt.plot(frequencies, abs(fourierTransform))\n# plt.xlabel('Frequency')\n# plt.ylabel('Amplitude')\n# plt.show()\n\n\n\nimport csv\nimport pandas as pd\n# import numpy as np\n\nfile = (\"E:\\\\Django_proj\\\\restapi\\\\media\\\\Acc_time_ext.csv\")\n# csv = pd.read_csv(file)\n# csv = pd.read_csv(file, header=0, nrows=0).columns.tolist()\n# first = csv.index('time')\n# second = csv.index('amplitude')\n# if csv != first:\n# print('yes')\n# else:\n# print('no')\n# print(csv)\n# print(second)\n\nfile = (\"E:\\\\Django_proj\\\\restapi\\\\media\\\\Acc_time_ext.csv\")\n\ndf = pd.read_csv(file)\ncol = df.columns.tolist()\nn = len(col)\nprint(n)\n# report whether the expected column headers are present\nif col[0] != 'time' or col[1] != 'amplitude':\n print('unexpected column names')\nelse:\n print('column names ok')\n\n# time = len(csv[0])\n# num = csv['time']. iloc[1]\n# sf = int((time/num)*1000)\n# print(sf)\n# val = len(file.columns)\n# print(time)\n\n","repo_name":"paranormman/TEAL_project","sub_path":"visual/fft.py","file_name":"fft.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"11839964599","text":"import string\n\nletters = string.ascii_uppercase\n\ndef get_result():\n total = 0\n with open('./files/p022_names.txt', 'r') as f:\n names = list(f.read().replace('\"','').split(','))\n names.sort()\n print(names)\n for pos in range(len(names)):\n score = 0\n for letter in names[pos]:\n score += letters.index(letter) + 1\n total += score * (pos + 1)\n return total","repo_name":"bruno-zaccariello/usefull","sub_path":"EulerProject/euler_22.py","file_name":"euler_22.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"7342725685","text":"import random\r\n\r\n# The code for rolling the dice \ndef game():\r\n rand = random.randint(1, 6)\r\n print(\"Your dice roll is \" + str(rand))\r\n restart = input(\"do you want to get another dice roll? (y/n): \")\r\n if restart == 'y':\r\n game()\r\n else:\r\n print(\"Thanks for joining!\")\r\n\r\ndef again():\r\n t = True\r\n count = 0\r\n while t:\r\n rand = random.randint(1, 6)\r\n print(\"Your dice rolls are: \")\r\n print(rand)\r\n if count == 99:\r\n break\r\n else:\r\n count += 1\r\n continue\r\n \r\n# Taking user input to start the game!\r\nprint(\"If you want 100 dice rolls type '100': \")\r\nstart = input(\"Are you ready? 
(y/n/100): \")\r\nif start == \"y\":\r\n print(\"\")\r\n game()\r\nelif start == '100':\r\n again()\r\n\r\nelse:\r\n print(\"Thanks for joining us\")\r\n\r\n \r\n\r\n","repo_name":"AbhinavSilwal/dice-rolling-simulation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"37835021704","text":"import pyttsx3\r\nimport PyPDF2\r\nfrom tkinter import * # Importing the tkinter GUI toolkit\r\nfrom tkinter.filedialog import *\r\nfrom page_range import *\r\nengine = pyttsx3.init() # Object creation\r\naudiotabclose = ''\r\n\r\ndef close_window_a():\r\n engine.stop()\r\n audiotabclose.destroy()\r\n\r\n\r\ndef audio(pageRange, audiotab):\r\n global audiotabclose # close_window_a() above reads this module-level name\r\n # Create a window\r\n audiotabclose = audiotab\r\n\r\n # Set Title as Image Loader\r\n audiotab.title(\"AudioBook\")\r\n\r\n # Set the resolution of window\r\n audiotab.geometry(\"1000x600\")\r\n audiotab.configure(bg = \"#FFFFFF\")\r\n canvas = Canvas(\r\n audiotab,\r\n bg = \"#FFFFFF\",\r\n height = 600,\r\n width = 1000,\r\n bd = 0,\r\n highlightthickness = 0,\r\n relief = \"ridge\")\r\n canvas.place(x = 0, y = 0)\r\n\r\n background_img = PhotoImage(file = f\"background 2.png\")\r\n background = canvas.create_image(\r\n 534.5, 310.5,\r\n image=background_img)\r\n\r\n img0 = PhotoImage(file = f\"stop 2.png\")\r\n b0 = Button(\r\n image = img0,\r\n borderwidth = 0,\r\n highlightthickness = 0,\r\n command = close_window_a,\r\n relief = \"flat\")\r\n\r\n b0.place(\r\n x = 386, y = 326,\r\n width = 249,\r\n height = 78)\r\n\r\n # Allow Window to be resizable\r\n \r\n frame = Frame(audiotab)\r\n frame.pack()\r\n \r\n \r\n\r\n\r\n rate = engine.getProperty('rate')\r\n print (rate) # Printing the current voice rate\r\n engine.setProperty('rate', 165) # Setting up the new voice rate\r\n volume = engine.getProperty('volume')\r\n print (volume) # Printing the current volume level\r\n engine.setProperty('volume',1.0) # Setting up the volume level between 0 and 1\r\n voices = engine.getProperty('voices')\r\n engine.setProperty('voice', voices[1].id)\r\n \r\n book=askopenfilename()\r\n pdfreader=PyPDF2.PdfFileReader(book)\r\n pages=pdfreader.numPages\r\n try:\r\n a , b = get_text(pageRange)\r\n for num in range(a, b):\r\n page=pdfreader.getPage(num)\r\n text=page.extractText()\r\n player=pyttsx3.init()\r\n player.say(text)\r\n player.runAndWait()\r\n except Exception:\r\n # fall back to reading the whole document when no valid page range is given\r\n for num in range(0,pages):\r\n page=pdfreader.getPage(num)\r\n text=page.extractText()\r\n player=pyttsx3.init()\r\n player.say(text)\r\n player.runAndWait()\r\n finally:\r\n engine.save_to_file(text, 'audio.mp3') # Saving the voice to a file \r\n engine.runAndWait()\r\n print(\"Your audiobook file has been generated as an mp3 file. 
Check the project file directory for getting the file.\")\r\n audiotab.mainloop()\r\n\r\n\r\n\r\n \r\n","repo_name":"Lakshminarayana155/Audio-book-using-python-2nd-year-project-","sub_path":"audio.py","file_name":"audio.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70052074497","text":"from __future__ import with_statement\nimport functools, random\nfrom array import array\nfrom heapq import nsmallest\nfrom operator import itemgetter\nfrom threading import Lock\nfrom time import time\n\nfrom whoosh.compat import iteritems, xrange\n\n\ntry:\n from collections import Counter\nexcept ImportError:\n class Counter(dict):\n def __missing__(self, key):\n return 0\n\n\ndef unbound_cache(func):\n \"\"\"Caching decorator with an unbounded cache size.\n \"\"\"\n\n cache = {}\n\n @functools.wraps(func)\n def caching_wrapper(*args):\n try:\n return cache[args]\n except KeyError:\n result = func(*args)\n cache[args] = result\n return result\n\n return caching_wrapper\n\n\ndef lru_cache(maxsize=100):\n \"\"\"A simple cache that, when the cache is full, deletes the least recently\n used 10% of the cached values.\n\n This function duplicates (more-or-less) the protocol of the\n ``functools.lru_cache`` decorator in the Python 3.2 standard library.\n\n Arguments to the cached function must be hashable.\n\n View the cache statistics tuple ``(hits, misses, maxsize, currsize)``\n with f.cache_info(). Clear the cache and statistics with f.cache_clear().\n Access the underlying function with f.__wrapped__.\n \"\"\"\n\n def decorating_function(user_function):\n stats = [0, 0] # Hits, misses\n data = {}\n lastused = {}\n\n @functools.wraps(user_function)\n def wrapper(*args):\n try:\n result = data[args]\n stats[0] += 1 # Hit\n except KeyError:\n stats[1] += 1 # Miss\n if len(data) == maxsize:\n for k, _ in nsmallest(maxsize // 10 or 1,\n iteritems(lastused),\n key=itemgetter(1)):\n del data[k]\n del lastused[k]\n data[args] = user_function(*args)\n result = data[args]\n finally:\n lastused[args] = time()\n return result\n\n def cache_info():\n return stats[0], stats[1], maxsize, len(data)\n\n def cache_clear():\n data.clear()\n lastused.clear()\n stats[0] = stats[1] = 0\n\n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n return wrapper\n return decorating_function\n\n\ndef lfu_cache(maxsize=100):\n \"\"\"A simple cache that, when the cache is full, deletes the least frequently\n used 10% of the cached values.\n\n This function duplicates (more-or-less) the protocol of the\n ``functools.lru_cache`` decorator in the Python 3.2 standard library.\n\n Arguments to the cached function must be hashable.\n\n View the cache statistics tuple ``(hits, misses, maxsize, currsize)``\n with f.cache_info(). 
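A small usage sketch (added illustration, not from the upstream docstring)::\n\n        @lru_cache(maxsize=100)\n        def fib(n):\n            return n if n < 2 else fib(n - 1) + fib(n - 2)\n\n    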
Clear the cache and statistics with f.cache_clear().\n Access the underlying function with f.__wrapped__.\n \"\"\"\n\n def decorating_function(user_function):\n stats = [0, 0] # Hits, misses\n data = {}\n usecount = Counter()\n\n @functools.wraps(user_function)\n def wrapper(*args):\n try:\n result = data[args]\n stats[0] += 1 # Hit\n except KeyError:\n stats[1] += 1 # Miss\n if len(data) == maxsize:\n for k, _ in nsmallest(maxsize // 10 or 1,\n iteritems(usecount),\n key=itemgetter(1)):\n del data[k]\n del usecount[k]\n data[args] = user_function(*args)\n result = data[args]\n finally:\n usecount[args] += 1\n return result\n\n def cache_info():\n return stats[0], stats[1], maxsize, len(data)\n\n def cache_clear():\n data.clear()\n usecount.clear()\n\n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n return wrapper\n return decorating_function\n\n\ndef random_cache(maxsize=100):\n \"\"\"A very simple cache that, when the cache is filled, deletes 10% of the\n cached values AT RANDOM.\n\n This function duplicates (more-or-less) the protocol of the\n ``functools.lru_cache`` decorator in the Python 3.2 standard library.\n\n Arguments to the cached function must be hashable.\n\n View the cache statistics tuple ``(hits, misses, maxsize, currsize)``\n with f.cache_info(). Clear the cache and statistics with f.cache_clear().\n Access the underlying function with f.__wrapped__.\n \"\"\"\n\n def decorating_function(user_function):\n stats = [0, 0] # hits, misses\n data = {}\n\n @functools.wraps(user_function)\n def wrapper(*args):\n try:\n result = data[args]\n stats[0] += 1 # Hit\n except KeyError:\n stats[1] += 1 # Miss\n if len(data) == maxsize:\n keys = data.keys()\n for i in xrange(maxsize // 10 or 1):\n n = random.randint(0, len(keys) - 1)\n k = keys.pop(n)\n del data[k]\n data[args] = user_function(*args)\n result = data[args]\n return result\n\n def cache_info():\n return stats[0], stats[1], maxsize, len(data)\n\n def cache_clear():\n data.clear()\n\n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n return wrapper\n return decorating_function\n\n\ndef db_lru_cache(maxsize=100):\n \"\"\"Double-barrel least-recently-used cache decorator. This is a simple\n LRU algorithm that keeps a primary and secondary dict. Keys are checked\n in the primary dict, and then the secondary. Once the primary dict fills\n up, the secondary dict is cleared and the two dicts are swapped.\n\n This function duplicates (more-or-less) the protocol of the\n ``functools.lru_cache`` decorator in the Python 3.2 standard library.\n\n Arguments to the cached function must be hashable.\n\n View the cache statistics tuple ``(hits, misses, maxsize, currsize)``\n with f.cache_info(). 
Clear the cache and statistics with f.cache_clear().\n Access the underlying function with f.__wrapped__.\n \"\"\"\n\n def decorating_function(user_function):\n # Cache1, Cache2, Pointer, Hits, Misses\n stats = [{}, {}, 0, 0, 0]\n\n @functools.wraps(user_function)\n def wrapper(*args):\n ptr = stats[2]\n a = stats[ptr]\n b = stats[not ptr]\n key = args\n\n if key in a:\n stats[3] += 1 # Hit\n return a[key]\n elif key in b:\n stats[3] += 1 # Hit\n return b[key]\n else:\n stats[4] += 1 # Miss\n result = user_function(*args)\n a[key] = result\n if len(a) >= maxsize:\n stats[2] = not ptr\n b.clear()\n return result\n\n def cache_info():\n return stats[3], stats[4], maxsize, len(stats[0]) + len(stats[1])\n\n def cache_clear():\n \"\"\"Clear the cache and cache statistics\"\"\"\n stats[0].clear()\n stats[1].clear()\n stats[3] = stats[4] = 0\n\n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n\n return wrapper\n return decorating_function\n\n\ndef clockface_lru_cache(maxsize=100):\n \"\"\"Least-recently-used cache decorator.\n\n This function duplicates (more-or-less) the protocol of the\n ``functools.lru_cache`` decorator in the Python 3.2 standard library, but\n uses the clock face LRU algorithm instead of an ordered dictionary.\n\n If *maxsize* is set to None, the LRU features are disabled and the cache\n can grow without bound.\n\n Arguments to the cached function must be hashable.\n\n View the cache statistics named tuple (hits, misses, maxsize, currsize)\n with f.cache_info(). Clear the cache and statistics with f.cache_clear().\n Access the underlying function with f.__wrapped__.\n \"\"\"\n\n def decorating_function(user_function):\n stats = [0, 0, 0] # hits, misses, hand\n data = {}\n\n if maxsize:\n # The keys at each point on the clock face\n clock_keys = [None] * maxsize\n # The \"referenced\" bits at each point on the clock face\n clock_refs = array(\"B\", (0 for _ in xrange(maxsize)))\n lock = Lock()\n\n @functools.wraps(user_function)\n def wrapper(*args):\n key = args\n try:\n with lock:\n pos, result = data[key]\n # The key is in the cache. Set the key's reference bit\n clock_refs[pos] = 1\n # Record a cache hit\n stats[0] += 1\n except KeyError:\n # Compute the value\n result = user_function(*args)\n with lock:\n # Current position of the clock hand\n hand = stats[2]\n # Remember to stop here after a full revolution\n end = hand\n # Sweep around the clock looking for a position with\n # the reference bit off\n while True:\n hand = (hand + 1) % maxsize\n current_ref = clock_refs[hand]\n if current_ref:\n # This position's \"referenced\" bit is set. Turn\n # the bit off and move on.\n clock_refs[hand] = 0\n elif not current_ref or hand == end:\n # We've either found a position with the\n # \"reference\" bit off or reached the end of the\n # circular cache. 
So we'll replace this\n # position with the new key\n current_key = clock_keys[hand]\n if current_key in data:\n del data[current_key]\n clock_keys[hand] = key\n clock_refs[hand] = 1\n break\n # Put the key and result in the cache\n data[key] = (hand, result)\n # Save the new hand position\n stats[2] = hand\n # Record a cache miss\n stats[1] += 1\n return result\n\n else:\n @functools.wraps(user_function)\n def wrapper(*args):\n key = args\n try:\n result = data[key]\n stats[0] += 1\n except KeyError:\n result = user_function(*args)\n data[key] = result\n stats[1] += 1\n return result\n\n def cache_info():\n return stats[0], stats[1], maxsize, len(data)\n\n def cache_clear():\n \"\"\"Clear the cache and cache statistics\"\"\"\n data.clear()\n stats[0] = stats[1] = stats[2] = 0\n for i in xrange(maxsize):\n clock_keys[i] = None\n clock_refs[i] = 0\n\n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n return wrapper\n return decorating_function\n\n","repo_name":"zhl2008/awd-platform","sub_path":"web_flaskbb/lib/python2.7/site-packages/whoosh/util/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":11852,"program_lang":"python","lang":"en","doc_type":"code","stars":574,"dataset":"github-code","pt":"79"} +{"seq_id":"18041374864","text":"#!/usr/bin/python3\nfrom subprocess import call\nimport os \n\ncall(['git', 'clone', 'https://github.com/CDPS-ETSIT/practica_creativa2.git'])\ncall(['sudo', 'apt-get', 'update'])\ncall(['sudo', 'apt-get', 'install', '-y', 'python3-pip'])\n\ncall(['pip3', 'install', '-r', 'requirements.txt'])\n\nos.chdir('practica_creativa2/bookinfo/src/productpage')\n\nos.environ['GROUP_NUMBER'] = '36'\nnumGrupo = os.environ.get('GROUP_NUMBER')\n\nos.chdir('templates')\ncall(['cp', 'productpage.html', 'productpage_temporal.html'])\nfin = open('productpage_temporal.html', 'r')\nfout = open('productpage.html', 'w')\n\nfor line in fin:\n\tif '{% block title %}Simple Bookstore App{% endblock %}' in line :\n\t\tfout.write(line.replace('{% block title %}Simple Bookstore App{% endblock %}', '{% block title %}Simple Bookstore App [' + numGrupo + ']{% endblock %}'))\n\telse :\n\t\tfout.write(line)\n\nfin.close()\nfout.close()\ncall(['rm', '-f', 'productpage_temporal.html'])\n\nos.chdir('..')\ncall(['python3', 'productpage_monolith.py', '9080'])\n","repo_name":"luis-trave/Creativa2Def","sub_path":"apartado1/apartado1.py","file_name":"apartado1.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14684841275","text":"from istanza import Istanza\nfrom greedy import Greedy\nfrom simulatedAnnealing import SimulatedAnnealing\nfrom pathRelinking import PathRelinking\nimport os\nfrom heapq import nsmallest\nfrom random import choice\n\nclass Menù():\n\tdef __init__(self, config, mainW):\n\t\tself.config = config # Configurazione\n\t\tself.mainW = mainW\t # Interfaccia grafica\n\t\t\n\t\t# Istanze degli algoritmi\n\t\tself.classeIstanza = Istanza(config)\n\t\tself.classeGreedy = Greedy(config)\n\t\tself.classeSimulatedAnnealing = SimulatedAnnealing(config)\n\t\tself.classePathRelinking = PathRelinking(config)\n\t\t\n\t\t# Strutture dati contenenti i contenitori per la grafica\n\t\tself.graficaGreedy = [mainW.greedy_1, mainW.greedy_2]\n\t\tself.graficaSA = [mainW.simulated_annealing_1, mainW.simulated_annealing_2]\n\t\tself.graficaPR = [mainW.path_relinking_1, mainW.path_relinking_2]\n\t\t\n\t\t# Struttura dati contenente le soluzioni 
create\n\t\tself.istanzaCorrente = None\n\t\tself.listaGreedy = []\n\t\tself.listaSimulatedAnnealing = []\n\t\tself.listaPathRelinking = []\n\t\n\t'''\n\tFunzione eseguita dal thread demone, gestisce l'interfaccia utente.\n\t'''\n\tdef start(self):\n\t\t# Menù contestuale\n\n\t\ttitolo = \"\"\"\n ______ _ _ _ _ \n | ___ \\ | | | | | |(_) \n | |_/ / _ __ ___ __ _ ___ | |_ | |_ ___ __| | _ \n | __/ | '__| / _ \\ / _` | / _ \\| __|| __| / _ \\ / _` || | \n | | | | | (_) || (_| || __/| |_ | |_ | (_) | | (_| || | \n \\_| |_| \\___/ \\__, | \\___| \\__| \\__| \\___/ \\__,_||_| \n __/ | \n |___/ \n______ _ _____ _ _ \n| ___ \\(_) | _ | | | (_) \n| |_/ / _ ___ ___ _ __ ___ __ _ | | | | _ __ ___ _ __ __ _ | |_ _ __ __ __ _ \n| / | | / __| / _ \\| '__| / __| / _` | | | | || '_ \\ / _ \\| '__| / _` || __|| |\\ \\ / / / _` |\n| |\\ \\ | || (__ | __/| | | (__ | (_| | \\ \\_/ /| |_) || __/| | | (_| || |_ | | \\ V / | (_| |\n\\_| \\_||_| \\___| \\___||_| \\___| \\__,_| \\___/ | .__/ \\___||_| \\__,_| \\__||_| \\_/ \\__,_|\n | | \n |_| \"\"\"\n\t\tprint(titolo)\n\n\t\t# Dizionario per gestire la scelta utente\n\t\tscelta = {\n\t\t\t\t\t1 : self.soluzioneAutomatica,\n\t\t\t\t\t2 : self.nuovaIstanza,\n\t\t\t\t\t3 : self.nuovaGreedy,\n\t\t\t\t\t4 : self.nuovoSA,\n\t\t\t\t\t5 : self.nuovoPR,\n\t\t\t\t\t6 : self.visualizzaMigliori,\n\t\t\t\t\t7 : self.visualizzaMigliore,\n\t\t\t\t\t8 : self.config.mostra,\n\t\t\t\t\t9 : self.config.modifica,\n\t\t\t\t\t10 : self.aiuto,\n\t\t\t\t\t11 : self.uscita\n\t\t}\n\t\t\n\t\t# Menù principale\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\trisposta = int(input(\n\"\"\"\\nSelezionare un'opzione:\n\n1) Crea soluzione automatica (istanza + GRASP + PR)\n2) Crea una nuova istanza\n3) Applica un algoritmo Greedy\n4) Applica Simulated Annealing\n5) Applica Path Relinking\n6) Visualizza dati soluzioni migliori per categoria\n7) Visualizza soluzione migliore\n8) Visualizza configurazione\n9) Modifica configurazione\n10) Aiuto\n11) Esci\n\n>: \"\"\"))\n\t\t\t\tif risposta < 1 or risposta > len(scelta):\n\t\t\t\t\traise ValueError()\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"\\nInput errato.\")\n\t\t\telse:\n\t\t\t\tscelta[risposta]()\n\t\n\t'''\n\tFunzione per generare una soluzione ottima utilizzando il metodo GRASP + Path Relinking, tutto automatizzato.\n\t'''\n\tdef soluzioneAutomatica(self):\n\t\tprint(\"\\nGenerazione nuova istanza...\\n\")\n\t\tself.nuovaIstanza()\n\t\tprint(\"Generazione istanza completata.\\n\\n Inizio generazione soluzioni greedy...\\n\")\n\n\t\t# Greedy\n\t\ttipoGreedy = [\"LPT\", \"SPT\", \"FIFO\"]\n\t\tfor i in range(self.config.GreedyGenerabili):\n\t\t\tprint(\"Generazione soluzione {} di {}\\n\".format(i + 1, self.config.GreedyGenerabili))\n\t\t\tself.listaGreedy.append(self.classeGreedy.start(self.istanzaCorrente, choice(tipoGreedy)))\n\t\tprint(\"Generazione soluzioni greedy completata.\\n\")\n\t\t# Simulated Annealing\n\t\tprint(\"Inizio generazione soluzioni Simulated Annealing...\\n\")\n\t\tfor i, greedy in enumerate(self.listaGreedy, start=1):\n\t\t\tprint(\"Generazione soluzione {} di {}\\n\".format(i, len(self.listaGreedy)))\n\t\t\tself.listaSimulatedAnnealing.append(self.classeSimulatedAnnealing.start(greedy))\n\t\tprint(\"Generazione soluzioni Simulated Annealing completata.\\n\")\n\t\t\n\t\t# Path Relinking\n\t\tprint(\"Inizio generazione soluzioni Path Relinking...\\n\")\n\t\tfor i in range(self.config.PRGenerabili):\n\t\t\tprint(\"Generazione soluzione {} di {}\\n\".format(i + 1, 
self.config.PRGenerabili))\n\t\t\tself.listaPathRelinking.append(self.classePathRelinking.start(choice(self.listaSimulatedAnnealing), choice(self.listaSimulatedAnnealing)))\n\t\tprint(\"Generazione soluzioni Path Relinking completata.\")\n\t\t\n\t\t# Ricerca soluzione migliore\n\t\tsoluzioniTotali = self.listaGreedy + self.listaSimulatedAnnealing + self.listaPathRelinking\n\t\tsoluzioneMigliore = nsmallest(1, soluzioniTotali, key=lambda x : x.makeSpan)[0]\n\n\t\tif soluzioneMigliore.tipo == \"G\":\n\t\t\tself.graficaGreedy[0].tipo = soluzioneMigliore.tipoGreedy\n\t\t\tself.graficaGreedy[0].popolamentoDati(soluzioneMigliore)\n\t\telif soluzioneMigliore.tipo == \"SA\":\n\t\t\tself.graficaSA[0].popolamentoDati(soluzioneMigliore)\n\t\telse:\n\t\t\tself.graficaPR[0].popolamentoDati(soluzioneMigliore)\n\t\t\n\t\tself.visualizzaSoluzione(soluzioneMigliore)\n\n\t'''\n\tFunzione per creare una nuova istanza del problema e graficarla.\n\t'''\n\tdef nuovaIstanza(self):\n\t\tself.istanzaCorrente = self.classeIstanza.start()\n\n\t\t# Reset completo di grafica e soluzioni\n\t\tself.resetGrafica()\n\t\tself.listaGreedy = []\n\t\tself.listaSimulatedAnnealing = []\n\t\tself.listaPathRelinking = []\n\n\t\tself.mainW.istanza.popolamentoDati(self.istanzaCorrente)\n\t\n\t'''\n\tFunzione per creare una nuova soluzione greedy. Viene richiesto all'utente la tipologia desiderata di greedy, infine viene graficata la soluzione creata.\n\t'''\n\tdef nuovaGreedy(self):\n\t\tif not self.istanzaCorrente:\n\t\t\tprint(\"\\nUna soluzione greedy necessita di una istanza di un problema per poter operare.\\nPrima di creare nuove soluzioni, generare una nuova istanza.\\n\")\n\t\t\tinput(\">: Premere un tasto per continuare\")\n\t\t\treturn\n\t\t\n\t\t# Dizionario per gestire la scelta utente\n\t\tscelta = {\n\t\t\t\t\t1 : \"LPT\",\n\t\t\t\t\t2 : \"SPT\",\n\t\t\t\t\t3 : \"FIFO\",\n\t\t}\n\t\t\n\t\t# Richiesta tipologia greedy iterativa\n\t\tflag = True\n\t\twhile flag:\n\t\t\tflag = False\n\t\t\trisposta = input(\n\"\"\"\\nQuale tipologia greedy utilizzare? (premere Invio per annullare):\n\n1) LPT (Longest Processing Time)\n2) SPT (Shortest Processing Time)\n3) FIFO (First In First Out)\n\n>: \"\"\")\n\t\t\tif risposta == \"\":\n\t\t\t\tprint(\"\\nAnnullato\")\n\t\t\t\treturn\n\t\t\ttry:\n\t\t\t\trisposta = int(risposta)\n\t\t\t\tif risposta < 1 or risposta > len(scelta):\n\t\t\t\t\traise ValueError()\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"\\nInput errato.\\n\\n\")\n\t\t\t\tflag = True\n\t\t\telse:\n\t\t\t\t# Nuova soluzione\n\t\t\t\tnuovaGreedy = self.classeGreedy.start(self.istanzaCorrente, scelta[risposta])\n\t\t\t\t\n\t\t\t\t# Visualizzazione e salvataggio in memoria\n\t\t\t\tself.listaGreedy.append(nuovaGreedy)\n\t\t\t\tself.resetGrafica()\n\t\t\t\tself.graficaGreedy[0].tipo = scelta[risposta] \n\t\t\t\tself.graficaGreedy[0].popolamentoDati(nuovaGreedy)\n\t\t\t\t\n\t\t\t\tself.visualizzaSoluzione(nuovaGreedy)\n\n\t'''\n\tFunzione che genera una nuova soluzione SA a partire da una soluzione greedy. 
La soluzione viene infine graficata.\n\t'''\n\tdef nuovoSA(self):\n\t\tif len(self.listaGreedy) + len(self.listaSimulatedAnnealing) + len(self.listaPathRelinking) == 0:\n\t\t\tprint(\"\\nSimulated Annealing necessita di una soluzione iniziale.\\nPrima di utilizzare questo algoritmo, generare una nuova soluzione di classe Greedy.\\n\")\n\t\t\tinput(\">: Premere un tasto per continuare\")\n\t\t\treturn\n\t\t\n\t\tflag = True\n\t\twhile flag:\n\t\t\tflag = False\n\t\t\tprint(\"\\nQuale soluzione adottare?\")\n\t\t\tindice = 1\n\t\t\tif len(self.listaGreedy) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Greedy]\\n\")\n\t\t\t\tfor soluzione in self.listaGreedy:\n\t\t\t\t\tprint(str(indice) + \") Tipo: \" + soluzione.tipoGreedy + \" Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\tif len(self.listaSimulatedAnnealing) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Simulated Annealing]\\n\")\n\t\t\t\tfor soluzione in self.listaSimulatedAnnealing:\n\t\t\t\t\tprint(str(indice) + \") Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\tif len(self.listaPathRelinking) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Path Relinking]\\n\")\n\t\t\t\tfor soluzione in self.listaPathRelinking:\n\t\t\t\t\tprint(str(indice) + \") Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\t\n\t\t\t# Input utente\n\t\t\trisposta = input(\"\\n(premere Invio per annullare)>: \")\n\t\t\tif risposta == \"\":\n\t\t\t\tprint(\"\\nAnnullato\")\n\t\t\t\treturn\n\t\t\ttry:\n\t\t\t\trisposta = int(risposta)\n\t\t\t\tif risposta < 1 or risposta > indice - 1:\n\t\t\t\t\traise ValueError()\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"\\nInput errato.\")\n\t\t\t\tflag = True\n\t\t\telse:\n\t\t\t\t# Nuova soluzione\n\t\t\t\tlistaTotale = self.listaGreedy + self.listaSimulatedAnnealing + self.listaPathRelinking\n\t\t\t\tsoluzione = listaTotale[risposta - 1]\n\t\t\t\tnuovoSA = self.classeSimulatedAnnealing.start(soluzione)\n\t\t\t\t\n\t\t\t\tself.confrontaSoluzioni(nuovoSA, soluzione)\n\t\t\t\t\n\t\t\t\t# Visualizzazione e salvataggio in memoria\n\t\t\t\tself.listaSimulatedAnnealing.append(nuovoSA)\n\t\t\t\tself.resetGrafica()\n\t\t\t\tself.graficaSA[0].popolamentoDati(nuovoSA)\n\n\t\t\t\t# Visualizzazione soluzione di partenza\n\t\t\t\tif soluzione.tipo == \"G\":\n\t\t\t\t\tself.graficaGreedy[0].tipo = soluzione.tipoGreedy\n\t\t\t\t\tself.graficaGreedy[0].popolamentoDati(soluzione)\n\t\t\t\telif soluzione.tipo == \"SA\":\n\t\t\t\t\tself.graficaSA[1].popolamentoDati(soluzione)\n\t\t\t\telse:\n\t\t\t\t\tself.graficaPR[0].popolamentoDati(soluzione)\n\t\n\t'''\n\tFunzione che crea una soluzione Path Relinking partendo da due soluzioni iniziali, definite dall'utente, perciò di qualsiasi classe.\n\t'''\n\tdef nuovoPR(self):\n\t\tif len(self.listaGreedy) + len(self.listaSimulatedAnnealing) < 2:\n\t\t\tprint(\"\\nPath Relinking necessita di due soluzioni iniziali.\\nPrima di utilizzare questo algoritmo, generare due nuove soluzioni di classe Greedy o Simulated Annealing.\\n\")\n\t\t\tinput(\">: Premere un tasto per continuare\")\n\t\t\treturn\n\t\t\n\t\tsoluzioniScelte = []\n\t\tflag = True\n\t\twhile flag:\n\t\t\tflag = False\n\t\t\tprint(\"\\nQuale soluzione adottare?\")\n\t\t\tindice = 1\n\t\t\tif 
len(self.listaGreedy) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Greedy]\\n\")\n\t\t\t\tfor soluzione in self.listaGreedy:\n\t\t\t\t\tprint(str(indice) + \") Tipo: \" + soluzione.tipoGreedy + \" Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\tif len(self.listaSimulatedAnnealing) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Simulated Annealing]\\n\")\n\t\t\t\tfor soluzione in self.listaSimulatedAnnealing:\n\t\t\t\t\tprint(str(indice) + \") Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\tif len(self.listaPathRelinking) > 0:\n\t\t\t\tprint(\"\\n[Soluzioni Path Relinking]\\n\")\n\t\t\t\tfor soluzione in self.listaPathRelinking:\n\t\t\t\t\tprint(str(indice) + \") Energia: \" + str(soluzione.energia) + \" Efficienza: \" + \"{:.2%}\".format(soluzione.efficienza) + \" Makespan: \" + str(soluzione.makeSpan))\n\t\t\t\t\tindice += 1\n\t\t\t\n\t\t\t# Input utente\n\t\t\trisposta = input(\"\\n(premere Invio per annullare)>: \")\n\t\t\tif risposta == \"\":\n\t\t\t\tprint(\"\\nAnnullato\")\n\t\t\t\treturn\n\t\t\ttry:\n\t\t\t\trisposta = int(risposta)\n\t\t\t\tif risposta < 1 or risposta > indice - 1:\n\t\t\t\t\traise ValueError()\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"\\nInput errato.\")\n\t\t\t\tflag = True\n\t\t\telse:\n\t\t\t\t# Nuova soluzione\n\t\t\t\tlistaTotale = self.listaGreedy + self.listaSimulatedAnnealing + self.listaPathRelinking\n\t\t\t\tsoluzioniScelte.append(listaTotale[risposta - 1])\n\t\t\t\tif len(soluzioniScelte) < 2: # Se non sono state scelte due soluzioni, ne verrà richiesta un'altra\n\t\t\t\t\tflag = True\n\t\t\n\t\t# Avvio algoritmo Path Relinking\n\t\tnuovoPR = self.classePathRelinking.start(soluzioniScelte[0], soluzioniScelte[1])\n\t\n\t\t# Visualizzazione e salvataggio in memoria\n\t\tself.listaPathRelinking.append(nuovoPR)\n\t\tself.resetGrafica()\n\t\tself.graficaPR[0].popolamentoDati(nuovoPR)\n\n\t\t# Stampa delle informazioni delle soluzioni\n\t\tself.confrontaSoluzioni(nuovoPR, soluzioniScelte[0], soluzioniScelte[1])\n\t\t\n\t\t# Visualizzazione soluzioni iniziali\n\t\tindiceG = 0\n\t\tindiceSA = 0\n\t\tindicePR = 1\n\t\tfor soluzione in soluzioniScelte:\n\t\t\tif soluzione.tipo == \"G\":\n\t\t\t\tself.graficaGreedy[indiceG].tipo = soluzione.tipoGreedy\n\t\t\t\tself.graficaGreedy[indiceG].popolamentoDati(soluzione)\n\t\t\t\tindiceG += 1\n\t\t\telif soluzione.tipo == \"SA\":\n\t\t\t\tself.graficaSA[indiceSA].popolamentoDati(soluzione)\n\t\t\t\tindiceSA += 1\n\t\t\telse:\n\t\t\t\tself.graficaPR[indicePR].popolamentoDati(soluzione)\n\t\t\t\tindicePR += 1\n\n\t'''\n\tFunzione che mostra le soluzioni migliori ottenute attualmente per ogni classe di algoritmi.\n\t'''\n\tdef visualizzaMigliori(self):\n\t\t# Ricerca heap per visualizzare le soluzioni migliori\n\t\tsolG = nsmallest(2, self.listaGreedy, key= lambda x : x.makeSpan)\n\t\tsolSA = nsmallest(2, self.listaSimulatedAnnealing, key= lambda x : x.makeSpan)\n\t\tsolPR = nsmallest(2, self.listaPathRelinking, key= lambda x : x.makeSpan)\n\t\t\n\t\tself.resetGrafica()\n\t\t\n\t\tindiceG = 0\n\t\tindiceSA = 0\n\t\tindicePR = 0\n\t\tfor soluzione in solG:\n\t\t\tself.visualizzaSoluzione(soluzione)\n\t\t\tself.graficaGreedy[indiceG].popolamentoDati(soluzione)\n\t\t\tself.graficaGreedy[indiceG].tipo = soluzione.tipoGreedy\n\t\t\tindiceG += 1\n\t\tfor soluzione in 
solSA:\n\t\t\tself.visualizzaSoluzione(soluzione)\n\t\t\tself.graficaSA[indiceSA].popolamentoDati(soluzione)\n\t\t\tindiceSA += 1\n\t\tfor soluzione in solPR:\n\t\t\tself.visualizzaSoluzione(soluzione)\n\t\t\tself.graficaPR[indicePR].popolamentoDati(soluzione)\n\t\t\tindicePR += 1\n\t\n\t'''\n\tFunzione per visualizzare la soluzione migliore trovata finora.\n\t'''\n\tdef visualizzaMigliore(self):\n\t\tlistaCompleta = self.listaGreedy + self.listaSimulatedAnnealing + self.listaPathRelinking\n\t\tsoluzione = nsmallest(1, listaCompleta, key=lambda x : x.makeSpan)[0]\n\t\t\n\t\tself.visualizzaSoluzione(soluzione)\n\n\t\t# Per la grafica\n\t\tself.resetGrafica()\n\t\t\n\t\tif soluzione.tipo == \"G\":\n\t\t\tself.graficaGreedy[0].popolamentoDati(soluzione)\n\t\telif soluzione.tipo == \"SA\":\n\t\t\tself.graficaSA[0].popolamentoDati(soluzione)\n\t\telse:\n\t\t\tself.graficaPR[0].popolamentoDati(soluzione)\n\t\n\t'''\n\tFunzione per cancellare tutte le visualizzazioni degli algoritmi.\n\t'''\n\tdef resetGrafica(self):\n\t\tfor grafico in self.graficaGreedy + self.graficaSA + self.graficaPR:\n\t\t\tgrafico.cancellaDati()\n\t\n\t'''\n\tFunzione per la schermata informativa.\n\t'''\n\tdef aiuto(self):\n\t\tprint(\"\"\"\nPremessa:\n\nIl programma gestisce il seguente problema:\n\nLo scenario si compone di un poliambulatorio, composto da tre ambulatori medici identici e cinque medici, ognuno specializzato in un esame medico diverso. In tutto, gli ambulatori possono fornire un totale di cinque esami diversi.\nNel poliambulatorio entrano alcuni pazienti (numero variabile), ognuno può scegliere a quali esami sottoporsi, da un minimo di uno, ad un massimo di cinque. Quando un paziente occupa un ambulatorio, deve rimanerci dentro fino alla completa risoluzione di tutti i suoi esami, inoltre egli preclude ad altri la possibilità di utilizzare l'ambulatorio occupato.\nSiccome ogni tipologia di esame può essere eseguita solo da un medico in particolare, nello stesso istante non possono essere in esecuzione esami della stessa natura in ambulatori diversi.\nL'obiettivo del problema è fornire tutte le prestazioni mediche richiete dai pazienti, avendo un makespan minimo.\n\nCaratteristiche:\n\nIl programma permette all'utente di creare un nuovo problema da risolvere, partendo da una configurazione estesa personalizzabile.\nSuccessivamente è possibile creare soluzioni utilizzando diversi algoritmi:\n\n- Greedy: soluzione di partenza in cui è possibile sceglierne la tipologia (LPT, SPT, FIFO) e se utilizzare la randomicità durante la creazione.\n- Simulated Annealing: ricerca locale utilizzata per migliorare una soluzione.\n- Path Relinking: ricerca nello spazio ristretto alle soluzioni simili a quelle di input della procedura\n\nAll'utente viene fornita la possibilità di gestire manualmente la creazione delle soluzioni, oppure di avvalersi di una procedura automatica che, partendo dalla creazione di una nuova istanza del problema e arrivando all'applicazione di Path Relinking, genera una soluzione ottima al problema attuale.\nL'interfaccia grafica prevede una semplice visualizzazione delle soluzioni generate, utile per il confronto manuale da parte dell'utente.\n\t\t\"\"\")\n\t\tinput(\">: Premere un tasto per continuare\")\n\t\n\t'''\n\tFunzione per visualizzare informazioni inerenti la soluzione ottenuta.\n\t'''\n\tdef visualizzaSoluzione(self, soluzione):\n\t\tprint(\"\\nTipologia soluzione: {}\\nMakespan: {}\\nEfficienza: {:.2%}\".format(soluzione.tipo, soluzione.makeSpan, 
soluzione.efficienza))\n\t\n\t'''\n\tFunzione che mostra eventuali migliorie ottenute con la nuova soluzione. nuovaSoluzione2 è la seconda soluzione utilizzata durante Path Relinking.\n\t'''\n\tdef confrontaSoluzioni(self, nuovaSoluzione, vecchiaSoluzione, vecchiaSoluzione2=None):\n\t\tprint(\"\\nNuova soluzione:\")\n\t\tself.visualizzaSoluzione(nuovaSoluzione)\n\t\tprint(\"------------------\")\n\t\t\n\t\tself.visualizzaSoluzione(vecchiaSoluzione)\n\t\tif vecchiaSoluzione2:\n\t\t\tself.visualizzaSoluzione(vecchiaSoluzione2)\n\t\t\tvecchiaSoluzioneMin = min([vecchiaSoluzione, vecchiaSoluzione2], key=lambda x : x.makeSpan)\n\t\telse:\n\t\t\tvecchiaSoluzioneMin = vecchiaSoluzione\n\t\tprint(\"\\nRisultato finale:\")\n\t\tpercentualeFinale = nuovaSoluzione.makeSpan / vecchiaSoluzioneMin.makeSpan\n\t\t\n\t\tif percentualeFinale > 1:\n\t\t\tprint(\"\\nLa nuova soluzione è peggiorata del {:.2%}.\".format(percentualeFinale - 1))\n\t\telif percentualeFinale == 1:\n\t\t\tprint(\"\\nLa nuova soluzione possiede lo stesso makespan.\\n\")\n\t\telse:\n\t\t\tprint(\"\\nLa nuova soluzione è migliorata del {:.2%}.\".format(1 - percentualeFinale))\n\t'''\n\tFunzione per la gestione dell'uscita dal thread e dal programma.\n\t'''\n\tdef uscita(self):\n\t\tos._exit(1)","repo_name":"MicheleCESO/ROAmbulatori","sub_path":"menù.py","file_name":"menù.py","file_ext":"py","file_size_in_byte":18537,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"29349126179","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom backend import * \nimport architectures\nimport sys\nimport numpy as np\n\n\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer('sup_per_class', -1,\n 'Number of labeled samples used per class.')\n\nflags.DEFINE_integer('sup_seed', -1,\n 'Integer random seed used for labeled set selection.')\n\nflags.DEFINE_integer('sup_per_batch', 16,\n 'Number of labeled samples per class per batch.')\n\nflags.DEFINE_integer('unsup_batch_size', 64,\n 'Number of unlabeled samples per batch.')\n\nflags.DEFINE_integer('eval_interval', 500,\n 'Number of steps between evaluations.')\n\nflags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')\n\nflags.DEFINE_float('decay_factor', 0.33, 'Learning rate decay factor.')\n\nflags.DEFINE_float('decay_steps', 4000,\n 'Learning rate decay interval in steps.')\n\nflags.DEFINE_float('visit_weight', 0.65, 'Weight for visit loss.')\n\nflags.DEFINE_integer('max_steps', 20000, 'Number of training steps.')\n\nflags.DEFINE_string('checkpoint_dir', '/harddisk/hdd_c/camelyon/code1/new-2015-test/IDC-new/result/model-all-3000-all/model', \n 'Save checkpoint path.')\n\nflags.DEFINE_string('logdir', '/harddisk/hdd_c/camelyon/code1/new-2015-test/IDC-new/semisup_bach/semi-all-3000-all', 'Training log path.')\n\nimport dataset as dataset_tools \nimport sys\nNUM_LABELS = dataset_tools.NUM_LABELS\nIMAGE_SHAPE = dataset_tools.IMAGE_SHAPE\n\n\ndef main(_):\n train_images, train_labels, val_images, val_labels, test_images, test_labels = dataset_tools.get_data()\n\n\n # Sample labeled training subset.\n seed = FLAGS.sup_seed if FLAGS.sup_seed != -1 else None\n sup_by_label = sample_by_label(train_images, train_labels,\n FLAGS.sup_per_class, NUM_LABELS, seed)\n\n graph = tf.Graph()\n with graph.as_default():\n model = SemisupModel(architectures.dataset_model, 
NUM_LABELS, IMAGE_SHAPE)\n \n# unsup_num = 3000\n # Set up inputs.\n# t_unsup_images, _ = create_input(train_images[0:unsup_num], train_labels[0:unsup_num], FLAGS.unsup_batch_size)\n t_unsup_images, _ = create_input(train_images, train_labels, FLAGS.unsup_batch_size)\n \n t_sup_images, t_sup_labels = create_per_class_inputs(sup_by_label, FLAGS.sup_per_batch)\n\n # Compute embeddings and logits.\n t_sup_emb = model.image_to_embedding(t_sup_images)\n t_unsup_emb = model.image_to_embedding(t_unsup_images)\n t_sup_logit = model.embedding_to_logit(t_sup_emb)\n\n # Add losses.\n model.add_semisup_loss(t_sup_emb, t_unsup_emb, t_sup_labels, visit_weight = FLAGS.visit_weight)\n model.add_logit_loss(t_sup_logit, t_sup_labels)\n\n t_learning_rate = tf.train.exponential_decay(\n FLAGS.learning_rate,\n model.step,\n FLAGS.decay_steps,\n FLAGS.decay_factor,\n staircase=True)\n train_op, train_loss = model.create_train_op(t_learning_rate)\n summary_op = tf.summary.merge_all()\n\n summary_writer = tf.summary.FileWriter(FLAGS.logdir, graph)\n\n saver = tf.train.Saver()\n\n with tf.Session(graph=graph) as sess:\n tf.global_variables_initializer().run()\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n for step in xrange(FLAGS.max_steps):\n _, loss ,summaries = sess.run([train_op, train_loss, summary_op])\n\n \n if step % 10 == 0:\n test_loss = model.classify_loss(val_images, val_labels)\n# print(test_loss)\n test_loss_summary = tf.Summary(\n value=[tf.Summary.Value(\n tag='Validation Loss', simple_value=test_loss)])\n \n summary_writer.add_summary(summaries, step)\n summary_writer.add_summary(test_loss_summary, step)\n \n val_pred_2 = model.classify(val_images).argmax(-1)\n test_acc = 100 - (np.array(val_labels) != np.array(val_pred_2)).mean() * 100\n \n test_acc_summary = tf.Summary(\n value=[tf.Summary.Value(\n tag='Validation acc', simple_value=test_acc)])\n summary_writer.add_summary(test_acc_summary, step)\n\n \n if (step + 1) % FLAGS.eval_interval == 0 or step == 99:\n print('Step: %d' % step)\n \n # validation\n val_pred = model.classify(val_images).argmax(-1)\n conf_mtx = confusion_matrix(val_labels, val_pred, NUM_LABELS)\n val_err = (val_labels != val_pred).mean() * 100\n print(conf_mtx)\n print('Validation error: %.2f %%' % val_err)\n print()\n\n\n saver.save(sess, FLAGS.checkpoint_dir, model.step)\n\n coord.request_stop()\n coord.join(threads)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"USTC-HIlab/Semi-HIC","sub_path":"IDC-code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"7379809936","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nclass ConcatModel(nn.Module):\n def __init__(self, model, out_channels, num_classes):\n super().__init__()\n self.cnn = model\n\n self.fc1 = nn.Linear(out_channels+2, int((out_channels+2)/2))\n self.fc2 = nn.Linear(int((out_channels+2)/2), num_classes)\n\n def forward(self, image, meta):\n x1 = self.cnn(image)\n x2 = meta\n\n x = torch.cat((x1,x2), dim=1)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n\n return x","repo_name":"cch76/skin_classification","sub_path":"models/fc.py","file_name":"fc.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7449857538","text":"from telegram import Update as _Update\nfrom telegram.ext import 
CallbackContext as _CallbackContext\n\n\nname = \"registrar\"\ndescription = \"Registra el chat\"\ndef cmd(update: _Update, context: _CallbackContext):\n\n # Crear lista de chats si es que no existe\n if \"chats\" not in context.bot_data:\n context.bot_data[\"chats\"] = set()\n\n chat_id = update.effective_chat.id\n\n context.bot_data[\"chats\"].add(chat_id)\n\n update.effective_message.reply_text(\n text=f\"Agregado chat con id {chat_id}\"\n )\n\n update.effective_message.reply_text(\n text=f\"Lista de ids: {str(context.bot_data['chats'])}\"\n )\n","repo_name":"CleoStoat/plantilla_bot_tg","sub_path":"comandos/registrar_chat.py","file_name":"registrar_chat.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9999584960","text":"from datetime import datetime\nfrom picamera import PiCamera\nfrom ina219 import INA219\nimport FaBo9Axis_MPU9250\nimport RPi.GPIO as GPIO\nfrom time import sleep\nfrom math import atan2\nGPIO.setmode(GPIO.BCM)\nfrom PIL import Image\nimport numpy as np\nimport serial\nimport base64\nimport pigpio\nimport smbus\nimport time\nimport math\nimport sys\nimport PIL\nimport os\n\nservo_type = 270\nsl = 13\nsr = 12\npi = pigpio.pi()\npi.set_mode(sl, pigpio.OUTPUT)\npi.set_mode(sr, pigpio.OUTPUT)\n\ndef sa(a,b):\n a = servo_type-a\n pi.set_servo_pulsewidth(sl,500+2000*int(a)/servo_type)\n pi.set_servo_pulsewidth(sr,500+2000*int(b)/servo_type)\n\ncamera = PiCamera()\ncamera.resolution = (1280, 720)\ncamera.framerate = 30\nsensor = 6\nbuzz = 26\nled = 4\nu = 0.1\nlaunch = 11\nGPIO_TRIGGER = 18\nGPIO_ECHO = 24\nGPIO.setwarnings(False)\nGPIO.setup(GPIO_TRIGGER, GPIO.OUT)\nGPIO.setup(GPIO_ECHO, GPIO.IN)\nGPIO.setup(buzz, GPIO.OUT)\nGPIO.setup(led, GPIO.OUT)\nGPIO.setup(sensor, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(launch, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\npd = 0\nedis = 0\npressure = 0\ntemp = 0\ndef beep(t):\n GPIO.output(26,1)\n GPIO.output(4,1)\n sleep(t)\n GPIO.output(26,0)\n GPIO.output(4,0)\n sleep(t)\ndef bmpp():\n global temp,pressure\n bus = smbus.SMBus(1)\n try:\n b1 = bus.read_i2c_block_data(0x76, 0x88, 24)\n dig_T1 = b1[1] * 256 + b1[0]\n dig_T2 = b1[3] * 256 + b1[2]\n if dig_T2 > 32767 :\n dig_T2 -= 65536\n dig_T3 = b1[5] * 256 + b1[4]\n if dig_T3 > 32767 :\n dig_T3 -= 65536\n dig_P1 = b1[7] * 256 + b1[6]\n dig_P2 = b1[9] * 256 + b1[8]\n if dig_P2 > 32767 :\n dig_P2 -= 65536\n dig_P3 = b1[11] * 256 + b1[10]\n if dig_P3 > 32767 :\n dig_P3 -= 65536\n dig_P4 = b1[13] * 256 + b1[12]\n if dig_P4 > 32767 :\n dig_P4 -= 65536\n dig_P5 = b1[15] * 256 + b1[14]\n if dig_P5 > 32767 :\n dig_P5 -= 65536\n dig_P6 = b1[17] * 256 + b1[16]\n if dig_P6 > 32767 :\n dig_P6 -= 65536\n dig_P7 = b1[19] * 256 + b1[18]\n if dig_P7 > 32767 :\n dig_P7 -= 65536\n dig_P8 = b1[21] * 256 + b1[20]\n if dig_P8 > 32767 :\n dig_P8 -= 65536\n dig_P9 = b1[23] * 256 + b1[22]\n if dig_P9 > 32767 :\n dig_P9 -= 65536\n dig_H1 = bus.read_byte_data(0x76, 0xA1)\n b1 = bus.read_i2c_block_data(0x76, 0xE1, 7)\n dig_H2 = b1[1] * 256 + b1[0]\n if dig_H2 > 32767 :\n dig_H2 -= 65536\n dig_H3 = (b1[2] & 0xFF)\n dig_H4 = (b1[3] * 16) + (b1[4] & 0xF)\n if dig_H4 > 32767 :\n dig_H4 -= 65536\n dig_H5 = (b1[4] / 16) + (b1[5] * 16)\n if dig_H5 > 32767 :\n dig_H5 -= 65536\n dig_H6 = b1[6]\n if dig_H6 > 127 :\n dig_H6 -= 256\n bus.write_byte_data(0x76, 0xF2, 0x01)\n bus.write_byte_data(0x76, 0xF4, 0x27)\n bus.write_byte_data(0x76, 0xF5, 0xA0)\n data = bus.read_i2c_block_data(0x76, 0xF7, 8)\n adc_p = ((data[0] * 
65536) + (data[1] * 256) + (data[2] & 0xF0)) / 16\n adc_t = ((data[3] * 65536) + (data[4] * 256) + (data[5] & 0xF0)) / 16\n adc_h = data[6] * 256 + data[7]\n var1 = ((adc_t) / 16384.0 - (dig_T1) / 1024.0) * (dig_T2)\n var2 = (((adc_t) / 131072.0 - (dig_T1) / 8192.0) * ((adc_t)/131072.0 - (dig_T1)/8192.0)) * (dig_T3)\n t_fine = (var1 + var2)\n cTemp = (var1 + var2) / 5120.0\n fTemp = cTemp * 1.8 + 32\n var1 = (t_fine / 2.0) - 64000.0\n var2 = var1 * var1 * (dig_P6) / 32768.0\n var2 = var2 + var1 * (dig_P5) * 2.0\n var2 = (var2 / 4.0) + ((dig_P4) * 65536.0)\n var1 = ((dig_P3) * var1 * var1 / 524288.0 + ( dig_P2) * var1) / 524288.0\n var1 = (1.0 + var1 / 32768.0) * (dig_P1)\n p = 1048576.0 - adc_p\n p = (p - (var2 / 4096.0)) * 6250.0 / var1\n var1 = (dig_P9) * p * p / 2147483648.0\n var2 = p * (dig_P8) / 32768.0\n pressure = (p + (var1 + var2 + (dig_P7)) / 16.0) / 100\n var_H = ((t_fine) - 76800.0)\n var_H = (adc_h - (dig_H4 * 64.0 + dig_H5 / 16384.0 * var_H)) * (dig_H2 / 65536.0 * (1.0 + dig_H6 / 67108864.0 * var_H * (1.0 + dig_H3 / 67108864.0 * var_H)))\n humidity = var_H * (1.0 - dig_H1 * var_H / 524288.0)\n if humidity > 100.0 :\n humidity = 100.0\n elif humidity < 0.0 :\n humidity = 0.0\n\n temp = \"%.2f\" %cTemp\n pressure = \"%.2f\" %pressure\n except:\n temp = \"\"\n pressure = \"\"\ndef GPS_Info():\n global NMEA_buff\n global lat_in_degrees\n global long_in_degrees\n global time\n nmea_time = []\n nmea_latitude = []\n nmea_longitude = []\n nmea_time = NMEA_buff[0] #extract time from GPGGA string\n nmea_latitude = NMEA_buff[1] #extract latitude from GPGGA string\n nmea_longitude = NMEA_buff[3]\n t =nmea_time #extract longitude from GPGGA string\n \n gpstime = str((int(t[0]+t[1])+7)%24),\":\",t[2],t[3],\":\",t[4],t[5]\n \n lat = float(nmea_latitude) #convert string into float for calculation\n longi = float(nmea_longitude) #convertr string into float for calculation\n \n lat_in_degrees = convert_to_degrees(lat) #get latitude in degree decimal format\n long_in_degrees = convert_to_degrees(longi) #get longitude in degree decimal format\ndef convert_to_degrees(raw_value):\n decimal_value = raw_value/100.00\n degrees = int(decimal_value)\n mm_mmmm = (decimal_value - int(decimal_value))/0.6\n position = degrees + mm_mmmm\n position = \"%.6f\" %(position)\n return position\ndef distance():\n global pd\n GPIO.output(GPIO_TRIGGER, True)\n sleep(0.00001)\n GPIO.output(GPIO_TRIGGER, False)\n StartTime = time.time()\n StopTime = time.time()\n tmo = StartTime\n edis = 1\n while GPIO.input(GPIO_ECHO) == 0 and edis:\n StartTime = time.time()\n sleep(0.00001)\n if time.time()-tmo >= 0.06:\n edis = 0\n if edis:\n while GPIO.input(GPIO_ECHO) == 1:\n StopTime = time.time()\n TimeElapsed = StopTime - StartTime\n distance = TimeElapsed * 17150\n distance = \"%.2f\" % (distance/100)\n pd = distance\n return distance\n else:\n return pd\n\nina = INA219(0.1)\nina.configure()\n\ngpgga_info = \"$GNGGA,\"\nser = serial.Serial(\n port='/dev/ttyS0', #Replace ttyS0 with ttyAM0 for Pi1,Pi2,Pi0\n baudrate = 9600,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=0.02\n)\nGPGGA_buffer = 0\nNMEA_buff = 0\nlat_in_degrees = \"\"\nlong_in_degrees = \"\"\ngpstime = \"\"\n\nPI = 3.14159265\nmpu9250 = FaBo9Axis_MPU9250.MPU9250()\nti=0\ncounter=0\nGPIO.output(buzz,1)\nGPIO.output(led,1)\nsleep(0.1)\nGPIO.output(buzz,0)\nsleep(0.9)\nGPIO.output(led,0)\n\nnow = datetime.now()\ncurrent_time = now.strftime(\"%H-%M-%S\")\nfilen = str(current_time)\nfinum = 0\nvdnum = 0\nmilli_sec 
= int(round(time.time() * 1000))\nsmilli = milli_sec\nlmillis = 0\nnakono = 0\n\nf = open(\"/home/pi/cansat/gycal.txt\", \"r\")\ncalmy = float(f.readline())\ncalmz = float(f.readline())\nnorth = float(f.readline())\ncalax = float(f.readline())\ncalay = float(f.readline())\ncalaz = float(f.readline())\nf.close()\n\ndef pmm():\n milli_sec = int(round(time.time() * 1000))\n print(\"start\",milli_sec%100000)\n\nwhile GPIO.input(launch) == 0:\n ti+=1\n ti = round(ti,2)\n mm = str(ti)\n#GPS\n try:\n received_data = (str)(ser.readline())\n GPGGA_data_available = received_data.find(gpgga_info)\n if (GPGGA_data_available>0):\n print(\"GPS!!\")\n GPGGA_buffer = received_data.split(\"$GNGGA,\",1)[1] #store data coming after \"$GPGGA,\" string \n NMEA_buff = (GPGGA_buffer.split(',')) #store comma separated data in buffer\n GPS_Info() #get time, latitude, longitude\n mm+= ','+lat_in_degrees+','+long_in_degrees\n else:\n mm+= ','+lat_in_degrees+','+long_in_degrees\n except:\n mm+= ','+lat_in_degrees+','+long_in_degrees\n#MPU BMP\n try:\n ac = mpu9250.readAccel()\n ma = mpu9250.readMagnet()\n mm+= \",\"+\"%.3f\" % (ac['x']+calax)+\",\"+\"%.3f\" % (ac['y']+calay)+\",\"+\"%.3f\" % (ac['z']+calaz)\n angle = atan2(ma['z']+calmz,ma['y']+calmy) * 180 / PI\n angle += north\n if angle < -180: angle+=360\n if angle > 180: angle-=360\n angle = \"%d\" %angle\n mm+= \",\"+angle\n except:\n mm+= \",,,,\"\n#BMP\n bmpp()\n try:\n alt = 44331.5 - 4946.62 * (float(pressure)*100) ** (0.190263)\n alt = \"%.2f\" %alt\n except:\n alt = \"\"\n try:\n temp = int(str(\"%d\" %float(temp)))\n except:\n temp = ''\n mm+= \",\"+str(temp)+\",\"+str(alt)\n#Ultrasonic\n Dis = distance()\n if float(Dis) > 700: Dis = \"700\"\n Dis = str(Dis)\n mm+= \",\"+Dis\n\n#Sensor\n s1 = GPIO.input(sensor)\n mm+= \",\"+str(s1)\n\n#Battery\n V = ina.voltage()\n I = ina.current()\n percent = \"%d\" %((V-6)/(2.2)*100)\n if int(percent)>100: percent = \"100\"\n if int(percent)<0: percent = \"0\"\n mm+=\",\"+percent\n\n camera.capture('tem.jpg', use_video_port=True)\n picture = Image.open('tem.jpg')\n picture.thumbnail((128,128), Image.ANTIALIAS)\n picture.save(\"s_tem.jpg\",optimize=True,quality=10)\n with open(\"s_tem.jpg\", \"rb\") as img_file:\n simg = \"img,\"+str(base64.b64encode(img_file.read()).decode('utf-8'))+\",,\"\n try:\n ser.write(bytes(mm,'utf-8'))\n ser.write(b\"\\n\")\n ser.write(bytes(simg,'utf-8'))\n ser.write(b\"\\n\")\n except:\n print(\"send error\")\n print(mm)\n\n milli_sec = int(round(time.time() * 1000))\n sleep((1000 - milli_sec % 1000)/1000)\n\n\n\n\n\n\n\n\n\n\n\nbmpp()\nsleep(1)\nbmpp()\nspacey = 0\npercentMin = 100\ntry:\n spacey = (44331.5 - 4946.62 * (float(pressure)*100) ** (0.190263))\nexcept:\n spacey = 0\n\nlaunch = 0\n#camera.start_recording('camera/'+filen+' ('+str(vdnum)+').h264')\n\nwhile True:\n ti+=1\n ti = round(ti,2)\n mm = str(ti)\n mmf = str(ti)\n#GPS\n try:\n received_data = (str)(ser.readline())\n GPGGA_data_available = received_data.find(gpgga_info)\n if (GPGGA_data_available>0):\n print(\"GPS!!\")\n GPGGA_buffer = received_data.split(\"$GNGGA,\",1)[1] #store data coming after \"$GPGGA,\" string \n NMEA_buff = (GPGGA_buffer.split(',')) #store comma separated data in buffer\n GPS_Info() #get time, latitude, longitude\n mm+= ','+lat_in_degrees+','+long_in_degrees\n mmf+= ','+lat_in_degrees+','+long_in_degrees+','+gpstime\n else:\n #mm+= ',n/a,n/a'\n #mmf+= ',n/a,n/a,n/a'\n mm+= ','+lat_in_degrees+','+long_in_degrees\n mmf+= ','+lat_in_degrees+','+long_in_degrees+','+gpstime\n except:\n #mm+= ',n/a,n/a'\n #mmf+= 
',n/a,n/a,n/a'\n mm+= ','+lat_in_degrees+','+long_in_degrees\n mmf+= ','+lat_in_degrees+','+long_in_degrees+','+gpstime\n#MPU\n try:\n ac = mpu9250.readAccel()\n gy = mpu9250.readGyro()\n ma = mpu9250.readMagnet()\n mm+= \",\"+\"%.3f\" % (ac['x']+calax)+\",\"+\"%.3f\" % (ac['y']+calay)+\",\"+\"%.3f\" % (ac['z']+calaz)\n mmf+= \",\"+\"%.3f\" % (ac['x']+calax)+\",\"+\"%.3f\" % (ac['y']+calay)+\",\"+\"%.3f\" % (ac['z']+calaz)\n mmf+= \",\"+str(gy['x'])+\",\"+str(gy['y'])+\",\"+str(gy['z'])\n mmf+= \",\"+str(ma['x'])+\",\"+str(ma['y'])+\",\"+str(ma['z'])\n angle = atan2(ma['z']+calmz,ma['y']+calmy) * 180 / PI\n angle += north\n if angle < -180: angle+=360\n if angle > 180: angle-=360\n angle = \"%d\" %angle\n mm+= \",\"+angle\n mmf+= \",\"+angle\n except:\n mm+= \",,,,\"\n mmf+= \",,,,,,,,,,\"\n#BMP\n bmpp()\n try:\n alt = (44331.5 - 4946.62 * (float(pressure)*100) ** (0.190263))-spacey\n alt = \"%.2f\" %alt\n except:\n alt = \"\"\n try:\n temp = int(str(\"%d\" %float(temp)))\n except:\n temp = ''\n mm+= \",\"+str(temp)+\",\"+str(alt)\n mmf+= \",\"+str(temp)+\",\"+str(pressure)+\",\"+str(alt)\n#Ultrasonic\n Dis = distance()\n if float(Dis) > 7: Dis = \"7\"\n Dis = str(Dis)\n mm+= \",\"+Dis\n mmf+= \",\"+Dis\n\n#Sensor\n s1 = GPIO.input(sensor)\n mm+= \",\"+str(s1)\n mmf+= \",\"+str(s1)\n\n#Battery\n V = ina.voltage()\n I = ina.current()\n percent = (V-6)/(2.2)*100\n if percent > percentMin: percent = percentMin\n else: percentMin = percent\n percent = \"%d\" %percent\n if int(percent)>100: percent = \"100\"\n if int(percent)<0: percent = \"0\"\n mm+=\",\"+percent\n mmf+= \",\"+percent+\",\"+\"%.2f\" %V+\",\"+\"%.1f\"%I\n\n#Servo\n camera.capture('tem.jpg', use_video_port=True)\n img = Image.open('tem.jpg')\n red = 0\n green = 0\n blue = 0\n for i in range(520,761,10):\n for j in range(0,241,10):\n nino = img.getpixel((i,j))\n red += nino[0]\n green += nino[1]\n blue += nino[2]\n red = int(red/576)\n green = int(green/576)\n blue = int(blue/576)\n\n redl = 0\n greenl = 0\n bluel = 0\n for i in range(0,181,10):\n for j in range(0,181,10):\n nino = img.getpixel((i,j))\n redl += nino[0]\n greenl += nino[1]\n bluel += nino[2]\n redl = int(redl/324)\n greenl = int(greenl/324)\n bluel = int(blue/576)\n\n redr = 0\n greenr = 0\n bluer = 0\n for i in range(1099,1280,10):\n for j in range(0,181,10):\n nino = img.getpixel((i,j))\n redr += nino[0]\n greenr += nino[1]\n bluer += nino[2]\n redr = int(redr/324)\n greenr = int(greenr/324)\n bluer = int(blue/576)\n\n print(redl,greenl,red,green,redr,greenr)\n \n if green > red-30 and green > blue-30 and green > 50:\n lg = greenl > redl and greenl > bluel\n rg = greenr > redr and greenr > bluer\n if lg > rg:\n sa(0,270)\n mm+=\",0,270\"\n mmf+=\",0,270\"\n elif lg < rg:\n sa(270,0)\n mm+=\",270,0\"\n mmf+=\",270,0\"\n else:\n sa(135,135)\n mm+=\",135,135\"\n mmf+=\",135,135\"\n else:\n sa(135,135)\n mm+=\",135,135\"\n mmf+=\",135,135\"\n\n#Launch\n vec = math.sqrt(pow(ac['x']+calax,2)+pow(ac['y']+calay,2)+pow(ac['z']+calaz,2))\n if vec >= 2: \n launch = 1\n lmillis = int(round(time.time() * 1000))\n if int(round(time.time() * 1000)) - lmillis >= 30000 and launch == 1:\n launch = 2\n nakono = int(round(time.time() * 1000))\n if launch == 2:\n beep(u)\n beep(u)\n beep(u)\n sleep(2*u)\n beep(3*u)\n beep(3*u)\n beep(3*u)\n sleep(2*u)\n beep(u)\n beep(u)\n beep(u)\n if int(round(time.time() * 1000)) - nakono >= 20000 and launch == 2: launch = 0\n mm += \",\"+str(launch)\n mmf += \",\"+str(launch)\n\n\n fie = open(str(\"/home/pi/cansat/log/\"+filen+\" 
(\"+str(finum)+\").csv\"), \"a\")\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n mmf+= \",\"+current_time\n fie.write(mmf+'\\n')\n fie.close()\n #if os.stat(\"/home/pi/cansat/log/\"+filen+\" (\"+str(finum)+\").csv\").st_size >= 4096: finum += 1\n\n picture = Image.open('tem.jpg')\n picture.thumbnail((96,96), Image.ANTIALIAS)\n picture.save(\"s_tem.jpg\",optimize=True,quality=10)\n with open(\"s_tem.jpg\", \"rb\") as img_file:\n simg = \"img,\"+str(base64.b64encode(img_file.read()).decode('utf-8'))+\",,\"\n try:\n ser.write(bytes(mm,'utf-8'))\n ser.write(b\"\\n\")\n ser.write(bytes(simg,'utf-8'))\n ser.write(b\"\\n\")\n except:\n print(\"send error\")\n print(mm)\n\n \n milli_sec = int(round(time.time() * 1000))\n sleep((1000 - milli_sec % 1000)/1000)\n\n # if(milli_sec - smilli >= 300000):\n # camera.stop_recording()\n # smilli = milli_sec\n # vdnum += 1\n # camera.start_recording('camera/'+filen+' ('+str(vdnum)+').h264')\n\n","repo_name":"SecretKr/NAV-Cansat-2021","sub_path":"Cansat/cansat.py","file_name":"cansat.py","file_ext":"py","file_size_in_byte":16010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38698420167","text":"import numpy as np\r\nfrom scipy.ndimage import affine_transform\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom keras import backend as K\r\nfrom keras.utils import Sequence\r\nfrom keras.models import Model, load_model\r\nfrom pandas import read_csv\r\nfrom PIL.ImageDraw import Draw\r\nfrom PIL import Image as pil_image\r\nfrom os.path import isfile\r\nimport pickle\r\nfrom tqdm import tqdm\r\n\r\nimg_shape = (128, 128, 1)\r\nanisotropy = 2.15\r\n\r\n\r\ndef expand_path(p):\r\n if isfile('../data-train/' + p):\r\n return '../data-train/' + p\r\n if isfile('../data-test/' + p):\r\n return '../data-test/' + p\r\n return p\r\n\r\n\r\n# Transform coordinates according to the provided affine transformation\r\ndef coord_transform(list, trans):\r\n result = []\r\n for x, y in list:\r\n y, x, _ = trans.dot([y, x, 1]).astype(np.int)\r\n result.append((x, y))\r\n return result\r\n\r\n\r\ndef read_raw_image(p):\r\n return pil_image.open(expand_path(p))\r\n\r\n\r\ndef read_array(p):\r\n img = read_raw_image(p).convert('L')\r\n return img_to_array(img)\r\n\r\n\r\n# Apply an affine transformation to an image represented as a numpy array.\r\ndef transform_img(x, affine):\r\n matrix = affine[:2, :2]\r\n offset = affine[:2, 2]\r\n x = np.moveaxis(x, -1, 0)\r\n channels = [affine_transform(channel, matrix, offset, output_shape=img_shape[:-1], order=1,\r\n mode='constant', cval=np.average(channel)) for channel in x]\r\n return np.moveaxis(np.stack(channels, axis=0), 0, -1)\r\n\r\n\r\n# Compute the coordinate transformation required to center the pictures, padding as required.\r\ndef center_transform(affine, input_shape):\r\n hi, wi = float(input_shape[0]), float(input_shape[1])\r\n ho, wo = float(img_shape[0]), float(img_shape[1])\r\n top, left, bottom, right = 0, 0, hi, wi\r\n if wi / hi / anisotropy < wo / ho: # input image too narrow, extend width\r\n w = hi * wo / ho * anisotropy\r\n left = (wi - w) / 2\r\n right = left + w\r\n else: # input image too wide, extend height\r\n h = wi * ho / wo / anisotropy\r\n top = (hi - h) / 2\r\n bottom = top + h\r\n center_matrix = np.array([[1, 0, -ho / 2], [0, 1, -wo / 2], [0, 0, 1]])\r\n scale_matrix = np.array([[(bottom - top) / ho, 0, 0], [0, (right - left) / wo, 0], [0, 0, 1]])\r\n decenter_matrix = np.array([[1, 0, hi / 2], [0, 1, wi / 2], 
[0, 0, 1]])\r\n return np.dot(np.dot(decenter_matrix, scale_matrix), np.dot(affine, center_matrix))\r\n\r\n\r\ndef read_for_validation(p):\r\n x = read_array(p)\r\n t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\r\n t = center_transform(t, x.shape)\r\n x = transform_img(x, t)\r\n x -= np.mean(x, keepdims=True)\r\n x /= np.std(x, keepdims=True) + K.epsilon()\r\n return x, t\r\n\r\n\r\ndef generate_bbox(to_do, model):\r\n print(len(to_do))\r\n ret = {}\r\n for p in tqdm(to_do):\r\n img, trans = read_for_validation(p)\r\n a = np.expand_dims(img, axis=0)\r\n x0, y0, x1, y1 = model.predict(a).squeeze()\r\n (u0, v0), (u1, v1) = coord_transform([(x0, y0), (x1, y1)], trans)\r\n ret[p] = (u0, v0, u1, v1)\r\n return ret\r\n\r\n\r\ndef preview(to_do, dic):\r\n for p in to_do:\r\n img = read_raw_image(p).convert('RGB')\r\n draw = Draw(img)\r\n x0, y0, x1, y1 = dic[p]\r\n draw.line([(x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)], fill='yellow', width=6)\r\n img.save(p)\r\n\r\n\r\nif __name__ == '__main__':\r\n model = load_model('cropping.model')\r\n model.summary()\r\n to_do = [p for _, p, _ in read_csv('../data-raw/train.csv').to_records()]\r\n to_do += [p for _, p, _ in read_csv('../data-raw/sample_submission.csv').to_records()]\r\n dic = generate_bbox(to_do, model)\r\n with open('bbox.pickle', 'wb') as fout:\r\n pickle.dump(dic, fout)\r\n # preview(to_do[:25], dic)\r\n # print(dic)\r\n","repo_name":"maye9999/Humpback-Whale-Identification","sub_path":"maye/bbox.py","file_name":"bbox.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21589839935","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\n\nfrom datetime import datetime\n\nAUTHOR = u\"Project Fondue Team\"\nSITENAME = u\"L'Alpiniste\"\nSITEURL = 'http://blog.projectfondue.com:9901'\nSITESUBTITLE = u\"The blog of the Project Fondue Team\"\n\nDISQUS_SITENAME = \"projectfondue\"\nTIMEZONE = 'Europe/London'\n\nDEFAULT_LANG = 'en'\n\n# Blogroll\nLINKS = (('Stuart Colville', 'http://muffinresearch.co.uk/'),\n ('Cyril Doussin', 'cyril.doussin.name'),\n )\n\n# Social widget\nSOCIAL = (('You can add links in your config file', '#'),\n ('Another social link', '#'),)\n\nDEFAULT_PAGINATION = 10\nTAG_CLOUD_STEPS = 10\nTAG_CLOUD_MAX_ITEMS = 20\n\nTHEME = 'theme'\nTHEME_STATIC_PATHS = (['static', 'theme/static'])\n\nTWITTER_USERNAME = \"projectfondue\"\nLATEST_POST_LIMIT = 5\n\nYEAR = datetime.now().year\n\nDEFAULT_PAGINATION = 5\nRELATIVE_URLS = False\n\nARTICLE_URL = 'archives/{date:%Y}/{date:%m}/{date:%d}/{slug}'\nARTICLE_SAVE_AS = 'archives/{date:%Y}/{date:%m}/{date:%d}/{slug}.html'\nARTICLE_LANG_URL = 'archives/{date:%Y}/{date:%m}/{date:%d}/{slug}-{lang}'\nARTICLE_LANG_SAVE_AS = 'archives/{date:%Y}/{date:%m}/{date:%d}/{slug}-{lang}.html'\n\nPAGE_URL = 'pages/{slug}'\nPAGE_SAVE_AS = 'pages/{slug}.html'\nPAGE_LANG_URL = 'pages/{slug}-{lang}'\nPAGE_LANG_SAVE_AS = 'pages/{slug}-{lang}.html'\n\nPAGINATION_URL = '{name}-{page_num}'\nPAGINATION_SAVE_AS = '{name}-{page_num}.html'\n\nAUTHOR_URL = 'author/{name}'\nAUTHOR_SAVE_AS = 'author/{name}.html'\n\nCATEGORY_URL = 'category/{name}'\nCATEGORY_SAVE_AS = False\nTAG_URL = 'tag/{name}'\nTAG_SAVE_AS = 'tag/{name}.html'\n\n# DIRECT TEMPLATES\nPAGINATED_DIRECT_TEMPLATES = ('index', 'archives', 'authors', 'author')\nDIRECT_TEMPLATES = ('index', 'tags', 'archives')\n\nARCHIVES_SAVE_AS = 
'archives/index.html'\n","repo_name":"project-fondue/blog","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74769098814","text":"import unittest\nfrom abc import ABC, abstractmethod\nfrom collections.abc import Iterable, Iterator, Sequence\nfrom dataclasses import dataclass, field\nfrom functools import partial\nfrom itertools import chain\nfrom typing import Any, Deque, Generic, Optional, TypeVar, Union\n\nimport casadi as cs\nimport numpy as np\nfrom csnlp import Nlp, Solution\nfrom csnlp.util.math import quad_form\nfrom csnlp.wrappers import Mpc\nfrom gymnasium.wrappers import TimeLimit\nfrom scipy.linalg import cho_solve\n\nfrom mpcrl import (\n ExperienceReplay,\n LearnableParameter,\n LearnableParametersDict,\n LstdQLearningAgent,\n MpcSolverError,\n UpdateError,\n)\nfrom mpcrl import exploration as E\nfrom mpcrl import schedulers as S\nfrom mpcrl.util.math import cholesky_added_multiple_identities\nfrom mpcrl.wrappers.agents import RecordUpdates\n\n# ==================================================================================== #\n# ---------------------------------- START OLD CODE ---------------------------------- #\n# ==================================================================================== #\n\n\n@dataclass\nclass QuadRotorEnvConfig:\n T: float = 0.1\n g: float = 9.81\n thrust_coeff: float = 1.4\n pitch_d: float = 10\n pitch_dd: float = 8\n pitch_gain: float = 10\n roll_d: float = 10\n roll_dd: float = 8\n roll_gain: float = 10\n winds: dict[float, float] = field(default_factory=lambda: {1: 1.0, 2: 0.7, 3: 0.85})\n x0: np.ndarray = field(\n default_factory=lambda: np.array([0, 0, 3.5, 0, 0, 0, 0, 0, 0, 0])\n )\n xf: np.ndarray = field(\n default_factory=lambda: np.array([3, 3, 0.2, 0, 0, 0, 0, 0, 0, 0])\n )\n soft_constraints: bool = True\n x_bounds: np.ndarray = field(\n default_factory=lambda: np.array(\n [\n [-0.5, 3.5],\n [-0.5, 3.5],\n [-0.175, 4],\n [-np.inf, np.inf],\n [-np.inf, np.inf],\n [-np.inf, np.inf],\n [np.deg2rad(-30), np.deg2rad(30)],\n [np.deg2rad(-30), np.deg2rad(30)],\n [-np.inf, np.inf],\n [-np.inf, np.inf],\n ]\n )\n )\n u_bounds: np.ndarray = field(\n default_factory=lambda: np.array(\n [[-np.pi, np.pi], [-np.pi, np.pi], [0, 2 * 9.81]]\n )\n )\n\n\nclass QuadRotorEnv:\n spec: dict = None\n nx: int = 10\n nu: int = 3\n\n def __init__(self, config: Union[dict, QuadRotorEnvConfig] = None) -> None:\n config = init_config(config, QuadRotorEnvConfig)\n self.config = config\n\n # create dynamics matrices\n self._A, self._B, self._C, self._e = self.get_dynamics(\n g=config.g,\n thrust_coeff=config.thrust_coeff,\n pitch_d=config.pitch_d,\n pitch_dd=config.pitch_dd,\n pitch_gain=config.pitch_gain,\n roll_d=config.roll_d,\n roll_dd=config.roll_dd,\n roll_gain=config.roll_gain,\n winds=config.winds,\n )\n # weight for positional, control action usage and violation errors\n self._Wx = np.ones(self.nx)\n self._Wu = np.ones(self.nu)\n self._Wv = np.array([1e2, 1e2, 3e2, 3e2])\n\n @property\n def A(self) -> np.ndarray:\n return self._A.copy()\n\n @property\n def B(self) -> np.ndarray:\n return self._B.copy()\n\n @property\n def C(self) -> np.ndarray:\n return self._C.copy()\n\n @property\n def e(self) -> np.ndarray:\n return self._e.copy()\n\n @property\n def x(self) -> np.ndarray:\n return self._x.copy()\n\n @x.setter\n def x(self, val: np.ndarray) -> None:\n self._x = val.copy()\n\n def 
position_error(self, x: np.ndarray) -> float:\n return (np.square(x - self.config.xf) * self._Wx).sum(axis=-1)\n\n def control_usage(self, u: np.ndarray) -> float:\n return (np.square(u) * self._Wu).sum(axis=-1)\n\n def constraint_violations(self, x: np.ndarray, u: np.ndarray) -> float:\n W = self._Wv\n return (\n W[0] * np.maximum(0, self.config.x_bounds[:, 0] - x).sum(axis=-1)\n + W[1] * np.maximum(0, x - self.config.x_bounds[:, 1]).sum(axis=-1)\n + W[2] * np.maximum(0, self.config.u_bounds[:, 0] - u).sum(axis=-1)\n + W[3] * np.maximum(0, u - self.config.u_bounds[:, 1]).sum(axis=-1)\n )\n\n def phi(self, alt: Union[float, np.ndarray]) -> np.ndarray:\n if isinstance(alt, np.ndarray):\n alt = alt.squeeze()\n assert alt.ndim == 1, \"Altitudes must be a vector\"\n\n return np.vstack([np.exp(-np.square(alt - h)) for h in self.config.winds])\n\n def reset(\n self,\n seed: int = None,\n x0: np.ndarray = None,\n xf: np.ndarray = None,\n options: Optional[dict[str, Any]] = None,\n ) -> tuple[np.ndarray, dict[str, Any]]:\n self.np_random = np.random.default_rng(seed)\n if x0 is None:\n x0 = self.config.x0\n if xf is None:\n xf = self.config.xf\n self.x = x0\n self.config.x0 = x0\n self.config.xf = xf\n self._n_within_termination = 0\n return self.x, {}\n\n def step(self, u: np.ndarray) -> tuple[np.ndarray, float, bool, bool, dict]:\n u = np.asarray(u).squeeze() # in case a row or col was passed\n wind = (\n self._C\n @ self.phi(self.x[2])\n * self.np_random.uniform(\n low=[0, 0, -1, 0, 0, 0, -1, -1, 0, 0],\n high=[1, 1, 0, 0, 0, 0, 1, 1, 0, 0],\n ).reshape(self.nx, 1)\n )\n self.x = (\n self._A @ self.x.reshape((-1, 1))\n + self._B @ u.reshape((-1, 1))\n + self._e\n + wind\n ).flatten()\n error = self.position_error(self.x)\n usage = self.control_usage(u)\n violations = self.constraint_violations(self.x, u)\n cost = float(error + usage + violations)\n return self.x, cost, False, False, {\"error\": error}\n\n def render(self):\n raise NotImplementedError(\"Render method unavailable.\")\n\n def get_dynamics(\n self,\n g: Union[float, cs.SX],\n thrust_coeff: Union[float, cs.SX],\n pitch_d: Union[float, cs.SX],\n pitch_dd: Union[float, cs.SX],\n pitch_gain: Union[float, cs.SX],\n roll_d: Union[float, cs.SX],\n roll_dd: Union[float, cs.SX],\n roll_gain: Union[float, cs.SX],\n winds: dict[float, float] = None,\n ) -> Union[\n tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],\n tuple[cs.SX, cs.SX, cs.SX],\n ]:\n T = self.config.T\n is_casadi = any(\n isinstance(o, (cs.SX, cs.MX, cs.DM))\n for o in [\n g,\n thrust_coeff,\n pitch_d,\n pitch_dd,\n pitch_gain,\n roll_d,\n roll_dd,\n roll_gain,\n ]\n )\n if is_casadi:\n diag = lambda o: cs.diag(cs.vertcat(*o)) # noqa: E731\n block = cs.blockcat\n else:\n diag = np.diag\n block = np.block\n assert winds is not None, \"Winds are required to compute matrix C.\"\n nw = len(winds)\n wind_mag = np.array(list(winds.values()))\n A = T * block(\n [\n [np.zeros((3, 3)), np.eye(3), np.zeros((3, 4))],\n [np.zeros((2, 6)), np.eye(2) * g, np.zeros((2, 2))],\n [np.zeros((1, 10))],\n [np.zeros((2, 6)), -diag((pitch_d, roll_d)), np.eye(2)],\n [np.zeros((2, 6)), -diag((pitch_dd, roll_dd)), np.zeros((2, 2))],\n ]\n ) + np.eye(10)\n B = T * block(\n [\n [np.zeros((5, 3))],\n [0, 0, thrust_coeff],\n [np.zeros((2, 3))],\n [pitch_gain, 0, 0],\n [0, roll_gain, 0],\n ]\n )\n if not is_casadi:\n C = T * block(\n [\n [wind_mag],\n [wind_mag],\n [wind_mag],\n [np.zeros((3, nw))],\n [wind_mag],\n [wind_mag],\n [np.zeros((2, nw))],\n ]\n )\n e = block([[np.zeros((5, 1))], [-T * g], 
[np.zeros((4, 1))]])\n return (A, B, e) if is_casadi else (A, B, C, e)\n\n\n@dataclass(frozen=True)\nclass QuadRotorSolution:\n f: float\n vars: dict[str, cs.SX]\n vals: dict[str, np.ndarray]\n stats: dict[str, Any]\n get_value: partial\n\n @property\n def status(self) -> str:\n return self.stats[\"return_status\"]\n\n @property\n def success(self) -> bool:\n return self.stats[\"success\"]\n\n def value(self, x: cs.SX) -> np.ndarray:\n return self.get_value(x)\n\n\nclass GenericMPC:\n def __init__(self, name: str = None) -> None:\n self.name = f\"MPC{np.random.random()}\" if name is None else name\n self.f: cs.SX = None # objective\n self.vars: dict[str, cs.SX] = {}\n self.pars: dict[str, cs.SX] = {}\n self.cons: dict[str, cs.SX] = {}\n self.p = cs.SX()\n self.x, self.lbx, self.ubx = cs.SX(), np.array([]), np.array([])\n self.lam_lbx, self.lam_ubx = cs.SX(), cs.SX()\n self.g, self.lbg, self.ubg = cs.SX(), np.array([]), np.array([])\n self.lam_g = cs.SX()\n self.h, self.lbh, self.ubh = cs.SX(), np.array([]), np.array([])\n self.lam_h = cs.SX()\n self.solver: cs.Function = None\n self.opts: dict = None\n\n @property\n def ng(self) -> int:\n return self.g.shape[0]\n\n def add_par(self, name: str, *dims: int) -> cs.SX:\n assert name not in self.pars, f\"Parameter {name} already exists.\"\n par = cs.SX.sym(name, *dims)\n self.pars[name] = par\n self.p = cs.vertcat(self.p, cs.vec(par))\n return par\n\n def add_var(\n self,\n name: str,\n *dims: int,\n lb: np.ndarray = -np.inf,\n ub: np.ndarray = np.inf,\n ) -> tuple[cs.SX, cs.SX, cs.SX]:\n assert name not in self.vars, f\"Variable {name} already exists.\"\n lb, ub = np.broadcast_to(lb, dims), np.broadcast_to(ub, dims)\n assert np.all(lb < ub), \"Improper variable bounds.\"\n\n var = cs.SX.sym(name, *dims)\n self.vars[name] = var\n self.x = cs.vertcat(self.x, cs.vec(var))\n self.lbx = np.concatenate((self.lbx, cs.vec(lb).full().flatten()))\n self.ubx = np.concatenate((self.ubx, cs.vec(ub).full().flatten()))\n\n # create also the multiplier associated to the variable\n lam_lb = cs.SX.sym(f\"lam_lb_{name}\", *dims)\n self.lam_lbx = cs.vertcat(self.lam_lbx, cs.vec(lam_lb))\n lam_ub = cs.SX.sym(f\"lam_ub_{name}\", *dims)\n self.lam_ubx = cs.vertcat(self.lam_ubx, cs.vec(lam_ub))\n return var, lam_lb, lam_ub\n\n def add_con(\n self, name: str, expr1: cs.SX, op: str, expr2: cs.SX\n ) -> tuple[cs.SX, cs.SX]:\n assert name not in self.cons, f\"Constraint {name} already exists.\"\n expr = expr1 - expr2\n dims = expr.shape\n if op in {\"=\", \"==\"}:\n is_eq = True\n lb, ub = np.zeros(dims), np.zeros(dims)\n elif op in {\"<\", \"<=\"}:\n is_eq = False\n lb, ub = np.full(dims, -np.inf), np.zeros(dims)\n elif op in {\">\", \">=\"}:\n is_eq = False\n expr = -expr\n lb, ub = np.full(dims, -np.inf), np.zeros(dims)\n else:\n raise ValueError(f\"Unrecognized operator {op}.\")\n expr = cs.simplify(expr)\n lb, ub = cs.vec(lb).full().flatten(), cs.vec(ub).full().flatten()\n self.cons[name] = expr\n group = \"g\" if is_eq else \"h\"\n setattr(self, group, cs.vertcat(getattr(self, group), cs.vec(expr)))\n setattr(self, f\"lb{group}\", np.concatenate((getattr(self, f\"lb{group}\"), lb)))\n setattr(self, f\"ub{group}\", np.concatenate((getattr(self, f\"ub{group}\"), ub)))\n lam = cs.SX.sym(f\"lam_{group}_{name}\", *dims)\n setattr(\n self, f\"lam_{group}\", cs.vertcat(getattr(self, f\"lam_{group}\"), cs.vec(lam))\n )\n return expr, lam\n\n def minimize(self, objective: cs.SX) -> None:\n self.f = objective\n\n def init_solver(self, opts: dict) -> None:\n g = 
cs.vertcat(self.g, self.h)\n nlp = {\"x\": self.x, \"p\": self.p, \"g\": g, \"f\": self.f}\n self.solver = cs.nlpsol(f\"nlpsol_{self.name}\", \"ipopt\", nlp, opts)\n self.opts = opts\n\n def solve(\n self, pars: dict[str, np.ndarray], vals0: dict[str, np.ndarray] = None\n ) -> QuadRotorSolution:\n assert self.solver is not None, \"Solver uninitialized.\"\n assert len(self.pars.keys() - pars.keys()) == 0, (\n \"Trying to solve the MPC with unspecified parameters: \"\n + \", \".join(self.pars.keys() - pars.keys())\n + \".\"\n )\n p = subsevalf(self.p, self.pars, pars)\n kwargs = {\n \"p\": p,\n \"lbx\": self.lbx,\n \"ubx\": self.ubx,\n \"lbg\": np.concatenate((self.lbg, self.lbh)),\n \"ubg\": np.concatenate((self.ubg, self.ubh)),\n }\n if vals0 is not None:\n kwargs[\"x0\"] = np.clip(\n subsevalf(self.x, self.vars, vals0), self.lbx, self.ubx\n )\n sol: dict[str, cs.DM] = self.solver(**kwargs)\n lam_lbx = -np.minimum(sol[\"lam_x\"], 0)\n lam_ubx = np.maximum(sol[\"lam_x\"], 0)\n lam_g = sol[\"lam_g\"][: self.ng, :]\n lam_h = sol[\"lam_g\"][self.ng :, :]\n S = cs.vertcat(\n self.p, self.x, self.lam_g, self.lam_h, self.lam_lbx, self.lam_ubx\n )\n D = cs.vertcat(p, sol[\"x\"], lam_g, lam_h, lam_lbx, lam_ubx)\n get_value = partial(subsevalf, old=S, new=D)\n vals = {name: get_value(var) for name, var in self.vars.items()}\n return QuadRotorSolution(\n f=float(sol[\"f\"]),\n vars=self.vars.copy(),\n vals=vals,\n get_value=get_value,\n stats=self.solver.stats().copy(),\n )\n\n def __str__(self) -> str:\n msg = \"not initialized\" if self.solver is None else \"initialized\"\n C = len(self.cons)\n return (\n f\"{type(self).__name__} {{\\n\"\n f\" name: {self.name}\\n\"\n f\" #variables: {len(self.vars)} (nx={self.nx})\\n\"\n f\" #parameters: {len(self.pars)} (np={self.np})\\n\"\n f\" #constraints: {C} (ng={self.ng}, nh={self.nh})\\n\"\n f\" CasADi solver {msg}.\\n}}\"\n )\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}: {self.name}\"\n\n\ndef subsevalf(\n expr: cs.SX,\n old: Union[cs.SX, dict[str, cs.SX], list[cs.SX], tuple[cs.SX]],\n new: Union[cs.SX, dict[str, cs.SX], list[cs.SX], tuple[cs.SX]],\n eval: bool = True,\n) -> Union[cs.SX, np.ndarray]:\n if isinstance(old, dict):\n for name, o in old.items():\n expr = cs.substitute(expr, o, new[name])\n elif isinstance(old, (tuple, list)):\n for o, n in zip(old, new):\n expr = cs.substitute(expr, o, n)\n else:\n expr = cs.substitute(expr, old, new)\n\n if eval:\n expr = cs.evalf(expr).full().squeeze()\n return expr\n\n\nConfigType = TypeVar(\"ConfigType\")\n\n\ndef init_config(\n config: Optional[Union[ConfigType, dict]], cls: type[ConfigType]\n) -> ConfigType:\n if config is None:\n return cls()\n if isinstance(config, cls):\n return config\n if isinstance(config, dict):\n if not hasattr(cls, \"__dataclass_fields__\"):\n raise ValueError(\"Configiration class must be a dataclass.\")\n keys = cls.__dataclass_fields__.keys()\n return cls(**{k: config[k] for k in keys if k in config})\n raise ValueError(\n \"Invalid configuration type; expected None, dict or \"\n f\"a dataclass, got {cls} instead.\"\n )\n\n\n@dataclass\nclass QuadRotorMPCConfig:\n N: int = 10\n solver_opts: dict = field(\n default_factory=lambda: {\n \"expand\": True,\n \"print_time\": False,\n \"ipopt\": {\n \"max_iter\": 500,\n \"tol\": 1e-6,\n \"barrier_tol_factor\": 1,\n \"sb\": \"yes\",\n # for debugging\n \"print_level\": 0,\n \"print_user_options\": \"no\",\n \"print_options_documentation\": \"no\",\n },\n }\n )\n\n\nclass QuadRotorMPC(GenericMPC):\n def __init__(\n 
self,\n env: QuadRotorEnv,\n config: Union[dict, QuadRotorMPCConfig] = None,\n mpctype: str = \"V\",\n ) -> None:\n assert mpctype in {\n \"V\",\n \"Q\",\n }, \"MPC must be either V (state value func) or Q (action value func)\"\n super().__init__(name=mpctype)\n self.config = init_config(config, QuadRotorMPCConfig)\n N = self.config.N\n\n # ======================= #\n # Variable and Parameters #\n # ======================= #\n lbx, ubx = env.config.x_bounds[:, 0], env.config.x_bounds[:, 1]\n not_red = ~(np.isneginf(lbx) & np.isposinf(ubx))\n not_red_idx = np.where(not_red)[0]\n lbx, ubx = lbx[not_red].reshape(-1, 1), ubx[not_red].reshape(-1, 1)\n nx, nu = env.nx, env.nu\n x, _, _ = self.add_var(\"x\", nx, N)\n u, _, _ = self.add_var(\"u\", nu, N)\n ns = not_red_idx.size + nu\n s, _, _ = self.add_var(\"slack\", ns * N - not_red_idx.size, 1, lb=0)\n sx: cs.SX = s[: not_red_idx.size * (N - 1)].reshape((-1, N - 1))\n su: cs.SX = s[-nu * N :].reshape((-1, N))\n\n # 2) create model parameters\n for name in (\n \"g\",\n \"thrust_coeff\",\n \"pitch_d\",\n \"pitch_dd\",\n \"pitch_gain\",\n \"roll_d\",\n \"roll_dd\",\n \"roll_gain\",\n ):\n self.add_par(name, 1, 1)\n\n # =========== #\n # Constraints #\n # =========== #\n\n # 1) constraint on initial conditions\n x0 = self.add_par(\"x0\", env.nx, 1)\n x_ = cs.horzcat(x0, x)\n\n # 2) constraints on dynamics\n A, B, e = env.get_dynamics(\n g=self.pars[\"g\"],\n thrust_coeff=self.pars[\"thrust_coeff\"],\n pitch_d=self.pars[\"pitch_d\"],\n pitch_dd=self.pars[\"pitch_dd\"],\n pitch_gain=self.pars[\"pitch_gain\"],\n roll_d=self.pars[\"roll_d\"],\n roll_dd=self.pars[\"roll_dd\"],\n roll_gain=self.pars[\"roll_gain\"],\n )\n self.add_con(\"dyn\", x_[:, 1:], \"==\", A @ x_[:, :-1] + B @ u + e)\n\n # 3) constraint on state (soft, backed off, without infinity in g, and\n # removing redundant entries, no constraint on first state)\n # constraint backoff parameter and bounds\n bo = self.add_par(\"backoff\", 1, 1)\n\n # set the state constraints as\n # - soft-backedoff minimum constraint: (1+back)*lb - slack <= x\n # - soft-backedoff maximum constraint: x <= (1-back)*ub + slack\n # NOTE: there is a mistake here in the old code, since we are excluding the\n # first state from constraints which is actually the second.\n self.add_con(\"x_min\", (1 + bo) * lbx - sx, \"<=\", x[not_red_idx, 1:])\n self.add_con(\"x_max\", x[not_red_idx, 1:], \"<=\", (1 - bo) * ubx + sx)\n self.add_con(\"u_min\", env.config.u_bounds[:, 0] - su, \"<=\", u)\n self.add_con(\"u_max\", u, \"<=\", env.config.u_bounds[:, 1] + su)\n\n # ========= #\n # Objective #\n # ========= #\n J = 0 # (no initial state cost not required since it is not economic)\n s = cs.blockcat([[cs.SX.zeros(sx.size1(), 1), sx], [su]])\n xf = self.add_par(\"xf\", nx, 1)\n uf = cs.vertcat(0, 0, self.pars[\"g\"])\n w_x = self.add_par(\"w_x\", nx, 1) # weights for stage/final state\n w_u = self.add_par(\"w_u\", nu, 1) # weights for stage/final control\n w_s = self.add_par(\"w_s\", ns, 1) # weights for stage/final slack\n J += sum(\n (\n quad_form(w_x, x[:, k] - xf)\n + quad_form(w_u, u[:, k] - uf)\n + cs.dot(w_s, s[:, k])\n )\n for k in range(N - 1)\n )\n J += (\n quad_form(w_x, x[:, -1] - xf)\n + quad_form(w_u, u[:, -1] - uf)\n + cs.dot(w_s, s[:, -1])\n )\n self.minimize(J)\n\n # ====== #\n # Others #\n # ====== #\n if mpctype == \"Q\":\n u0 = self.add_par(\"u0\", nu, 1)\n self.add_con(\"init_action\", u[:, 0], \"==\", u0)\n else:\n perturbation = self.add_par(\"perturbation\", nu, 1)\n self.f += cs.dot(perturbation, u[:, 
0])\n self.init_solver(self.config.solver_opts)\n\n\nMPCType = TypeVar(\"MPCType\", bound=GenericMPC)\n\n\nclass DifferentiableMPC(Generic[MPCType]):\n def __init__(self, mpc: MPCType) -> None:\n self._mpc = mpc\n\n @property\n def mpc(self) -> MPCType:\n return self._mpc\n\n @property\n def _non_redundant_x_bound_indices(self) -> tuple[np.ndarray, np.ndarray]:\n return (\n np.where(self._mpc.lbx != -np.inf)[0],\n np.where(self._mpc.ubx != np.inf)[0],\n )\n\n @property\n def lagrangian(self) -> cs.SX:\n idx_lbx, idx_ubx = self._non_redundant_x_bound_indices\n h_lbx = self._mpc.lbx[idx_lbx, None] - self._mpc.x[idx_lbx]\n h_ubx = self._mpc.x[idx_ubx] - self._mpc.ubx[idx_ubx, None]\n return (\n self._mpc.f\n + cs.dot(self._mpc.lam_g, self._mpc.g)\n + cs.dot(self._mpc.lam_h, self._mpc.h)\n + cs.dot(self._mpc.lam_lbx[idx_lbx], h_lbx)\n + cs.dot(self._mpc.lam_ubx[idx_ubx], h_ubx)\n )\n\n def __getattr__(self, name) -> Any:\n return getattr(self._mpc, name)\n\n\nT = TypeVar(\"T\")\n\n\nclass ReplayMemory(Deque[T]):\n def __init__(\n self, iterable: Iterable[T] = (), maxlen: int = None, seed: int = None\n ) -> None:\n super().__init__(iterable, maxlen=maxlen)\n self.np_random = np.random.default_rng(seed)\n\n def sample(\n self, n: Union[int, float], include_last_n: Union[int, float]\n ) -> Iterable[T]:\n length = len(self)\n if isinstance(n, float):\n n = int(self.maxlen * n)\n n = np.clip(n, min(1, length), length)\n if isinstance(include_last_n, float):\n include_last_n = int(n * include_last_n)\n include_last_n = np.clip(include_last_n, 0, n)\n last_n = range(length - include_last_n, length)\n sampled = self.np_random.choice(\n range(length - include_last_n), n - include_last_n, replace=False\n )\n yield from (self[i] for i in chain(last_n, sampled))\n\n\n@dataclass\nclass RLParameter:\n name: str\n value: np.ndarray\n bounds: np.ndarray\n symV: cs.SX\n symQ: cs.SX\n\n @property\n def size(self) -> int:\n return self.symV.shape[0] # since rl pars are all column vectors\n\n def __post_init__(self) -> None:\n shape = self.symV.shape\n assert shape == self.symQ.shape, (\n f\"Parameter {self.name} has different shapes in \"\n f\"Q ({self.symQ.shape}) and V ({self.symV.shape}).\"\n )\n assert self.symV.is_column(), f\"Parameter {self.name} must be a column vector.\"\n self.bounds = np.broadcast_to(self.bounds, (shape[0], 2))\n self.update_value(self.value)\n\n def update_value(self, new_val: np.ndarray) -> None:\n \"\"\"Updates the parameter's current value to the new one.\"\"\"\n new_val = np.broadcast_to(new_val, self.bounds.shape[0])\n assert (\n (self.bounds[:, 0] <= new_val) | np.isclose(new_val, self.bounds[:, 0])\n ).all() and (\n (new_val <= self.bounds[:, 1]) | np.isclose(new_val, self.bounds[:, 1])\n ).all(), \"Parameter value outside bounds.\"\n self.value = np.clip(new_val, self.bounds[:, 0], self.bounds[:, 1])\n\n\nclass RLParameterCollection(Sequence[RLParameter]):\n \"\"\"Collection of learnable RL parameters, which can be accessed by string as a\n dictionary or by index as a list.\"\"\"\n\n def __init__(self, *parameters: RLParameter) -> None:\n \"\"\"Instantiate the collection from another iterable, if provided.\"\"\"\n self._list: list[RLParameter] = []\n self._dict: dict[str, RLParameter] = {}\n for parameter in parameters:\n self._list.append(parameter)\n self._dict[parameter.name] = parameter\n\n @property\n def n_theta(self) -> int:\n return sum(self.sizes())\n\n @property\n def as_dict(self) -> dict[str, RLParameter]:\n return self._dict\n\n def values(self, as_dict: bool = 
False) -> Union[np.ndarray, dict[str, np.ndarray]]:\n if as_dict:\n return {name: p.value for name, p in self.items()}\n return np.concatenate([p.value for p in self._list])\n\n def bounds(self, as_dict: bool = False) -> Union[np.ndarray, dict[str, np.ndarray]]:\n if as_dict:\n return {name: p.bounds for name, p in self.items()}\n return np.row_stack([p.bounds for p in self._list])\n\n def symQ(self, as_dict: bool = False) -> Union[cs.SX, dict[str, cs.SX]]:\n if as_dict:\n return {name: p.symQ for name, p in self.items()}\n return cs.vertcat(*(p.symQ for p in self._list))\n\n def sizes(self, as_dict: bool = False) -> Union[list[int], dict[str, int]]:\n if as_dict:\n return {p.name: p.size for p in self._list}\n return [p.size for p in self._list]\n\n def update_values(\n self, new_vals: Union[np.ndarray, list[np.ndarray], dict[str, np.ndarray]]\n ) -> None:\n if isinstance(new_vals, np.ndarray):\n new_vals = np.split(new_vals, np.cumsum(self.sizes())[:-1])\n for p, val in zip(self._list, new_vals):\n p.update_value(val)\n elif isinstance(new_vals, list):\n for p, val in zip(self._list, new_vals):\n p.update_value(val)\n elif isinstance(new_vals, dict):\n for n in self._dict.keys():\n self._dict[n].update_value(new_vals[n])\n\n def items(self) -> Iterable[tuple[str, RLParameter]]:\n return self._dict.items()\n\n def __getitem__(\n self, index: Union[str, Iterable[str], int, slice, Iterable[int]]\n ) -> Union[RLParameter, list[RLParameter]]:\n if isinstance(index, str):\n return self._dict[index]\n if isinstance(index, (int, slice)):\n return self._list[index]\n if isinstance(index, Iterable):\n return [self._list[i] for i in index]\n\n def __iter__(self) -> Iterator[RLParameter]:\n return iter(self._list)\n\n def __next__(self) -> RLParameter:\n return next(self._list)\n\n def __len__(self) -> int:\n return len(self._list)\n\n\nclass QuadRotorBaseAgent(ABC):\n def __init__(\n self,\n env: QuadRotorEnv,\n agentname: str = None,\n agent_config: Union[dict[str, Any], Any] = None,\n fixed_pars: dict[str, np.ndarray] = None,\n mpc_config: Union[dict, QuadRotorMPCConfig] = None,\n seed: int = None,\n ) -> None:\n super().__init__()\n self.name = \"Agent\" if agentname is None else agentname\n self.env = env\n self.config = (\n init_config(agent_config, self.config_cls)\n if hasattr(self, \"config_cls\")\n else None\n )\n self.fixed_pars = {} if fixed_pars is None else fixed_pars\n self.seed = seed\n self.np_random = np.random.default_rng(seed)\n self.perturbation_chance = 0.0\n self.perturbation_strength = 0.0\n self.last_solution: Solution = None\n self.Q = QuadRotorMPC(env, config=mpc_config, mpctype=\"Q\")\n self.V = QuadRotorMPC(env, config=mpc_config, mpctype=\"V\")\n\n @property\n def unwrapped(self) -> \"QuadRotorBaseAgent\":\n return self\n\n def reset(self) -> None:\n self.last_solution = None\n self.Q.failures = 0\n self.V.failures = 0\n\n def solve_mpc(\n self,\n type: str,\n state: np.ndarray = None,\n sol0: dict[str, np.ndarray] = None,\n ) -> Solution:\n mpc: QuadRotorMPC = getattr(self, type)\n if state is None:\n state = self.env.x\n pars = self.fixed_pars.copy()\n pars[\"x0\"] = state\n pars.update(self._merge_mpc_pars_callback())\n if sol0 is None:\n if self.last_solution is None:\n g = float(pars.get(\"g\", 0))\n sol0 = {\n \"x\": np.tile(state, (mpc.vars[\"x\"].shape[1], 1)).T,\n \"u\": np.tile([0, 0, g], (mpc.vars[\"u\"].shape[1], 1)).T,\n \"slack\": 0,\n }\n else:\n sol0 = self.last_solution.vals\n self.last_solution = mpc.solve(pars, sol0)\n return self.last_solution\n\n 
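# NOTE (added sketch): `predict` below explores in one of two ways -- by writing
# noise into the MPC's "perturbation" cost term (so the solver shapes it through
# the dynamics), or by adding noise to the optimal input and clipping it back
# into the bounds. A minimal, self-contained sketch of the second variant; the
# helper name and `strength=0.1` are illustrative assumptions, not part of this
# agent's actual configuration.
import numpy as np
from typing import Optional

def perturb_action(u_opt: np.ndarray, u_bnd: np.ndarray, strength: float = 0.1,
                   rng: Optional[np.random.Generator] = None) -> np.ndarray:
    # scale the Gaussian noise by the width of each input's admissible interval
    rng = rng or np.random.default_rng()
    noise = rng.normal(scale=strength * np.diff(u_bnd, axis=1).flatten(),
                       size=u_opt.shape)
    # project the perturbed action back onto the box constraints
    return np.clip(u_opt + noise, u_bnd[:, 0], u_bnd[:, 1])

# e.g., with the quadrotor input bounds used above (pitch, roll, thrust):
# perturb_action(np.array([0.0, 0.0, 9.81]),
#                np.array([[-np.pi, np.pi], [-np.pi, np.pi], [0.0, 2 * 9.81]]))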
def predict(\n self,\n state: np.ndarray = None,\n deterministic: bool = False,\n perturb_gradient: bool = True,\n **solve_mpc_kwargs,\n ) -> tuple[np.ndarray, np.ndarray, Solution]:\n perturbation_in_dict = \"perturbation\" in self.fixed_pars\n if perturbation_in_dict:\n self.fixed_pars[\"perturbation\"] = 0\n if deterministic or self.np_random.random() > self.perturbation_chance:\n sol = self.solve_mpc(type=\"V\", state=state, **solve_mpc_kwargs)\n u_opt = sol.vals[\"u\"][:, 0]\n else:\n u_bnd = self.env.config.u_bounds\n rng = self.np_random.normal(\n scale=self.perturbation_strength * np.diff(u_bnd).flatten(),\n size=self.V.vars[\"u\"].shape[0],\n )\n if perturb_gradient:\n assert (\n perturbation_in_dict\n ), \"No parameter 'perturbation' found to perturb gradient.\"\n self.fixed_pars[\"perturbation\"] = rng\n sol = self.solve_mpc(type=\"V\", state=state, **solve_mpc_kwargs)\n u_opt = sol.vals[\"u\"][:, 0]\n if not perturb_gradient:\n u_opt = np.clip(u_opt + rng, u_bnd[:, 0], u_bnd[:, 1])\n x_next = sol.vals[\"x\"][:, 0]\n return u_opt, x_next, sol\n\n def _merge_mpc_pars_callback(self) -> dict[str, np.ndarray]:\n return {}\n\n @staticmethod\n def _make_seed_list(seed: Optional[Union[int, list[int]]], n: int) -> list[int]:\n if seed is None:\n return [None] * n\n if isinstance(seed, int):\n return [seed + i for i in range(n)]\n assert len(seed) == n, \"Seed sequence with invalid length.\"\n return seed\n\n\nclass QuadRotorBaseLearningAgent(QuadRotorBaseAgent, ABC):\n def __init__(\n self,\n *args,\n init_learnable_pars: dict[str, tuple[np.ndarray, np.ndarray]],\n **kwargs,\n ) -> None:\n super().__init__(*args, **kwargs)\n self.V = DifferentiableMPC[QuadRotorMPC](self.V)\n self.Q = DifferentiableMPC[QuadRotorMPC](self.Q)\n self._init_learnable_pars(init_learnable_pars)\n self._init_learning_rate()\n self._epoch_n = None # keeps track of epoch number just for logging\n\n @abstractmethod\n def update(self) -> np.ndarray:\n pass\n\n @abstractmethod\n def learn_one_epoch(\n self,\n n_episodes: int,\n perturbation_decay: float = 0.75,\n seed: Union[int, list[int]] = None,\n return_info: bool = True,\n ) -> Union[np.ndarray, tuple[np.ndarray, np.ndarray, dict[str, np.ndarray]]]:\n pass\n\n def learn(\n self,\n n_epochs: int,\n n_episodes: int,\n perturbation_decay: float = 0.75,\n seed: Union[int, list[int]] = None,\n throw_on_exception: bool = False,\n return_info: bool = True,\n ) -> Union[\n tuple[bool, np.ndarray],\n tuple[bool, np.ndarray, list[np.ndarray], list[dict[str, np.ndarray]]],\n ]:\n ok = True\n results = []\n seeds = iter(map(int, np.random.SeedSequence(seed).generate_state(n_epochs)))\n for e in range(n_epochs):\n self._epoch_n = e # just for logging\n try:\n results.append(\n self.learn_one_epoch(\n n_episodes=n_episodes,\n perturbation_decay=perturbation_decay,\n seed=next(seeds),\n return_info=return_info,\n )\n )\n except (MpcSolverError, UpdateError) as ex:\n if throw_on_exception:\n raise ex\n ok = False\n break\n if not results:\n return (ok, np.nan, [], []) if return_info else (ok, np.nan)\n if not return_info:\n return ok, np.stack(results, axis=0)\n returns, grads, weightss = list(zip(*results))\n return ok, np.stack(returns, axis=0), grads, weightss\n\n def _init_learnable_pars(\n self, init_pars: dict[str, tuple[np.ndarray, np.ndarray]]\n ) -> None:\n \"\"\"Initializes the learnable parameters of the MPC.\"\"\"\n required_pars = sorted(\n set(self.Q.pars)\n .intersection(self.V.pars)\n .difference({\"x0\", \"xf\"})\n .difference(self.fixed_pars)\n )\n 
self.weights = RLParameterCollection(\n *(\n RLParameter(\n name, *init_pars[name], self.V.pars[name], self.Q.pars[name]\n )\n for name in required_pars\n )\n )\n\n def _init_learning_rate(self) -> None:\n cfg = self.config\n if cfg is None or not hasattr(cfg, \"lr\"):\n return\n n_pars, n_theta = len(self.weights), self.weights.n_theta\n lr = np.asarray(cfg.lr).squeeze()\n if lr.ndim == 0:\n lr = np.full((n_theta,), lr)\n elif lr.size == n_pars and lr.size != n_theta:\n lr = np.concatenate([np.full(p.size, r) for p, r in zip(self.weights, lr)])\n assert lr.shape == (\n n_theta,\n ), \"Learning rate must have the same size as the learnable parameter vector.\"\n cfg.lr = lr\n\n def _merge_mpc_pars_callback(self) -> dict[str, np.ndarray]:\n return self.weights.values(as_dict=True)\n\n @staticmethod\n def _get_percentage_bounds(\n theta: np.ndarray,\n theta_bounds: np.ndarray,\n max_perc_update: float,\n ) -> tuple[np.ndarray, np.ndarray]:\n max_delta = np.maximum(np.abs(max_perc_update * theta), 0.1)\n lb = np.maximum(theta_bounds[:, 0], theta - max_delta)\n ub = np.minimum(theta_bounds[:, 1], theta + max_delta)\n return lb, ub\n\n\n@dataclass\nclass QuadRotorLSTDQAgentConfig:\n init_pars: dict[str, tuple[float, tuple[float, float]]] = field(\n default_factory=lambda: {\n \"g\": (9.81, (1, 40)),\n \"thrust_coeff\": (0.3, (0.1, 4)),\n \"backoff\": (0.1, (1e-3, 0.5)),\n }\n )\n fixed_pars: dict[str, float] = field(\n default_factory=lambda: {\n \"pitch_d\": 12,\n \"pitch_dd\": 5,\n \"pitch_gain\": 12,\n \"roll_d\": 13,\n \"roll_dd\": 6,\n \"roll_gain\": 8,\n \"w_x\": 1e1,\n \"w_u\": 1e0,\n \"w_s\": 1e2,\n }\n )\n replay_maxlen: float = 20\n replay_sample_size: float = 10\n replay_include_last: float = 5\n gamma: float = 1.0\n lr: float = 1e-1\n max_perc_update: float = np.inf\n\n\nclass QuadRotorLSTDQAgent(QuadRotorBaseLearningAgent):\n config_cls: type = QuadRotorLSTDQAgentConfig\n\n def __init__(\n self,\n env: QuadRotorEnv,\n agentname: str = None,\n agent_config: Union[dict, QuadRotorLSTDQAgentConfig] = None,\n mpc_config: Union[dict, QuadRotorMPCConfig] = None,\n seed: int = None,\n ) -> None:\n # create base agent\n agent_config = init_config(agent_config, self.config_cls)\n fixed_pars, init_pars = agent_config.fixed_pars, agent_config.init_pars\n fixed_pars.update({\"xf\": env.config.xf, \"perturbation\": np.nan})\n super().__init__(\n env,\n agentname=agentname,\n agent_config=agent_config,\n fixed_pars=fixed_pars,\n init_learnable_pars=init_pars,\n mpc_config=mpc_config,\n seed=seed,\n )\n self.perturbation_chance = 0.0\n self.perturbation_strength = 0.0\n self.replay_memory = ReplayMemory[list[tuple[np.ndarray, ...]]](\n maxlen=self.config.replay_maxlen, seed=seed\n )\n self._episode_buffer: list[tuple[np.ndarray, ...]] = []\n self._init_derivative_symbols()\n self._init_qp_solver()\n\n def save_transition(self, cost: float, solQ: Solution, solV: Solution) -> None:\n target = cost + self.config.gamma * solV.f\n td_err = target - solQ.f\n dQ = solQ.value(self.dQdtheta).reshape(-1, 1)\n d2Q = solQ.value(self.d2Qdtheta)\n g = -td_err * dQ\n H = dQ @ dQ.T - td_err * d2Q\n self._episode_buffer.append((g, H))\n\n def consolidate_episode_experience(self) -> None:\n if len(self._episode_buffer) == 0:\n return\n self.replay_memory.append(self._episode_buffer.copy())\n self._episode_buffer.clear()\n\n def update(self) -> np.ndarray:\n # sample the memory\n cfg: QuadRotorLSTDQAgentConfig = self.config\n sample = self.replay_memory.sample(\n cfg.replay_sample_size, cfg.replay_include_last\n )\n 
g, H = (np.mean(o, axis=0) for o in zip(*chain.from_iterable(sample)))\n R = cholesky_added_multiple_identities(H)\n p = cho_solve((R, True), g).flatten()\n theta = self.weights.values()\n lb, ub = self._get_percentage_bounds(\n theta, self.weights.bounds(), cfg.max_perc_update\n )\n sol = self._solver(p=np.concatenate((p, cfg.lr)), lbx=lb, ubx=ub)\n if not self._solver.stats()[\"success\"]:\n raise UpdateError(f\"RL update failed in epoch {self._epoch_n}.\")\n self.weights.update_values(theta + sol[\"x\"].full().flatten())\n return p\n\n def learn_one_epoch(\n self,\n n_episodes: int,\n perturbation_decay: float = 0.75,\n seed: Union[int, list[int]] = None,\n return_info: bool = False,\n ) -> Union[np.ndarray, tuple[np.ndarray, np.ndarray, dict[str, np.ndarray]]]:\n env, name, epoch_n = self.env, self.name, self._epoch_n\n returns = np.zeros(n_episodes)\n seeds = self._make_seed_list(seed, n_episodes)\n\n for e in range(n_episodes):\n state, _ = env.reset(seed=seeds[e])\n self.reset()\n truncated, terminated, t = False, False, 0\n action = self.predict(state, deterministic=False)[0]\n while not (truncated or terminated):\n # compute Q(s, a)\n self.fixed_pars.update({\"u0\": action})\n solQ = self.solve_mpc(\"Q\", state)\n # step the system\n state, r, truncated, terminated, _ = env.step(action)\n returns[e] += r\n # compute V(s+)\n action, _, solV = self.predict(state, deterministic=False)\n if solQ.success and solV.success:\n self.save_transition(r, solQ, solV)\n else:\n raise MpcSolverError(f\"{name}|{epoch_n}|{e}|{t}: mpc failed.\")\n t += 1\n self.consolidate_episode_experience()\n\n update_grad = self.update()\n self.perturbation_strength *= perturbation_decay\n self.perturbation_chance *= perturbation_decay\n return (\n (returns, update_grad, self.weights.values(as_dict=True))\n if return_info\n else returns\n )\n\n def _init_derivative_symbols(self) -> None:\n theta = self.weights.symQ()\n lagr = self.Q.lagrangian\n d2Qdtheta, dQdtheta = cs.hessian(lagr, theta)\n self.dQdtheta = cs.simplify(dQdtheta)\n self.d2Qdtheta = cs.simplify(d2Qdtheta)\n\n def _init_qp_solver(self) -> None:\n n_theta = self.weights.n_theta\n dtheta: cs.SX = cs.SX.sym(\"dtheta\", n_theta, 1)\n p: cs.SX = cs.SX.sym(\"p\", n_theta, 1)\n lr: cs.SX = cs.SX.sym(\"lr\", n_theta, 1)\n qp = {\n \"x\": dtheta,\n \"f\": 0.5 * dtheta.T @ dtheta + (lr * p).T @ dtheta,\n \"p\": cs.vertcat(p, lr),\n }\n opts = {\"print_iter\": False, \"print_header\": False}\n self._solver = cs.qpsol(f\"qpsol_{self.name}\", \"qrqp\", qp, opts)\n\n\nAgentType = TypeVar(\"AgentType\", bound=QuadRotorBaseLearningAgent)\n\n\nclass RecordLearningData(Generic[AgentType]):\n def __init__(self, agent: AgentType) -> None:\n self.agent = agent\n\n # initialize storages\n self.weights_history: dict[str, list[np.ndarray]] = {\n n: [p.value] for n, p in agent.weights.as_dict.items()\n }\n self.update_gradient: list[np.ndarray] = []\n\n @property\n def unwrapped(self) -> AgentType:\n return self.agent\n\n def learn_one_epoch(self, *args, **kwargs) -> tuple[np.ndarray, np.ndarray]:\n returns, grad, weights = self.agent.learn_one_epoch(*args, **kwargs)\n self._save(grad, weights)\n return returns, grad\n\n def learn(\n self, *args, **kwargs\n ) -> tuple[bool, np.ndarray, list[np.ndarray], list[dict[str, np.ndarray]]]:\n ok, returns, grads, weightss = self.agent.learn(*args, **kwargs)\n for grad, weights in zip(grads, weightss):\n self._save(grad, weights)\n return ok, returns, grads, weightss\n\n def _save(self, grad: np.ndarray, weights: dict[str, 
np.ndarray]) -> None:\n self.update_gradient.append(grad)\n for n, w in self.weights_history.items():\n w.append(weights[n])\n\n def __getattr__(self, name: str) -> Any:\n if name.startswith(\"_\"):\n raise AttributeError(f\"accessing private attribute '{name}' is prohibited.\")\n return getattr(self.agent, name)\n\n\n# ==================================================================================== #\n# ----------------------------------- END OLD CODE ----------------------------------- #\n# ==================================================================================== #\n\n\nclass QuadRotorMpcActual(Mpc):\n def __init__(self, env: QuadRotorEnv) -> None:\n N = QuadRotorMPCConfig.N\n super().__init__(Nlp(sym_type=\"SX\"), prediction_horizon=N, shooting=\"multi\")\n\n # ======================= #\n # Variable and Parameters #\n # ======================= #\n lbx, ubx = env.config.x_bounds[:, 0], env.config.x_bounds[:, 1]\n not_red = ~(np.isneginf(lbx) & np.isposinf(ubx))\n not_red_idx = np.where(not_red)[0]\n lbx, ubx = lbx[not_red].reshape(-1, 1), ubx[not_red].reshape(-1, 1)\n nx, nu = env.nx, env.nu\n x, _ = self.state(\"x\", nx)\n u, _ = self.action(\"u\", nu)\n ns = not_red_idx.size + nu\n s, _, _ = self.variable(\"slack\", (ns * N - not_red_idx.size, 1), lb=0)\n sx: cs.SX = s[: not_red_idx.size * (N - 1)].reshape((-1, N - 1))\n su: cs.SX = s[-nu * N :].reshape((-1, N))\n\n # 2) create model parameters\n for name in (\n \"g\",\n \"thrust_coeff\",\n \"pitch_d\",\n \"pitch_dd\",\n \"pitch_gain\",\n \"roll_d\",\n \"roll_dd\",\n \"roll_gain\",\n ):\n self.parameter(name, (1, 1))\n\n # =========== #\n # Constraints #\n # =========== #\n A, B, e = env.get_dynamics(\n g=self.parameters[\"g\"],\n thrust_coeff=self.parameters[\"thrust_coeff\"],\n pitch_d=self.parameters[\"pitch_d\"],\n pitch_dd=self.parameters[\"pitch_dd\"],\n pitch_gain=self.parameters[\"pitch_gain\"],\n roll_d=self.parameters[\"roll_d\"],\n roll_dd=self.parameters[\"roll_dd\"],\n roll_gain=self.parameters[\"roll_gain\"],\n )\n self.set_dynamics(lambda x, u: A @ x + B @ u + e, n_in=2, n_out=1)\n\n # 3) constraint on state\n bo = self.parameter(\"backoff\", (1, 1))\n self.constraint(\"x_min\", (1 + bo) * lbx - sx, \"<=\", x[not_red_idx, 2:])\n self.constraint(\"x_max\", x[not_red_idx, 2:], \"<=\", (1 - bo) * ubx + sx)\n self.constraint(\"u_min\", env.config.u_bounds[:, 0] - su, \"<=\", u)\n self.constraint(\"u_max\", u, \"<=\", env.config.u_bounds[:, 1] + su)\n\n # ========= #\n # Objective #\n # ========= #\n J = 0 # (no initial state cost not required since it is not economic)\n s = cs.blockcat([[cs.SX.zeros(sx.size1(), 1), sx], [su]])\n xf = self.parameter(\"xf\", (nx, 1))\n uf = cs.vertcat(0, 0, self.parameters[\"g\"])\n w_x = self.parameter(\"w_x\", (nx, 1)) # weights for stage/final state\n w_u = self.parameter(\"w_u\", (nu, 1)) # weights for stage/final control\n w_s = self.parameter(\"w_s\", (ns, 1)) # weights for stage/final slack\n J += sum(\n (\n quad_form(w_x, x[:, k + 1] - xf)\n + quad_form(w_u, u[:, k] - uf)\n + cs.dot(w_s, s[:, k])\n )\n for k in range(N - 1)\n )\n J += (\n quad_form(w_x, x[:, -1] - xf)\n + quad_form(w_u, u[:, -1] - uf)\n + cs.dot(w_s, s[:, -1])\n )\n self.minimize(J)\n self.init_solver(\n QuadRotorMPCConfig.__dataclass_fields__[\"solver_opts\"].default_factory()\n )\n\n\nclass TestQuadRotorQlearning(unittest.TestCase):\n def test(self):\n # for comparison\n # - replay maxlen must be 1, i.e., use only the latest episode for updates\n # - no exploration since np_randoms are placed 
differently\n seed = 42\n Tlimit = 20\n env = TimeLimit(QuadRotorEnv(), Tlimit)\n agent_config = {\n \"gamma\": 0.9792,\n \"lr\": [0.498],\n \"max_perc_update\": np.inf,\n \"replay_maxlen\": 1,\n \"replay_sample_size\": 1.0,\n \"replay_include_last\": 1,\n \"perturbation_decay\": 0.885,\n }\n agent_expected = RecordLearningData(\n QuadRotorLSTDQAgent(\n env=env, agentname=\"LSTDQ_0\", agent_config=agent_config, seed=seed\n )\n )\n results_expected = agent_expected.learn(\n n_epochs=2,\n n_episodes=1,\n perturbation_decay=agent_config[\"perturbation_decay\"],\n seed=seed + 1,\n throw_on_exception=True,\n )\n self.assertTrue(results_expected[0])\n\n mpc = QuadRotorMpcActual(env)\n fp_field = QuadRotorLSTDQAgentConfig.__dataclass_fields__[\"fixed_pars\"]\n fixed_pars = fp_field.default_factory()\n fixed_pars[\"xf\"] = env.config.xf\n lp_field = QuadRotorLSTDQAgentConfig.__dataclass_fields__[\"init_pars\"]\n learnable_pars = LearnableParametersDict[cs.SX](\n (\n LearnableParameter(\n name=name,\n shape=1,\n value=init,\n lb=lb,\n ub=ub,\n sym=cs.vec(mpc.parameters[name]),\n )\n for name, (init, (lb, ub)) in lp_field.default_factory().items()\n )\n )\n agent_actual = RecordUpdates(\n LstdQLearningAgent(\n mpc=mpc,\n discount_factor=agent_config[\"gamma\"],\n learning_rate=agent_config[\"lr\"][0],\n learnable_parameters=learnable_pars,\n fixed_parameters=fixed_pars,\n exploration=E.EpsilonGreedyExploration(\n S.ExponentialScheduler(0.0, agent_config[\"perturbation_decay\"]),\n S.ExponentialScheduler(0.0, agent_config[\"perturbation_decay\"]),\n seed=seed,\n ),\n experience=ExperienceReplay(maxlen=Tlimit, sample_size=1.0),\n update_strategy=Tlimit,\n cho_before_update=True,\n )\n )\n results_actual = LstdQLearningAgent.train(\n agent_actual,\n env=env,\n episodes=2,\n seed=seed + 1,\n )\n\n np.testing.assert_allclose(results_actual, results_expected[1].flatten())\n for n, weights in agent_actual.updates_history.items():\n np.testing.assert_allclose(weights, agent_expected.weights_history[n])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"FilippoAiraldi/mpc-reinforcement-learning","sub_path":"tests/test_quadrotor_q_learning.py","file_name":"test_quadrotor_q_learning.py","file_ext":"py","file_size_in_byte":48471,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"79"} +{"seq_id":"29130687420","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport math\n\n\ndef gelu(x):\n \"\"\" gelu激活函数\n 在GPT架构中,使用的是gelu函数的近似版本,公式如下:\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n 这里是直接求的解析解,就是原始论文给出的公式\n 论文 https://arxiv.org/abs/1606.08415\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\n\nactivations = {\"gelu\": gelu, \"relu\": F.relu, \"swish\": swish}\n\n\nclass LayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-12, conditional=False):\n \"\"\"layernorm 层,这里自行实现,目的是为了兼容 conditianal layernorm,使得可以做条件文本生成、条件分类等任务\n 条件layernorm来自于苏剑林的想法,详情:https://spaces.ac.cn/archives/7124\n \"\"\"\n super(LayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.eps = eps\n self.conditional = conditional\n if conditional:\n # 条件layernorm, 用于条件文本生成,\n # 这里采用全零初始化, 目的是在初始状态不干扰原来的预训练权重\n self.dense1 = nn.Linear(2 * hidden_size, hidden_size, bias=False)\n self.dense1.weight.data.uniform_(0, 0)\n self.dense2 = nn.Linear(2 * 
hidden_size, hidden_size, bias=False)\n self.dense2.weight.data.uniform_(0, 0)\n\n def forward(self, x):\n if self.conditional:\n inputs = x[0]\n cond = x[1]\n for _ in range(len(inputs.shape) - len(cond.shape)):\n cond = cond.unsqueeze(dim=1)\n u = inputs.mean(-1, keepdim=True)\n s = (inputs - u).pow(2).mean(-1, keepdim=True)\n x = (inputs - u) / torch.sqrt(s + self.eps)\n return (self.weight + self.dense1(cond)) * x + (self.bias + self.dense2(cond))\n else:\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.eps)\n return self.weight * x + self.bias\n\n\nclass MultiHeadAttentionLayer(nn.Module):\n def __init__(self, hidden_size, num_attention_heads, dropout_rate, attention_scale=True,\n return_attention_scores=False):\n super(MultiHeadAttentionLayer, self).__init__()\n\n assert hidden_size % num_attention_heads == 0\n\n self.hidden_size = hidden_size\n self.num_attention_heads = num_attention_heads\n self.attention_head_size = int(hidden_size / num_attention_heads)\n self.attention_scale = attention_scale\n self.return_attention_scores = return_attention_scores\n\n self.q = nn.Linear(hidden_size, hidden_size)\n self.k = nn.Linear(hidden_size, hidden_size)\n self.v = nn.Linear(hidden_size, hidden_size)\n\n self.o = nn.Linear(hidden_size, hidden_size)\n\n self.dropout = nn.Dropout(dropout_rate)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, query, key, value, attention_mask=None):\n\n # query shape: [batch_size, query_len, hidden_size]\n # key shape: [batch_size, key_len, hidden_size]\n # value shape: [batch_size, value_len, hidden_size]\n # 一般情况下,query_len、key_len、value_len三者相等\n\n mixed_query_layer = self.q(query)\n mixed_key_layer = self.k(key)\n mixed_value_layer = self.v(value)\n\n # mixed_query_layer shape: [batch_size, query_len, hidden_size]\n # mixed_query_layer shape: [batch_size, key_len, hidden_size]\n # mixed_query_layer shape: [batch_size, value_len, hidden_size]\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # query_layer shape: [batch_size, num_attention_heads, query_len, attention_head_size]\n # key_layer shape: [batch_size, num_attention_heads, key_len, attention_head_size]\n # value_layer shape: [batch_size, num_attention_heads, value_len, attention_head_size]\n\n # 交换k的最后两个维度,然后q和k执行点积, 获得attention score\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n # attention_scores shape: [batch_size, num_attention_heads, query_len, key_len]\n\n # 是否进行attention scale\n if self.attention_scale:\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n # 执行attention mask,对于mask为0部分的attention mask,\n # 值为-1e10,经过softmax后,attention_probs几乎为0,所以不会attention到mask为0的部分\n if attention_mask is not None:\n # attention_scores = attention_scores.masked_fill(attention_mask == 0, -1e10)\n attention_mask = (1.0 - attention_mask) * -10000.0\n attention_scores = attention_scores + attention_mask\n\n # 将attention score 归一化到0-1\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n attention_probs = self.dropout(attention_probs)\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n # context_layer shape: [batch_size, num_attention_heads, query_len, attention_head_size]\n\n # 
transpose、permute等维度变换操作后,tensor在内存中不再是连续存储的,而view操作要求tensor的内存连续存储,\n # 所以在调用view之前,需要contiguous来返回一个contiguous copy;\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n\n # context_layer shape: [batch_size, query_len, num_attention_heads, attention_head_size]\n\n new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n # 是否返回attention scores\n if self.return_attention_scores:\n # 这里返回的attention_scores没有经过softmax, 可在外部进行归一化操作\n return self.o(context_layer), attention_scores\n else:\n return self.o(context_layer)\n\n\nclass PositionWiseFeedForward(nn.Module):\n def __init__(self, hidden_size, intermediate_size, dropout_rate=0.5, hidden_act='gelu', is_dropout=True):\n # 原生的tf版本的bert在激活函数后,没有添加dropout层,但是在google AI的bert-pytorch开源项目中,多了一层dropout;\n # 并且在pytorch官方的TransformerEncoderLayer的实现中,也有一层dropout层,就像这样:self.linear2(self.dropout(self.activation(self.linear1(src))));\n # 这样不统一做法的原因不得而知,不过有没有这一层,差别可能不会很大;\n\n # 为了适配是否dropout,用is_dropout,dropout_rate两个参数控制;如果是实现原始的transformer,直接使用默认参数即可;如果是实现bert,则is_dropout为False,此时的dropout_rate参数并不会使用.\n super(PositionWiseFeedForward, self).__init__()\n\n self.is_dropout = is_dropout\n self.intermediate_act_fn = activations[hidden_act]\n self.intermediateDense = nn.Linear(hidden_size, intermediate_size)\n self.outputDense = nn.Linear(intermediate_size, hidden_size)\n if self.is_dropout:\n self.dropout = nn.Dropout(dropout_rate)\n\n def forward(self, x):\n # x shape: (batch size, seq len, hidden_size)\n if self.is_dropout:\n x = self.dropout(self.intermediate_act_fn(self.intermediateDense(x)))\n else:\n x = self.intermediate_act_fn(self.intermediateDense(x))\n\n # x shape: (batch size, seq len, intermediate_size)\n x = self.outputDense(x)\n\n # x shape: (batch size, seq len, hidden_size)\n return x\n","repo_name":"MuQiuJun-AI/bert4pytorch","sub_path":"bert4pytorch/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":8184,"program_lang":"python","lang":"en","doc_type":"code","stars":373,"dataset":"github-code","pt":"79"} +{"seq_id":"8834712574","text":"# import data into a numpy array\r\n\r\nimport numpy as np\r\n\r\ndata_array = np.genfromtxt(\"python_language_1_data.csv\", delimiter=\",\", names=True,\r\n dtype=[int, int, float])\r\n\r\nrainfall = \"rainfall_mmday\"\r\n\r\n# store end years\r\nfirst_year = data_array[0][0]\r\nlast_year = data_array[-1][0]\r\n\r\n# create a year key for dictionary\r\nyear_tuple = tuple(range(first_year, last_year + 1))\r\n\r\n# create dictionary, with year keys and empty list values\r\ndata_dic = {}\r\n\r\nfor year in year_tuple:\r\n data_dic[year] = [] \r\n\r\n# iterate through rows, adding rainfall data to appropriate year list of dictionary\r\n'''\r\n# alternative version\r\nfor day in range(data_array.size):\r\n data_dic[data_array[day][0]].append(data_array[day][2])\r\n'''\r\nfor day in data_array:\r\n data_dic[day[0]].append(day[2])\r\n\r\n\r\n#export dictionary to json\r\n\r\nimport json\r\n\r\nwith open('python_language_1_data.json', 'w') as json_file:\r\n json.dump(data_dic, json_file, indent=4)\r\n\r\n# function to create a plot in png format of rainfall across a year,\r\n# takes a json file, year, and optional colour\r\n \r\nfrom matplotlib import pyplot as plt\r\n\r\ndef plot_from_json(filename, year, colour='green'):\r\n \r\n with open(filename, 'r') as f:\r\n temp_string = f.read()\r\n \r\n plot_data_dic = json.loads(temp_string)\r\n plot_data = plot_data_dic[year]\r\n \r\n year_graph, 
year_graph_axes = plt.subplots()\r\n    \r\n    # attempt to plot graph, raise error if colour input is invalid\r\n    try:\r\n        year_graph_axes.plot(plot_data, color = colour)\r\n    except ValueError:\r\n        pass\r\n    \r\n    year_graph_axes.set_title(\"Average daily rainfall for {}\".format(year))\r\n    year_graph_axes.set_ylabel(\"rainfall / mmday\")\r\n    year_graph_axes.set_xlabel(\"day\")\r\n    \r\n    # save as .png\r\n    year_graph.savefig('year_rainfall_graph.png')\r\n\r\n#plot a chart for 1998, and export plot as png file, with magenta line \r\nplot_from_json('python_language_1_data.json', '1998', 'magenta')\r\n\r\n\r\n#write a function to plot a graph of yearly mean rainfall for a custom period\r\ndef mean_from_list(num_list):\r\n    return (sum(num_list) / len(num_list))\r\n\r\ndef yearly_mean_plot(filename, start_year, end_year):\r\n    \r\n    with open(filename, 'r') as f:\r\n        temp_string = f.read()\r\n\r\n    plot_data_dic = json.loads(temp_string)\r\n    \r\n    cust_year_list = list(range(int(start_year), int(end_year) + 1)) \r\n    year_mean_list = []\r\n    for year in cust_year_list:\r\n        year_mean = mean_from_list(plot_data_dic[str(year)])\r\n        year_mean_list.append(year_mean)\r\n    \r\n    custom_graph, custom_graph_axes = plt.subplots()\r\n    \r\n    custom_graph_axes.plot(cust_year_list, year_mean_list)\r\n    \r\n    custom_graph_axes.set_title(\"Yearly rainfall averages from {} to {}\".format(start_year, end_year))\r\n    custom_graph_axes.set_ylabel(\"average rainfall / mm per day\")\r\n    custom_graph_axes.set_xlabel(\"year\")\r\n    \r\n    # save as .png custom_graph.savefig('custom_rainfall_graph.png')\r\n    custom_graph.savefig('mean_rainfall_graph.png')\r\n\r\n#produce a plot 1988-2000 inclusive\r\nyearly_mean_plot('python_language_1_data.json', '1988', '2000')\r\n\r\n\r\n\r\n# function to apply correction code: (rainfall_value * 1.2 ^ root(2))\r\ndef rain_corrector(rain_value):\r\n    root_two = 2**(1/2)\r\n    correct_rain_value = rain_value * (1.2 ** root_two) \r\n    return correct_rain_value\r\n\r\n# function to correct all of the data for a given year (v1 - using a for loop)\r\ndef year_corrector_loop(filename, year):\r\n    # import dictionary\r\n    with open(filename, 'r') as f:\r\n        temp_string = f.read()\r\n    bad_data_dic = json.loads(temp_string)\r\n    \r\n    for v_index in range(len(bad_data_dic[year])):\r\n        bad_entry = bad_data_dic[year][v_index]\r\n        fixed_entry = rain_corrector(bad_entry)\r\n        bad_data_dic[year][v_index] = fixed_entry\r\n    \r\n# for rain_value in bad_data_dic[str(year)]:\r\n#    bad_data_dic[str(year)][v_index] = rain_corrector(day)\r\n    \r\n    \r\n    with open('fixed_rain_data_loop.json', 'w') as fixed_json_file:\r\n        json.dump(bad_data_dic, fixed_json_file, indent=4)\r\n    \r\n# test corrector loop version\r\nyear_corrector_loop('python_language_1_data.json', '2000')\r\n\r\n# function to correct all of the data for a given year (v2 - using a list comp)\r\ndef year_corrector_comp(filename, year):\r\n    # import dictionary\r\n    with open(filename, 'r') as f:\r\n        temp_string = f.read()\r\n    bad_data_dic = json.loads(temp_string)\r\n    \r\n    fixed_year = [rain_corrector(entry) for entry in bad_data_dic[year]]\r\n    \r\n    bad_data_dic[year] = fixed_year\r\n    \r\n    with open('fixed_rain_data_comp.json', 'w') as fixed_json_file:\r\n        json.dump(bad_data_dic, fixed_json_file, indent=4)\r\n\r\n# test corrector comp version\r\nyear_corrector_comp('python_language_1_data.json', '1942')\r\n'''\r\nThe loop version benefits from spreading out the operations,\r\nwhich can make them easier to follow.\r\n\r\nThe list comprehension version benefits from conciseness,\r\nand general 
readibility\r\n'''\r\n\r\n\r\n'''\r\nspare code:\r\n #clear figure\r\n year_graph.clf()\r\n\r\n'''","repo_name":"cji1/Exam-Prep","sub_path":"lang/rainfall.py","file_name":"rainfall.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74009725374","text":"from typing import OrderedDict\n\nfrom mindspore_federated._mindspore_federated import VFLContext\n\nfrom ..common import check_type\n\n\nclass ServerConfig:\n \"\"\"\n Define the vertical server configuration.\n\n Args:\n server_name (str): Name of server, such as \"leader_server\", user defined.\n server_address (str): Address of server, such as 127.0.0.1:1086, user defined.\n \"\"\"\n def __init__(self, server_name, server_address):\n check_type.check_str(\"server_name\", server_name)\n check_type.check_str(\"server_address\", server_address)\n self.server_name = server_name\n self.server_address = server_address\n\n\ndef init_server_config(http_server_config, remote_server_config):\n \"\"\"\n Initialize local server configuration and remote server configuration.\n\n Args:\n http_server_config (ServerConfig): Configuration of local http server.\n remote_server_config (ServerConfig): Configuration of remote http server.\n \"\"\"\n ctx = VFLContext.get_instance()\n check_type.check_str(\"http_server_config.server_name\", http_server_config.server_name)\n check_type.check_str(\"http_server_config.server_address\", http_server_config.server_address)\n ctx.set_http_server_name(http_server_config.server_name)\n ctx.set_http_server_address(http_server_config.server_address)\n\n remote_server_dict = OrderedDict()\n if isinstance(remote_server_config, ServerConfig):\n check_type.check_str(\"remote_server_config.server_name\", remote_server_config.server_name)\n check_type.check_str(\"remote_server_config.server_address\", remote_server_config.server_address)\n remote_server_dict[remote_server_config.server_name] = remote_server_config.server_address\n\n elif isinstance(remote_server_config, list):\n for item in remote_server_config:\n check_type.check_str(\"remote_server_config.server_name\", item.server_name)\n check_type.check_str(\"remote_server_config.server_address\", item.server_address)\n remote_server_dict[item.server_name] = item.server_address\n ctx.set_remote_server_address(remote_server_dict)\n","repo_name":"gaoyang-zhang/mindspore-federated","sub_path":"mindspore_federated/fl_arch/python/mindspore_federated/startup/server_config.py","file_name":"server_config.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2979631773","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis file deals with AOI pattern recognition for an interpretation purpose.\n\"\"\"\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n IMPORTS\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nimport Constants as const\nimport pandas as pd\nimport numpy as 
np\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n FUNCTIONS\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\ndef clean_AOI(full_pd, seuil):\n \"\"\"\n This function will compress all the data to keep only the first row for each AOI, allowing to detect patterns after\n\n Parameters\n ----------\n full_pd : TYPE\n DESCRIPTION.\n seuil : TYPE\n DESCRIPTION.\n\n Returns\n -------\n clean : TYPE\n DESCRIPTION.\n\n \"\"\"\n full_pd=full_pd.copy(deep=True)\n clean = full_pd.loc[(full_pd.loc[:,\"AOI\"].shift() != full_pd.loc[:,\"AOI\"])].copy(deep=True)\n clean.loc[:,\"delta\"]=(-clean[\"FD_TIME_S\"]+clean[\"FD_TIME_S\"].shift(-1)).fillna(0)\n clean=clean.loc[(clean[\"delta\"]>seuil)]\n\n clean.reset_index(drop=True,inplace=True)\n return clean\n\n\n\n\ndef count_transitions(AOI_pd):\n \"\"\"\n\n\n Parameters\n ----------\n AOI_pd : TYPE\n DESCRIPTION.\n\n Returns\n -------\n pivot : TYPE\n DESCRIPTION.\n transition : TYPE\n DESCRIPTION.\n\n \"\"\"\n AOI_pd[\"next_AOI\"]=AOI_pd.loc[:,\"AOI\"].shift(-1,fill_value=\"0\")\n AOI_pd[\"prev_AOI\"]=AOI_pd[\"AOI\"].shift(1,fill_value=\"0\")\n AOI_pd[\"transition\"]=AOI_pd[\"AOI\"]+\"=>\"+AOI_pd[\"next_AOI\"]\n AOI_pd[\"prev_transition\"]=AOI_pd[\"prev_AOI\"]+\"=>\"+AOI_pd[\"AOI\"]\n\n AOI=AOI_pd.drop_duplicates(subset=\"AOI\").sort_values(\"AOI\").set_index(\"AOI\")\n\n transition=AOI_pd.drop_duplicates(subset=\"transition\").sort_values(\"transition\").set_index(\"transition\")\n transition.loc[:,\"count\"]=0\n transition.loc[:,\"average_time_bef\"]=0\n transition.loc[:,\"average_time_aft\"]=0\n transition.loc[:,\"%from\"]=0 # Depuis l'AOI de départ, % de fois ou on arrive à AOI arrivé\n transition.loc[:,\"%to\"]=0 # D'ou vient on depuis cet AOI d'arrivé\n transition.loc[:,\"%count\"]=0\n\n for a in transition.index:\n AOI1=transition.loc[a,\"AOI\"]\n AOI2=transition.loc[a,\"next_AOI\"]\n transition.loc[a:,\"count\"]=AOI_pd.loc[a==AOI_pd[\"transition\"]].count()[\"transition\"]\n transition.loc[a:,\"average_time_bef\"]=AOI_pd.loc[a==AOI_pd[\"transition\"],\"delta\"].mean()\n transition.loc[a:,\"average_time_aft\"]=AOI_pd.loc[a==AOI_pd[\"prev_transition\"],\"delta\"].mean()\n transition.loc[a,\"%from\"]=int((100*transition.loc[a,\"count\"]/AOI_pd.loc[AOI_pd[\"AOI\"]==AOI1].count()[\"AOI\"]))\n transition.loc[a,\"%to\"]=int((100*transition.loc[a,\"count\"]/AOI_pd.loc[AOI_pd[\"next_AOI\"]==AOI2].count()[\"next_AOI\"]))\n for b in transition.index:\n transition.loc[b,\"%count\"]=int((100*transition.loc[b,\"count\"]/transition[\"count\"].sum()))\n\n ind=[a for a in AOI.index]\n col=ind.copy()\n col.append(\"0\")\n pivot=pd.DataFrame(index=col,columns=ind)\n pivot.fillna(0,inplace=True)\n for i in ind:\n for j in col:\n a=i+\"=>\"+j\n\n if a in transition.index:\n pivot.loc[j,i]=transition.loc[a,\"%from\"]\n pivot=pivot.astype(int)\n transition.drop(columns=[\"AOI\",\"FD_TIME_S\",\"next_AOI\",\"average_time_aft\",\"prev_AOI\",\"prev_transition\"],inplace=True)\n return pivot,transition\n\n\n\n\ndef tete_fixe_tunnel(aois,t1,t2):\n \"\"\"\n\n\n Parameters\n ----------\n aois : TYPE\n DESCRIPTION.\n t1 : TYPE\n DESCRIPTION.\n t2 : TYPE\n DESCRIPTION.\n\n Returns\n -------\n fixe : TYPE\n 
DESCRIPTION.\n\n    \"\"\"\n    ref=aois.loc[t1,\"AOI\"]\n    fixe=(aois.loc[(aois.loc[:, \"FD_TIME_S\"]<t2) & (aois.loc[:, \"FD_TIME_S\"]>t1),\"AOI\"]==ref).all()\n    return fixe\n\n\ndef tete_fixe(data,t1,t2,seuil=const.SEUIL_TETE_FIXE):\n    \"\"\"\n\n\n    Parameters\n    ----------\n    data : TYPE\n        DESCRIPTION.\n    t1 : TYPE\n        DESCRIPTION.\n    t2 : TYPE\n        DESCRIPTION.\n    seuil : TYPE, optional\n        DESCRIPTION. The default is const.SEUIL_TETE_FIXE.\n\n    Returns\n    -------\n    fixe : TYPE\n        DESCRIPTION.\n\n    \"\"\"\n    local=data.loc[(data[\"FD_TIME_S\"]<t2) & (data[\"FD_TIME_S\"]>t1),[\"FD_PILOT_HEAD_HEADING\",\"FD_PILOT_HEAD_PITCH\"]]\n    mean=local.mean()\n    fixe=((abs(local-mean)>seuil).all()).all()\n    return fixe\n\ndef count_AOI(AOI_pd,full_pd):\n    \"\"\"\n\n\n    Parameters\n    ----------\n    AOI_pd : TYPE\n        DESCRIPTION.\n    full_pd : TYPE\n        DESCRIPTION.\n\n    Returns\n    -------\n    AOI : TYPE\n        DESCRIPTION.\n\n    \"\"\"\n    AOI=AOI_pd.drop_duplicates(subset=\"AOI\").sort_values(\"AOI\").set_index(\"AOI\")\n    AOI[\"count\"]=0\n    AOI[\"average_time\"]=0\n    AOI[\"total_time\"]=0\n    AOI[\"%_time\"]=0\n    total_time=full_pd[\"FD_TIME_S\"].max()-full_pd[\"FD_TIME_S\"].min()\n\n    for a in AOI.index:\n        AOI.loc[a,\"count\"]=AOI_pd.loc[a==AOI_pd[\"AOI\"]].count()[\"AOI\"]\n        AOI.loc[a,\"average_time\"]=AOI_pd.loc[a==AOI_pd[\"AOI\"],\"delta\"].mean()\n        AOI.loc[a,\"total_time\"]=AOI_pd.loc[a==AOI_pd[\"AOI\"],\"delta\"].sum()\n    AOI.loc[:,\"%_time\"]=(100*AOI[\"total_time\"]/total_time).astype(int)\n    return AOI\n\n\n\n\ndef chain_AOI(pivot,liste_aoi):\n    \"\"\"\n\n\n    Parameters\n    ----------\n    pivot : TYPE\n        DESCRIPTION.\n    liste_aoi : TYPE\n        DESCRIPTION.\n\n    Returns\n    -------\n    aoi_chain : TYPE\n        DESCRIPTION.\n\n    \"\"\"\n    aois=pivot.index.copy().to_numpy()\n    liste_aois=\"\".join(liste_aoi)\n    aoi_chain=pd.DataFrame(columns=[\"count\"])\n    for i in aois:\n        for j in np.delete(aois,np.where(aois==i)):\n            if liste_aois.count(i+j)>0:\n                for k in np.delete(aois,np.where(aois==j)):\n                    if liste_aois.count(i+j+k)>0:\n                        temp=liste_aois.count(i+j+k)\n                        if temp>0 :\n                            aoi_chain.loc[i+j+k,\"count\"]=temp\n\n    aoi_chain[\"pourcent\"]=100*aoi_chain.loc[:,\"count\"]/aoi_chain[\"count\"].sum()\n    aoi_chain=aoi_chain.loc[aoi_chain[\"pourcent\"]>1]\n    return aoi_chain","repo_name":"NatanVachon/PIE-018","sub_path":"DataAnalysis/Features/Pattern_From_AOI.py","file_name":"Pattern_From_AOI.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"35439081580","text":"def solution(answers):\n    n = len(answers)\n    s1 = [1, 2, 3, 4, 5]\n    s2 = [2, 1, 2, 3, 2, 4, 2, 5]\n    s3 = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]\n    answer = []\n\n    score = [0, 0, 0]\n    max_score = 0\n\n    for i in range(n):\n        if answers[i] == s1[i%5]:\n            score[0] += 1\n        if answers[i] == s2[i%8]:\n            score[1] += 1\n        if answers[i] == s3[i%10]:\n            score[2] += 1\n\n    for idx, j in enumerate(score):\n        if j > max_score:\n            answer = [idx+1]\n            max_score = j\n        elif j == max_score:\n            answer.append(idx+1)\n\n    return answer\n\n\n\nanswers2 = [1,2,3,4,5]\nanswers = [1,3,2,4,2]\nprint(solution(answers))","repo_name":"Mingdoo/coding_test_boom","sub_path":"210912/smile/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
{"seq_id":"32274320880","text":"def is_same_tree(inorder, preorder, postorder):\n    if len(inorder) != len(preorder) or len(inorder) != len(postorder):\n        return False\n\n    if len(inorder) == 0:\n        return True\n\n    if len(inorder) == 1:\n        return inorder[0] == preorder[0] == postorder[0]\n\n    if set(inorder) 
!= set(preorder) or set(inorder) != set(postorder):\n return False\n\n root = preorder[0]\n root_index = inorder.index(root)\n\n left_inorder = inorder[:root_index]\n right_inorder = inorder[root_index + 1:]\n\n left_preorder = preorder[1:root_index + 1]\n right_preorder = preorder[root_index + 1:]\n\n left_postorder = postorder[:root_index]\n right_postorder = postorder[root_index:-1]\n\n return is_same_tree(left_inorder, left_preorder, left_postorder) and \\\n is_same_tree(right_inorder, right_preorder, right_postorder)\n\n\n# Test Case 1\ninorder1 = [4, 2, 5, 1, 3]\npreorder1 = [1, 2, 4, 5, 3]\npostorder1 = [4, 5, 2, 3, 1]\nprint(is_same_tree(inorder1, preorder1, postorder1)) # Output: True\n\n# Test Case 2\ninorder2 = [4, 2, 5, 1, 3]\npreorder2 = [1, 5, 4, 2, 3]\npostorder2 = [4, 1, 2, 3, 5]\nprint(is_same_tree(inorder2, preorder2, postorder2)) # Output: False\n","repo_name":"Krutheesh/Placement_Assignment_Krutheesh","sub_path":"DSA/assignment22/four.py","file_name":"four.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25975508251","text":"#!/usr/bin/env python3\n#coding: utf-8\n### 1st line allows to execute this script by typing only its name in terminal, with no need to precede it with the python command\n### 2nd line declaring source code charset should be not necessary but for exemple pydoc request it\n\n\n\n__doc__ = \"This module concern volumes.\"#information describing the purpose of this module\n__status__ = \"Development\"#should be one of 'Prototype' 'Development' 'Production' 'Deprecated' 'Release'\n__version__ = \"2.0.0\"# version number,date or about last modification made compared to the previous version\n__license__ = \"public domain\"# ref to an official existing License\n#__copyright__ = \"Copyright 2000, The X Project\"\n__date__ = \"2016\"#started creation date / year month day\n__author__ = \"N-zo syslog@laposte.net\"#the creator origin of this prog,\n__maintainer__ = \"Nzo\"#person who curently makes improvements, replacing the author\n__credits__ = []#passed mainteners and any other helpers\n__contact__ = \"syslog@laposte.net\"# current contact adress for more info about this file\n\n\n\n### import the required modules\n#import antiprism_python # a collection of geometry \n\nfrom math import sqrt\nfrom numpy import array\n\n\n\n### ICOSAHEDRON\nPHI = (sqrt(5) + 1) / 2\nRAD = sqrt(PHI+2)\nA = 1/RAD\nB = PHI/RAD\nICO_VERTEX=[ (-A,0,B),(A,0,B),(-A,0,-B),(A,0,-B),(0,B,A),(0,B,-A),\n(0,-B,A),(0,-B,-A),(B,A,0),(-B,A,0),(B,-A,0),(-B,-A,0) ]\nICO_FACES=[ (1,4,0),(4,9,0),(4,5,9),(8,5,4),(1,8,4),\n(1,10,8),(10,3,8),(8,3,5),(3,2,5),(3,7,2),\n(3,10,7),(10,6,7),(6,11,7),(6,0,11),(6,1,0),\n(10,1,6),(11,0,9),(2,11,9),(5,2,9),(11,2,7) ]\n\n### TETRAHEDRON\nC= 1 / sqrt(3)\nTETRA_VERTEX=[(-C,C,-C),(-C,-C,C),(C,C,C),(C,-C,-C)]\nTETRA_FACES=[(0,2,1),(3,0,1),(3,1,2),(0,2,3)]\n\n\n\ndef normaliz(vector):\n\tlong= sqrt( sum(vector**2) )\n\treturn vector/long\n\n\ndef iterator(qantum,vertex_list,face_list):\n\twhile qantum:\n\t\tnew_face_list=[]\n\t\tfor face in face_list :\n\n\t\t\ta=array(vertex_list[face[0]])\n\t\t\tb=array(vertex_list[face[1]])\n\t\t\tc=array(vertex_list[face[2]])\n\t\t\t\n\t\t\tna= tuple(normaliz( (a+b)/2. ))\n\t\t\tnb= tuple(normaliz( (b+c)/2. ))\n\t\t\tnc= tuple(normaliz( (c+a)/2. 
))\n\t\t\t\n\t\t\tindex=[]\n\t\t\tfor v in [na,nb,nc] :\n\t\t\t\tif v in vertex_list :\n\t\t\t\t\ti=vertex_list.index(v)\n\t\t\t\t\t#print(\"in list\")\n\t\t\t\telse :\n\t\t\t\t\ti=len(vertex_list)\n\t\t\t\t\tvertex_list.append(v)\n\t\t\t\tindex.append(i)\n\t\t\t\n\t\t\tfa=(face[0],index[0],index[2])\n\t\t\tfb=(face[1],index[1],index[0])\n\t\t\tfc=(face[2],index[2],index[1])\n\t\t\tfd=(index[0],index[1],index[2])\n\t\t\t\n\t\t\tnew_face_list.extend([fa,fb,fc,fd])\n\t\tface_list=new_face_list\n\t\tqantum-=1\n\treturn vertex_list,tuple(face_list)\n","repo_name":"N-z0/commonz","sub_path":"src/geometry/polyhedra.py","file_name":"polyhedra.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36663042334","text":"#! /usr/bin/env python3\n\nimport difflib\nimport requests\n\nwith open(\"expected_binding.py\") as f:\n # remove \\n from end of each line\n expected_binding = [line.rstrip() for line in f]\n\n\nsource = \"\"\"\n#include \"ffig/attributes.h\"\n\nstruct FFIG_EXPORT Asset\n{\n virtual FFIG_EXPORT_NAME(value) double PV() const = 0;\n virtual FFIG_PROPERTY_NAME(name) const char* id() const = 0;\n};\n virtual ~Asset() = default;\n\nstruct FFIG_NAME(CDO) CollateralisedDebtObligation : Asset\n{\n CollateralisedDebtObligation() {}\n\n double PV() const override { return 99.99; }\n const char* id() const override { return \"CDO\"; }\n};\n\"\"\"\n\npayload = {'module_name': \"test\", 'inp_file': source,\n \"bindings_to_generate\": [\"py3\"]}\n\nr = requests.post(\n \"http://127.0.0.1:5000/api/gen_bindings_from_tu\", data=payload)\n\nassert r.status_code == requests.codes.ok\n \njson_resp = r.json()\ndiffer = difflib.Differ()\nbinding_from_api = json_resp['res'].splitlines()\nres = list(differ.compare(binding_from_api, expected_binding))\nfor line in res:\n # Each line of a Differ delta begins with a two-letter code.\n # ' ' represents a line common to both sequences.\n assert line[0:2] == ' '\n","repo_name":"FFIG/rest-api","sub_path":"requests_at_explorer.py","file_name":"requests_at_explorer.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29946212028","text":"from pprint import pprint\nfrom .base import Base\nfrom utils.io import load_BFM\nimport numpy as np\nimport tensorflow as tf\n\n\nclass SCRFDTDMMPostModel(tf.keras.Model):\n\n def __init__(self, tdmm_cfg, pred_model, n_objs, top_k_n, kp_thres,\n nms_iou_thres, resize_shape, *args, **kwargs):\n super(SCRFDTDMMPostModel, self).__init__(*args, **kwargs)\n self.n_R = tdmm_cfg['n_R']\n self.n_shp, self.n_exp = tdmm_cfg['n_shp'], tdmm_cfg['n_exp']\n pms = tf.cast(np.load(tdmm_cfg['pms_path']), tf.float32)\n pms_R = pms[:, :self.n_R]\n pms_shp, pms_exp = pms[:, self.n_R:self.n_R + self.n_shp], pms[:,\n 208:-3]\n pms = tf.concat([pms_R, pms_shp, pms_exp], axis=-1)\n self.pms = pms[:2, :]\n head_model = load_BFM(tdmm_cfg['model_path'])\n kpt_ind = head_model['kpt_ind']\n X_ind_all = np.stack([kpt_ind * 3, kpt_ind * 3 + 1, kpt_ind * 3 + 2])\n X_ind_all = tf.concat([\n X_ind_all[:, :17], X_ind_all[:, 17:27], X_ind_all[:, 36:48],\n X_ind_all[:, 27:36], X_ind_all[:, 48:68]\n ],\n axis=-1)\n valid_ind = tf.reshape(tf.transpose(X_ind_all), (-1))\n self.valid_ind = tf.cast(valid_ind, tf.int32)\n self.u_base = tf.cast(head_model['shapeMU'], tf.float32)\n self.u_base = tf.gather(self.u_base, self.valid_ind)\n self.u_base = tf.reshape(self.u_base,\n 
(tf.shape(self.u_base)[0] // 3, 3))\n self.u_base = tf.reshape(self.u_base, (tf.shape(self.u_base)[0] * 3, 1))\n self.shp_base = tf.cast(head_model['shapePC'],\n tf.float32)[:, :self.n_shp]\n self.shp_base = tf.gather(self.shp_base, self.valid_ind)\n self.exp_base = tf.cast(head_model['expPC'], tf.float32)\n self.exp_base = tf.gather(self.exp_base, self.valid_ind)\n self.pred_model = pred_model\n self.n_objs = n_objs\n self.top_k_n = top_k_n\n self.kp_thres = kp_thres\n self.nms_iou_thres = nms_iou_thres\n self.resize_shape = tf.cast(resize_shape, tf.float32)\n self.cls_out_channels = 2\n self._feat_stride_fpn = [8, 16, 32]\n self.num_levels = len(self._feat_stride_fpn)\n self.num_level_anchors = [3200, 800, 200]\n self._num_anchors = 2\n\n def call(self, x, training=False):\n imgs, origin_shapes = x\n batch_size = tf.shape(imgs)[0]\n self.resize_ratio = tf.cast(origin_shapes / self.resize_shape,\n tf.dtypes.float32)\n preds = self.pred_model(imgs, training=False)\n box_results, lnms_results = self._anchor_assign(batch_size,\n preds[\"multi_lv_feats\"])\n return box_results, lnms_results\n\n # @tf.function\n def _anchor_assign(self, batch_size, multi_lv_feats):\n b_bbox_outputs = -tf.ones(shape=(batch_size, self.n_objs,\n self.cls_out_channels, 5))\n b_lnmk_outputs = -tf.ones(shape=(batch_size, self.n_objs,\n self.cls_out_channels, 68, 2))\n obj_start_idx = 0\n bbox_list, lnmk_list = [], []\n idxs_list = []\n for i, (lv_feats,\n stride) in enumerate(zip(multi_lv_feats,\n self._feat_stride_fpn)):\n if i == 0:\n continue\n b_cls_preds, b_bbox_preds, b_param_preds = lv_feats\n b_cls_preds = tf.math.sigmoid(b_cls_preds)\n\n b_bbox_preds = tf.reshape(b_bbox_preds, [-1, 4])\n b_param_preds = tf.reshape(b_param_preds,\n [-1, self.n_R + self.n_shp + self.n_exp])\n b_mask = b_cls_preds > self.kp_thres\n btach_idxs = tf.cast(tf.where(b_mask == True), tf.int32)[:, :1]\n b_cls_preds = tf.reshape(b_cls_preds, [-1, self.cls_out_channels])\n mask = b_cls_preds > self.kp_thres\n idxs = tf.where(mask == True)\n channel_idxs = tf.cast(idxs, tf.int32)[:, -1:]\n b_cls_preds = tf.expand_dims(b_cls_preds[mask], axis=-1)\n b_bboxes = self.decode_bbox(batch_size, stride, idxs, b_bbox_preds)\n pred_R, pred_shp, pred_exp = self.decod_params(idxs, b_param_preds)\n n_lnmks = self.reconstruct_lnmks(batch_size, b_bboxes, pred_R,\n pred_shp, pred_exp)\n num_detected_objs = tf.math.reduce_sum(tf.cast(mask, tf.float32))\n obj_idxs = tf.range(num_detected_objs, dtype=tf.int32)[:, None]\n obj_idxs += obj_start_idx\n b_bboxes = tf.einsum('n c d, b d -> n c d', b_bboxes[..., ::-1],\n self.resize_ratio)\n b_bboxes = tf.reshape(b_bboxes, (-1, 4))\n b_bboxes = tf.concat([b_bboxes, b_cls_preds], axis=-1)\n idxs = tf.concat([btach_idxs, obj_idxs, channel_idxs], axis=-1)\n n_lnmks = tf.einsum('n c d, b d -> n c d', n_lnmks[..., ::-1],\n self.resize_ratio)\n bbox_list.append(b_bboxes[:, :-1])\n lnmk_list.append(n_lnmks)\n idxs_list.append(idxs)\n b_bbox_outputs = tf.tensor_scatter_nd_update(\n b_bbox_outputs, idxs, b_bboxes)\n bbox_tensor = tf.concat(bbox_list, axis=0)\n lnmk_tensor = tf.concat(lnmk_list, axis=0)\n idxs_tensor = tf.concat(idxs_list, axis=0)\n b_scores = b_bbox_outputs[..., -1]\n b_bbox_outputs = b_bbox_outputs[..., :-1]\n # [B, N, Cate, 4]\n nms_reuslt = tf.image.combined_non_max_suppression(\n b_bbox_outputs,\n b_scores,\n self.n_objs,\n self.n_objs,\n iou_threshold=self.nms_iou_thres,\n clip_boxes=False)\n box_results = tf.where(nms_reuslt[0] == -1., np.inf, nms_reuslt[0])\n\n search_tensors = tf.reshape(\n 
box_results, [-1, 4])[:, None, :] - bbox_tensor[None, :, :]\n search_mask = tf.math.reduce_all(search_tensors == 0.0, axis=-1)\n idxs = tf.where(search_mask == True)[:, -1:]\n lnmk_tensor = tf.gather_nd(lnmk_tensor, idxs)\n idxs_tensor = tf.gather_nd(idxs_tensor, idxs)\n\n box_results = tf.where((box_results - 1.) == -1., np.inf, box_results)\n b_bboxes = tf.concat(\n [box_results, nms_reuslt[1][..., None], nms_reuslt[2][..., None]],\n axis=-1)\n b_bboxes = tf.where(b_bboxes == -1., np.inf, b_bboxes)\n b_bboxes = tf.reshape(b_bboxes, [-1, self.n_objs, 6])\n\n b_lnmk_outputs = tf.tensor_scatter_nd_update(b_lnmk_outputs,\n idxs_tensor, lnmk_tensor)\n b_lnmk_outputs = tf.where(b_lnmk_outputs == -1., np.inf, b_lnmk_outputs)\n return b_bboxes, b_lnmk_outputs\n\n def decod_params(self, idxs, b_param_preds):\n b_param_preds = tf.gather_nd(b_param_preds, idxs[:, 0][:, None])\n b_param_preds = b_param_preds * self.pms[1][None, :] + self.pms[0][\n None, :]\n R = b_param_preds[:, :self.n_R]\n shp = b_param_preds[:, self.n_R:self.n_R + self.n_shp]\n exp = b_param_preds[:, self.n_R + self.n_shp:]\n return R, shp, exp\n\n def decode_bbox(self, batch_size, stride, idxs, b_bbox_preds):\n b_bbox_preds = b_bbox_preds * stride\n height = self.resize_shape[0] // stride\n width = self.resize_shape[1] // stride\n X, Y = tf.meshgrid(tf.range(0, width), tf.range(0, height))\n anchor_centers = tf.stack([X, Y], axis=-1)\n anchor_centers = tf.reshape((anchor_centers * stride), (-1, 2))\n\n if self._num_anchors > 1:\n anchor_centers = tf.reshape(\n tf.stack([anchor_centers] * self._num_anchors, axis=1), (-1, 2))\n\n anchor_centers = tf.cast(anchor_centers, tf.float32)\n anchor_centers = tf.tile(anchor_centers[None, ...], (batch_size, 1, 1))\n anchor_centers = tf.reshape(anchor_centers, (-1, 2))\n b_bboxes = self.distance2bbox(anchor_centers, b_bbox_preds)\n b_bboxes = tf.gather_nd(b_bboxes, idxs[:, :1])\n b_bboxes = tf.reshape(b_bboxes, (-1, 2, 2))\n return b_bboxes\n\n def reconstruct_lnmks(self, batch_size, b_bboxes, R, shp, exp):\n n_lnmks = self.u_base + tf.linalg.matmul(\n self.shp_base, shp[..., None]) + tf.linalg.matmul(\n self.exp_base, exp[..., None])\n n_lnmks = tf.reshape(n_lnmks, (-1, tf.shape(n_lnmks)[-2] // 3, 3))\n R = tf.reshape(R, [-1, 3, 3])\n n_lnmks = tf.linalg.matmul(n_lnmks, R, transpose_b=(0, 2, 1))\n n_lnmks = n_lnmks[..., :2]\n n_lnmk_tls = tf.math.reduce_min(n_lnmks, axis=-2, keepdims=True)\n n_lnmk_brs = tf.math.reduce_max(n_lnmks, axis=-2, keepdims=True)\n n_bbox_tls = b_bboxes[:, :1, :]\n n_bbox_brs = b_bboxes[:, 1:, :]\n n_lnmks_wh = n_lnmk_brs - n_lnmk_tls\n n_bbox_wh = n_bbox_brs - n_bbox_tls\n n_scales = n_bbox_wh / n_lnmks_wh\n n_lnmks = tf.math.abs(n_scales) * n_lnmks\n return n_lnmks[..., :2]\n\n def distance2bbox(self, points, distance, max_shape=None):\n \"\"\"Decode distance prediction to bounding box.\n Args:\n points (Tensor): Shape (n, 2), [x, y].\n distance (Tensor): Distance from the given point to 4\n boundaries (left, top, right, bottom).\n max_shape (tuple): Shape of the image.\n\n Returns:\n Tensor: Decoded bboxes.\n \"\"\"\n x1 = points[..., 0] - distance[..., 0]\n y1 = points[..., 1] - distance[..., 1]\n x2 = points[..., 0] + distance[..., 2]\n y2 = points[..., 1] + distance[..., 3]\n if max_shape is not None:\n x1 = tf.clip_by_value(x1,\n clip_value_min=0,\n clip_value_max=max_shape[1])\n y1 = tf.clip_by_value(y1,\n clip_value_min=0,\n clip_value_max=max_shape[0])\n x2 = tf.clip_by_value(x2,\n clip_value_min=0,\n clip_value_max=max_shape[1])\n y2 = 
tf.clip_by_value(y2,\n clip_value_min=0,\n clip_value_max=max_shape[0])\n return tf.stack([x1, y1, x2, y2], axis=-1)\n","repo_name":"a0910257137/behavior_predictor","sub_path":"core/scrfdtdmm_model.py","file_name":"scrfdtdmm_model.py","file_ext":"py","file_size_in_byte":10500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"2201331454","text":"from Bio import SeqIO, AlignIO, Seq, SeqRecord\nfrom Bio.Align.Applications import ClustalwCommandline\nfrom Bio.Application import ApplicationError\nfrom Bio.Align.Applications import MuscleCommandline\nimport argparse\nfrom enum import Enum\nimport subprocess\nfrom shutil import which\n\nclass MSA(Enum):\n CLUSTALW = 1\n MUSCLE = 2\n\ndef isMethodInstalled(method):\n if method == MSA.CLUSTALW:\n return which(\"clustalw2\") is not None\n elif method == MSA.MUSCLE:\n return which(\"muscle\") is not None\n else:\n return False\n\ndef msa(in_file, out_file, method):\n \n if method == MSA.CLUSTALW:\n cline = ClustalwCommandline(\"clustalw2\",infile=in_file, outfile=out_file)\n\n elif method == MSA.MUSCLE:\n command = [\"muscle\", \"-align\", in_file, \"-output\", out_file]\n cline = lambda : subprocess.run(command, check=True) \n \n else: \n print(\"Error: Invalid MSA method\")\n exit(1)\n\n try:\n if not isMethodInstalled(method):\n print(f\"Error: Unable to run {method.name}. Make sure is installed\")\n exit(1)\n cline()\n except ApplicationError:\n print(f\"Error: Unable to run {method.name}. Make sure is installed\")\n exit(1)\n except OSError as e:\n print(f\"Error: Unable to open {in_file}: {e}\")\n exit(1)\n\nif \"__main__\" == __name__:\n\n parser = argparse.ArgumentParser(prog=\"ej3.py\", description=\"Execute Multiple Sequence Alignment with Clustawl or Muscle\")\n parser.add_argument(\"--method\", help=\"MSA method (clustalw or muscle)\", type=str, required=True, choices=[\"clustalw\", \"muscle\"])\n parser.add_argument(\"--input\", help=\"Input file (.fas)\", type=str, required=True)\n parser.add_argument(\"--output\", help=\"Output file\", type=str, required=True)\n\n args = parser.parse_args()\n in_file = args.input\n out_file = args.output\n\n extension = args.input.split(\".\")[-1]\n if extension != \"fas\" and extension != \"fasta\": \n print(\"Error: Please enter .fas or .fasta file\") \n exit(1) \n\n method = MSA.CLUSTALW if args.method == \"clustalw\" else MSA.MUSCLE\n msa(in_file, out_file, method)","repo_name":"eugepineiro/bioinformatica","sub_path":"ej3.py","file_name":"ej3.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38884856074","text":"from typing import Optional, Callable\n\nfrom urwid_utils.palette import *\nimport urwid\n\nfrom .logger import get_logger\nfrom .user_input import MouseButton, MouseState, MouseEvent\n\nlogger = get_logger()\n\n\n__all__ = [\"ScrollingListBox\"]\n\n\nclass ListBoxScrollBar(urwid.WidgetWrap):\n def __init__(self, parent):\n self.parent = parent\n self.pile = urwid.Pile([])\n super(ListBoxScrollBar, self).__init__(self.pile)\n\n def update(self, size):\n width, height = size\n scroll_marker_height = 1\n del self.pile.contents[:]\n\n if (len(self.parent.body) and self.parent.row_count and\n self.parent.focus is not None and self.parent.row_count > height):\n scroll_position = int(self.parent.focus_position / self.parent.row_count * height)\n scroll_marker_height = max(height * (height / self.parent.row_count), 1)\n else:\n 
scroll_position = -1\n\n pos_marker = urwid.AttrMap(urwid.Text(\" \"), {None: \"scroll_pos\"})\n down_marker = urwid.AttrMap(urwid.Text(u\"\\N{DOWNWARDS ARROW}\"), {None: \"scroll_marker\"})\n begin_marker = urwid.AttrMap(urwid.Text(u\"\\N{CIRCLED MINUS}\"), {None: \"scroll_marker\"})\n end_marker = urwid.AttrMap(urwid.Text(u\"\\N{CIRCLED PLUS}\"), {None: \"scroll_marker\"})\n view_marker = urwid.AttrMap(urwid.Text(\" \"), {None: \"scroll_view\"})\n bg_marker = urwid.AttrMap(urwid.Text(\" \"), {None: \"scroll_bg\"})\n\n for i in range(height):\n if abs(i - scroll_position) <= scroll_marker_height // 2:\n if i == 0 and self.parent.focus_position == 0:\n marker = begin_marker\n elif i + 1 == height and self.parent.row_count == self.parent.focus_position+1:\n marker = end_marker\n elif len(self.parent.body) == self.parent.focus_position + 1 \\\n and i == scroll_position + scroll_marker_height // 2:\n marker = down_marker\n else:\n marker = pos_marker\n else:\n if i < scroll_position:\n marker = view_marker\n elif self.parent.row_count and i / height < (len(self.parent.body) / self.parent.row_count):\n marker = view_marker\n else:\n marker = bg_marker\n\n self.pile.contents.append((urwid.Filler(marker), self.pile.options(\"weight\", 1)))\n\n self._invalidate()\n\n def selectable(self):\n # FIXME: mouse click/drag\n return False\n\n\nclass ScrollingListBox(urwid.WidgetWrap):\n signals = [\"select\", \"drag_start\", \"drag_continue\", \"drag_stop\", \"load_more\"]\n SCROLL_WHEEL_HEIGHT_RATIO = 0.5\n\n def __init__(self, body: urwid.Widget,\n infinite: bool = False,\n with_scrollbar: bool = False,\n row_count_fn: Optional[Callable] = None):\n self.infinite = infinite\n self.with_scrollbar = with_scrollbar\n self.row_count_fn = row_count_fn\n\n self.mouse_state: MouseState = MouseState.released\n self.drag_from = None\n self.drag_last = None\n self.drag_to = None\n self.load_more = False\n self.width: int = 0\n self.height: int = 0\n self.page: int = 0\n\n self.queued_keypress = None\n\n self.listbox = urwid.ListBox(body)\n self.columns = urwid.Columns([('weight', 1, self.listbox)])\n\n if self.with_scrollbar:\n self.scroll_bar = ListBoxScrollBar(self)\n self.columns.contents.append((self.scroll_bar, self.columns.options(\"given\", 1)))\n\n super(ScrollingListBox, self).__init__(self.columns)\n\n def mouse_event(self, size, event: str, button: int, col: int, row: int, focus: bool):\n if row < 0 or row >= self.height:\n return\n\n if event == MouseEvent.press:\n if button == MouseButton.left_button:\n self.mouse_state = MouseState.pressed\n self.drag_from = self.drag_last = (col, row)\n\n elif button == MouseButton.scroll_wheel_up:\n pos = self.listbox.focus_position - int(self.height * self.SCROLL_WHEEL_HEIGHT_RATIO)\n if pos < 0:\n pos = 0\n self.listbox.focus_position = pos\n self.listbox.make_cursor_visible(size)\n self._invalidate()\n\n elif button == MouseButton.scroll_wheel_down:\n pos = self.listbox.focus_position + int(self.height * self.SCROLL_WHEEL_HEIGHT_RATIO)\n if pos > len(self.listbox.body) - 1:\n if self.infinite:\n self.load_more = True\n pos = len(self.listbox.body) - 1\n self.listbox.focus_position = pos\n self.listbox.make_cursor_visible(size)\n self._invalidate()\n\n elif event == MouseEvent.drag:\n if self.drag_from is None:\n return\n\n if button == MouseButton.left_button:\n self.drag_to = (col, row)\n if self.mouse_state == MouseState.pressed:\n self.mouse_state = MouseState.dragging\n urwid.signals.emit_signal(self, \"drag_start\", self, self.drag_from)\n else:\n 
urwid.signals.emit_signal(self, \"drag_continue\", self, self.drag_last, self.drag_to)\n\n self.drag_last = (col, row)\n\n elif event == MouseEvent.release:\n if self.mouse_state == MouseState.dragging:\n self.drag_to = (col, row)\n urwid.signals.emit_signal(self, \"drag_stop\", self, self.drag_from, self.drag_to)\n self.mouse_state = MouseState.released\n\n return super(ScrollingListBox, self).mouse_event(size, event, button, col, row, focus)\n\n def keypress(self, size, key: str):\n command = self._command_map[key]\n if not command:\n return super(ScrollingListBox, self).keypress(size, key)\n\n # down, page down at end trigger load of more data\n if (\n command in [\"cursor down\", \"cursor page down\"]\n and self.infinite\n and (\n not len(self.body)\n or self.focus_position == len(self.body) - 1)\n ):\n self.load_more = True\n self.queued_keypress = key\n self._invalidate()\n\n elif command == \"activate\":\n urwid.signals.emit_signal(self, \"select\", self, self.selection)\n\n return super(ScrollingListBox, self).keypress(size, key)\n\n @property\n def selection(self):\n if len(self.body):\n return self.body[self.focus_position]\n\n def render(self, size, focus: bool = False):\n max_column: int = size[0]\n max_row: Optional[int] = size[1] if len(size) > 1 else None\n\n self.width = max_column\n if max_row:\n self.height = max_row\n\n if self.load_more and len(self.body) == 0 or \"bottom\" in self.ends_visible((max_column, max_row)):\n self.load_more = False\n self.page += 1\n try:\n focus = self.focus_position\n except IndexError:\n focus = None\n\n urwid.signals.emit_signal(self, \"load_more\", focus)\n\n if self.queued_keypress and focus and focus < len(self.body):\n self.keypress(size, self.queued_keypress)\n self.queued_keypress = None\n\n if self.with_scrollbar and len(self.body):\n self.scroll_bar.update(size)\n\n return super(ScrollingListBox, self).render(size, focus)\n\n def disable(self):\n self._selectable = False\n\n def enable(self):\n self._selectable = True\n\n @property\n def contents(self):\n return self.columns.contents\n\n @property\n def focus(self):\n return self.listbox.focus\n\n @property\n def focus_position(self):\n if not len(self.listbox.body):\n raise IndexError\n if len(self.listbox.body):\n return self.listbox.focus_position\n return None\n\n @focus_position.setter\n def focus_position(self, value):\n if not len(self.body):\n return\n self.listbox.focus_position = value\n self.listbox._invalidate()\n\n @property\n def row_count(self):\n if self.row_count_fn:\n return self.row_count_fn()\n return len(self.body)\n\n def __getattr__(self, attr):\n if attr in [\"ends_visible\", \"focus_position\", \"set_focus\", \"set_focus_valign\", \"body\", \"focus\"]:\n return getattr(self.listbox, attr)\n raise AttributeError(attr)\n\n @classmethod\n def get_palette_entries(cls):\n return {\n \"scroll_pos\": PaletteEntry(\n mono=\"white\",\n foreground=\"black\",\n background=\"white\",\n foreground_high=\"black\",\n background_high=\"white\"\n ),\n \"scroll_marker\": PaletteEntry(\n mono=\"white,bold\",\n foreground=\"black,bold\",\n background=\"white\",\n foreground_high=\"black,bold\",\n background_high=\"white\"\n ),\n \"scroll_view\": PaletteEntry(\n mono=\"black\",\n foreground=\"black\",\n background=\"light gray\",\n foreground_high=\"black\",\n background_high=\"g50\"\n ),\n \"scroll_bg\": PaletteEntry(\n mono=\"black\",\n foreground=\"light gray\",\n background=\"dark gray\",\n foreground_high=\"light gray\",\n background_high=\"g23\"\n ),\n\n 
}\n","repo_name":"emreay-/bank-statement-wizard","sub_path":"src/bank_statement_wizard/thirdparty/panwid/listbox.py","file_name":"listbox.py","file_ext":"py","file_size_in_byte":9745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"23208345593","text":"def to_ternary(n):\n result = []\n while n > 0:\n result.append(str(n % 3))\n n //= 3\n return \"\".join(result[::-1])\n\ndef to_decimal(n: str):\n decimal = 0\n for i in range(len(n)):\n decimal += int(n[i]) * (3 ** (len(n) - 1 - i))\n return decimal\n\ndef solution(n):\n ternary = to_ternary(n)\n flipped_ternary = ternary[::-1]\n answer = to_decimal(flipped_ternary)\n return answer\n\nn = 45\nprint(solution(n))\n","repo_name":"AppleYoujatea/OriginalApplePie","sub_path":"2nd_quarter/week05/pepe/3진법만들기.py","file_name":"3진법만들기.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"40466495298","text":"import random\r\nimport plotly.express as px\r\nimport plotly.figure_factory as ff\r\nimport statistics\r\n\r\ndice_result=[]\r\ncount=[]\r\nfor i in range(0,1000):\r\n dice1=random.randint(1,6)\r\n dice2=random.randint(1,6)\r\n dice_result.append(dice1+dice2)\r\n count.append(i)\r\nmean=sum(dice_result)/len(dice_result)\r\nstd_deviation=statistics.stdev(dice_result)\r\nmedian=statistics.median(dice_result)\r\nmode=statistics.mode(dice_result)\r\nprint(mean)\r\nprint(std_deviation)\r\nprint(median)\r\nprint(mode)\r\nfirst_std_dev_start, first_std_dev_end = mean-std_deviation, mean+std_deviation\r\nsec_std_dev_start, sec_std_dev_end = mean-(2*std_deviation), mean+(2*std_deviation)\r\nthi_std_dev_start, thi_std_dev_end = mean-(3*std_deviation), mean+(3*std_deviation)\r\nlist_of_data_within_1_std_deviation=[result for result in dice_result if result > first_std_dev_start and result < first_std_dev_end]\r\nlist_of_data_within_2_std_deviation=[result for result in dice_result if result > sec_std_dev_start and result < sec_std_dev_end]\r\nlist_of_data_within_3_std_deviation=[result for result in dice_result if result > thi_std_dev_start and result < thi_std_dev_end]\r\nprint(\"{}% of data lies within 1 standard deviation\".format(len(list_of_data_within_1_std_deviation)*100.0/len(dice_result)))\r\nprint(\"{}% of data lies within 2 standard deviation\".format(len(list_of_data_within_2_std_deviation)*100.0/len(dice_result)))\r\nprint(\"{}% of data lies within 3 standard deviation\".format(len(list_of_data_within_3_std_deviation)*100.0/len(dice_result)))","repo_name":"TanviLodhavia/Class_109","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20686976480","text":"#!/usr/bin/python3\ndef add_tuple(tuple_a=(), tuple_b=()):\n lent_a = len(tuple_a)\n lent_b = len(tuple_b)\n if lent_a == 0:\n a1 = 0\n b1 = 0\n elif lent_a < 2 and lent_a != 0:\n a1 = tuple_a[0]\n b1 = 0\n else:\n a1 = tuple_a[0]\n b1 = tuple_a[1]\n if lent_b == 0:\n a2 = 0\n b2 = 0\n elif lent_b < 2 and lent_b != 0:\n a2 = tuple_b[0]\n b2 = 0\n else:\n a2 = tuple_b[0]\n b2 = tuple_b[1]\n new_tuple = (a1 + a2, b1 + b2)\n return 
new_tuple\n","repo_name":"XimeonLeo/alx-higher_level_programming","sub_path":"0x03-python-data_structures/7-add_tuple.py","file_name":"7-add_tuple.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3305823767","text":"import sys\nsys.path.append(\"..\")\nfrom client import SAVNConnectionAssistant\nimport json\nimport unittest\nimport asyncio\nfrom unittest.mock import Mock\n\nclass AsyncMock(Mock):\n def __call__(self, *args, **kwargs):\n parent = super(AsyncMock, self)\n async def coro():\n return parent.__call__(*args, **kwargs)\n return coro()\n\n def __await__(self):\n return self().__await__()\n\nclass TestFrameworkClientMethods(unittest.TestCase):\n def setUp(self):\n self.connection = SAVNConnectionAssistant(42)\n self.connection.alive = True\n self.connection.ws = Mock()\n self.loop = asyncio.get_event_loop()\n\n def test_updateState(self):\n state = {\"car\": 1}\n timestamp = 0\n packet = {'type': 'simulation-state-update',\n 'content':\n {'simulationID': self.connection.simulationID,\n 'timestamp': timestamp,\n 'objects': state,\n 'frameworkID': 0}}\n self.connection.updateState(timestamp, state, sleepTime=0)\n message = self.loop.run_until_complete(self.connection.fetchMessage())\n self.assertEqual(packet, message)\n\n def test_message_reception(self):\n self.loop.run_in_executor = Mock()\n msg = {'content': 'fish'}\n async def op():\n return json.dumps(msg)\n self.connection.ws.recv = op\n self.loop.run_until_complete(self.connection.handler())\n self.loop.run_in_executor.assert_called_with(None,\n self.connection.onMessage,{'content': 'fish'})\n\n def test_messageQueue_drainage(self):\n self.loop.run_in_executor = Mock()\n packet = {'content': 'fish'}\n async def op():\n await asyncio.sleep(100)\n self.connection.ws.recv = op\n self.connection.send_packet = AsyncMock()\n msg = json.dumps(packet)\n self.connection.messageQueue.put_nowait(msg)\n self.loop.run_until_complete(self.connection.handler())\n self.connection.send_packet.assert_called_with(msg)\n\n def test_simulationRun(self):\n self.connection.handleSimulationRun = Mock()\n packet = {'type': 'simulation-start-parameters', 'content': {'frameworkID': 0}}\n self.connection.onMessage(packet)\n self.connection.handleSimulationRun.assert_called_with(packet['content'])\n\n def test_simulationStop(self):\n self.connection.handleSimulationStop = Mock()\n packet = {'type': 'framework-disconnect', 'content': {}}\n self.connection.onMessage(packet)\n self.connection.handleSimulationStop.assert_called_with(packet['content'])\n\n def test_simulationDataUpdate(self):\n self.connection.handleSimulationDataUpdate = Mock()\n packet = {'type': 'simulation-update', 'content': {}}\n self.connection.onMessage(packet)\n self.connection.handleSimulationDataUpdate.assert_called_with(packet['content'])\n","repo_name":"franklinsch/driverlesscarsimulations","sub_path":"framework/test/client_test.py","file_name":"client_test.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"21821346625","text":"from pickle import load\n\n# load doc into memory\ndef load_doc(filename):\n\t# open the file as read only\n\tfile = open(filename, 'r')\n\t# read all text\n\ttext = file.read()\n\t# close the file\n\tfile.close()\n\treturn text\n\n# load a pre-defined list of photo identifiers\ndef load_set(filename):\n\tdoc = load_doc(filename)\n\tdataset = 
list()\n\t# process line by line\n\tfor line in doc.split('\\n'):\n\t\t# skip empty lines\n\t\tif len(line) < 1:\n\t\t\tcontinue\n\t\t# get the image identifier\n\t\tidentifier = line.split('.')[0]\n\t\tdataset.append(identifier)\n\treturn set(dataset)\n\n# load clean descriptions into memory\ndef load_clean_descriptions(filename, dataset):\n\t# load document\n\tdoc = load_doc(filename)\n\tdescriptions = dict()\n\tfor line in doc.split('\\n'):\n\t\t# split line by white space\n\t\ttokens = line.split()\n\t\t# split id from description\n\t\timage_id, image_desc = tokens[0], tokens[1:]\n\t\t# skip images not in the set\n\t\tif image_id in dataset:\n\t\t\t# create list\n\t\t\tif image_id not in descriptions:\n\t\t\t\tdescriptions[image_id] = list()\n\t\t\t# wrap description in tokens\n\t\t\tdesc = 'startseq ' + ' '.join(image_desc) + ' endseq'\n\t\t\t# store\n\t\t\tdescriptions[image_id].append(desc)\n\treturn descriptions\n\n# load photo features\ndef load_photo_features(filename, dataset):\n\t# load all features\n\tall_features = load(open(filename, 'rb'))\n\t# filter features\n\tfeatures = {k: all_features[k] for k in dataset}\n\treturn features\n\n# load training dataset (6K)\nfilename = 'Flickr8k_text/Flickr_8k.trainImages.txt'\ntrain = load_set(filename)\nprint('Dataset: %d' % len(train))\n# descriptions\ntrain_descriptions = load_clean_descriptions('descriptions.txt', train)\nprint('Descriptions: train=%d' % len(train_descriptions))\n# photo features\ntrain_features = load_photo_features('features.pkl', train)\nprint('Photos: train=%d' % len(train_features))","repo_name":"enuguru/aiandml","sub_path":"nlp/code/chapter_26/3_load_prepared_data.py","file_name":"3_load_prepared_data.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"79"} +{"seq_id":"29691122025","text":"from django.conf.urls import url, include\nfrom . 
import views\n\nurlpatterns = [\n url(r'^photos/$', views.photos, name = 'photos'),\n url(r'^videos/$', views.seasons, name = 'seasons'),\n url(r'^videos/(?P\\d+)/$', views.season_videos, name = 'season_videos'),\n url(r'^artists/$', views.artists, name='artists'),\n url(r'^about/$', views.about, name='about'),\n\n]","repo_name":"RoikYurii/brooklyn99","sub_path":"content/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70598726335","text":"from __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom mptt.admin import MPTTModelAdmin\nfrom .models import Taxon\nfrom .models import Upload_dwca\nfrom .models import DwcaTaxon\nfrom .models import DwcaDistribution\nfrom .models import DwcaResourceRelationship\nfrom .models import DwcaVernacular\nfrom .models import RawName\nfrom .models import RawNameAdmin\nfrom .models import NameFinderResult\nfrom .models import NameFinderJSON\n#from .models import NameFinderResultAdmin\n\nfrom django.contrib.admin import AdminSite\nfrom django.http import HttpResponse\nimport logging\n\n'''\nThe following code extends the admin change form for Publication\n(/publications/publication).\n\nIt adds a new action, \"find_names\".\n\nReference: https://docs.djangoproject.com/en/1.10/ref/contrib/admin/actions/\n'''\nfrom publications.admin import PublicationAdmin\nfrom publications.models import Publication\nfrom publications.models import CustomFile\nimport requests\nimport json\nimport time\nfrom django.core.files.base import ContentFile\nimport json\nfrom taxonomy.functions import json_to_db\nfrom taxonomy.functions import find_names\nfrom taxonomy.functions import json_to_name_finder_results\n\n#Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\nclass CustomPublicationAdmin(PublicationAdmin):\n actions = ['add_extracted_taxon_names_file']\n\n def add_extracted_taxon_names_file(self, request, queryset):\n for pub in queryset:\n print('title: {} url: {}'.format(pub.title, pub.url))\n file_list = CustomFile.objects.filter(\n publication_id=pub.id).filter(\n description='extracted taxon names')\n if not file_list:\n taxa = find_names(pub.url)\n json_string = json.dumps(taxa)\n django_file = ContentFile(json_string)\n newfile = CustomFile()\n newfile.publication_id = pub.id\n newfile.description = 'extracted taxon names'\n newfile.file.save('extracted_taxon_names.json', django_file, save=True)\n print('new file attached.')\n print('adding data to NameFinderResults model ...')\n json_to_name_finder_results(pub, taxa)\n json_to_db(pub, taxa)\n print('FINIS')\n else:\n print('A file with description \"extracted_taxon_names\" already exists')\n\n add_extracted_taxon_names_file.short_description = \"Extract scientific names from selected publications\"\n\nadmin.site.unregister(Publication)\nadmin.site.register(Publication, CustomPublicationAdmin)\n'''\nEnd of code section.\n'''\n\n# Add mark_as_verified action to NameFinderResultAdmin change page\n\ndef mark_as_verified(self, request, queryset):\n queryset.update(verified=True)\nmark_as_verified.short_description = 'Mark selected results as verified'\n\n# http://www.gbif.org/species/1406619\n\n\nclass NameFinderResultAdmin(admin.ModelAdmin):\n list_filter = ('pub', 'verified',)\n list_display = ('verified', 'classification_path', 'GBIF')\n list_display_links = ('classification_path',)\n readonly_fields = (\n 'GBIF',\n 'pub',\n 'is_known_name',\n 
'supplied_name_string',\n 'classification_path_ranks',\n 'classification_path',\n 'current_name_string',\n 'imported_at',\n 'canonical_form',\n 'data_source_id',\n 'match_value',\n 'data_source_title',\n 'gni_uuid',\n 'edit_distance',\n 'match_type',\n 'name_string',\n 'current_taxon_id',\n 'taxon_id',\n 'prescore',\n 'classification_path_ids',\n 'score',)\n actions = [mark_as_verified]\n\n def GBIF(self, obj):\n return '{}'.format(obj.taxon_id, obj.taxon_id)\n GBIF.allow_tags = True\n\n\n\nadmin.site.register(NameFinderResult, NameFinderResultAdmin)\n\n\n\nadmin.site.register(Taxon, MPTTModelAdmin)\nadmin.site.register(Upload_dwca)\nadmin.site.register(DwcaTaxon)\nadmin.site.register(DwcaDistribution)\nadmin.site.register(DwcaResourceRelationship)\nadmin.site.register(DwcaVernacular)\nadmin.site.register(RawName, RawNameAdmin)\nadmin.site.register(NameFinderJSON)\n\n\n\n\n# Ref for subclassing AdminSite:\n# http://stackoverflow.com/questions/35875454/django-admin-extending-admin-with-custom-views\nclass MyAdminSite(AdminSite):\n\n def custom_view(self, request):\n return HttpResponse(\"Test\")\n\n def get_urls(self):\n from django.conf.urls import url\n urls = super(MyAdminSite, self).get_urls()\n urls += [\n url(r'^custom_view/$', self.admin_view(self.custom_view))\n ]\n return urls\n\nadmin_site = MyAdminSite()\n\n\n# @admin.register(DwcaTaxon, site=admin_site)\n# class SomeModelAdmin(admin.ModelAdmin):\n# pass\n","repo_name":"aubreymoore/GuamInvasiveSpeciesList","sub_path":"taxonomy/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70151990015","text":"from flask import Flask\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef primario():\n return \"\"\"Site teste de Gabriel Bugmann
\n 301 - Info\n Link\"\"\"\n\n@app.route(\"/lista_pessoas\")\ndef lista_pessoas():\n lista = [\"João da Silva\",\"Maria Oliveira\"]\n for i in lista:\n return f'
 
 {i}
 
'\n\napp.run(debug=True, host=\"0.0.0.0\")","repo_name":"Bugmenn/prog","sub_path":"server_web.py","file_name":"server_web.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"43699046855","text":"#!/usr/bin/python3\n\n# The purpose of this software is to create PDF-file from\n# Finnish national archive scanned document available as JPG-files\n# Software needs as an input required document or file with list of documents\n# and optional maximum size for single PDF-file\n\nfrom urllib.request import Request, urlopen\nfrom urllib.error import URLError\nimport re\nimport os\nimport sys\nimport argparse\nfrom PIL import Image as PILImage\nfrom PIL import ImageDraw, ImageFont\nimport numpy as np\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import A4\nfrom reportlab.platypus.flowables import Image as RepImage\nimport textwrap\n\n# function to get list of pages or exit if the required document doesn't exist\n# NEEDS TO BE FIXED TO SUPPORT MULTIPLE DOCUMENT DOWNLOAD\n\ndef getPageList(IndexText):\n PageList = re.findall('view.ka\\?kuid=(\\d*)',IndexText)\n if not len(PageList):\n print('Ei löytynyt sivuja, tarkista arkistoyksikkönumero')\n sys.exit(1)\n return PageList \n\n# function to create an error page if a page from archives fails to download\n# page has required text to inform reader\n\ndef makeErrorPage(text, pagenumber):\n errorpage=PILImage.new('RGB',(595,842),(255,255,255))\n drawing=ImageDraw.Draw(errorpage)\n drawing.text((10,10),text,(0,0,0))\n errorpage=errorpage.resize((5950,8420))\n errorpage.save('%s.jpg'%pagenumber)\n return \n\n# function to download pages as jpg-files from narc-service and call error page\n# creation function for failed pages\n# NEEDS OUTPUT FOR SUCCESS/FAILURE\n\ndef downloadPages(ListOfPages):\n for page in ListOfPages:\n try:\n image=urlopen('http://digi.narc.fi/digi/fetch_hqjpg.ka?kuid=%s' % page)\n \n except URLError as e:\n if hasattr(e, 'reason'):\n reason='Palvelimeen ei saatu yhteyttä.\\nIlmoitettu syy: '+e.reason\n elif hasattr(e, 'code'):\n reason='Palvelin ei voinut täyttää hakua.\\nVirhekoodi: '+e.code\n makeErrorPage(reason,page)\n else:\n image=urlopen('http://digi.narc.fi/digi/fetch_hqjpg.ka?kuid=%s' % page)\n typeinfo=image.info().get_content_type()\n if typeinfo=='image/jpeg':\n file=open('%s.jpg' % page,'wb')\n file.write(image.read())\n file.close()\n else:\n makeErrorPage(image.read(),page)\n\n return\n\n# function to create name for PDF-file from the title of the narc document\n\ndef createFilename(title,part):\n Filename=re.subn('(\\\\|\\/|:|\\*|\\\"|\\||;|,|/)',\"\",title)\n Filename=re.subn('(\\.|\\s)','_',Filename[0])\n fname=Filename[0]\n fname+='_osa_'+str(part)+'.pdf'\n return fname\n\n# function to calculate scaling to a4\n\ndef calcScale(imagesize,a4size):\n SizeOfX=imagesize[0]/a4size[0]\n SizeOfY=imagesize[1]/a4size[1]\n for X in np.arange(0,11,0.25):\n difference = abs(SizeOfX-X)\n if difference<0.25:\n break\n\n for Y in np.arange(0,11,0.25):\n difference = abs(SizeOfY-Y)\n if difference<0.25:\n break\n return (X,Y)\n\n# function to delete downloaded jpg-files\n\ndef cleanUp(ListOfPages):\n for page in ListOfPages:\n os.remove('%s.jpg'%page)\n return\n \n# function to create the pdf-file by\n# 1) getting list of document pages from narc\n# 2) find title from the narc\n# 3) download pages\n# 4) create pdf\n# 5) save downloaded jpg to pdf\n# 5b) close and create new pdf if size limit is exceeded\n# 6) clean 
downloaded jpg-files\n\ndef doPDFFile(IndexText,MaxSize):\n ListOfPages=getPageList(IndexText)\n TitleMatch = re.search(r\"dosearch\\.ka\\?sartun=\\d*\\.\\w*\\\">(.*?)<\\/b>\",IndexText)\n Title=TitleMatch.group(1)\n downloadPages(ListOfPages)\n Canvas = canvas.Canvas(createFilename(Title,1))\n Canvas.setTitle(Title)\n First = True\n counter=1\n for page in ListOfPages:\n filename='%s.jpg'%page\n if First:\n size=os.stat(filename).st_size\n else:\n size+=os.stat(filename).st_size\n\n if MaxSize and not First and size>(MaxSize*1024*1024):\n Canvas.save()\n counter+=1\n Canvas=canvas.Canvas(createFilename(Title,counter))\n size=os.stat(filename).st_size\n \n SavedImage = PILImage.open(filename)\n if First:\n SizeOfA4=SavedImage.size\n First=False\n\n #scale pages so that first image is A4-sized \n scale=calcScale(SavedImage.size,SizeOfA4)\n Canvas.setPageSize((A4[0]*scale[0],A4[1]*scale[1]))\n Canvas.drawImage(filename,0,0,A4[0]*scale[0],A4[1]*scale[1],preserveAspectRatio=True)\n Canvas.showPage()\n SavedImage.close()\n\n cleanUp(ListOfPages)\n\n Canvas.save()\n\n return 0\n\n# Check validity of input (either pure number or link to narc page\n\ndef checkInputString(inputstring):\n if re.fullmatch('\\d*',inputstring):\n output='http://digi.narc.fi/digi/slistaus.ka?ay='+inputstring\n elif re.fullmatch('http://digi\\.narc\\.fi/digi/slistaus\\.ka\\?ay=\\d*',inputstring):\n output=inputstring\n else:\n return\n return output\n\n# Get list of documents from input file\n# single number or fullurl = directly single url\n# rangeset = generate range with the numpy.arange from [start,end(not included),step]\n# rangeset2 = generate range with the numpy.arange from start-end(included), with 1 as a step\n\ndef getList(urlfile):\n lines = [line.strip() for line in open(urlfile)]\n urls = []\n for line in lines:\n singlenumber=re.fullmatch('\\d*',line)\n fullurl=re.fullmatch('http://digi\\.narc\\.fi/digi/slistaus\\.ka\\?ay=\\d*',line)\n rangeset=re.fullmatch('\\[(\\d*),(\\d*),(\\d)\\]',line)\n rangeset2=re.fullmatch('(\\d*)-(\\d*)',line)\n if singlenumber:\n urls.append(line)\n elif fullurl:\n urls.append(line)\n elif rangeset:\n for value in np.arange(int(rangeset.group(1)),int(rangeset.group(2)),int(rangeset.group(3))):\n urls.append(str(value))\n elif rangeset2:\n for value in np.arange(int(rangeset2.group(1)),int(rangeset2.group(2))+1,1):\n urls.append(str(value))\n \n return urls\n \n# Main function\n# if single document requested run it directly\n# if multiple create list from input file and run them consecutively\n# MAKE EXIT ONLY AFTER ALL FILES HAVE BEEN RUN\n\ndef main(url,size,file):\n ExitValue=0\n if not (file):\n ExitValue=run(url,size)\n \n else:\n ListOfUrls=getList(url)\n for url_value in ListOfUrls:\n ExitValue=run(url_value,size)\n if(ExitValue):\n sys.exit(ExitValue) \n\n sys.exit(ExitValue)\n\n# Run single document unit to download it by\n# 1) check validity of the input url\n# 2) request the document from narc\n# 3) send narc html-page to pdf-creating subprogram\n# return values different from 0 indicate error\n\ndef run(url,size): \n SourceUrl=url\n Url=checkInputString(SourceUrl)\n MaxSize=size #maximum size for pdf (may be exceeded a bit because of pdf format)\n if(Url):\n Req=Request(Url)\n try:\n Response=urlopen(Req)\n except URLError as e:\n if hasattr(e, 'reason'):\n print('Palvelimeen ei saatu yhteyttä.')\n print('Ilmoitettu syy: ',e.reason)\n sys.exit(1)\n elif hasattr(e, 'code'):\n print('Palvelin ei voinut täyttää hakua.')\n print('Virhekoodi: ',e.code)\n sys.exit(1)\n 
else:\n IndexText=Response.read().decode('latin-1')\n doPDFFile(IndexText,MaxSize)\n return 0\n \n else:\n print(\"Väärä osoite, osoitteen tulee olla joko http://digi.narc.fi/digi/slistaus.ka?ay=X -muotoa tai pelkkä X,\"+\n \" joka on halutun arkistoyksikön numero digi narcissa.\")\n return 2\n\n sys.stderr.write(\"Tuntematon virhe \\n\")\n return 1\n\n\n# argument parser\n# CREATE BETTER HELP AND FILE PARSING INPUT\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description='Lataa digi.narc.fi palvelusta arkistoyksiköitä pdf-muodossa.',\n epilog=textwrap.dedent('''\\\n numerolistan muoto\n rivillä joko\n yksittäinen arkistointiyksikkö numero tai url\n tai listan generointi seuraavilla tavoin\n [aloitusnumero,lopetusnumero,askel] \n tämä generoi listan numeroita aloituksesta lopetukseen (ei mukana)\n \t\t\t aloitusnumero-lopetusnumero \n tämä generoi listan numeroita aloituksesta lopetukseen (mukana) 1 välein\n '''))\n parser.add_argument('url', metavar='URL', help='arkistointiyksikön numero tai url muodossa http://digi.narc.fi/digi/slistaus.ka?ay=numero')\n parser.add_argument('-m','--maxsize',default=0,type=int, help='Maksimikoko pdf-tieodostolle, oletus 0 = ei rajoitusta')\n parser.add_argument('-f','--file',action='store_true', help='Lataa useampi yksikkö kerralla, numerolista tiedostossa ja tiedoston nimi URL:n sijaan')\n args=parser.parse_args()\n main(args.url,args.maxsize,args.file)\n\n \n","repo_name":"teakfi/kansallisarkisto_downloader","sub_path":"narchaku.py","file_name":"narchaku.py","file_ext":"py","file_size_in_byte":9296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"37566672515","text":"# Poly - Many\n# morphism - Form\n# ============================\n# Duck Typing\n# Operator overloading\n# Method overloading\n# Method Overriding\n# ============================\n\"\"\"\nDuck Typing\n\"\"\"\n# x = 5\n# print(type(x), id(x))\n# x = 'Gopi'\n# print(type(x), id(x))\nclass PyCharm:\n def execute(self):\n print('Compiling', \"\\nRunning\")\n\n\nclass MyEditor:\n def execute(self):\n print('Spell check')\n print('Convention check')\n print('Compiling', \"\\nRunning\")\n\n\nclass Laptop:\n def code(self, ide):\n ide.execute()\n\n\nide = PyCharm()\nide1 = MyEditor()\n\nlap1 = Laptop()\nlap1.code(ide)\nlap1.code(ide1)\n","repo_name":"Gopi25071993/TeluskoAllFiles","sub_path":"Polymorphism.py","file_name":"Polymorphism.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"12469481095","text":"import os\nimport sys\nimport pandas\nimport random\nimport pytz\nimport pandas as pd\nimport uuid\nimport django\nimport uuid\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.config.local')\ndjango.setup()\n\nfrom swift_parsing_app.models import SourceFile, MessageType, SwiftMessage, SwiftField, SwiftFieldValueDetail, \\\n SwiftFieldValue\n\n# Columns: MT;Status;Tag;Field_Name;Content_Options;KeyMTTag\nswift_fields_df = pandas.read_csv('../../mock/SM.csv', sep=';')\nswift_messages_df = pandas.read_pickle('../../mock/output_dataframe.pkl')\n\n\ndef populate_swift_msg_types():\n list_of_msg_types = swift_fields_df['MT'].unique()\n list_of_msg_types = sorted(list_of_msg_types)\n\n for msg_type in list_of_msg_types:\n new_object = 
MessageType.objects.get_or_create(type_name=msg_type)\n print('Message Types were created')\n\n\ndef populate_swift_fields():\n for row_index, row in swift_fields_df.iterrows():\n mandatory = 1 if row['Status'] == 'M' else 2\n msg_type = MessageType.objects.filter(type_name=row['MT']).first()\n swift_field = SwiftField.objects.get_or_create(key_mt_tag=row['KeyMTTag'], field_name=row['Tag'],\n field_tag=row['Field_Name'], status=mandatory,\n content_options=row['Content_Options'],\n message_type=msg_type)\n\n print('Swift Fields were created')\n\n\ndef populate_source_file():\n source_file = SourceFile.objects.get_or_create(file_name='test_file_001.csv', status=2)\n print('Source Files were created')\n pass\n\n\ndef populate_swift_message():\n list_of_msgs = swift_messages_df['transaction_id'].unique()\n list_of_msgs = sorted(list_of_msgs)\n\n source_file = SourceFile.objects.first()\n\n for transaction_id in list_of_msgs:\n direction = swift_messages_df[\n (swift_messages_df['transaction_id'] == transaction_id) & (swift_messages_df['field_name'] == 'Direction')][\n 'field_value'].item()\n direction_value = 1 if direction == 'I' else 2\n\n application_header = swift_messages_df[\n (swift_messages_df['transaction_id'] == transaction_id) & (swift_messages_df['field_name'] == '2')][\n 'field_value'].item()\n\n msg_type = swift_messages_df[\n (swift_messages_df['transaction_id'] == transaction_id) & (swift_messages_df['field_name'] == 'MT')][\n 'field_value'].item()\n msg_type_object = MessageType.objects.filter(type_name=msg_type).first()\n # transaction_id = transaction_id.replace('-','')\n new_object = SwiftMessage.objects.get_or_create(transaction_id=transaction_id, source_file=source_file,\n direction=direction_value, message_type=msg_type_object,\n application_header=application_header)\n\n print('Swift Messages were created')\n pass\n\n\ndef populate_swift_field_values():\n list_of_msgs = swift_messages_df['transaction_id'].unique()\n list_of_msgs = sorted(list_of_msgs)\n\n for transaction_id in list_of_msgs:\n transaction_object = SwiftMessage.objects.get(transaction_id=transaction_id)\n\n list_of_fields = swift_messages_df[swift_messages_df['transaction_id'] == transaction_id]\n\n for index, row in list_of_fields.iterrows():\n\n swift_fields_not_in_dictionary = ['Direction', 'MT', 'Rest of 2', '2', '3']\n swift_field_name = row['field_name']\n if swift_field_name not in swift_fields_not_in_dictionary:\n related_swift_field = SwiftField.objects.get(field_name=swift_field_name,\n message_type=transaction_object.message_type)\n swift_field_value = row['field_value']\n new_object = SwiftFieldValue.objects.get_or_create(swift_message=transaction_object,\n swift_field=related_swift_field,\n field_value=swift_field_value)\n\n print('Swift Fields Values were created')\n pass\n\n\ndef format_db():\n # SourceFile, MessageType, SwiftMessage, SwiftField, SwiftFieldValueDetail\n SwiftFieldValueDetail.objects.all().delete()\n SwiftFieldValue.objects.all().delete()\n SwiftMessage.objects.all().delete()\n SourceFile.objects.all().delete()\n SwiftField.objects.all().delete()\n\n\nif __name__ == '__main__':\n print(\"Formating the Database\")\n format_db()\n\n populate_swift_msg_types()\n populate_swift_fields()\n populate_source_file()\n populate_swift_message()\n populate_swift_field_values()\n\n print('Populating 
Complete')\n","repo_name":"NightingaleV/sweeper-swift-parsing-web-app","sub_path":"swift_parsing_app/models/populate_db.py","file_name":"populate_db.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14478367642","text":"import RPi.GPIO as GPIO\nimport usb.core\nimport usb.util\nimport os \nimport sys\nfrom time import gmtime, strftime\nimport time\nimport copy\nimport serial\n#control_motorディレクトリへのパを追加\nsys.path.append(os.path.join(os.path.dirname(__file__), '../control_motor'))\nimport blv_lib\nimport az_lib_direct\n\n#GPIO_init###########################################\npin_list = [12,16,18] #move,rclu,arm\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(pin_list[0],GPIO.OUT)\nGPIO.setup(pin_list[1],GPIO.OUT)\nGPIO.setup(pin_list[2],GPIO.OUT)\n#####################################################\n\n#定数################################################\nDED_ZONE = 150\nZ_DED_ZONE = 250\nDIFF_SIZE = 1\nZ_DIFF_SIZE = 10\n#####################################################\n\n\n#状態変数############################################\nMode = 0 #0:クローラ, 1:リモートセンタ機構&リフトアップ, 2:ロボットアーム\nRC_mode = 1 #0:階段降り, 1:真ん中, 2:椅子座り, 3:階段上り\nLU_mode = 1 #0:収納, 1:テンション維持モード 2:リフトアップ\n#####################################################\n\n#LED#################################################\ndef LED_setting(pin_data_list):\n global pin_list\n for i in range(len(pin_list)):\n GPIO.output(pin_list[i],pin_data_list[i])\n#####################################################\n\n#サーフティーの状態\nSafety = 0\n\nwhile True:\n #セーフティの読み込み\n Safety = 1 #ここで確定1だが実際はボタンの値を読み込む\n if Safety == 0:\n continue\n\n #コントローラ変数(セーフティ解除時に初期化される)#######\n Z_push = 0 #Z軸方向の変位\n old_Z_push = 0 #前回のZ軸方向の変位\n R_list = [0,0,0] #軸に対する回転の変位\n old_R_list = 0 #前回の軸に対する回転の変位\n Button_number = 0 #左右のボタンの値\n ########################################################\n\n #RC変数#################################################\n RC_flag = 1 #クリックの判定(1の時は次への移動をしない)\n ########################################################\n #LED_setting############################################\n LED_setting([1,0,0]) \n ########################################################\n\n dev = usb.core.find(idVendor=0x46d, idProduct=0xc626)\n if dev is None:\n raise ValueError('SpaceNavigator not found');\n else:\n print(dev)\n cfg = dev.get_active_configuration()\n print('cfg is ', cfg)\n intf = cfg[(0,0)]\n print('intf is ', intf)\n ep = usb.util.find_descriptor(intf, custom_match = lambda e: usb.util.endpoint_direction(e.bEndpointAddress) == usb.util.ENDPOINT_IN)\n print('ep is ', ep)\n reattach = False\n if dev.is_kernel_driver_active(0):\n reattach = True\n dev.detach_kernel_driver(0)\n\n ep_in = dev[0][(0,0)][0]\n ep_out = dev[0][(0,0)][1]\n print('')\n print('Exit by pressing any button on the SpaceNavigator')\n print('')\n\n\n #自分の端末ごとに適切に設定する\n client = serial.Serial(\"/dev/ttyXRUSB0\",115200,timeout=0.1,parity=serial.PARITY_EVEN,stopbits=serial.STOPBITS_ONE)\n #モータのインスタンス化##############################\n motor1 = blv_lib.blv_motor(client,1) #右クローラ\n motor2 = blv_lib.blv_motor(client,2) #左のクローラ\n motor3 = az_lib_direct.az_motor_direct(client,3) #リフトアップ右\n motor4 = az_lib_direct.az_motor_direct(client,4) #リフトアップ左\n motor5 = az_lib_direct.az_motor_direct(client,5,[0,58436,90000,116750]) #リモートセンタ\n #####################################################\n\n #初期移動ステッピングモータ関連######################\n #リモートセンターの移動\n motor5.go_list(RC_mode)\n #リフトアップの移動\n if LU_mode == 0:\n 
motor3.go(0)\n motor4.go(0)\n elif LU_mode == 1:\n motor3.go_torque(300)#15%\n motor4.go_torque(300)#15%\n elif LU_mode == 2:\n motor3.go(13200)#位置移動\n motor4.go(13200)#位置移動\n #####################################################\n\n #初期設定ブラシレスモータ関連########################\n motor1.set_acc_dec_time(2)\n motor2.set_acc_dec_time(2)\n #####################################################\n\n\n\n while True:\n try:\n data = dev.read(ep_in.bEndpointAddress, ep_in.bLength, 0)\n\n #Z軸のプッシュ判定#############################################################\n if data[0] == 1:\n old_Z_push = copy.deepcopy(Z_push)\n Z_push = data[5] + (data[6]*256)\n\n if data[6] > 127:\n Z_push -= 65536\n\n #デッドゾーンの処理\n if Z_push <= Z_DED_ZONE and Z_push >= -Z_DED_ZONE:\n Z_push = 0\n\n #感度の処理\n diff = abs(Z_push - old_Z_push)\n if diff > Z_DIFF_SIZE and sum(R_list) == 0:\n print(\"Push: \",Z_push)\n\n #Mode:0 クローラモード\n if Mode == 0:\n pass\n #Mode:1 リモート&リフトアップ \n elif Mode == 1:\n if Z_push > 300:\n LU_mode = 2\n motor5.go_list(3)\n time.sleep(5)\n motor3.go(point=13200,speed=200,rate=1)\n motor4.go(point=13200,speed=200,rate=1)\n motor5.go_list(RC_mode)\n \n elif Z_push < -250:\n LU_mode = 0\n motor3.go(point=0)\n motor4.go(point=0)\n\n #Mode2 : アームモード\n elif Mode == 2:\n pass\n ##############################################################################\n\n #Rの移動判定##################################################################\n if data[0] == 2:\n old_R_list = copy.deepcopy(R_list)\n R_list[0] = data[1] + (data[2]*256)\n R_list[1] = data[3] + (data[4]*256)\n R_list[2] = data[5] + (data[6]*256)\n\n if data[2] > 127:\n R_list[0] -= 65536\n if data[4] > 127:\n R_list[1] -= 65536\n if data[6] > 127:\n R_list[2] -= 65536\n\n #デッドゾーンの処理\n for i in range(3):\n if R_list[i] <= DED_ZONE and R_list[i] >= -DED_ZONE :\n R_list[i] = 0\n\n #感度の処理\n diff = abs(sum(R_list) - sum(old_R_list))\n if diff > DIFF_SIZE and abs(Z_push) < Z_DED_ZONE:\n print(\"R: \", R_list[0], R_list[1], R_list[2])\n\n #Mode:0 クローラモード\n if Mode == 0:\n if R_list[0] == 0 and R_list[1] == 0 and R_list[2]==0: #停止\n #motor1.set_speed(0)\n #motor2.set_speed(0)\n motor1.go(1,1)\n motor2.go(1,1)\n elif R_list[0] > 0: #前進移動\n if R_list[1] >= 0:#左をはやく\n motor1.set_speed(int(abs(80*R_list[0]*0.01)))\n motor2.set_speed(int(abs(80*R_list[0]*0.01)) + int(R_list[2]*0.04))\n elif R_list[0] < 0:#右をはやく\n motor1.set_speed(int(abs(80*R_list[0]*0.01)) + int(R_list[2]*0.04))\n motor2.set_speed(int(abs(80*R_list[0]*0.01)))\n #motor1.go(1,0)\n #motor2.go(0,1)\n motor1.go(0,1)\n motor2.go(1,0)\n elif R_list[0] < 0: #後進移動\n if R_list[1] >= 0:#左をはやく\n motor1.set_speed(int(abs(80*R_list[0]*0.01)))\n motor2.set_speed(int(abs(80*R_list[0]*0.01)) + int(abs(R_list[2]*0.04)))\n elif R_list[1] < 0:#右をはやく\n motor1.set_speed(int(abs(80*R_list[0]*0.01)))\n motor2.set_speed(int(abs(80*R_list[0]*0.01)) + int(abs(R_list[2]*0.04)))\n #motor1.go(0,1)\n #motor2.go(1,0)\n motor1.go(1,0)\n motor2.go(0,1)\n elif R_list[2] > 0: #右は前,左は後ろ\n motor1.set_speed(int(abs(80*R_list[2]*0.01)))\n motor2.set_speed(int(abs(80*R_list[2]*0.01)))\n motor1.go(1,0)\n motor2.go(1,0)\n \n elif R_list[2] < 0: #左は前,右は後ろ\n motor1.set_speed(int(abs(80*R_list[2]*0.01)))\n motor2.set_speed(int(abs(80*R_list[2]*0.01)))\n motor1.go(0,1)\n motor2.go(0,1)\n\n #Mode:1 リモート&リフトアップ\n elif Mode == 1:\n #リモートセンターの判定##########################################\n if R_list[0] == 0 and RC_flag==1:\n RC_flag = 0\n elif R_list[0] > 300 and RC_flag==0:#前への移動\n if RC_mode == 3:\n pass\n else:#移動処理\n RC_mode+=1\n motor5.go_list(RC_mode)\n 
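# The RC_flag bookkeeping around this branch is a one-shot click debounce:
# the flag set on the next line marks the click as consumed, and it is only
# re-armed once the knob axis returns to zero, so a held deflection advances
# RC_mode exactly once. A minimal standalone sketch of the same pattern
# (hypothetical helper, not part of this controller; it folds the two
# direction thresholds into a single abs() threshold for brevity):
class OneShotClick:
    def __init__(self, threshold):
        self.threshold = threshold
        self.consumed = False  # mirrors RC_flag == 1

    def fire(self, axis_value):
        if axis_value == 0:
            self.consumed = False  # re-arm on release, like RC_flag = 0
            return False
        if not self.consumed and abs(axis_value) > self.threshold:
            self.consumed = True  # like RC_flag = 1 after a mode change
            return True
        return False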
RC_flag = 1\n elif R_list[0] < -170 and RC_flag==0:#後ろへの移動\n if RC_mode == 0:\n pass\n else:#移動処理\n RC_mode -=1\n motor5.go_list(RC_mode)\n RC_flag = 1\n ##################################################################\n\n #リフトアップの判定###############################################\n if abs(R_list[2]) > 340:\n LU_mode = 1\n #motor3.go_torque(150)\n #motor4.go_torque(150)\n motor3.set_position_deviation(30000)\n motor4.set_position_deviation(30000)\n motor3.go_torque_pos(point=9000,op_current=150)\n motor4.go_torque_pos(point=9000,op_current=150)\n ##################################################################\n\n #Mode:2 アームモード\n elif Mode == 1:\n pass\n ##############################################################################\n\n #ボタンの判定(左が2,右が1)####################################################\n if data[0] == 3:\n if data[1]== 0:\n print(\"push button : \", Button_number)\n if Button_number == 1:\n if Mode == 2:\n Mode = 0\n else:\n Mode += 1\n if Mode == 1:\n RC_flag = 0\n elif Button_number == 2:\n if Mode == 0:\n Mode = 2\n else:\n Mode -= 1\n if Mode == 1:\n RC_flag = 0\n print(\"Now Mode:\",Mode)\n if Mode == 0:\n LED_setting([1,0,0])\n elif Mode == 1:\n LED_setting([0,1,0])\n elif Mode == 2:\n LED_setting([0,0,1])\n\n Button_number = 0\n\n else:\n Button_number = data[1]\n ##############################################################################\n\n except KeyboardInterrupt:\n print(\"end\")\n Safety = 0\n break\n\n except usb.core.USBError:\n print(\"USB error\")\n Safety = 0\n break\n except:\n print(\"Error\")\n Safety = 0\n break\n\n\n # end while\n usb.util.dispose_resources(dev)\n\n if reattach:\n dev.attach_kernel_driver(0)\n","repo_name":"KobayashiRui/CYBATHLON","sub_path":"complete_version/Controler_bac_no_arm.py","file_name":"Controler_bac_no_arm.py","file_ext":"py","file_size_in_byte":13104,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"6534291830","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 9 18:17:04 2018\r\n\r\n@author: User\r\n\"\"\"\r\n\r\nimport boto3\r\nimport pprint\r\nimport pandas as pd\r\nimport time\r\n\r\n# with open('./config/config.json', 'r') as file:\r\n# config = json.loads(file.read())\r\n\r\ndynamodb = boto3.resource(\r\n 'dynamodb',\r\n region_name='ap-northeast-2',\r\n # aws_access_key_id=config['ID'],\r\n # aws_secret_access_key=config['KEY']\r\n)\r\n\r\n\r\n# 1. Table 제거\r\nif __name__ == '__main__':\r\n table = dynamodb.Table('relatedTags')\r\n response = table.delete()\r\n printer = pprint.PrettyPrinter(indent=2)\r\n printer.pprint(response)\r\n\r\n\r\n# 2. DynamoDB 내 Table 생성하기\r\n# 키 정리할 때, 기준 키만 설정하면 된다. 핵 좋아!\r\nif __name__ == '__main__':\r\n table = dynamodb.create_table(\r\n TableName='relatedTags',\r\n KeySchema=[\r\n {\r\n 'AttributeName': 'idx',\r\n 'KeyType': 'HASH'\r\n }\r\n ],\r\n AttributeDefinitions=[\r\n {\r\n 'AttributeName': 'idx',\r\n 'AttributeType': 'N'\r\n }\r\n ],\r\n ProvisionedThroughput={\r\n 'ReadCapacityUnits': 50,\r\n 'WriteCapacityUnits': 50\r\n }\r\n )\r\n\r\n# 3. DynamoDB 내 생성된 특정 Table 정보 확인 및 아이템 가져오기\r\nif __name__ == '__main__':\r\n table = dynamodb.Table('relatedTags')\r\n print(1, table.creation_date_time)\r\n\r\n response = table.get_item(\r\n Key={\r\n 'idx': 1\r\n }\r\n )\r\n item = response['Item']\r\n print(2, item)\r\n\r\n# 4. 
아이템 업데이트 하기\r\nif __name__ == '__main__':\r\n table = dynamodb.Table('relatedTags')\r\n table.update_item(\r\n Key={\r\n 'idx': 2\r\n },\r\n UpdateExpression='SET createdTime = :val1',\r\n ExpressionAttributeValues={\r\n ':val1': \"2018-08-08T05:07:13.515Z\"\r\n }\r\n )\r\n\r\n# 5. 아이템 삭제��기\r\nif __name__ == '__main__':\r\n table = dynamodb.Table('relatedTags')\r\n table.delete_item(\r\n Key={\r\n 'idx': 1\r\n }\r\n )\r\n\r\n# 6. 항목 생성하기\r\nif __name__ == '__main__':\r\n table = dynamodb.Table('relatedTags')\r\n table.put_item(\r\n Item={\r\n 'idx': 1,\r\n 'cretedTime': \"2018-08-09T05:07:13.515Z\",\r\n 'relatedTag': {\r\n \"1\": [\r\n 177,\r\n 60,\r\n 1231,\r\n 1298423,\r\n 8831092,\r\n 20931\r\n ],\r\n \"2\": [\r\n 54,\r\n 782,\r\n 229,\r\n 7821,\r\n 49632,\r\n 85214\r\n ],\r\n \"3\": [\r\n 285,\r\n 2,\r\n 987,\r\n 128,\r\n 6356,\r\n 6684\r\n ]\r\n },\r\n }\r\n )\r\n \r\n# 7. Json 파일을 이용해 항목 데이터로 생성하기\r\nif __name__ == '__main__':\r\n def load_json_to_dict(load_path):\r\n import json\r\n with open(load_path, 'r', encoding=\"utf-8\") as data_file:\r\n data = data_file.read()\r\n data_file.close()\r\n d = json.loads(data)\r\n return d\r\n \r\n table = dynamodb.Table('relatedTags')\r\n load_path = \"D:\\\\vora_recommendation\\\\data_add_time_dynamo1.json\"\r\n data = load_json_to_dict(load_path)\r\n table = dynamodb.Table('relatedTags')\r\n \r\n dataIdx = data['idx']\r\n dataCreatedTime = data['createdTime']\r\n dataRelatedTags = data['relatedTags']\r\n \r\n response = table.put_item(\r\n Item={\r\n 'idx': dataIdx,\r\n 'createdTime': dataCreatedTime,\r\n 'relatedTags': dataRelatedTags\r\n })\r\n printer = pprint.PrettyPrinter(indent=2)\r\n printer.pprint(response)\r\n time.sleep(.500)\r\n","repo_name":"boohk/Python","sub_path":"AWS/DynamoDBConnector.py","file_name":"DynamoDBConnector.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"40246396807","text":"from flask_mqtt import Mqtt\nfrom sqlalchemy import exc\nfrom FASToryLine.configurations import BASE_TOPIC\nfrom FASToryLine.dbModels import AuthResult,Emotion\nfrom FASToryLine import app,db\nimport json,datetime\nfrom pprint import pprint as P\nmqtt = Mqtt(app)\n#####MQTT callbacks################\n\n@mqtt.on_connect()\ndef handle_connect(client, userdata, flags, rc):\n if rc==0:\n mqtt.unsubscribe_all()\n mqtt.subscribe(f'{BASE_TOPIC}authentication')\n print(f'[X-Routes] Subscribed to topic: {BASE_TOPIC}authentication')\n mqtt.subscribe(f'{BASE_TOPIC}emotion')\n print(f'[X-Routes] Subscribed to topic: {BASE_TOPIC}emotion') \n else:\n print(\"[X-Routes] Bad connection Returned code=\",rc)\n\n@mqtt.on_subscribe()\ndef handle_subscribe(client, userdata, mid, granted_qos):\n print('[X-Routes] Subscription id {} granted with qos {}.'\n .format(mid, granted_qos)) \n\n@mqtt.on_disconnect()\ndef handle_disconnect():\n mqtt.unsubscribe_all()\n print(\"[X-Routes] CLIENT DISCONNECTED\")\n\n@mqtt.on_message()\ndef handle_mqtt_message(client, userdata, message):\n try:\n message_in=json.loads(message.payload)\n #print(f\"[X-Routes] {type(message_in)},'??',{message_in}\")\n if message.retain ==1:\n print(f'[X] Retained message from zRefApp......')\n return \n \n if message.topic == f'{BASE_TOPIC}authentication':\n \n authResults = message_in\n result = AuthResult( \n Authenticated = authResults.get(\"authenticated\"), \n Description = authResults.get(\"description\"),\n DetectedFaces = authResults.get(\"detectedFaces\"), \n DistanceScore = 
authResults.get(\"distanceScore\")\n )\n db.session.add(result)\n db.session.commit()\n P(message_in)\n print(f'[X]: Auth result added to DB @ {datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")}')\n elif message.topic == f'{BASE_TOPIC}emotion':\n #{\"detail\":\"Not a valid file was uploaded\"}\n #print(message.topic)\n if message_in.get(\"Response\"):\n result = Emotion( \n StressLevel = message_in.get(\"Response\").get('stress_level')\n )\n db.session.add(result)\n db.session.commit()\n P(message_in)\n print(f'[X]: Emotion response result added to DB @ {datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")}')\n else:\n result = Emotion( \n Description = message_in.get(\"detail\")\n )\n db.session.add(result)\n db.session.commit()\n P(message_in)\n print(f'[X]: Emotion Not valid profile result added to DB @ {datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")}')\n\n except exc.SQLAlchemyError as e:\n print(f'[XE] {e}')\n except ValueError:\n print('[X-Routes] Decoding JSON has failed')\n\n# @app.route('/welcomes', methods = ['GET'])\n# def welcomes():\n# return ''\n","repo_name":"mahboobelahi/ZDMPStuff","sub_path":"Quadible-CALM_Old/FASToryLine/messageBus.py","file_name":"messageBus.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"22583805606","text":"import time\nimport unittest\nfrom selenium import webdriver\nfrom Pages.MainPage import MainPageClass\nfrom Pages.MyAccountHomePage import MyAccountHomePageClass\nfrom Pages.AmazonCardSection import AmazonCardSectionClass\nfrom Pages.AmazonItemSearchField import AmazonItemSearchFieldClass\nfrom Pages.SearchResultPage import SearchResultPageClass\nfrom Pages.FoundItemPage import FoundItemPageClass\n\n\n\n\nclass AmazonSimpleTestClass(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Chrome()\n self.driver.delete_all_cookies()\n self.driver.maximize_window()\n self.mainPage = MainPageClass(self.driver)\n self.MyAccountHomePage = MyAccountHomePageClass(self.driver)\n self.AmazonCardSection = AmazonCardSectionClass(self.driver)\n self.AmazonItemSearchField = AmazonItemSearchFieldClass(self.driver)\n self.SearchResultPage = SearchResultPageClass(self.driver)\n self.FoundItemPage = FoundItemPageClass(self.driver)\n\n\n\n def test_simpleTC(self):\n self.driver.get(\"https://www.amazon.com/\")\n self.mainPage.press_amazon_SignIn_account_Button()\n self.mainPage.fill_signin_field(\"kimkrugeractress@gmail.com\")\n\n time.sleep(4)\n self.mainPage.press_amazon_continue_Button()\n\n time.sleep(3)\n self.mainPage.fill_password_field(\"kim2002++\")\n\n time.sleep(5)\n self.mainPage.press_amazon_checkbox_field()\n\n time.sleep(5)\n self.mainPage.press_amazon_SignIn_Button()\n\n time.sleep(5)\n self.MyAccountHomePage.press_amazon_bucket_Button()\n\n time.sleep(3)\n self.AmazonCardSection.delete_one_product()\n\n time.sleep(3)\n self.AmazonItemSearchField.fill_item_search_field(\"jbl bluetooth headphones\")\n\n time.sleep(3)\n self.AmazonItemSearchField.press_item_search_button()\n\n time.sleep(2)\n self.SearchResultPage.scroll(\"window.scrollto(0, 0)\")\n\n time.sleep(3)\n self.SearchResultPage.finde_certain_item_button()\n\n time.sleep(3)\n self.FoundItemPage.change_location_button()\n\n time.sleep(3)\n self.FoundItemPage.fill_zip_code_filde(\"19701\")\n\n time.sleep(3)\n self.FoundItemPage.press_zip_code_apply_button()\n\n time.sleep(3)\n self.FoundItemPage.press_add_to_card_button()\n\n\n\n\n\n def tearDown(self):\n 
time.sleep(4)\n self.driver.close()","repo_name":"petrosyankn/pythonProjectSelenium","sub_path":"TestCases/AmazonTest.py","file_name":"AmazonTest.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42378162633","text":"import sys\nimport re\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport os\nimport pdb\nimport numpy as np\nfrom sklearn import metrics\nimport json\n\ndef readGOPToDF(df, gop_file, method):\n temp_list = []\n with open(gop_file, 'r') as in_file:\n for line in in_file:\n line = line.strip()\n fields = line.split(' ')\n if len(fields) != 5:\n sys.exit(\"wrong line in the input GOP files\")\n temp_list.append([fields[1], round(float(fields[2]),3), fields[3], method])\n return df.append(pd.DataFrame(temp_list, columns=('phoneme','score','label', 'method')))\n \ndef plot(df, json_dict, outFile):\n methods = df['method'].unique()\n all_phonemes = df['phoneme'].unique()\n fig, axs = plt.subplots(len(all_phonemes), len(methods), figsize=(20, 4*len(all_phonemes)))\n df[\"label\"] = np.where(df['label']=='C', 0, 1 )\n for row,phoneme in enumerate(all_phonemes):\n for col,mtd in enumerate(methods):\n data_true = df.loc[(df[\"phoneme\"] == phoneme) & (df[\"method\"] == mtd) & (df[\"label\"] == 1), ['score', 'label']].to_numpy()\n data_false = df.loc[(df[\"phoneme\"] == phoneme) & (df[\"method\"] == mtd) & (df[\"label\"] == 0), ['score','label']].to_numpy()\n ax = axs[row][col]\n plot_labels = []\n add_label(ax.violinplot(data_true[:,0],vert=False, quantiles=[0.25,0.5,0.75], points=500, positions=[0]), \"Sub or Del({})\".format(data_true[:,0].shape[0]), plot_labels)\n add_label(ax.violinplot(data_false[:,0],vert=False, quantiles=[0.25,0.5,0.75], points=100, positions=[1]), \"Correct({})\".format(data_false[:,0].shape[0]), plot_labels)\n ax.set_xlim([-70, 5])\n ax.set_xlim([-70, 5])\n ax.set_title(mtd + ', Gop for phoneme: ' + phoneme)\n ax.get_yaxis().set_visible(False)\n auc_value = auc_cal(np.concatenate((data_true, data_false), axis=0))\n auc_artist, = plt.plot([], [])\n auc_label = (auc_artist, \"AUC = {}\".format(auc_value))\n if phoneme in json_dict[mtd][\"phonemes\"].keys(): \n #p:(closest_phoneme, mean_diff, auc_value, entropy, count_of_real, count_of_error)\n entropy = json_dict[mtd][\"phonemes\"][phoneme][3]\n auc_teacher = json_dict[mtd][\"phonemes\"][phoneme][2]\n L = round(entropy*auc_teacher, 3)\n json_artist, = plt.plot([], [])\n json_label = (json_artist, \"E={}, A={}, L={}\".format(entropy, auc_teacher, L))\n ax.legend(*zip(*(plot_labels+[auc_label, json_label])), loc=2)\n else:\n ax.legend(*zip(*(plot_labels+[auc_label])), loc=2)\n os.makedirs(os.path.dirname(outFile), exist_ok=True)\n plt.savefig(outFile)\n\ndef auc_cal(array): #input is a nX2 array, with the columns \"score\", \"label\"\n labels = [ 0 if i == 0 else 1 for i in array[:, 1]]\n if len(set(labels)) <= 1:\n return \"NoDef\"\n else:\n #negative because GOP is negatively correlated to the probablity of making an error\n return round(metrics.roc_auc_score(labels, -array[:, 0]),3)\n \n\ndef add_label(violin, method, labels):\n color = violin[\"bodies\"][0].get_facecolor().flatten()\n labels.append((mpatches.Patch(color=color), method))\n\ndef read_json(path):\n with open(path,\"r\") as injson:\n return json.load(injson)\n\nif __name__ == \"__main__\":\n if len(sys.argv) <= 1 :\n sys.exit(\"this script takes ... 
as arguments. It plots the GOP distributions for each phoneme\")\n\n df = pd.DataFrame(columns=('phoneme','score','label', 'method'))\n #methods = ['GMM-mono', 'GMM-mono-frame', 'DNN-mono', 'DNN-tri']\n methods = ['GMM-mono', 'DNN-tri']\n json_dict = { mtd:None for mtd in methods}\n assert(len(methods) == (len(sys.argv) - 2)/2)\n for i,mtd in enumerate(methods):\n df = readGOPToDF(df, sys.argv[2*i+1], mtd)\n json_dict[mtd] = read_json(sys.argv[2*i+2])\n print(\"read one GOP\")\n\n plot(df, json_dict, sys.argv[-1])\n","repo_name":"frank613/tools-ntnu","sub_path":"cmu_miss_pron/exp-new/plot_gop_entropy.py","file_name":"plot_gop_entropy.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71494160254","text":"import re\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\ndelta = 1e-12\nIs = 1\n\n\ndef f(x):\n y = 2 / 3 * x - 5 / 3 + math.exp(40 * x)\n return y\n\n\ndef df(x):\n d = (f(x) - f(x - delta)) / delta\n return d\n\n\ndef line2zero(x0):\n y = f(x0)\n k = df(x0)\n x1 = x0 - y / k\n return x1\n\n\ndef cmpr(x):\n if f(x) <= delta:\n flag = True\n else:\n flag = False\n return flag\n\n\nx = 10\n\nwhile True:\n if cmpr(x):\n print(x)\n break\n else:\n x = line2zero(x)\n\n\ndef i_diode(vd):\n i = Is * (math.exp(40 * vd) - 1)\n return i\n\n\ndef v_diode(id):\n v = math.log((id / Is + 1), math.e) / 40\n return v\n\n\ndef plot_i_v(start, stop, point_num):\n x = np.linspace(start, stop, point_num, endpoint=True)\n i = []\n for index in range(len(x)):\n i += [i_diode(x[index])]\n\n ymax = max(i, key=lambda v : v)\n ymin = min(i, key=lambda v : v)\n m = ymax * 1.2\n n = ymin * 1.2\n\n plt.plot(x, i, color=\"blue\", linewidth=1.0, linestyle=\"-\")\n plt.xlim(start, stop)\n plt.xticks(np.linspace(start, stop, 9, endpoint=True))\n plt.ylim(n, m)\n plt.yticks(np.linspace(n, m, 5, endpoint=True))\n plt.xlabel('voltage $V_D$/V')\n plt.ylabel('current $i_D$/A')\n plt.title('I-V for diode\\n', fontsize=12)\n\n ax = plt.gca()\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.spines['bottom'].set_position(('data', 0))\n ax.yaxis.set_ticks_position('left')\n ax.spines['left'].set_position(('data', 0))\n\n plt.savefig(\"I-V_D.png\", dpi=288)\n plt.show()\n\n return\n\n\nplot_i_v(-0.1, 0.1, 1000)\n","repo_name":"yangbyangb/EDA_python","sub_path":"pyEDA/hw6.py","file_name":"hw6.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14712838799","text":"from string import Template\nfrom datetime import datetime\n\n\ndef welcome(login_user, name_user):\n with open ('src/template/template_welcome.html', 'r') as file:\n template = Template(file.read())\n date_now = datetime.now().strftime('%d/%m/%y')\n body_message = template.substitute(login=login_user , name=name_user, date=date_now)\n print(body_message)\n return body_message\n\n\nwelcome('Superman', 'Clark')","repo_name":"wagnerberna/cursos-python","sub_path":"Flask/07_RESTX_Mongo_Token_email_users_v4/src/view/view_v1.py","file_name":"view_v1.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10355985591","text":"class MyCircularQueue:\n def __init__(self, k: int):\n self.Length = k\n self.Queue = [None for i in range(k)]\n self.FrontIDX = 0\n self.RearIDX = 0\n self.Full 
= False\n def enQueue(self, value: int) -> bool:\n if not self.Full:\n self.Queue[self.RearIDX] = value\n self.RearIDX += 1\n if self.RearIDX == self.Length:\n self.RearIDX = 0\n if self.RearIDX == self.FrontIDX:\n self.Full = True\n return True\n return False\n def deQueue(self) -> bool:\n if self.isEmpty():\n return False\n self.Full = False\n self.FrontIDX += 1\n if self.FrontIDX == self.Length:\n self.FrontIDX = 0\n return True\n def Front(self) -> int:\n if self.isEmpty():\n return -1\n return self.Queue[self.FrontIDX]\n def Rear(self) -> int:\n if self.isEmpty():\n return -1\n return self.Queue[self.RearIDX-1]\n def isEmpty(self) -> bool:\n return self.Full == False and self.RearIDX == self.FrontIDX\n def isFull(self) -> bool:\n return self.Full\n\n\n# Your MyCircularQueue object will be instantiated and called as such:\n# obj = MyCircularQueue(k)\n# param_1 = obj.enQueue(value)\n# param_2 = obj.deQueue()\n# param_3 = obj.Front()\n# param_4 = obj.Rear()\n# param_5 = obj.isEmpty()\n# param_6 = obj.isFull()","repo_name":"hyuneie/LeetCode","sub_path":"622-design-circular-queue/622-design-circular-queue.py","file_name":"622-design-circular-queue.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"1650763699","text":"import sys\nimport numpy as np\nimport pandas as pd\nfrom keras.models import Sequential,Model\nfrom gensim.models import Word2Vec\nfrom keras.layers import Input,LSTM,Bidirectional,Flatten, GRU, Dropout, Dense,TimeDistributed, Activation\nfrom keras.layers.embeddings import Embedding\nfrom keras.models import load_model\nfrom keras.callbacks import ModelCheckpoint,EarlyStopping\nfrom keras import optimizers\nimport _pickle as pk\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\nfrom keras import regularizers\nimport gensim\n\ndef loaddata(file_label,file_nolab):\n\tlabel = []\n\tword_train = []\n\tlab_data = []\n\tla_data = open(file_label,\"r\",encoding='utf-8')\n\tno_la = open(file_nolab,\"r\",encoding='utf-8')\n\tfor l in la_data:\n\t\ttmp = l.strip().split(\" +++$+++ \")\n\t\tlabel.append(int(tmp[0]))\n\t\tword_train.append(tmp[1])\n\t\tlab_data.append(tmp[1])\n\tfor n in no_la:\n\t\ttmp1 = n.strip()\n\t\tword_train.append(tmp1)\n\tlabel = np.array(label)\n\tword_train = np.array(word_train)\n\tlab_data = np.array(lab_data)\n\t#print(\"yes/no label :\",len(label),\"tr_data : \",len(word_train))\n\treturn label,word_train,lab_data\n\ndef random(Xtrain,Ytrain):\n r_list = np.array(range(0,len(Xtrain)))\n np.random.shuffle(r_list)\n Xtrain = Xtrain[r_list]\n Ytrain = Ytrain[r_list]\n return Xtrain,Ytrain\ndef split_data(X,Y, ratio):\n\tdata_size = len(X)\n\tval_size = int(data_size * ratio)\n\treturn X[val_size:],Y[val_size:],X[:val_size],Y[:val_size]\n\nfile_label = sys.argv[1]\nfile_nolab = sys.argv[2]\ntok_path = sys.argv[3]\nWord_path = sys.argv[4]\nmodel_path = sys.argv[5]\n(tr_lab,word_data,tr_data) = loaddata(file_label,file_nolab)\n#print(\"num 0 \",tr_lab[0],tr_data[0])\nprint(\"label :\",tr_lab.shape,\"tr_data : \",tr_data.shape)\nMax_len = 40\n\nstem = gensim.parsing.porter.PorterStemmer()\ntr_data = [e for e in stem.stem_documents(tr_data)]\nword_data = [k for k in stem.stem_documents(word_data)]\n\ntokenizer = Tokenizer(num_words=None, filters='\\t\\n')\ntokenizer.fit_on_texts(word_data)\n\npk.dump(tokenizer,open(tok_path,'wb'))\ntokenizer = 
pk.load(open(tok_path,'rb'))\n(tr_data_f,tr_lab_f,va_data,va_lab) = split_data(tr_data,tr_lab,0.1)\n\nsequences = tokenizer.texts_to_sequences(tr_data_f)\ndata = np.array(pad_sequences(sequences, maxlen=Max_len))\nval_sequences = tokenizer.texts_to_sequences(va_data)\nvalid_data = np.array(pad_sequences(val_sequences, maxlen=Max_len))\n\n\n#labels = np.array(to_categorical(tr_lab))\n#labels = tr_lab\n\n\n#print(\"tr_data_f,tr_lab_f,va_data,va_lab : \",tr_data_f.shape,tr_lab_f.shape,va_data.shape,va_lab.shape)\n\nword2vec_data = [w.split(\" \") for w in word_data]\nprint(\"=============Word2Vec=============\")\nWVmodel = Word2Vec(word2vec_data, size=100, window=5, min_count=0, workers=4)\nWVmodel.save(Word_path)\nR_WVmodel = Word2Vec.load(Word_path)\n\n\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' % len(word_index))\nprint(\"Sequence 0 :\",sequences[0])\nprint(\"tr_data 0 : \",tr_data[0])\nlen_tr = len(word2vec_data)\n\n#translate\nembeded = np.zeros((len(word_index),100))\ncou = 0\nfor w ,i in word_index.items():\n\ttry:\n\t\ttmp = R_WVmodel.wv[w]\n\t\tembeded[i] = tmp\n\texcept:\n\t\tcou+=1\n#train\ninputs = Input(shape=(Max_len,))\n\n# Embedding layer\nembedding_inputs = Embedding(len(word_index),100,weights=[embeded],trainable=False)(inputs)\n# RNN \nRNN_cell_f = Bidirectional(LSTM(128,activation=\"tanh\",dropout=0.3,return_sequences = True))(embedding_inputs)\nRNN_cell = Bidirectional(LSTM(50,activation=\"tanh\",dropout=0.2,return_sequences = False))(RNN_cell_f)\n\n#RNN_cell= LSTM(128,dropout=0.3,return_sequences = False)\n#RNN_output = RNN_cell(embedding_inputs)\n# DNN layer\noutputs = Dense(50,activation='relu',kernel_regularizer=regularizers.l2(0.1))(RNN_cell)\noutputs = Dropout(0.3)(outputs)\noutputs = Dense(1, activation='sigmoid')(outputs)\n \nmodel = Model(inputs=inputs,outputs=outputs)\n\nmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\",metrics=[\"accuracy\"])\n\nModel_Check_Point = []\nModel_Check_Point.append(ModelCheckpoint('model-{epoch:05d}-{val_acc:.5f}-{val_loss:.5f}.hdf5', monitor='val_acc', save_best_only=True,mode='auto', period=1))\n#for i in rangwe(3):\nmodel.summary()\nmodel.fit(data,tr_lab_f,validation_data=(valid_data,va_lab) ,batch_size=64, epochs=10,callbacks = Model_Check_Point)\nmodel.save(model_path)\n","repo_name":"yuju30/NTUML18","sub_path":"hw5/HW5_sentiment.py","file_name":"HW5_sentiment.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"34928167191","text":"# -*- coding: utf-8 -*-\n\n'''bluetoothd mock template\n\nThis creates the expected methods and properties of the object manager\norg.bluez object (/), the manager object (/org/bluez), but no adapters or\ndevices.\n\nThis supports BlueZ 5 only.\n'''\n\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the Free\n# Software Foundation; either version 3 of the License, or (at your option) any\n# later version. 
See http://www.gnu.org/copyleft/lgpl.html for the full text\n# of the license.\n\n__author__ = 'Philip Withnall'\n__copyright__ = '''\n(c) 2013 Collabora Ltd.\n(c) 2017 - 2022 Martin Pitt \n'''\n\nfrom pathlib import Path\n\nimport dbus\n\nfrom dbusmock import OBJECT_MANAGER_IFACE, mockobject\n\nBUS_NAME = 'org.bluez'\nMAIN_OBJ = '/'\nSYSTEM_BUS = True\nIS_OBJECT_MANAGER = True\n\nBLUEZ_MOCK_IFACE = 'org.bluez.Mock'\nAGENT_MANAGER_IFACE = 'org.bluez.AgentManager1'\nPROFILE_MANAGER_IFACE = 'org.bluez.ProfileManager1'\nADAPTER_IFACE = 'org.bluez.Adapter1'\nMEDIA_IFACE = 'org.bluez.Media1'\nNETWORK_SERVER_IFACE = 'org.bluez.Network1'\nDEVICE_IFACE = 'org.bluez.Device1'\n\n# The device class of some arbitrary Android phone.\nMOCK_PHONE_CLASS = 5898764\n\n\n@dbus.service.method(AGENT_MANAGER_IFACE,\n in_signature='os', out_signature='')\ndef RegisterAgent(manager, agent_path, capability):\n all_caps = ['DisplayOnly', 'DisplayYesNo', 'KeyboardOnly',\n 'NoInputNoOutput', 'KeyboardDisplay']\n\n if agent_path in manager.agent_paths:\n raise dbus.exceptions.DBusException(\n 'Another agent is already registered ' + manager.agent_path,\n name='org.bluez.Error.AlreadyExists')\n\n if capability not in all_caps:\n raise dbus.exceptions.DBusException(\n 'Unsupported capability ' + capability,\n name='org.bluez.Error.InvalidArguments')\n\n if not manager.default_agent:\n manager.default_agent = agent_path\n manager.agent_paths += [agent_path]\n manager.capabilities[str(agent_path)] = capability\n\n\n@dbus.service.method(AGENT_MANAGER_IFACE,\n in_signature='o', out_signature='')\ndef UnregisterAgent(manager, agent_path):\n if agent_path not in manager.agent_paths:\n raise dbus.exceptions.DBusException(\n 'Agent not registered ' + agent_path,\n name='org.bluez.Error.DoesNotExist')\n\n manager.agent_paths.remove(agent_path)\n del manager.capabilities[agent_path]\n if manager.default_agent == agent_path:\n if len(manager.agent_paths) > 0:\n manager.default_agent = manager.agent_paths[-1]\n else:\n manager.default_agent = None\n\n\n@dbus.service.method(AGENT_MANAGER_IFACE,\n in_signature='o', out_signature='')\ndef RequestDefaultAgent(manager, agent_path):\n if agent_path not in manager.agent_paths:\n raise dbus.exceptions.DBusException(\n 'Agent not registered ' + agent_path,\n name='org.bluez.Error.DoesNotExist')\n manager.default_agent = agent_path\n\n\ndef load(mock, _parameters):\n mock.AddObject('/org/bluez', AGENT_MANAGER_IFACE, {}, [\n ('RegisterAgent', 'os', '', RegisterAgent),\n ('RequestDefaultAgent', 'o', '', RequestDefaultAgent),\n ('UnregisterAgent', 'o', '', UnregisterAgent),\n ])\n\n bluez = mockobject.objects['/org/bluez']\n bluez.AddMethods(PROFILE_MANAGER_IFACE, [\n ('RegisterProfile', 'osa{sv}', '', ''),\n ('UnregisterProfile', 'o', '', ''),\n ])\n bluez.agent_paths = []\n bluez.capabilities = {}\n bluez.default_agent = None\n\n\n@dbus.service.method(ADAPTER_IFACE,\n in_signature='o', out_signature='')\ndef RemoveDevice(adapter, path):\n adapter.RemoveObject(path)\n\n manager = mockobject.objects['/']\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesRemoved',\n 'oas', [\n dbus.ObjectPath(path),\n [DEVICE_IFACE],\n ])\n\n\n@dbus.service.method(ADAPTER_IFACE,\n in_signature='', out_signature='')\ndef StartDiscovery(adapter):\n adapter.props[ADAPTER_IFACE]['Discovering'] = True\n # NOTE: discovery filter support is minimal to mock\n # the Discoverable discovery filter\n if adapter.props[ADAPTER_IFACE]['DiscoveryFilter'] is not None:\n adapter.props[ADAPTER_IFACE]['Discoverable'] = True\n 
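# Broadcast the updated Discoverable/Discovering flags to clients in a\n    # single PropertiesChanged emission.\n    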
adapter.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n ADAPTER_IFACE,\n {\n 'Discoverable': dbus.Boolean(adapter.props[ADAPTER_IFACE]['Discoverable'], variant_level=1),\n 'Discovering': dbus.Boolean(adapter.props[ADAPTER_IFACE]['Discovering'], variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(ADAPTER_IFACE,\n in_signature='', out_signature='')\ndef StopDiscovery(adapter):\n adapter.props[ADAPTER_IFACE]['Discovering'] = False\n # NOTE: discovery filter support is minimal to mock\n # the Discoverable discovery filter\n if adapter.props[ADAPTER_IFACE]['DiscoveryFilter'] is not None:\n adapter.props[ADAPTER_IFACE]['Discoverable'] = False\n adapter.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n ADAPTER_IFACE,\n {\n 'Discoverable': dbus.Boolean(adapter.props[ADAPTER_IFACE]['Discoverable'], variant_level=1),\n 'Discovering': dbus.Boolean(adapter.props[ADAPTER_IFACE]['Discovering'], variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(ADAPTER_IFACE,\n in_signature='a{sv}', out_signature='')\ndef SetDiscoveryFilter(adapter, discovery_filter):\n adapter.props[ADAPTER_IFACE]['DiscoveryFilter'] = discovery_filter\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='ss', out_signature='s')\ndef AddAdapter(self, device_name, system_name):\n '''Convenience method to add a Bluetooth adapter\n\n You have to specify a device name which must be a valid part of an object\n path, e. g. \"hci0\", and an arbitrary system name (pretty hostname).\n\n Returns the new object path.\n '''\n path = '/org/bluez/' + device_name\n address_start = int(device_name[-1])\n address = (f\"{address_start:02d}:{address_start+1:02d}:{address_start+2:02d}:\"\n f\"{address_start+3:02d}:{address_start+4:02d}:{address_start+5:02d}\")\n adapter_properties = {\n 'UUIDs': dbus.Array([\n # Reference:\n # http://git.kernel.org/cgit/bluetooth/bluez.git/tree/lib/uuid.h\n # PNP\n '00001200-0000-1000-8000-00805f9b34fb',\n # Generic Access Profile\n '00001800-0000-1000-8000-00805f9b34fb',\n # Generic Attribute Profile\n '00001801-0000-1000-8000-00805f9b34fb',\n # Audio/Video Remote Control Profile (remote)\n '0000110e-0000-1000-8000-00805f9b34fb',\n # Audio/Video Remote Control Profile (target)\n '0000110c-0000-1000-8000-00805f9b34fb',\n ], variant_level=1),\n 'Discoverable': dbus.Boolean(False, variant_level=1),\n 'Discovering': dbus.Boolean(False, variant_level=1),\n 'Pairable': dbus.Boolean(True, variant_level=1),\n 'Powered': dbus.Boolean(True, variant_level=1),\n 'Address': dbus.String(address, variant_level=1),\n 'AddressType': dbus.String('public', variant_level=1),\n 'Alias': dbus.String(system_name, variant_level=1),\n 'Modalias': dbus.String('usb:v1D6Bp0245d050A', variant_level=1),\n 'Name': dbus.String(system_name, variant_level=1),\n # Reference:\n # http://bluetooth-pentest.narod.ru/software/\n # bluetooth_class_of_device-service_generator.html\n 'Class': dbus.UInt32(268, variant_level=1), # Computer, Laptop\n 'DiscoverableTimeout': dbus.UInt32(180, variant_level=1),\n 'PairableTimeout': dbus.UInt32(0, variant_level=1),\n }\n\n self.AddObject(path,\n ADAPTER_IFACE,\n # Properties\n adapter_properties,\n # Methods\n [\n ('RemoveDevice', 'o', '', RemoveDevice),\n ('StartDiscovery', '', '', StartDiscovery),\n ('StopDiscovery', '', '', StopDiscovery),\n ('SetDiscoveryFilter', 'a{sv}', '', SetDiscoveryFilter),\n ])\n\n adapter = mockobject.objects[path]\n adapter.AddMethods(MEDIA_IFACE, [\n ('RegisterEndpoint', 'oa{sv}', '', ''),\n ('UnregisterEndpoint', 'o', '', 
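\n            # an empty code string makes this a no-op method stub in dbusmock\n            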
''),\n ])\n adapter.AddMethods(NETWORK_SERVER_IFACE, [\n ('Register', 'ss', '', ''),\n ('Unregister', 's', '', ''),\n ])\n\n manager = mockobject.objects['/']\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded',\n 'oa{sa{sv}}', [\n dbus.ObjectPath(path),\n {ADAPTER_IFACE: adapter_properties},\n ])\n\n return path\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='s')\ndef RemoveAdapter(self, device_name):\n '''Convenience method to remove a Bluetooth adapter\n '''\n path = '/org/bluez/' + device_name\n # We could remove the devices related to the adapters here, but\n # when bluez crashes, the InterfacesRemoved aren't necessarily sent\n # devices first, so in effect, our laziness is testing an edge case\n # in the clients\n self.RemoveObject(path)\n\n manager = mockobject.objects['/']\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesRemoved',\n 'oas', [\n dbus.ObjectPath(path),\n [ADAPTER_IFACE],\n ])\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='s')\ndef RemoveAdapterWithDevices(self, device_name):\n '''Convenience method to remove a Bluetooth adapter and all\n the devices associated to it\n '''\n adapter_path = '/org/bluez/' + device_name\n adapter = mockobject.objects[adapter_path]\n manager = mockobject.objects['/']\n\n to_remove = []\n for path in mockobject.objects:\n if path.startswith(adapter_path + '/'):\n to_remove.append(path)\n\n for path in to_remove:\n adapter.RemoveObject(path)\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesRemoved',\n 'oas', [\n dbus.ObjectPath(path),\n [DEVICE_IFACE],\n ])\n\n self.RemoveObject(adapter_path)\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesRemoved',\n 'oas', [\n dbus.ObjectPath(adapter_path),\n [ADAPTER_IFACE],\n ])\n\n\n@dbus.service.method(DEVICE_IFACE,\n in_signature='', out_signature='')\ndef Pair(device):\n if device.paired:\n raise dbus.exceptions.DBusException(\n 'Device already paired',\n name='org.bluez.Error.AlreadyExists')\n device_address = device.props[DEVICE_IFACE]['Address']\n adapter_device_name = Path(device.props[DEVICE_IFACE]['Adapter']).name\n device.PairDevice(adapter_device_name, device_address, MOCK_PHONE_CLASS)\n\n\n@dbus.service.method(DEVICE_IFACE,\n in_signature='', out_signature='')\ndef Connect(device):\n if device.connected:\n raise dbus.exceptions.DBusException(\n 'Already Connected',\n name='org.bluez.Error.AlreadyConnected')\n device.connected = True\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'Connected': dbus.Boolean(device.connected, variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(DEVICE_IFACE,\n in_signature='', out_signature='')\ndef Disconnect(device):\n if not device.connected:\n raise dbus.exceptions.DBusException(\n 'Not Connected',\n name='org.bluez.Error.NotConnected')\n device.connected = False\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'Connected': dbus.Boolean(device.connected, variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='sss', out_signature='s')\ndef AddDevice(self, adapter_device_name, device_address, alias):\n '''Convenience method to add a Bluetooth device\n\n You have to specify a device address which must be a valid Bluetooth\n address (e.g. 'AA:BB:CC:DD:EE:FF'). The alias is the human-readable name\n for the device (e.g. 
as set on the device itself), and the adapter device\n name is the device_name passed to AddAdapter.\n\n This will create a new, unpaired and unconnected device.\n\n Returns the new object path.\n '''\n device_name = 'dev_' + device_address.replace(':', '_').upper()\n adapter_path = '/org/bluez/' + adapter_device_name\n path = adapter_path + '/' + device_name\n\n if adapter_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Adapter {adapter_device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')\n\n properties = {\n 'Address': dbus.String(device_address, variant_level=1),\n 'AddressType': dbus.String('public', variant_level=1),\n 'Name': dbus.String(alias, variant_level=1),\n 'Icon': dbus.String('', variant_level=1),\n 'Class': dbus.UInt32(0, variant_level=1),\n 'Appearance': dbus.UInt16(0, variant_level=1),\n 'UUIDs': dbus.Array([], signature='s', variant_level=1),\n 'Paired': dbus.Boolean(False, variant_level=1),\n 'Connected': dbus.Boolean(False, variant_level=1),\n 'Trusted': dbus.Boolean(False, variant_level=1),\n 'Blocked': dbus.Boolean(False, variant_level=1),\n 'WakeAllowed': dbus.Boolean(False, variant_level=1),\n 'Alias': dbus.String(alias, variant_level=1),\n 'Adapter': dbus.ObjectPath(adapter_path, variant_level=1),\n 'LegacyPairing': dbus.Boolean(False, variant_level=1),\n 'Modalias': dbus.String('', variant_level=1),\n 'RSSI': dbus.Int16(-79, variant_level=1), # arbitrary\n 'TxPower': dbus.Int16(0, variant_level=1),\n 'ManufacturerData': dbus.Array([], signature='a{qv}', variant_level=1),\n 'ServiceData': dbus.Array([], signature='a{sv}', variant_level=1),\n 'ServicesResolved': dbus.Boolean(False, variant_level=1),\n 'AdvertisingFlags': dbus.Array([], signature='ay', variant_level=1),\n 'AdvertisingData': dbus.Array([], signature='a{yv}', variant_level=1),\n }\n\n self.AddObject(path,\n DEVICE_IFACE,\n # Properties\n properties,\n # Methods\n [\n ('CancelPairing', '', '', ''),\n ('Connect', '', '', Connect),\n ('ConnectProfile', 's', '', ''),\n ('Disconnect', '', '', Disconnect),\n ('DisconnectProfile', 's', '', ''),\n ('Pair', '', '', Pair),\n ])\n device = mockobject.objects[path]\n device.paired = False\n device.connected = False\n\n manager = mockobject.objects['/']\n manager.EmitSignal(OBJECT_MANAGER_IFACE, 'InterfacesAdded',\n 'oa{sa{sv}}', [\n dbus.ObjectPath(path),\n {DEVICE_IFACE: properties},\n ])\n\n return path\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='ssi', out_signature='')\ndef PairDevice(_self, adapter_device_name, device_address, class_):\n '''Convenience method to mark an existing device as paired.\n\n You have to specify a device address which must be a valid Bluetooth\n address (e.g. 'AA:BB:CC:DD:EE:FF'). 
The adapter device name is the\n device_name passed to AddAdapter.\n\n This unblocks the device if it was blocked.\n\n If the specified adapter or device doesn't exist, a NoSuchAdapter or\n NoSuchDevice error will be returned on the bus.\n\n Returns nothing.\n '''\n device_name = 'dev_' + device_address.replace(':', '_').upper()\n adapter_path = '/org/bluez/' + adapter_device_name\n device_path = adapter_path + '/' + device_name\n\n if adapter_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Adapter {adapter_device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')\n if device_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(f'Device {device_name} does not exist.', name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')\n\n device = mockobject.objects[device_path]\n device.paired = True\n\n # Based off pairing with an Android phone.\n uuids = [\n '00001105-0000-1000-8000-00805f9b34fb',\n '0000110a-0000-1000-8000-00805f9b34fb',\n '0000110c-0000-1000-8000-00805f9b34fb',\n '00001112-0000-1000-8000-00805f9b34fb',\n '00001115-0000-1000-8000-00805f9b34fb',\n '00001116-0000-1000-8000-00805f9b34fb',\n '0000111f-0000-1000-8000-00805f9b34fb',\n '0000112f-0000-1000-8000-00805f9b34fb',\n '00001200-0000-1000-8000-00805f9b34fb',\n ]\n\n device.props[DEVICE_IFACE]['UUIDs'] = dbus.Array(uuids, variant_level=1)\n device.props[DEVICE_IFACE]['Paired'] = dbus.Boolean(True, variant_level=1)\n device.props[DEVICE_IFACE]['LegacyPairing'] = dbus.Boolean(True,\n variant_level=1)\n device.props[DEVICE_IFACE]['Blocked'] = dbus.Boolean(False,\n variant_level=1)\n\n try:\n device.props[DEVICE_IFACE]['Modalias']\n except KeyError:\n device.AddProperties(DEVICE_IFACE, {\n 'Modalias': dbus.String('bluetooth:v000Fp1200d1436',\n variant_level=1),\n 'Class': dbus.UInt32(class_, variant_level=1),\n 'Icon': dbus.String('phone', variant_level=1),\n })\n\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'UUIDs': dbus.Array(uuids, variant_level=1),\n 'Paired': dbus.Boolean(True, variant_level=1),\n 'LegacyPairing': dbus.Boolean(True, variant_level=1),\n 'Blocked': dbus.Boolean(False, variant_level=1),\n 'Modalias': dbus.String('bluetooth:v000Fp1200d1436',\n variant_level=1),\n 'Class': dbus.UInt32(class_, variant_level=1),\n 'Icon': dbus.String('phone', variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='ss', out_signature='')\ndef BlockDevice(_self, adapter_device_name, device_address):\n '''Convenience method to mark an existing device as blocked.\n\n You have to specify a device address which must be a valid Bluetooth\n address (e.g. 'AA:BB:CC:DD:EE:FF'). 
The adapter device name is the\n device_name passed to AddAdapter.\n\n This disconnects the device if it was connected.\n\n If the specified adapter or device doesn't exist, a NoSuchAdapter or\n NoSuchDevice error will be returned on the bus.\n\n Returns nothing.\n '''\n device_name = 'dev_' + device_address.replace(':', '_').upper()\n adapter_path = '/org/bluez/' + adapter_device_name\n device_path = adapter_path + '/' + device_name\n\n if adapter_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Adapter {adapter_device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')\n if device_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(f'Device {device_name} does not exist.', name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')\n\n device = mockobject.objects[device_path]\n\n device.props[DEVICE_IFACE]['Blocked'] = dbus.Boolean(True, variant_level=1)\n device.props[DEVICE_IFACE]['Connected'] = dbus.Boolean(False,\n variant_level=1)\n\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'Blocked': dbus.Boolean(True, variant_level=1),\n 'Connected': dbus.Boolean(False, variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='ss', out_signature='')\ndef ConnectDevice(_self, adapter_device_name, device_address):\n '''Convenience method to mark an existing device as connected.\n\n You have to specify a device address which must be a valid Bluetooth\n address (e.g. 'AA:BB:CC:DD:EE:FF'). The adapter device name is the\n device_name passed to AddAdapter.\n\n This unblocks the device if it was blocked.\n\n If the specified adapter or device doesn't exist, a NoSuchAdapter or\n NoSuchDevice error will be returned on the bus.\n\n Returns nothing.\n '''\n device_name = 'dev_' + device_address.replace(':', '_').upper()\n adapter_path = '/org/bluez/' + adapter_device_name\n device_path = adapter_path + '/' + device_name\n\n if adapter_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Adapter {adapter_device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')\n if device_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Device {device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')\n\n device = mockobject.objects[device_path]\n\n device.props[DEVICE_IFACE]['Blocked'] = dbus.Boolean(False,\n variant_level=1)\n device.props[DEVICE_IFACE]['Connected'] = dbus.Boolean(True,\n variant_level=1)\n\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'Blocked': dbus.Boolean(False, variant_level=1),\n 'Connected': dbus.Boolean(True, variant_level=1),\n },\n [],\n ])\n\n\n@dbus.service.method(BLUEZ_MOCK_IFACE,\n in_signature='ss', out_signature='')\ndef DisconnectDevice(_self, adapter_device_name, device_address):\n '''Convenience method to mark an existing device as disconnected.\n\n You have to specify a device address which must be a valid Bluetooth\n address (e.g. 'AA:BB:CC:DD:EE:FF'). 
The adapter device name is the\n device_name passed to AddAdapter.\n\n This does not change the device's blocked status.\n\n If the specified adapter or device doesn't exist, a NoSuchAdapter or\n NoSuchDevice error will be returned on the bus.\n\n Returns nothing.\n '''\n device_name = 'dev_' + device_address.replace(':', '_').upper()\n adapter_path = '/org/bluez/' + adapter_device_name\n device_path = adapter_path + '/' + device_name\n\n if adapter_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Adapter {adapter_device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchAdapter')\n if device_path not in mockobject.objects:\n raise dbus.exceptions.DBusException(\n f'Device {device_name} does not exist.',\n name=BLUEZ_MOCK_IFACE + '.NoSuchDevice')\n\n device = mockobject.objects[device_path]\n\n device.props[DEVICE_IFACE]['Connected'] = dbus.Boolean(False,\n variant_level=1)\n\n device.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [\n DEVICE_IFACE,\n {\n 'Connected': dbus.Boolean(False, variant_level=1),\n },\n [],\n ])\n","repo_name":"martinpitt/python-dbusmock","sub_path":"dbusmock/templates/bluez5.py","file_name":"bluez5.py","file_ext":"py","file_size_in_byte":23968,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"79"} +{"seq_id":"1263551559","text":"import requests\nimport os\n\n\nclass TmdbApi(object):\n \"\"\"\n This is a base class that all api endpoints will inherit from\n \"\"\"\n\n def __init__(self):\n\n # Grab the api key from the os environment an verify we actually have it\n api_key = os.getenv(\"TMDB_KEY\")\n if api_key is None:\n raise Exception(\"The api_key is missing.\")\n\n self.base_url = \"https://api.themoviedb.org/3\"\n self.api_key = \"?api_key={}\".format(api_key)\n\n\n def _get_appended_data(self, data_to_append):\n return \"&append_to_response={}\".format(\",\".join(data_to_append) if isinstance(data_to_append, list) else data_to_append)\n\n def _check_status_code(self, status_code):\n if status_code != 200:\n raise AssertionError(\"The api call failed. 
The response's status code was {}\".format(status_code))\n\n\nclass TmdbMoviesApi(TmdbApi):\n \"\"\"\n This is a class specific to testing the movies endpoint.\n \"\"\"\n\n def __init__(self):\n super(TmdbMoviesApi, self).__init__()\n self.movie_url = \"{}/movie\".format(self.base_url)\n\n def get_movie_details(self, media_id, detail_type=None, append_detail=None, check_response_code=True):\n # Verify detail_type and append_detail aren't being used at the same time\n if detail_type is not None and append_detail is not None:\n raise Exception(\"You don't need to set data_type if you are using append_detail.\")\n\n # Create the url\n detail_type = \"/{}\".format(detail_type) if detail_type is not None else \"\"\n append = \"{}\".format(self._get_appended_data(append_detail)) if append_detail is not None else \"\"\n url = \"{}/{}{}{}{}\".format(self.movie_url, str(media_id), detail_type, self.api_key, append)\n\n response = requests.get(url)\n\n if check_response_code:\n self._check_status_code(response.status_code)\n\n return response\n","repo_name":"gontib/roger_api_test","sub_path":"lib/tmdb_api.py","file_name":"tmdb_api.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17622754441","text":"import locale\nimport os\nimport sys\nimport yaml\nfrom collections import OrderedDict\nimport projectconfig_yamllib as pcy\n\ndef main():\n locale.setlocale(locale.LC_COLLATE, 'C')\n\n yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n pcy.construct_yaml_map)\n\n yaml.add_representer(OrderedDict, pcy.project_representer,\n Dumper=pcy.IndentedDumper)\n\n chandata = yaml.load(open('gerritbot/channels.yaml'))\n for k,v in chandata.items():\n v['projects'] = sorted(v['projects'])\n\n sys.stdout.write('# This file is sorted alphabetically by channel name.\\n')\n first = True\n for k in sorted(chandata.keys()):\n if not first:\n sys.stdout.write('\\n')\n first = False\n sys.stdout.write(yaml.dump({k: chandata[k]}, default_flow_style=False,\n Dumper=pcy.IndentedDumper, width=80, indent=2))\n\nif __name__ == '__main__':\n main()\n","repo_name":"nibalizer/openstack-infra-combined","sub_path":"project-config/tools/normalize_channels_yaml.py","file_name":"normalize_channels_yaml.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"22597624142","text":"import re\nfrom typing import List, Optional, Union\n\nfrom ..core import Config, Field, Schema\nfrom .virtual_field import VirtualField\n\n\nclass StringField(Field):\n \"\"\"\n A string field.\n \"\"\"\n\n storage_type = str\n\n def __init__(\n self,\n *,\n min_len: Optional[int] = None,\n max_len: Optional[int] = None,\n regex: Optional[str] = None,\n choices: Optional[List[str]] = None,\n transform_case: Optional[str] = None,\n transform_strip: Optional[Union[bool, str]] = None,\n **kwargs\n ):\n \"\"\"\n The string field can perform transformations on the value prior to validating it if either\n *transform_case* or *transform_strip* are specified.\n\n :param min_len: minimum allowed length\n :param max_len: maximum allowed length\n :param regex: regex pattern that the value must match\n :param choices: list of valid choices\n :param transform_case: transform the value's case to either ``upper`` or ``lower`` case\n :param transform_strip: strip the value by calling :meth:`str.strip`.\n Setting this to ``True`` will call :meth:`str.strip` 
without any arguments (ie.\n striping all whitespace characters) and if this is a ``str``, then :meth:`str.strip`\n will be called with ``transform_strip``.\n \"\"\"\n super().__init__(**kwargs)\n self.min_len = min_len\n self.max_len = max_len\n self.regex = re.compile(regex) if regex else None\n self.choices = choices\n self.transform_case = transform_case.lower() if transform_case else None\n self.transform_strip = transform_strip\n\n if self.transform_case and self.transform_case not in (\"lower\", \"upper\"):\n raise TypeError('transform_case must be \"lower\" or \"upper\"')\n\n def _validate(self, cfg: Config, value: str) -> str:\n \"\"\"\n Validate a value.\n\n :param cfg: current Config\n :param value: value to validate\n \"\"\"\n if not isinstance(value, str):\n raise ValueError(\"value must be a string, not a %s\" % type(value).__name__)\n\n if self.transform_strip:\n if isinstance(self.transform_strip, str):\n value = value.strip(self.transform_strip)\n else:\n value = value.strip()\n\n if self.required and not value:\n raise ValueError(\"value is required\")\n\n if self.transform_case:\n value = value.lower() if self.transform_case == \"lower\" else value.upper()\n\n if self.min_len is not None and len(value) < self.min_len:\n raise ValueError(\"value must be at least %d characters\" % self.min_len)\n\n if self.max_len is not None and len(value) > self.max_len:\n raise ValueError(\"value must not be more than %d characters\" % self.max_len)\n\n if self.regex and not self.regex.match(value):\n raise ValueError(\"value does not match pattern %s\" % self.regex.pattern)\n\n if self.choices and value not in self.choices:\n if len(self.choices) < 6:\n postfix = \": must be one of: \" + \", \".join(self.choices)\n else:\n postfix = \"\"\n raise ValueError(\"value is not a valid choice\" + postfix)\n\n return value\n\n\nclass LogLevelField(StringField):\n \"\"\"\n A field representing the Python log level.\n \"\"\"\n\n storage_type = str\n\n def __init__(self, levels: Optional[List[str]] = None, **kwargs):\n \"\"\"\n :param levels: list of log levels. If not specified, the default Python log levels will be\n used: ``debug``, ``info``, ``warning``, ``error``, and ``critical``.\n \"\"\"\n if not levels:\n levels = [\"debug\", \"info\", \"warning\", \"error\", \"critical\"]\n\n self.levels = levels\n kwargs.setdefault(\"transform_case\", \"lower\")\n kwargs.setdefault(\"transform_strip\", True)\n kwargs[\"choices\"] = levels\n super().__init__(**kwargs)\n\n\nclass ApplicationModeField(StringField):\n \"\"\"\n A field representing the application operating mode.\n \"\"\"\n\n storage_type = str\n HELPER_MODE_PATTERN = re.compile(\"^[a-zA-Z0-9_]+$\")\n\n def __init__(\n self, modes: Optional[List[str]] = None, create_helpers: bool = True, **kwargs\n ):\n \"\"\"\n The *create_helpers* parameter will create a boolean :class:`VirtualField` for each\n ``mode`` named ``is__mode``, that returns ``True`` when the mode is active. 
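For example, the default modes yield boolean ``is_development_mode`` and\n        ``is_production_mode`` fields on the config.\n        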
When\n *create_helpers=True* then each mode name must be a valid Python variable name.\n\n :param modes: application modes, if not specified the default modes will be used:\n ``production`` and ``development``\n :param create_helpers: create helper a bool ``VirtualField`` for each mode\n \"\"\"\n if not modes:\n modes = [\"development\", \"production\"]\n\n self.modes = modes\n self.create_helpers = create_helpers\n\n if create_helpers:\n for mode in modes:\n if not self.HELPER_MODE_PATTERN.match(mode):\n raise TypeError(\"invalid mode name: %s\" % mode)\n\n kwargs.setdefault(\"transform_case\", \"lower\")\n kwargs.setdefault(\"transform_strip\", True)\n kwargs[\"choices\"] = modes\n super().__init__(**kwargs)\n\n def _create_helper(self, mode: str) -> \"VirtualField\":\n \"\"\"\n Create helper VirtualField.\n \"\"\"\n return VirtualField(lambda cfg: self.__getval__(cfg) == mode)\n\n def __setkey__(self, schema: Schema, key: str) -> None:\n \"\"\"\n Set the key and optionally add ``VirtualField`` helpers to the schema if\n *create_helpers=True*.\n \"\"\"\n super().__setkey__(schema, key)\n if self.create_helpers:\n for mode in self.modes:\n schema._add_field(\"is_%s_mode\" % mode, self._create_helper(mode))\n","repo_name":"ameily/cincoconfig","sub_path":"cincoconfig/fields/string_field.py","file_name":"string_field.py","file_ext":"py","file_size_in_byte":5917,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"32929846982","text":"from dateutil.relativedelta import relativedelta\nimport datetime\nimport logging\nimport time\nimport os\nfrom openerp.osv import osv, fields\nimport openerp.tools\nfrom openerp.tools.translate import _\nfrom config import file_location\n\nfrom openerp.addons.decimal_precision import decimal_precision as dp\n\n_logger = logging.getLogger(__name__)\n\n\nclass custom_contract(osv.osv):\n _inherit = 'account.analytic.account'\n _columns = {\n 'include_cdr_amount': fields.boolean('Calculate amount from CDR files', store=True),\n }\n\n def cron_save_cdr_logs(self, cr, uid, context=None):\n cdr_log = self.pool.get('cdr.logs')\n logs = self.read_cdr_files(cr,uid)\n for log in logs:\n if len(log) == 16:\n hash_key = log[14].replace('\"', '')\n cr.execute(\"select id,name from res_partner where hash_key='\" + hash_key.strip() + \"'\")\n partner = cr.dictfetchall()\n if len(partner) > 0:\n res = {\n 'customer_id': partner[0]['id'],\n 'customer_name': partner[0]['name'],\n 'hash_key': hash_key.strip(),\n 'region': log[9].replace('\"', '').strip(),\n 'incoming_call_receiver': log[2].replace('\"', '').strip(),\n 'dialer': log[3].replace('\"', '').strip(),\n 'time_stamp': log[5].replace('\"', '').strip() + \" \" + log[6].replace('\"', '').strip(),\n 'total_call_time_from_dialing': log[7].replace('\"', '').strip(),\n 'calling_talk_time': log[8].replace('\"', '').strip(),\n 'charging_rate': log[11].replace('\"', '').strip(),\n 'call_type': log[10].replace('\"', '').strip(),\n 'type': 'normal'\n }\n cdr_log.create(cr, uid, res, context=context)\n elif len(log) == 18:\n hash_key = log[16].replace('\"','')\n cr.execute(\"select id,name from res_partner where hash_key='\"+hash_key.strip()+\"'\")\n partner = cr.dictfetchall()\n if len(partner)>0:\n res = {\n 'customer_id': partner[0]['id'],\n 'customer_name': partner[0]['name'] ,\n 'hash_key': hash_key.strip() ,\n 'region': log[11].replace('\"','').strip(),\n 'incoming_call_receiver':log[2].replace('\"','').strip() ,\n 'dialer': log[3].replace('\"','').strip() ,\n 
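# 18-column (toll-free) rows carry date/time at offsets 7-8 and the rate at\n                        # offset 13, hence the shifted indices relative to 16-column rows\n                        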
'time_stamp': log[7].replace('\"','').strip() + \" \" + log[8].replace('\"','').strip(),\n 'total_call_time_from_dialing': log[9].replace('\"','').strip(),\n 'calling_talk_time': log[10].replace('\"','').strip(),\n 'charging_rate': log[13].replace('\"','').strip(),\n 'type': 'tf'\n }\n cdr_log.create(cr, uid, res, context=context)\n return True\n\n # Get Wizard Record\n def read_cdr_files(self, cr, uid, context=None):\n end_lst = []\n for loc in file_location:\n path = os.path.expanduser(loc)\n try:\n #make sure using r'filepath' to mean its a string literal\n fl = open(path,'r')\n fl_all = fl.read()\n lst_rec = fl_all.split('\\n')\n for rec in lst_rec:\n rec_lst = rec.split(',')\n if len(rec_lst) > 1:\n end_lst.append(rec_lst)\n except:\n print(\"File is not present in current directory\")\n return end_lst\n\n\n def cal_invoice_amount(self, cr, uid, partner_id, context=None):\n total = 0.0\n cr.execute(\"Select * from call_rates where partner_id='\"+str(partner_id.id)+\"'\")\n call_rates = cr.dictfetchall()\n free_mintues = call_rates[0]['free_mins']\n counter = 0.0\n cr.execute(\"SELECT * FROM public.cdr_logs where charging_rate>0 and customer_id='\" + str(partner_id.id) + \"'\"+\"order by charging_rate asc\")\n call_history = cr.dictfetchall()\n for log in call_history:\n if counter > free_mintues:\n talk_time = log['calling_talk_time']/60\n if log['charging_rate']== 0.02 and log['type']=='tf':\n total = total+ talk_time*call_rates[0]['tf_package_one']\n elif log['charging_rate']== 0.04 and log['type']=='tf':\n total = total+ talk_time*call_rates[0]['tf_package_two']\n elif log['charging_rate']== 0.12 and log['type']=='tf':\n total = total+ talk_time*call_rates[0]['tf_package_three']\n elif log['charging_rate']== 0.16 and log['type']=='tf':\n total = total+ talk_time*call_rates[0]['tf_package_four']\n elif log['charging_rate']== 0.25 and log['type']=='tf':\n total = total+ talk_time*call_rates[0]['tf_package_five']\n elif log['call_type']=='National' and log['type']=='normal':\n total = total + talk_time * call_rates[0]['national_rates']\n elif log['call_type']=='Mobile' and log['type']=='normal':\n total = total + talk_time * call_rates[0]['mobile_rates']\n elif log['call_type']=='Local' and log['type']=='normal':\n total = total + talk_time * call_rates[0]['local_rates']\n elif log['call_type']=='Special' and log['type']=='normal':\n total = total + talk_time * call_rates[0]['local_rates']\n else:\n counter = counter + (log['calling_talk_time']/60)\n return total\n\n # This is the function which is reponsible to create invoice lines from cron job we must modified these lines\n def _prepare_invoice_line(self, cr, uid, line,contract, fiscal_position, context=None):\n amount = self.cal_invoice_amount(cr, uid, contract.partner_id, context=context)\n fpos_obj = self.pool.get('account.fiscal.position')\n res = line.product_id\n account_id = res.property_account_income.id\n if not account_id:\n account_id = res.categ_id.property_account_income_categ.id\n account_id = fpos_obj.map_account(cr, uid, fiscal_position, account_id)\n\n taxes = res.taxes_id or False\n tax_id = fpos_obj.map_tax(cr, uid, fiscal_position, taxes, context=context)\n if contract.include_cdr_amount:\n values = {\n 'name': line.name,\n 'account_id': account_id,\n 'account_analytic_id': line.analytic_account_id.id,\n 'price_unit': amount or 0.0,\n 'quantity': line.quantity,\n 'uos_id': line.uom_id.id or False,\n 'product_id': line.product_id.id or False,\n 'invoice_line_tax_id': [(6, 0, tax_id)],\n }\n else:\n values = {\n 
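# non-CDR contracts keep the line's configured unit price as-is\n                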
'name': line.name,\n 'account_id': account_id,\n 'account_analytic_id': line.analytic_account_id.id,\n 'price_unit': line.price_unit or 0.0,\n 'quantity': line.quantity,\n 'uos_id': line.uom_id.id or False,\n 'product_id': line.product_id.id or False,\n 'invoice_line_tax_id': [(6, 0, tax_id)],\n }\n return values\n\n def _prepare_invoice_lines(self, cr, uid, contract,fiscal_position_id, context=None):\n fpos_obj = self.pool.get('account.fiscal.position')\n fiscal_position = None\n if fiscal_position_id:\n fiscal_position = fpos_obj.browse(cr, uid, fiscal_position_id, context=context)\n invoice_lines = []\n for line in contract.recurring_invoice_line_ids:\n values = self._prepare_invoice_line(cr, uid, line,contract,fiscal_position, context=context)\n invoice_lines.append((0, 0, values))\n return invoice_lines\n\n def _prepare_invoice(self, cr, uid, contract, context=None):\n invoice = self._prepare_invoice_data(cr, uid, contract, context=context)\n invoice['invoice_line'] = self._prepare_invoice_lines(cr, uid, contract,invoice['fiscal_position'], context=context)\n return invoice\n\n def _recurring_create_invoice(self, cr, uid, ids, automatic=False, context=None):\n context = context or {}\n invoice_ids = []\n current_date = time.strftime('%Y-%m-%d')\n if ids:\n contract_ids = ids\n else:\n contract_ids = self.search(cr, uid, [('recurring_next_date', '<=', current_date), ('state', '=', 'open'),\n ('recurring_invoices', '=', True), ('type', '=', 'contract')])\n if contract_ids:\n cr.execute(\n 'SELECT company_id, array_agg(id) as ids FROM account_analytic_account WHERE id IN %s GROUP BY company_id',\n (tuple(contract_ids),))\n for company_id, ids in cr.fetchall():\n context_contract = dict(context, company_id=company_id, force_company=company_id)\n for contract in self.browse(cr, uid, ids, context=context_contract):\n try:\n if contract.include_cdr_amount:\n invoice_values = self._prepare_invoice(cr, uid, contract,context=context_contract)\n invoice_values['invoice_type'] = 'CDR'\n else:\n invoice_values = self._prepare_invoice(cr, uid, contract, context=context_contract)\n invoice_ids.append(\n self.pool['account.invoice'].create(cr, uid, invoice_values, context=context))\n next_date = datetime.datetime.strptime(contract.recurring_next_date or current_date, \"%Y-%m-%d\")\n interval = contract.recurring_interval\n if contract.recurring_rule_type == 'daily':\n new_date = next_date + relativedelta(days=+interval)\n elif contract.recurring_rule_type == 'weekly':\n new_date = next_date + relativedelta(weeks=+interval)\n elif contract.recurring_rule_type == 'monthly':\n new_date = next_date + relativedelta(months=+interval)\n else:\n new_date = next_date + relativedelta(years=+interval)\n self.write(cr, uid, [contract.id], {'recurring_next_date': new_date.strftime('%Y-%m-%d')},\n context=context)\n if automatic:\n cr.commit()\n except Exception:\n if automatic:\n cr.rollback()\n _logger.exception('Fail to create recurring invoice for contract %s', contract.code)\n else:\n raise\n return invoice_ids\n","repo_name":"Parkash067/ERP","sub_path":"custom_contracts/contracts.py","file_name":"contracts.py","file_ext":"py","file_size_in_byte":11217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"24065909619","text":"# -*- coding: utf-8 -*-\n\"\"\"\nInfo\n----\nThis file contains the basic functionalities of the ThermalEnergyStorage class.\n\n\"\"\"\n\nimport pandas as pd\nfrom .component import Component\n\n\nclass 
ThermalEnergyStorage(Component):\n def __init__(\n self,\n target_temperature,\n min_temperature,\n hysteresis,\n mass,\n cp,\n thermal_energy_loss_per_day,\n unit,\n identifier=None,\n environment=None,\n user_profile=None,\n cost=None,\n ):\n\n \"\"\"\n Info\n ----\n ...\n \n Parameters\n ----------\n \n The parameter timebase determines the resolution of the given data. \n Furthermore the parameter environment (Environment) is given to provide weather data and further external influences.\n To account for different people using a component, a use case (VPPUseCase) can be passed in to improve the simulation.\n \t\n Attributes\n ----------\n \n ...\n \n Notes\n -----\n \n ...\n \n References\n ----------\n \n ...\n \n Returns\n -------\n \n ...\n \n \"\"\"\n\n # Call to super class\n super(ThermalEnergyStorage, self).__init__(\n unit, environment, user_profile, cost\n )\n\n # Configure attributes\n self.identifier = identifier\n self.target_temperature = target_temperature\n self.current_temperature = target_temperature - hysteresis\n self.min_temperature = min_temperature\n self.timeseries = pd.DataFrame(\n columns=[\"temperature\"],\n index=pd.date_range(\n start=self.environment.start,\n end=self.environment.end,\n freq=self.environment.time_freq,\n name=\"time\",\n ),\n )\n self.hysteresis = hysteresis\n self.mass = mass\n self.cp = cp\n self.state_of_charge = mass * cp * (self.current_temperature + 273.15)\n # Aus Datenblättern ergibt sich, dass ein Wärmespeicher je Tag rund 10%\n # Bereitschaftsverluste hat (ohne Rohrleitungen!!)\n self.thermal_energy_loss_per_day = thermal_energy_loss_per_day\n self.efficiency_per_timestep = 1 - (\n thermal_energy_loss_per_day\n / (24 * (60 / self.environment.timebase))\n )\n self.needs_loading = None\n\n def operate_storage(self, timestamp, thermal_energy_generator):\n\n if self.get_needs_loading():\n thermal_energy_generator.ramp_up(timestamp)\n else:\n thermal_energy_generator.ramp_down(timestamp)\n\n thermal_energy_demand = self.user_profile.thermal_energy_demand.thermal_energy_demand.loc[\n timestamp\n ]\n observation = thermal_energy_generator.observations_for_timestamp(\n timestamp\n )\n thermal_production = observation[\"thermal_energy_output\"]\n\n # Formula: E = m * cp * T\n # <=> T = E / (m * cp)\n self.state_of_charge -= (\n (thermal_energy_demand - thermal_production)\n * 1000 # kWh to Wh ?? 
Why?\n / (60 / self.environment.timebase)\n )\n self.state_of_charge *= self.efficiency_per_timestep\n self.current_temperature = (\n self.state_of_charge\n# * 3600 # kWh to KJ\n / (self.mass * self.cp)\n ) - 273.15\n\n if thermal_energy_generator.is_running:\n el_load = observation[\"el_demand\"]\n else:\n el_load = 0\n\n self.timeseries.temperature[timestamp] = self.current_temperature\n\n # log timeseries of thermal_energy_generator_class:\n thermal_energy_generator.log_observation(observation, timestamp)\n\n return self.current_temperature, el_load\n\n def get_needs_loading(self):\n\n if self.current_temperature <= (\n self.target_temperature - self.hysteresis\n ):\n self.needs_loading = True\n\n if self.current_temperature >= (\n self.target_temperature + self.hysteresis\n ):\n self.needs_loading = False\n\n if self.current_temperature < self.min_temperature:\n raise ValueError(\n \"Thermal energy production to low to maintain \"\n + \"heat storage temperature!\"\n )\n\n return self.needs_loading\n\n def value_for_timestamp(self, timestamp):\n\n \"\"\"\n Info\n ----\n This function takes a timestamp as the parameter and returns the \n corresponding value for that timestamp. \n A positiv result represents a load. \n A negative result represents a generation. \n \n This abstract function needs to be implemented by child classes.\n Raises an error since this function needs to be implemented by child classes.\n \n Parameters\n ----------\n \n ...\n \t\n Attributes\n ----------\n \n ...\n \n Notes\n -----\n \n ...\n \n References\n ----------\n \n ...\n \n Returns\n -------\n \n ...\n \n \"\"\"\n\n raise NotImplementedError(\n \"value_for_timestamp needs to be implemented by child classes!\"\n )\n\n def observations_for_timestamp(self, timestamp):\n\n \"\"\"\n Info\n ----\n This function takes a timestamp as the parameter and returns a \n dictionary with key (String) value (Any) pairs. \n Depending on the type of component, different status parameters of the \n respective component can be queried. \n \n For example, a power store can report its \"State of Charge\".\n Returns an empty dictionary since this function needs to be \n implemented by child classes.\n \n Parameters\n ----------\n \n ...\n \t\n Attributes\n ----------\n \n ...\n \n Notes\n -----\n \n ...\n \n References\n ----------\n \n ...\n \n Returns\n -------\n \n ...\n \n \"\"\"\n\n return {}\n\n def prepare_time_series(self):\n\n \"\"\"\n Info\n ----\n This function is called to prepare the time series.\n Currently equals reset_time_series. 
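Both rebuild the empty\n        temperature DataFrame over the environment's simulation horizon.\n        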
Adjust if needed in later versions.\n \n Parameters\n ----------\n \n ...\n \t\n Attributes\n ----------\n \n ...\n \n Notes\n -----\n \n ...\n \n References\n ----------\n \n ...\n \n Returns\n -------\n \n ...\n \n \"\"\"\n\n self.timeseries = pd.DataFrame(\n columns=[\"temperature\"],\n index=pd.date_range(\n start=self.environment.start,\n end=self.environment.end,\n freq=self.environment.time_freq,\n name=\"time\",\n ),\n )\n return self.timeseries\n\n def reset_time_series(self):\n\n \"\"\"\n Info\n ----\n This function is called to reset the time series\n \n Parameters\n ----------\n \n ...\n \t\n Attributes\n ----------\n \n ...\n \n Notes\n -----\n \n ...\n \n References\n ----------\n \n ...\n \n Returns\n -------\n \n ...\n \n \"\"\"\n\n self.timeseries = pd.DataFrame(\n columns=[\"temperature\"],\n index=pd.date_range(\n start=self.environment.start,\n end=self.environment.end,\n freq=self.environment.time_freq,\n name=\"time\",\n ),\n )\n\n return self.timeseries\n","repo_name":"Pyosch/vpplib","sub_path":"vpplib/thermal_energy_storage.py","file_name":"thermal_energy_storage.py","file_ext":"py","file_size_in_byte":7985,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"79"} +{"seq_id":"37117246239","text":"'''\nhttps://keras.io/activations/\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\n\n(X_train0, y_train0), (X_test0, y_test0) = mnist.load_data()\nX_train = X_train0.reshape(60000, 784).astype('float32')/255.0\nX_test = X_test0.reshape(10000, 784).astype('float32')/255.0\nY_train = np_utils.to_categorical(y_train0, 10)\nY_test = np_utils.to_categorical(y_test0, 10)\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense\nfrom keras.optimizers import SGD\n\nnp.random.seed(0)\nmodel0 = Sequential()\nmodel0.add(Dense(15, input_dim=784, activation=\"sigmoid\"))\n#model0.add(Dense(15, input_dim=784, activation=\"tanh\"))\nmodel0.add(Dense(10, activation=\"sigmoid\"))\nmodel0.compile(optimizer=SGD(lr=0.2), loss='mean_squared_error', metrics=[\"accuracy\"])\n\n#%%time\nhist0 = model0.fit(X_train, Y_train, epochs=30, batch_size=100, validation_data=(X_test, Y_test), verbose=0)\n\nnp.random.seed(0)\nmodel1 = Sequential()\nmodel1.add(Dense(15, input_dim=784, activation=\"sigmoid\"))\nmodel1.add(Dense(10, activation=\"sigmoid\"))\n#model1.add(Dense(15, input_dim=784, activation=\"relu\"))\n#model1.add(Dense(10, activation=\"softmax\"))\nmodel1.compile(optimizer=SGD(lr=0.2), loss='categorical_crossentropy', metrics=[\"accuracy\"])\n#model1.compile(optimizer=SGD(lr=0.2), loss='binary_crossentropy', metrics=[\"accuracy\"])\n\n#%%time\nhist1 = model1.fit(X_train, Y_train, epochs=30, batch_size=100, validation_data=(X_test, Y_test), verbose=0)\n\nplt.plot(hist0.history['val_acc'], ls=\":\", label=\"mean squared error\")\nplt.plot(hist1.history['val_acc'], label=\"cross entropy\")\nplt.legend()\nplt.show()\n","repo_name":"cjsong21/Machine-learning","sub_path":"딥러닝/01.딥러닝모델예/ModelDL.py","file_name":"ModelDL.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25704972253","text":"import logging\n\nfrom PySide2 import QtWidgets, QtGui, QtCore\nfrom pynput import mouse\n\nfrom auto_assistant.model import actions\n\nlogger = logging.getLogger(__name__)\n_SECONDS_IN_A_DAY = 86400\n\n\nclass AddActionDialog(QtWidgets.QDialog):\n def __init__(self):\n 
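# Layout: action-type picker (row 0), per-action input grid (row 1),\n        # Ok/Cancel buttons (row 2).\n        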
super().__init__()\n self.resize(300, 200)\n self.__result = None\n self.__my_layout = QtWidgets.QGridLayout()\n\n button_grid = QtWidgets.QGridLayout()\n self.__ok_button = QtWidgets.QPushButton('Ok')\n self.__cancel_button = QtWidgets.QPushButton('Cancel')\n button_grid.addWidget(self.__cancel_button, 0, 0)\n button_grid.addWidget(self.__ok_button, 0, 1)\n self.__ok_button.clicked.connect(self.accept)\n self.__cancel_button.clicked.connect(self.reject)\n\n pick_action_combo_box = QtWidgets.QComboBox()\n pick_action_combo_box.addItems([i.value for i in actions.ActionType])\n pick_action_combo_box.currentTextChanged.connect(self.__handle_action_type_change)\n\n # The default type of grid will be one for a ClickAction\n self.__input_grid = self.__generate_grid_layout_for_click_action()\n\n self.__my_layout.addWidget(pick_action_combo_box, 0, 0)\n self.__my_layout.addLayout(self.__input_grid, 1, 0)\n self.__my_layout.addLayout(button_grid, 2, 0)\n self.setLayout(self.__my_layout)\n\n def __clear_items_in(self, layout: QtWidgets.QLayout):\n while layout.count() > 0:\n item = layout.takeAt(0)\n if isinstance(item, QtWidgets.QLayout):\n self.__clear_items_in(item)\n else:\n logger.debug(f'Removing {type(item.widget())}')\n item.widget().deleteLater()\n logger.debug(f'Removing {type(layout)}')\n layout.deleteLater()\n\n def __handle_action_type_change(self, selected_action_type: str):\n logger.info(f'Generating UI for {selected_action_type}')\n if self.__input_grid is not None:\n self.__my_layout.removeItem(self.__input_grid)\n self.__clear_items_in(self.__input_grid)\n self.__input_grid = None\n logger.info('\\tOld UI removed')\n try:\n self.__input_grid = self.__generate_grid_layout_for(actions.ActionType(selected_action_type))\n self.__my_layout.addLayout(self.__input_grid, 1, 0)\n except RuntimeError:\n logger.error('Unable to generate the input grid', exc_info=True)\n\n def __generate_grid_layout_for(self, action_type: actions.ActionType) -> QtWidgets.QGridLayout:\n if actions.ActionType.CLICK_ACTION == action_type:\n return self.__generate_grid_layout_for_click_action()\n elif actions.ActionType.SLEEP_ACTION == action_type:\n return self.__generate_grid_layout_for_sleep_action()\n else:\n raise RuntimeError(f'Unsupported action type: {action_type}')\n\n def __generate_grid_layout_for_sleep_action(self) -> QtWidgets.QGridLayout:\n self.__ok_button.setEnabled(True)\n return_value = QtWidgets.QGridLayout()\n\n # create the label\n duration_label = QtWidgets.QLabel('Sleep for (secs): ')\n duration_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n return_value.addWidget(duration_label, 0, 0)\n\n # create the input for the time\n self.__sleep_text_line = QtWidgets.QLineEdit()\n self.__sleep_text_line.setPlaceholderText('Enter time in seconds')\n self.__sleep_text_line.setValidator(QtGui.QIntValidator(0, _SECONDS_IN_A_DAY))\n self.__sleep_text_line.editingFinished.connect(self.__handle_sleep_time_input)\n return_value.addWidget(self.__sleep_text_line, 0, 1)\n\n return return_value\n\n def __handle_sleep_time_input(self):\n self.__result = actions.SleepAction(int(self.__sleep_text_line.text()))\n\n def __generate_grid_layout_for_click_action(self) -> QtWidgets.QGridLayout:\n self.__ok_button.setEnabled(False)\n return_value = QtWidgets.QGridLayout()\n x_label = QtWidgets.QLabel('x: ')\n x_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n self.__x_value = QtWidgets.QLabel('1')\n y_label = QtWidgets.QLabel('y: ')\n y_label.setAlignment(QtCore.Qt.AlignRight | 
QtCore.Qt.AlignVCenter)\n self.__y_value = QtWidgets.QLabel('1')\n return_value.addWidget(x_label, 0, 0)\n return_value.addWidget(self.__x_value, 0, 1)\n return_value.addWidget(y_label, 0, 2)\n return_value.addWidget(self.__y_value, 0, 3)\n self.__get_click_button = QtWidgets.QPushButton('Get click')\n self.__get_click_button.clicked.connect(self.__get_click)\n self.__mouse_listener = mouse.Listener(on_click=self.__on_click)\n return_value.addWidget(self.__get_click_button, 1, 0, 1, -1)\n return return_value\n\n def __toggle_buttons_to(self, enabled: bool):\n self.__ok_button.setEnabled(enabled)\n self.__cancel_button.setEnabled(enabled)\n self.__get_click_button.setEnabled(enabled)\n\n def __get_click(self):\n self.__mouse_listener.start()\n self.__toggle_buttons_to(False)\n\n def __on_click(self, x: int, y: int, button: mouse.Button, pressed: bool) -> bool:\n logger.debug(f'Clicked at ({x}, {y}) with button {button} and pressed={pressed}')\n if pressed and button == mouse.Button.left:\n self.__x_value.setText(str(x))\n self.__y_value.setText(str(y))\n self.__toggle_buttons_to(True)\n\n # reset the mouse listener for next time\n self.__mouse_listener = mouse.Listener(on_click=self.__on_click)\n self.__result = actions.ClickAction(int(self.__x_value.text()), int(self.__y_value.text()))\n return False\n\n def get_result(self) -> actions.Action:\n return self.__result\n\n def accept(self):\n super().accept()\n\n def reject(self):\n super().reject()\n self.__result = None\n","repo_name":"NateJSchmidt/autoassistant","sub_path":"src/auto_assistant/view/add_action_dialog.py","file_name":"add_action_dialog.py","file_ext":"py","file_size_in_byte":5978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38327902164","text":"from collections import deque\nfrom pprint import pprint\nn = int(input())\narr = [list(map(int, input().split())) for _ in range(n)]\nG = dict()\nfor i in range(n):\n G[i] = []\n for j in range(n):\n if arr[i][j] == 1:\n value = G[i]\n value.append(j)\n G[i] = value\n\nq = deque()\nfor i in range(len(G)):\n q.append(i)\n visit = [0] * len(G)\n while q:\n start = q.popleft()\n for w in G[start]:\n if visit[w]:\n continue\n visit[w] = 1\n arr[i][w] = 1\n q.append(w)\n\nfor i in range(n):\n print(*arr[i])\n\n# for m in range(len(G)): # 경유지 기준으로\n# for st in range(len(G)): # 시작점 다 돌려보고\n# for end in range(len(G)): # 도착점 다 돌려봤을 때\n# if arr[st][end] == 0: # 만약 시작점에서 도착점으로 가는 곳이 현재까지는 없는 경우\n# arr[st][end] = arr[st][m] & arr[m][end] # 가능하다면 갈 수 있다고 판단하여 배열 바꿔줌\n# print(arr)","repo_name":"swanious/Algorithm","sub_path":"BOJ/11403_경로찾기.py","file_name":"11403_경로찾기.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"71836524734","text":"from datetime import datetime\n\nfrom flask import request\nfrom flask_restful import Resource\n\nfrom config import db\nfrom models.user import User\nfrom schemas.user import UserSchema\nfrom utils import get_args_parser\n\nuser_schema = UserSchema(many=False)\nparser = get_args_parser([\n {'name': \"num_mark_tasks\", 'type': int, 'required': True,\n 'help': 'number of mark tasks the user has'},\n {'name': \"password\", 'type': str, 'required': False,\n 'help': 'unprocessed password of the user'},\n {'name': \"name\", 'type': str, 'required': True,\n 'help': 'name of the user'}\n])\n\n\nclass UserResource(Resource):\n \"\"\"Resource to handle CRUD operations for users table\"\"\"\n\n 
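# One handler per HTTP verb; user_id comes from the URL route.\n    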
@staticmethod\n def get(user_id: int):\n \"\"\"\n Returns single user\n \"\"\"\n\n if not user_id:\n return {'status': 'failed', 'message': \"Empty ID field\"}, 404\n\n user = User.query.filter(User.user_id == user_id).one_or_none()\n if not user:\n return {'status': 'failed', 'message': \"User not found\"}, 404\n\n return {'data': user_schema.dump(user)}, 200\n\n @staticmethod\n def post(user_id: int):\n \"\"\"\n Creates new user\n \"\"\"\n\n request.get_json(force=True)\n data = parser.parse_args(strict=True)\n if not data:\n return {'status': 'failed', 'message': 'No input data provided'}, 204\n\n user = User.query.filter_by(user_id=user_id).one_or_none()\n if user:\n return {'status': 'failed', 'message': 'User already exists'}, 400\n\n data.update({'user_id': user_id, 'last_activity_ds': datetime.now(),\n 'registration_date': datetime.now()})\n user = User(**data)\n db.session.add(user)\n db.session.commit()\n\n result = user_schema.dump(user)\n\n return {\"status\": 'success', 'data': result}, 201\n\n @staticmethod\n def put(user_id: int):\n \"\"\"\n Updates the user\n Possible fields for update:\n 'num_mark_tasks': int,\n 'password': str,\n 'name': str,\n 'last_name': str\n \"\"\"\n\n request.get_json(force=True)\n data = parser.parse_args(strict=True)\n if not data:\n return {'status': 'failed', 'message': 'No input data provided'}, 204\n\n user = User.query.filter_by(user_id=user_id).first()\n if not user:\n return {'status': 'failed', 'message': 'User does not exist'}, 204\n\n for k, v in data.items():\n user.__setattr__(k, v)\n db.session.commit()\n\n result = user_schema.dump(user)\n return {\"status\": 'success', 'data': result}, 202\n\n @staticmethod\n def delete(user_id):\n \"\"\"\n Deletes single user\n \"\"\"\n\n user = User.query.filter_by(user_id=user_id).one_or_none()\n if not user:\n return {'status': 'failed',\n 'message': 'User does not exist'}, 204\n User.query.filter_by(user_id=user_id).delete()\n db.session.commit()\n\n result = user_schema.dump(user)\n return {\"status\": 'success', 'data': result}, 202\n","repo_name":"kirilllzaitsev/datamark-backend","sub_path":"backend/flaskr/res/UserRes.py","file_name":"UserRes.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"27995752137","text":"#!/usr/bin/env python3\n\nimport buffer\n\nbuf = bytearray(6)\nbuffer.snprintf(buf, \"Hello world!\")\nprint(buf)\n\nsize = 256\npybuf = bytearray(size)\nfor i in range(size):\n pybuf[i] = i\n\nbuf = buffer.Buffer()\nbuf.put(2*size)\nbuffer.write2(pybuf, buf, size)\n\nfor i in range(2*size):\n print(f\"{i} : {hex(buf[i])}\")\n\n","repo_name":"savagesmc/swig_play","sub_path":"pybuffer/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"43870270734","text":"from PySide2.QtWidgets import QListWidget,QPushButton\r\nimport PySide2.QtCore\r\nfrom src import event_key, event_dispatcher\r\nimport copy\r\n\r\nclass CategoryApplyWindow:\r\n def __init__(self, window: QListWidget,clear_button: QPushButton):\r\n self.list_window = window\r\n self.current_filter = []\r\n self.clear_button = clear_button\r\n self.list_window.itemDoubleClicked.connect(self.delete)\r\n self.clear_button.clicked.connect(self.clear_button_pushed)\r\n\r\n def add(self, category: str):\r\n self.current_filter.append(category)\r\n self.list_window.addItem(category)\r\n\r\n def delete(self, 
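A minimal sketch of how a flask_restful resource like the one above is usually registered; the import path is inferred from this file's location in the repo and the app wiring is an assumption, not the project's actual entry point:

from flask import Flask
from flask_restful import Api

from res.UserRes import UserResource  # module path assumed from repo layout

app = Flask(__name__)
api = Api(app)
# The route parameter name must match the user_id argument of get/post/put/delete.
api.add_resource(UserResource, '/users/<int:user_id>')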
category_item):\r\n category_text = category_item.text()\r\n self.current_filter.remove(category_text)\r\n remove_list = self.list_window.findItems(category_text, PySide2.QtCore.Qt.MatchFixedString)\r\n for item in remove_list:\r\n row = self.list_window.row(item)\r\n self.list_window.takeItem(row)\r\n # dispatch\r\n dispatch_data = copy.deepcopy(self.current_filter)\r\n event_dispatcher.emit_event(event_key.SEND_CATEGORY_FILTER, dispatch_data)\r\n event_dispatcher.emit_event(event_key.LOG_FILTERING, None)\r\n\r\n def clear(self):\r\n self.current_filter.clear()\r\n self.list_window.clear()\r\n\r\n def is_contain(self, category: str) -> bool:\r\n return True if category in self.current_filter else False\r\n\r\n # @Event\r\n def receive_add_filter_event(self, category):\r\n if not self.is_contain(category):\r\n self.add(category)\r\n # dispatch\r\n dispatch_data = copy.deepcopy(self.current_filter)\r\n event_dispatcher.emit_event(event_key.SEND_CATEGORY_FILTER, dispatch_data)\r\n event_dispatcher.emit_event(event_key.LOG_FILTERING, None)\r\n\r\n # @Slot\r\n def clear_button_pushed(self):\r\n self.clear()\r\n event_dispatcher.emit_event(event_key.SEND_CATEGORY_FILTER, [])\r\n event_dispatcher.emit_event(event_key.LOG_FILTERING, None)\r\n","repo_name":"TERABYTE0130/logViewer","sub_path":"src/category_apply_window.py","file_name":"category_apply_window.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33548619678","text":"from network import LoRa\r\nimport socket\r\nimport time\r\nimport ubinascii\r\nfrom lora_help import connect_lora_socket\r\n\r\nimport pycom # \"pycom\" will be an error in your\r\n# IDE because it's not on your computer, but on\r\n# the device\r\nimport time\r\nimport machine\r\n\r\nfrom machine import ADC\r\nfrom machine import Pin\r\nfrom network import WLAN\r\nimport socket\r\n\r\n#LoRa\r\n#from network import LoRa\r\n#import binascii\r\n#print(binascii.hexlify(LoRa().mac()).upper())\r\n\r\npycom.heartbeat(False)\r\npycom.rgbled(0x0000FF) # blue\r\n#time.sleep(2) #sleep for 1 second\r\n\r\n##====== LoRa ======\r\n\r\n## Initialise LoRa in LORAWAN mode.\r\n## Please pick the region that matches where you are using the device:\r\n## Asia = LoRa.AS923\r\n## Australia = LoRa.AU915\r\n## Europe = LoRa.EU868\r\n## United States = LoRa.US915\r\nlora = LoRa(mode=LoRa.LORAWAN, region=LoRa.EU868)\r\n\r\n# create an OTAA authentication parameters, change them to the provided credentials\r\napp_eui = ubinascii.unhexlify('6081F9FF68E87979')\r\napp_key = ubinascii.unhexlify('B8078474D99CC4CCAEFE3B563AECB8E7')\r\n#uncomment to use LoRaWAN application provided dev_eui\r\ndev_eui = ubinascii.unhexlify('70B3D549957622C1')\r\n\r\n## Uncomment for US915 / AU915 & Pygate\r\n## for i in range(0,8):\r\n## lora.remove_channel(i)\r\n## for i in range(16,65):\r\n## lora.remove_channel(i)\r\n## for i in range(66,72):\r\n## lora.remove_channel(i)\r\n\r\n## join a network using OTAA (Over the Air Activation)\r\n##uncomment below to use LoRaWAN application provided dev_eui\r\n##lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)\r\n#lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0)\r\n\r\n#pycom.rgbled(0xFF0000) # Red\r\n\r\n## wait until the module has joined the network\r\n#while not lora.has_joined():\r\n# time.sleep(2.5)\r\n# print('Not yet joined...')\r\n\r\n#print('Joined')\r\n#pycom.rgbled(0x00FF00) # Green\r\n\r\n## create a LoRa socket\r\n#s = 
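The delete slot above combines findItems with row/takeItem to drop entries from a QListWidget. A standalone sketch of that idiom, assuming PySide2:

from PySide2 import QtWidgets, QtCore

def remove_matching_items(list_widget: QtWidgets.QListWidget, text: str) -> int:
    # findItems returns the matching items; takeItem detaches by row index.
    matches = list_widget.findItems(text, QtCore.Qt.MatchFixedString)
    for item in matches:
        list_widget.takeItem(list_widget.row(item))
    return len(matches)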
socket.socket(socket.AF_LORA, socket.SOCK_RAW)\r\n\r\n## set the LoRaWAN data rate\r\n#s.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)\r\n\r\n##====== End LoRa ======\r\n\r\n#====== WiFi ======\r\n\r\nwlan = WLAN(mode=WLAN.STA)\r\n\r\nwlan.connect(ssid='Stargate_IoT', auth=(WLAN.WPA2, 'TieFighter'))\r\n#wlan.connect(ssid='Martins iPhone', auth=(WLAN.WPA2, 'j1aqdr2q2heb9'))\r\n#while not wlan.isconnected():\r\n# print(\"WiFi not connected\")\r\n# time.sleep(2) #sleep for 2 seconds\r\n# machine.idle()\r\n\r\ntime.sleep(5) #sleep for 5 seconds\r\n\r\n#====== End WiFi ======\r\n\r\ndata = ''\r\nadc = ADC()\r\ntempsensor = adc.channel(pin='P15') # create an analog pin on P15\r\nbat_voltage = adc.channel(attn=ADC.ATTN_11DB, pin='P16')\r\n\r\nwhile True: #Forever loop\r\n\r\n vbat = bat_voltage.voltage()*2\r\n # note that the expansionboard 3 has a voltage divider of 1M / 1M to account for\r\n # 1M / 1M, ratio = 1:2\r\n\r\n millivolts = tempsensor.voltage() # Analog temperature measured in millivolts\r\n degC = (millivolts - 500.0) / 10.0 # Convert millivolts to celsius\r\n degF = ((degC * 9.0) / 5.0) + 32.0 # Convert celsius to fahrenheit\r\n\r\n print('battery voltage:', vbat, 'mV')\r\n print('temperature:', degC, ' C')\r\n\r\n if vbat >= 4420:\r\n pycom.rgbled(0x00FF00) # Green\r\n else:\r\n pycom.rgbled(0xFF0000) # Red\r\n\r\n if wlan.isconnected():\r\n\r\n print(\"WiFi connected\")\r\n time.sleep(5) #sleep for 5 seconds\r\n print(wlan.ifconfig())\r\n\r\n # setup socket for connection\r\n wifi_socket = socket.socket()\r\n #s = ssl.wrap_socket(s)\r\n host = 'dev.electra.se'\r\n addr = socket.getaddrinfo(host,80)[0][-1]\r\n wifi_socket.connect(addr)\r\n print('socket connected')\r\n\r\n data = '2,' + str(vbat) + ',' + str(degC) + ',' + '4'\r\n httpreq = 'POST /MessageHandler.ashx HTTP/1.1 \\r\\nHOST: '+ host + '\\r\\nContent-Length: ' + str(len(data)) + '\\r\\nConnection: keep-alive \\r\\n\\r\\n' + data\r\n print('http request: \\n', httpreq)\r\n wifi_socket.send(httpreq)\r\n rec_bytes = wifi_socket.recv(10000)\r\n print(rec_bytes)\r\n else:\r\n print(\"WiFi not connected\")\r\n\r\n # Try to join LoRa\r\n if not lora.has_joined():\r\n # join a network using OTAA (Over the Air Activation)\r\n #uncomment below to use LoRaWAN application provided dev_eui\r\n #lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)\r\n lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0)\r\n\r\n # wait until the module has joined the network\r\n while not lora.has_joined():\r\n time.sleep(2.5)\r\n print('LoRa not yet joined...')\r\n\r\n # create a LoRa socket\r\n lora_socket = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\r\n\r\n #else:\r\n #lora_socket = connect_lora_socket()\r\n\r\n print('LoRa joined')\r\n\r\n ## send some data\r\n data = '2,' + str(vbat) + ',' + str(degC) + ',' + '3'\r\n lora_socket.send(data)\r\n\r\n #time.sleep(600) #sleep for 10 minutes\r\n time.sleep(10) #sleep for 10 seconds\r\n","repo_name":"martinkvarmo/my_summerhouse_IOT_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3181610332","text":"#!/usr/bin/env python3\n\n#_____________________________________________________________________________\n#\n# filter for changing the gender of pronouns in a plaintext\n#\n# Author: Samdney \n# D4A7 35E8 D47F 801F 2CF6 2BA7 927A FD3C DE47 E13B \n# License: See LICENSE for licensing 
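The temperature conversion above matches a TMP36-style transfer function (500 mV at 0 degC, 10 mV per degC); the sensor type is an assumption, since the code only names the ADC pin. A self-checking sketch of the same arithmetic:

def adc_millivolts_to_celsius(millivolts: float) -> float:
    # 500 mV offset at 0 degC, 10 mV per degC (TMP36-style sensor assumed).
    return (millivolts - 500.0) / 10.0

def celsius_to_fahrenheit(deg_c: float) -> float:
    return deg_c * 9.0 / 5.0 + 32.0

assert adc_millivolts_to_celsius(750.0) == 25.0
assert celsius_to_fahrenheit(25.0) == 77.0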
information\n#_____________________________________________________________________________\n\"\"\"\n***\nGENDER SWITCHING\n***\nSwitching pronouns in a plaintext message of one gender, to the pronouns of the \nother gender.\n\n---\nThe problems of gender switching\n---\nAssumptions:\n1. The message is in English => English pronouns\n\tThis are: he, she, him, her, his, hers (6 single pronouns)\n2. We can have two possible cases:\n\t=> A random pattern (or something written by a person with terrible writing \n\tskills ;)\n\t=> A natural language text with established English grammar\n\n3. We have the following possible pronoun pairs:\n\the \t<=> she\t\tPersonal pronoun - subject\n\thim <=> her\t\tPersonal pronoun - object\n\this <=> her\t\tPossessive determine\n\this <=> hers\tPossessive pronoun\n\t(4 pairs)\n\n\tProblem:\tThis map is NOT injective!\n\tBecause:\ther -> him or his\n\t\t\t\this -> her or hers\n\t=> We have to find an additional helpful quality!\n\t\n\tFirst idea:\tWe look for the position within a sentence.\n\tProblem: An pronoun can be direct or indirect\n\t=> Idea not helpful\n\t\n\tSecond idea: Looking for a natural language parser which can\n\ttell me which kind of word it is (SUB=subject, OBJ=object, etc., ...)\n\t\n\t=> Solution is only so good like the natural language parser!\n\t=> I found this wrapper parser: \n\thttps://github.com/EducationalTestingService/python-zpar\t\t\t\t\n\"\"\"\n\nimport string\nimport sys\n\nclass gender_filter():\n\n\tdef __init__(self):\n\t\tself.msg_new\t= \" \"\n\n\tdef change_msg(self,filter_switch,msg):\n\t\tswitch = gender_filter()\n\t\tif filter_switch == 0:\n\t\t\t_msg_new = msg\n\t\telif filter_switch == 1:\n\t\t\t_msg_new = switch.simple_switch(msg)\n\t\telse:\n\t\t\t_msg_new = switch.lingu_switch(msg)\n\n\t\tself.msg_new = _msg_new\n\n\t# Switch one pronoun pair \n\tdef switch_one_pronoun_pair(self,pn1,pn2,msg):\n\t\t# Placeholder should be no \"real\" word\n\t\t# Something with lim -> 0 probability to appear in msg\n\t\ttmp = \"6m7Q6q16\"\n\n\t\tmsg1 \t= msg.replace(pn1,tmp)\n\t\tmsg2 \t= msg1.replace(pn2,pn1)\n\t\tmsg3\t= msg2.replace(tmp,pn2)\n\n\t\tmsg_switched = msg3\n\t\treturn msg_switched\n\n\t# Switch for all possible positions within a sentence and msg\n\tdef switch_one_pronoun_pair_allpos(self,pn1,pn2,msg):\n\t\tmyfilter = gender_filter()\n\t\t_msg_new = msg\n\n\t\t# Beginning and Middle\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \" \", \" \" + str(pn2) + \" \",_msg_new)\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \", \", \" \" + str(pn2) + \", \",_msg_new)\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \"'s \", \" \" + str(pn2) + \"'s \",_msg_new)\n\t\t\n\t\t# End\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \".\", \" \" + str(pn2) + \".\",_msg_new)\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \"!\", \" \" + str(pn2) + \"!\",_msg_new)\n\t\t_msg_new = myfilter.switch_one_pronoun_pair(\" \" + str(pn1) + \"?\", \" \" + str(pn2) + \"?\",_msg_new)\n\t\treturn _msg_new\n\t\t\n\n\t\"\"\"\n\t# SIMPLE_SWITCH\n\t\"\"\"\t\n\t# Idea: Simple find and replace.\n\t#\tStep1:\tSwitch he <=> she\n\t#\tStep2:\tSwitch him <=> her\n\t#\tStep3:\tSwitch his <=> her\n\t#\tStep4:\tSwitch his <=> hers\n\t# Comment: The pronoun parsing only works correct for an msg which follows \n\t# the established rules of English grammar. 
Absolutely not, for a random \n\t# pattern text\n\t# Comment: The input msg variable should contain the full message, at one. \n\t# If we do parsing for each single data of buffer_size, parsing will not \n\t# work if a pronoun is splited between two buffer packages. \n\t# E.g.: package1|package2 = msg = \"He and sh\"|\"e are good friends.\"\n\t# => Has to be fixed.\n\t# TODO: Result would be better, if we do switching not chronologically \n\t# (step1, step2, step3, step4). Instead we should have an additional look at\n\t# probability tables for the probability of the appereance of a single \n\t# pronoun in an English text. Then do the switching of the not-injective \n\t# pronoun pairs under consideration of this probabilities.\n\tdef simple_switch(self,msg):\n\t\t\n\t\t_msg_new = \" \"\n\t\tmyfilter = gender_filter()\n\t\n\t\t# Add an additional space character at the beginng of msg\n\t\t# Reason: Then you can clearly identify pronouns at the beginning of msg\n\t\t_msg_new = \" \" + str(msg)\n\t\n\t\t# Find and replace for different pronoun pairs\n\t\t# Find and replace for different notations: he, He, HE ...\n\t\t# Find and replace for different positions within a sentence\n\t\t\n\t\t#pronoun_pairs = {\"he\" : \"she\", \"him\":\"her\", \"his\":\"her\", \"his\":\"hers\"}\n\t\t\n\t\t# Switching of Step3 with Step4\n\t\tpronoun_pairs = {\"he\" : \"she\", \"him\":\"her\", \"his\":\"hers\", \"his\":\"her\"}\n\t\t\n\t\t# Example of the different results\n\t\t# Old: He, and SHE likes me so much. HELP him! \n\t\t# \tHis dog likes tea and eats with him cake. That's hers.\n\t\t# New: She, and HE likes me so much. HELP her! \n\t\t# \tHers dog likes tea and eats with her cake. That's his.\n\n\t\t# Old: He, and SHE likes me so much. HELP him! \n\t\t# \tHis dog likes tea and eats with him cake. That's hers.\n\t\t# New: She, and HE likes me so much. HELP his! \n\t\t# \tHer dog likes tea and eats with his cake. That's hers.\n\n # TODO If we have very long messages, we should add 'if cases' within \n\t\t# the loop, to not always run all 'find and replace' functions for each\n # pronoun pair. E.g. 'he' or 'she' aren't at the end of a senctence, if\n # the sentence follows english grammar rules, or? 
-> Saving of\n # computation time\n\t\tfor male in pronoun_pairs:\n\t\t\tpn1 = male\n\t\t\tpn2 = pronoun_pairs[male]\n\t\n\t\t\t# Lower\n\t\t\tpn1_lower = pn1.lower()\n\t\t\tpn2_lower = pn2.lower()\n\t\t\t_msg_new = myfilter.switch_one_pronoun_pair_allpos(pn1_lower,pn2_lower,_msg_new)\n\n\t\t\t# Upper\n\t\t\tpn1_upper = pn1.upper()\n\t\t\tpn2_upper = pn2.upper()\n\t\t\t_msg_new = myfilter.switch_one_pronoun_pair_allpos(pn1_upper,pn2_upper,_msg_new)\n\t\t\n\t\t\t# Titled\n\t\t\tpn1_titled = pn1.title()\n\t\t\tpn2_titled = pn2.title()\n\t\t\t_msg_new = myfilter.switch_one_pronoun_pair_allpos(pn1_titled,pn2_titled,_msg_new)\n\t\t\t\t\n\t\t# Remove the additional space character from the beginng of msg\n\t\tlen_msg = len(_msg_new)\n\t\t_msg_new = _msg_new[1:len_msg]\n\n\t\tself.msg_new = _msg_new\n\t\treturn _msg_new\n\n\t\"\"\"\n\t# LINGU_SWITCH\n\t\"\"\"\t\n\t# TODO: Not implemented until now\n\t# Idea: \n\t# - Send msg to natural language parser to determine the kind of word \n\t# \t(subject, object, ...).\n\t# - Search for all her and his and their result of the natural language \n\t# \tparsing\n\t# - Use this information to decide if we have: her => him or her => his, \n\t#\this => her or his => hers\n\t# - Change pronouns under consideration of this additional information\n\tdef lingu_switch(self,msg):\n\t\t_msg_new = \" \"\n\t\tself.msg_new = _msg_new\n\t\treturn _msg_new\n\n\"\"\"\n# TEST\n\"\"\"\n\ndef test():\n\t# Test messages\n\t#msg = \"She, you and me. Sheer is funny! He is it, too.\"\n\tmsg = \"He, and SHE likes me so much. HELP him! His dog likes tea and eats with him cake. That's hers. He's great.\"\n\tprint(\"Old: \" + msg)\n\t\n\tmyfilter = gender_filter()\n\tmyfilter.change_msg(1,msg)\n\tmsg_new = myfilter.msg_new\n\tprint(\"New: \" + msg_new)\n\nif __name__=='__main__':\n\ttest()\n","repo_name":"Samdney/pysocks5sys","sub_path":"myproxyfilter.py","file_name":"myproxyfilter.py","file_ext":"py","file_size_in_byte":7204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23176883372","text":"l = int(input())\nheight = list(map(int, input().split()))\nm = int(input())\ncnt = 0\ninfo = {}\nindex = []\n\nfor i in range(1, l+1):\n info[i] = height[i-1]\n# print(info)\n\n\ndef find_index():\n global info\n global index\n info = dict(sorted(info.items(), key = lambda x : x[1], reverse=True))\n # print(info)\n index = list(info.keys())\n return index\n\n\nwhile cnt < m:\n cnt += 1\n find_index()\n high_index = index[0]\n low_index = index[-1]\n info[high_index] -= 1\n info[low_index] += 1\n # print(cnt, info)\nresult = list(info.values())\nprint(max(result) - min(result))","repo_name":"Seoyun0626/CodingTest","sub_path":"인프런/창고정리.py","file_name":"창고정리.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30270343324","text":"from decimal import Decimal\nfrom itertools import chain\nfrom numbers import Number\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import FieldDoesNotExist\nfrom django.conf import settings\nimport copy\nimport datetime\nimport inspect\n\n\ndef javascript_date_format(python_date_format):\n format = python_date_format.replace(r'Y', 'yyyy')\n format = format.replace(r'm', 'mm')\n format = format.replace(r'd', 'dd')\n if not format:\n format = 'yyyy-mm-dd'\n return format\n\n\ndef duplicate(obj, changes=None):\n \"\"\" Duplicates any object including m2m fields\n 
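simple_switch above reduces to a three-pass swap through an improbable placeholder. A standalone sketch with a quick check; the surrounding delimiters matter, otherwise the 'he' embedded in 'she' would be rewritten too:

def swap_tokens(text: str, a: str, b: str, placeholder: str = '6m7Q6q16') -> str:
    # a -> placeholder, b -> a, placeholder -> b; the placeholder must be
    # vanishingly unlikely to appear in the input text.
    return text.replace(a, placeholder).replace(b, a).replace(placeholder, b)

assert swap_tokens(' he met her friend ', ' he ', ' she ') == ' she met her friend '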
changes: any changes that should occur, example\n changes = (('fullname','name (copy)'), ('do not copy me', ''))\"\"\"\n if not obj.pk:\n raise ValueError('Instance must be saved before it can be cloned.')\n duplicate = copy.copy(obj)\n duplicate.pk = None\n for change in changes:\n duplicate.__setattr__(change[0], change[1])\n duplicate.save()\n # trick to copy ManyToMany relations.\n for field in obj._meta.many_to_many:\n source = getattr(obj, field.attname)\n destination = getattr(duplicate, field.attname)\n for item in source.all():\n try: # m2m, through fields will fail.\n destination.add(item)\n except:\n pass\n return duplicate\n\n\nDATE = 1\nNUMBER = 2\n\n\ndef sort_helper(x, sort_key, sort_type):\n \"\"\" Sadly python 3 makes it very hard to sort mixed types\n We can work around this by forcing the types\n \"\"\"\n result = x[sort_key]\n if result is None:\n if sort_type == DATE:\n result = datetime.date(datetime.MINYEAR, 1, 1)\n elif sort_type == NUMBER:\n result = 0\n else: # Last try - make it a string\n result = ''\n return result\n\n\ndef sort_data(data_list, display_field):\n \"\"\" Sort data based on display_field settings\n data_list - 2d array of data\n display_field - report_builder.DisplayField object\n returns sorted data_list\n \"\"\"\n position = display_field.position\n is_reverse = display_field.sort_reverse\n # Try to inspect sample data to determine type\n sample_data = data_list[0][position]\n if sample_data is None:\n sample_data = data_list[-1][position]\n sort_type = None\n if isinstance(sample_data, (datetime.date, datetime.datetime)):\n sort_type = DATE\n elif isinstance(sample_data, (int, float, complex)):\n sort_type = NUMBER\n return sorted(\n data_list,\n key=lambda x: sort_helper(x, position, sort_type),\n reverse=is_reverse\n )\n\n\ndef increment_total(display_field, data_row):\n val = data_row[display_field.position]\n if isinstance(val, bool):\n # True: 1, False: 0\n display_field.total_count += Decimal(val)\n elif isinstance(val, Number):\n display_field.total_count += Decimal(str(val))\n elif val:\n display_field.total_count += Decimal(1)\n\n\ndef formatter(value, style):\n \"\"\" Convert value to Decimal to apply numeric formats.\n value - The value we wish to format.\n style - report_builder.Format object\n \"\"\"\n try:\n value = Decimal(value)\n except Exception:\n pass\n\n try:\n return style.string.format(value)\n except ValueError:\n return value\n\n\n# Model Utils\n\n\ndef isprop(v):\n return isinstance(v, property)\n\n\ndef get_properties_from_model(model_class):\n \"\"\" Show properties from a model \"\"\"\n properties = []\n attr_names = [name for (name, value) in inspect.getmembers(model_class, isprop)]\n for attr_name in attr_names:\n if attr_name.endswith('pk'):\n attr_names.remove(attr_name)\n else:\n properties.append(dict(label=attr_name, name=attr_name.strip('_').replace('_', ' ')))\n return sorted(properties, key=lambda k: k['label'])\n\n\ndef get_relation_fields_from_model(model_class):\n \"\"\" get related fields (m2m, fk, and reverse fk) \"\"\"\n relation_fields = []\n all_fields_names = get_all_field_names(model_class)\n for field_name in all_fields_names:\n field = copy.deepcopy(model_class._meta.get_field(field_name))\n direct = field.concrete\n m2m = field.many_to_many\n # get_all_field_names will return the same field\n # both with and without _id. 
ignore the duplicate.\n if field_name[-3:] == '_id' and field_name[:-3] in all_fields_names:\n continue\n if m2m or not direct or field.is_relation:\n field.field_name = field_name\n relation_fields += [field]\n return relation_fields\n\n\ndef get_all_field_names(model_class):\n \"\"\" Restores a function from django<1.10 \"\"\"\n return list(set(chain.from_iterable(\n (field.name, field.attname) if hasattr(field, 'attname') else (field.name,)\n for field in model_class._meta.get_fields()\n # For complete backwards compatibility, you may want to exclude\n # GenericForeignKey from the results.\n if not (field.many_to_one and field.related_model is None)\n )))\n\n\ndef get_direct_fields_from_model(model_class):\n \"\"\" Direct, not m2m, not FK \"\"\"\n direct_fields = []\n all_fields_names = get_all_field_names(model_class)\n for field_name in all_fields_names:\n field = model_class._meta.get_field(field_name)\n direct = field.concrete\n m2m = field.many_to_many\n if direct and not m2m and not field.is_relation:\n direct_fields += [field]\n return direct_fields\n\n\ndef get_custom_fields_from_model(model_class):\n \"\"\" django-custom-fields support \"\"\"\n if 'custom_field' in settings.INSTALLED_APPS:\n from custom_field.models import CustomField\n try:\n content_type = ContentType.objects.get(\n model=model_class._meta.model_name,\n app_label=model_class._meta.app_label)\n except ContentType.DoesNotExist:\n content_type = None\n custom_fields = CustomField.objects.filter(content_type=content_type)\n return custom_fields\n\n\ndef get_model_from_path_string(root_model, path):\n \"\"\" Return a model class for a related model\n root_model is the class of the initial model\n path is like foo__bar where bar is related to foo\n \"\"\"\n for path_section in path.split('__'):\n if path_section:\n try:\n field = root_model._meta.get_field(path_section)\n direct = field.concrete\n except FieldDoesNotExist:\n return root_model\n if direct:\n if hasattr(field, 'related'):\n try:\n root_model = field.related.parent_model()\n except AttributeError:\n root_model = field.related.model\n\n elif hasattr(field, 'related_model') and field.related_model:\n root_model = field.related_model\n\n else:\n if hasattr(field, 'related_model'):\n root_model = field.related_model\n else:\n root_model = field.model\n return root_model\n","repo_name":"burke-software/django-report-builder","sub_path":"report_builder/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","stars":753,"dataset":"github-code","pt":"77"} +{"seq_id":"72561631930","text":"import numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\n\nprint(\"This script is used to print the accesses of a TPG to a CU according to the out_best_stats.md file. 
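sort_data above dodges Python 3's refusal to order mixed types by substituting a type-appropriate minimum for None. A standalone sketch of the trick:

import datetime

def none_safe_key(value, kind):
    # Map None to a minimum of the column's detected type so that
    # sorted() never compares None against dates or numbers.
    if value is None:
        if kind == 'date':
            return datetime.date(datetime.MINYEAR, 1, 1)
        if kind == 'number':
            return 0
        return ''
    return value

rows = [None, datetime.date(2020, 1, 1), datetime.date(1999, 5, 5)]
assert sorted(rows, key=lambda v: none_safe_key(v, 'date'))[0] is None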
Result is plot using matplotlib and corresponds to a sizeXsize image (CU) with a color bar legend.\")\nprint(\"This script is often used with launch_all_TPGs-Accesses_print.py which calls it many times.\")\n\nif (len(sys.argv) != 2):\n print(\"Illegal number of parameters\")\n print(\"Usage: python3.6 print_TPGAcesses.py FILE_NAME\")\n print(\"Example: python3.6 /home/cleonard/dev/stage/scripts/python/printData/print_TPGAcesses.py /home/cleonard/dev/stage/results/scripts_results/Binary/Actions_bal_dataset1/NP/out_best_stats_ent0_bNP_63,63.md\")\n\n# Global variable\nsize = 32\n\n# Get file name\nprint(\"Script name : \", str(sys.argv[0]))\nprint(\"File name : \", str(sys.argv[1]))\ninputFile = str(sys.argv[1])\nsplitName = inputFile.split(\"/\")[-2]\nprint(splitName)\n\n# Open file, get last line and print it\nfile = open(inputFile, \"r\")\ndata = file.readlines()[-1]\nprint(data)\nfile.close()\n\n# Remove first ('{') and last ('}' + any space if there is) char from data\ndata = data[1:]\nwhile data[-1] != \"}\":\n data = data[:-1]\ndata = data[:-1]\n\n# Split data in a tab containing every pair\npixels = data.split(\"} {\")\n\n# Init the access array\naccess = np.zeros((size, size))\n\n# Store data in access\nfor p in pixels:\n # Split every pair with the pixel index (var[0]) and the number of accesses (var[1])\n var = p.split(\",\")\n # Compute 2D indexes\n row = int(var[0]) // size\n col = int(var[0]) % size\n # Store number of accesses in the corresponding pixel\n access[row][col] = int(var[1])\n\n# *** Own colormap (pretty but not really efficient) ***\n# # Create the colors (normalized)\n# topo_colors = [(255/255, 255/255, 255/255), # Blanc\n# (243/255, 232/255, 77/255), # Jaune\n# (255/255, 146/255, 3/255), # Orange\n# (255/255, 0/255, 0/255), # Rouge\n# (197/255, 3/255, 255/255), # Violet\n# (3/255, 205/255, 255/255), # Bleu\n# (75/255, 255/255, 9/255) # Vert\n# ]\n# # Create the colormap from my personnalized colors\n# my_cmap = LinearSegmentedColormap.from_list('topo_basic', topo_colors)\n\n# *** JET colormap (internet) ***\ncmap = plt.cm.jet # define the colormap\n# Extract all colors from the .jet map\ncmaplist = [cmap(i) for i in range(cmap.N)]\n# Force the first color entry to be white\ncmaplist[0] = (1, 1, 1, 1.0)\n# Create the new map\ncmap = LinearSegmentedColormap.from_list(\n 'Custom cmap', cmaplist, cmap.N)\n\n# Show image\nfig = plt.figure(splitName)\nplt.imshow(access, cmap=cmap)\nplt.colorbar(extend = 'both')\nplt.show()\n","repo_name":"CedricLeon/scripts","sub_path":"python/printData/print_TPGAcesses.py","file_name":"print_TPGAcesses.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15593867412","text":"#!flask/bin/python\nfrom flask import Flask, jsonify, request\nfrom random import uniform\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return jsonify({'ok': True}), 200\n\n\n@app.route('/dimensions', methods=['POST'])\ndef calculate_dimensions():\n\n json = request.get_json(silent=True)\n errors = check_params(json)\n\n if len(errors) > 0:\n response = jsonify(errors)\n response.status_code = 400\n return response\n\n dimensions = random_dimensions()\n\n return jsonify(dimensions), 200\n\n\n@app.errorhandler(404)\ndef not_found(e):\n return jsonify({'error': 'Not found'}), 404\n\n\n@app.errorhandler(405)\ndef method_not_allowed(e):\n return jsonify({'error': 'Method not allowed'}), 405\n\n\ndef check_params(json):\n errors = []\n\n if not json:\n 
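A standalone sketch of the colormap trick used above: copy jet's color list and force the lowest bin to white so zero-access pixels stay blank. The input here is random stand-in data rather than real TPG access counts:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap

base = plt.cm.jet
colors = [base(i) for i in range(base.N)]
colors[0] = (1.0, 1.0, 1.0, 1.0)  # white for the zero bin
jet_white_zero = LinearSegmentedColormap.from_list('jet_white_zero', colors, base.N)

plt.imshow(np.random.randint(0, 5, (8, 8)), cmap=jet_white_zero)
plt.colorbar(extend='both')
plt.show()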
errors.append(error('Incorrect JSON body'))\n else:\n if 'image' not in json:\n errors.append(error('Missing parameter', 'image'))\n elif not decode_image(json['image']):\n errors.append(error('Invalid base64 representation', 'image'))\n\n return errors\n\n\ndef decode_image(img):\n try:\n img = img.replace('data:image/png;base64,', '')\n img.decode('base64')\n return True\n except:\n return False\n\n\ndef random_dimensions():\n return {\n 'height': round(uniform(0, 10), 2),\n 'length': round(uniform(0, 20), 2),\n 'weight': round(uniform(0, 15), 2)\n }\n\n\ndef error(message, field=None):\n msg = {\n 'message': message\n }\n if field:\n msg['field'] = field\n return msg\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"mathifonseca/sizer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10562023353","text":"import sys\nimport pytest\nimport unittest\nimport boto\nfrom boto.ec2.autoscale.launchconfig import LaunchConfiguration\nfrom boto.ec2.autoscale.group import AutoScalingGroup\nfrom boto.ec2.cloudwatch.alarm import MetricAlarm\nfrom moto import mock_autoscaling_deprecated\nfrom moto import mock_ec2_deprecated\nfrom moto import mock_elb_deprecated\nfrom moto.cloudwatch import mock_cloudwatch_deprecated\n\nfrom License2Deploy.rolling_deploy import RollingDeploy\nfrom License2Deploy.AWSConn import AWSConn\n\n\nclass RollingDeployTest(unittest.TestCase):\n\n autoscaling_group_name = 'autoscaling_group_name'\n launch_configuration_name = 'launch_configuration_name'\n load_balancer_name = 'load_balancer_name'\n\n GMS_LAUNCH_CONFIGURATION_STG = 'server-backend-stg-servergmsextenderLCstg-46TIE5ZFQTLB'\n GMS_LAUNCH_CONFIGURATION_PRD = 'server-backend-prd-servergmsextenderLCprd-46TIE5ZFQTLB'\n GMS_AUTOSCALING_GROUP_STG = 'server-backend-stg-servergmsextenderASGstg-3ELOD1FOTESTING'\n GMS_AUTOSCALING_GROUP_PRD = 'server-backend-prd-servergmsextenderASGprd-3ELOD1FOTESTING'\n\n @mock_autoscaling_deprecated\n @mock_elb_deprecated\n @mock_ec2_deprecated\n def setUp(self):\n self.setUpELB()\n self.rolling_deploy = RollingDeploy('stg', 'server-gms-extender', '0', 'ami-abcd1234', None, './regions.yml', force_redeploy=True)\n\n def get_autoscaling_configurations(self, launch_configuration_name, autoscaling_group_name):\n return {\n self.autoscaling_group_name: autoscaling_group_name,\n self.launch_configuration_name: launch_configuration_name\n }\n\n @mock_autoscaling_deprecated\n def setUpAutoScaleGroup(self, configurations, env=\"stg\"):\n conn = boto.connect_autoscale()\n for configuration in configurations:\n config = LaunchConfiguration(\n name=configuration[self.launch_configuration_name],\n image_id='ami-abcd1234',\n instance_type='m1.medium',\n )\n load_balancer_name = self.load_balancer_name\n group = AutoScalingGroup(\n name=configuration[self.autoscaling_group_name],\n availability_zones=['us-east-1a'],\n default_cooldown=300,\n desired_capacity=2,\n health_check_period='0',\n health_check_type=\"EC2\",\n max_size=10,\n min_size=2,\n launch_config=config,\n load_balancers=[load_balancer_name],\n vpc_zone_identifier='subnet-1234abcd',\n termination_policies=[\"Default\"],\n )\n conn.create_launch_configuration(config)\n conn.create_auto_scaling_group(group)\n\n @mock_elb_deprecated\n def setUpELB(self, env='stg'):\n conn_elb = boto.connect_elb()\n zones = ['us-east-1a']\n ports = [(80, 8080, 'http')]\n load_balancer_name = 
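Note that decode_image above calls str.decode('base64'), which exists only on Python 2; under Python 3 the bare except always fires and every payload is rejected. A Python 3 sketch of the same check:

import base64
import binascii

def is_valid_png_payload(data: str) -> bool:
    # Strip the data-URI prefix, then let b64decode validate the alphabet.
    data = data.replace('data:image/png;base64,', '')
    try:
        base64.b64decode(data, validate=True)
        return True
    except (binascii.Error, ValueError):
        return False

assert is_valid_png_payload('aGVsbG8=') is True
assert is_valid_png_payload('not base64!') is False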
self.load_balancer_name\n conn_elb.create_load_balancer(load_balancer_name, zones, ports)\n balancers = conn_elb.get_all_load_balancers(load_balancer_names=[load_balancer_name])\n self.assertEqual(balancers[0].name, load_balancer_name)\n\n @mock_ec2_deprecated\n @mock_elb_deprecated\n def setUpEC2(self, tag=True):\n self.setUpELB()\n conn_elb = boto.connect_elb()\n conn = boto.connect_ec2()\n instance_id_list = []\n reservation = conn.run_instances('ami-1234abcd', min_count=2, private_ip_address=\"10.10.10.10\")\n instance_ids = reservation.instances\n for instance in instance_ids:\n if tag:\n instance.add_tag('BUILD', 0)\n instance_id_list.append(instance.id)\n elb = conn_elb.get_all_load_balancers(load_balancer_names=[self.load_balancer_name])[0]\n elb.register_instances(instance_id_list)\n elb_ids = [instance.id for instance in elb.instances]\n self.assertEqual(instance_id_list.sort(), elb_ids.sort())\n\n return [conn, instance_id_list]\n\n @mock_cloudwatch_deprecated\n def setUpCloudWatch(self, instance_ids, env=\"stg\"):\n alarm = MetricAlarm(\n name = \"servergmsextender_CloudWatchAlarm\" + env,\n namespace = \"AWS/EC2\",\n metric = \"CPUUtilization\",\n comparison = \">=\",\n threshold = \"90\",\n evaluation_periods = 1,\n statistic = \"Average\",\n period = 300,\n dimensions = {'InstanceId': instance_ids},\n alarm_actions=['arn:alarm'],\n ok_actions=['arn:ok']\n )\n watch_conn = boto.connect_cloudwatch()\n watch_conn.put_metric_alarm(alarm)\n\n @mock_cloudwatch_deprecated\n def setUpCloudWatchWithWrongConfig(self, instance_ids, env=\"stg\"):\n alarm = MetricAlarm(\n name = \"servergmsextender_CloudWatchAlarm\" + env,\n namespace = \"AWS/EC2\",\n metric = \"CPUUtilization\",\n comparison = \"GreaterThanThreshold\", # wrong configuration that would generate error.\n threshold = \"90\",\n evaluation_periods = 1,\n statistic = \"Average\",\n period = 300,\n dimensions = {'InstanceId': instance_ids},\n alarm_actions=['arn:alarm'],\n ok_actions=['arn:ok']\n )\n watch_conn = boto.connect_cloudwatch()\n watch_conn.put_metric_alarm(alarm)\n\n @mock_cloudwatch_deprecated\n def test_retrieve_project_cloudwatch_alarms(self):\n instance_ids = self.setUpEC2()\n self.setUpCloudWatch(instance_ids)\n cloud_watch_alarms = self.rolling_deploy.retrieve_project_cloudwatch_alarms()\n print(cloud_watch_alarms)\n self.assertEqual(1, len(cloud_watch_alarms))\n\n @mock_cloudwatch_deprecated\n def test_retrieve_project_cloudwatch_alarms_with_no_valid_alarms(self):\n instance_ids = self.setUpEC2()\n self.setUpCloudWatch(instance_ids)\n self.rolling_deploy.env = \"wrong_env_prd\" # set a wrong environment\n cloud_watch_alarms = self.rolling_deploy.retrieve_project_cloudwatch_alarms()\n self.assertEqual(0, len(cloud_watch_alarms))\n\n @mock_cloudwatch_deprecated\n def test_retrieve_project_cloudwatch_alarms_with_wrong_config(self):\n instance_ids = self.setUpEC2()\n self.setUpCloudWatchWithWrongConfig(instance_ids)\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.retrieve_project_cloudwatch_alarms())\n\n @mock_cloudwatch_deprecated\n def test_enable_project_cloudwatch_alarms_Error(self):\n instance_ids = self.setUpEC2()\n self.setUpCloudWatch(instance_ids)\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.enable_project_cloudwatch_alarms())\n\n @mock_cloudwatch_deprecated\n def test_disable_project_cloudwatch_alarms_Error(self):\n instance_ids = self.setUpEC2()\n self.setUpCloudWatch(instance_ids)\n self.assertRaises(SystemExit, lambda: 
self.rolling_deploy.disable_project_cloudwatch_alarms())\n\n @mock_ec2_deprecated\n def test_tag_ami(self):\n conn = self.setUpEC2()[0]\n reservation = conn.run_instances('ami-1234xyz1', min_count=1)\n instance_ids = reservation.instances\n conn.create_image(instance_ids[0].id, \"test-ami\", \"this is a test ami\")\n _ami_ids = conn.get_all_images()\n _ami_id = _ami_ids[0].id\n self.rolling_deploy = RollingDeploy('stg', 'server-gms-extender', '0', _ami_id, None, './regions.yml')\n self.rolling_deploy.tag_ami(str(_ami_id), 'stg')\n self.rolling_deploy.tag_ami(str(_ami_id), 'qa')\n self.rolling_deploy.tag_ami(str(_ami_id), 'qa')\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.tag_ami('blargness', 'qa'))\n\n @mock_ec2_deprecated\n def test_load_config(self):\n self.assertEqual(AWSConn.load_config('regions.yml').get('qa'), 'us-west-1')\n self.assertEqual(AWSConn.load_config('regions.yml').get('stg'), 'us-east-1')\n self.assertEqual(AWSConn.load_config('regions.yml').get('prd'), 'us-east-1')\n self.assertEqual(AWSConn.load_config('regions.yml').get('default'), 'us-west-1')\n self.assertEqual(AWSConn.load_config('regions.yml').get('zero'), None)\n\n @mock_ec2_deprecated\n def test_load_config(self):\n self.assertEqual(AWSConn.determine_region('get-shwifty'), 'us-west-1')\n\n @mock_ec2_deprecated\n def test_wait_ami_availability(self):\n conn = self.setUpEC2()[0]\n inst_ids = self.setUpEC2()[1]\n conn.create_image(inst_ids[0], \"test-ami\", \"this is a test ami\")\n ami_ids = conn.get_all_images()\n ami_id = ami_ids[0]\n self.assertEqual(str(ami_id), str(self.rolling_deploy.get_ami_id_state(ami_id.id)))\n self.assertTrue(self.rolling_deploy.wait_ami_availability(ami_id.id))\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.wait_ami_availability('bad-id')) #Will raise exception because ami can't be found\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.wait_ami_availability(ami_id.id, -100)) #Will raise exception as time limit is over\n\n @mock_ec2_deprecated\n @mock_elb_deprecated\n def test_confirm_lb_has_only_new_instances(self):\n instance_ids = self.setUpEC2()[1]\n self.rolling_deploy.load_balancer = self.load_balancer_name\n self.assertEqual(len(instance_ids), len(self.rolling_deploy.confirm_lb_has_only_new_instances())) #Return All LB's with the proper build number\n\n @mock_ec2_deprecated\n @mock_elb_deprecated\n def test_lb_healthcheck(self):\n instance_ids = self.setUpEC2()[1]\n self.rolling_deploy.load_balancer = self.load_balancer_name\n self.assertTrue(self.rolling_deploy.lb_healthcheck(instance_ids)) #Return InService for all instances in ELB\n # Below doesn't work as I am unable to change the instance state. 
Need to modify elb_healthcheck method and also modify instance_health template.\n ## https://github.com/spulec/moto/blob/master/moto/elb/responses.py#L511 ##\n ## https://github.com/spulec/moto/blob/master/moto/elb/responses.py#L219 ##\n #self.assertRaises(SystemExit, lambda: self.rolling_deploy.lb_healthcheck(instance_ids, 1, 1)) #Return OutOfService for the first instance in the ELB which will raise an exit call\n\n @mock_autoscaling_deprecated\n def test_get_group_info(self):\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n group = self.rolling_deploy.get_group_info([self.GMS_AUTOSCALING_GROUP_STG])[0]\n self.assertEqual(group.name, self.GMS_AUTOSCALING_GROUP_STG)\n\n @mock_autoscaling_deprecated\n def test_failure_get_group_info(self):\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.get_group_info('cool'))\n\n @mock_autoscaling_deprecated\n def test_get_autoscale_group_name_stg(self):\n autoscaling_configurations = list()\n autoscaling_configurations.append(self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG))\n autoscaling_configurations.append(self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_PRD, self.GMS_AUTOSCALING_GROUP_PRD))\n self.setUpAutoScaleGroup(autoscaling_configurations)\n group = self.rolling_deploy.get_autoscale_group_name()\n self.assertEqual(group, self.GMS_AUTOSCALING_GROUP_STG)\n self.assertNotEqual(group, self.GMS_AUTOSCALING_GROUP_PRD)\n\n @mock_autoscaling_deprecated\n @mock_elb_deprecated\n def test_get_autoscale_group_name_prd(self):\n self.setUpELB(env='prd')\n self.rolling_deploy = RollingDeploy('prd', 'server-gms-extender', '0', 'ami-test212', None, './regions.yml')\n autoscaling_configurations = list()\n autoscaling_configurations.append(self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_PRD, self.GMS_AUTOSCALING_GROUP_PRD))\n self.setUpAutoScaleGroup(autoscaling_configurations, env='prd')\n group = self.rolling_deploy.get_autoscale_group_name()\n self.assertEqual(group, self.GMS_AUTOSCALING_GROUP_PRD)\n self.assertNotEqual(group, self.GMS_AUTOSCALING_GROUP_STG)\n\n @mock_autoscaling_deprecated\n def test_calculate_autoscale_desired_instance_count(self):\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n increase = self.rolling_deploy.calculate_autoscale_desired_instance_count(self.GMS_AUTOSCALING_GROUP_STG, 'increase')\n decrease = self.rolling_deploy.calculate_autoscale_desired_instance_count(self.GMS_AUTOSCALING_GROUP_STG, 'decrease')\n self.assertEqual(increase, 4)\n self.assertEqual(decrease, 1)\n\n @mock_autoscaling_deprecated\n def test_calculate_autoscale_desired_instance_count_failure(self):\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.calculate_autoscale_desired_instance_count(self.GMS_AUTOSCALING_GROUP_STG, 'nothing'))\n\n @mock_ec2_deprecated\n def test_get_instance_ip_addrs(self):\n self.setUpEC2()\n self.rolling_deploy.get_instance_ip_addrs(self.setUpEC2()[1])\n self.rolling_deploy.log_instances_ips(self.setUpEC2()[1], 'group')\n self.assertRaises(SystemExit, lambda: 
self.rolling_deploy.get_instance_ip_addrs(['blah', 'blarg']))\n\n @mock_ec2_deprecated\n def test_is_redeploy(self):\n self.setUpEC2()\n self.assertTrue(self.rolling_deploy.is_redeploy())\n\n @mock_ec2_deprecated\n def test_is_redeploy_fails(self):\n self.setUpEC2(tag=False)\n with pytest.raises(SystemExit):\n self.rolling_deploy.is_redeploy()\n\n def test_stop_deploy(self):\n with pytest.raises(SystemExit):\n self.rolling_deploy.stop_deploy('error!')\n\n @mock_ec2_deprecated\n @mock_autoscaling_deprecated\n @mock_elb_deprecated\n def test_get_all_instance_ids(self):\n self.setUpELB()\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n conn = boto.connect_ec2()\n reservation = conn.run_instances('ami-1234abcd', min_count=2, private_ip_address=\"10.10.10.10\")\n instance_ids = reservation.instances\n rslt = self.rolling_deploy.get_all_instance_ids(self.GMS_AUTOSCALING_GROUP_STG)\n self.assertEqual(len(instance_ids), len(rslt))\n\n @mock_ec2_deprecated\n @mock_autoscaling_deprecated\n @mock_elb_deprecated\n def test_validate_instance_list(self):\n self.setUpELB()\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n conn = boto.connect_ec2()\n reservation = conn.run_instances('ami-1234abcd', min_count=2, private_ip_address=\"10.10.10.10\")\n instances = reservation.instances\n self.assertTrue(self.rolling_deploy.validate_instance_list(instances))\n\n @mock_ec2_deprecated\n @mock_autoscaling_deprecated\n @mock_elb_deprecated\n def test_failure_validate_instance_list(self):\n instances = []\n self.assertRaises(Exception, lambda: self.rolling_deploy.validate_instance_list(instances))\n\n @mock_ec2_deprecated\n @mock_autoscaling_deprecated\n def test_get_instance_ids_by_requested_build_tag(self):\n self.setUpEC2()\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n conn = boto.connect_ec2()\n new_inst = []\n res_ids = conn.get_all_instances()\n for i_id in res_ids:\n for name in i_id.instances:\n if [y for y in name.tags if y == 'BUILD' and name.tags['BUILD'] == '0']:\n new_inst.append(name.id)\n self.rolling_deploy.new_desired_capacity = self.rolling_deploy.calculate_autoscale_desired_instance_count(self.GMS_AUTOSCALING_GROUP_STG, 'increase')\n\n self.assertEqual(len(self.rolling_deploy.get_instance_ids_by_requested_build_tag(new_inst, 0)), 2)\n self.assertRaises(Exception, lambda: self.rolling_deploy.get_instance_ids_by_requested_build_tag(new_inst, 1))\n\n self.rolling_deploy.original_instance_ids = list(new_inst)\n self.rolling_deploy.force_redeploy = False\n self.assertEqual(len(self.rolling_deploy.get_instance_ids_by_requested_build_tag(new_inst, 0)), 2)\n self.rolling_deploy.force_redeploy = True\n self.assertRaises(Exception, lambda: self.rolling_deploy.get_instance_ids_by_requested_build_tag(new_inst, 0))\n\n @mock_ec2_deprecated\n @mock_autoscaling_deprecated\n def test_get_instance_ids_by_requested_build_tag_race_condition(self):\n self.setUpEC2()\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n conn = boto.connect_ec2()\n new_inst = []\n res_ids = conn.get_all_instances()\n for i_id in res_ids:\n for name in i_id.instances:\n if [y for y in name.tags if y == 'BUILD' and name.tags['BUILD'] == '0']:\n new_inst.append(name.id)\n break\n 
self.rolling_deploy.force_redeploy = True\n self.rolling_deploy.new_desired_capacity = self.rolling_deploy.calculate_autoscale_desired_instance_count(self.GMS_AUTOSCALING_GROUP_STG, 'increase')\n self.assertRaises(Exception, lambda: self.rolling_deploy.get_instance_ids_by_requested_build_tag(new_inst, 1))\n\n\n @mock_ec2_deprecated\n def test_get_instance_ids_by_requested_build_tag_failure(self):\n self.setUpEC2()\n self.assertRaises(Exception, lambda: self.rolling_deploy.get_instance_ids_by_requested_build_tag([], 0))\n\n @mock_autoscaling_deprecated\n def test_set_autoscale_instance_desired_count(self):\n self.setUpAutoScaleGroup([self.get_autoscaling_configurations(self.GMS_LAUNCH_CONFIGURATION_STG, self.GMS_AUTOSCALING_GROUP_STG)])\n self.assertTrue(self.rolling_deploy.set_autoscale_instance_desired_count(4, self.GMS_AUTOSCALING_GROUP_STG))\n\n @mock_ec2_deprecated\n def test_wait_for_new_instances(self):\n instance_ids = self.setUpEC2()[1]\n self.assertEqual(self.rolling_deploy.wait_for_new_instances(instance_ids, 9), None)\n\n @mock_ec2_deprecated\n def test_wait_for_new_instances_failure(self):\n conn = self.setUpEC2()[0]\n instance_ids = self.setUpEC2()[1]\n reservations = conn.get_all_instances()\n reservations[0].instances[0].stop()\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.wait_for_new_instances(instance_ids, 3, 1))\n\n def test_set_autoscale_instance_desired_count_failure(self):\n self.assertRaises(SystemExit, lambda: self.rolling_deploy.set_autoscale_instance_desired_count(4, self.GMS_AUTOSCALING_GROUP_STG))\n\n def test_double_autoscale_instance_count(self):\n self.assertEqual(self.rolling_deploy.double_autoscale_instance_count(2), 4)\n\n def test_decrease_autoscale_instance_count(self):\n self.assertEqual(self.rolling_deploy.decrease_autoscale_instance_count(4), 2)\n","repo_name":"dandb/License2Deploy","sub_path":"tests/rolling_deploy_test.py","file_name":"rolling_deploy_test.py","file_ext":"py","file_size_in_byte":18154,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"42755615175","text":"from p5 import *\n\nclass Matrix:\n ## first constructor\n def __init__(self,r,c):\n self.rows = r\n self.cols = c\n self.matrix = [[0.] 
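The entire test class above leans on one moto idiom: decorate the test, then call plain boto against moto's in-memory backend so no real AWS account is touched. A minimal self-contained sketch using the same deprecated-decorator flavour as the imports above:

import boto
from moto import mock_ec2_deprecated

@mock_ec2_deprecated
def check_mocked_run_instances():
    # Every boto call inside is served by moto's fake EC2 backend.
    conn = boto.connect_ec2()
    reservation = conn.run_instances('ami-1234abcd', min_count=2)
    assert len(reservation.instances) == 2

check_mocked_run_instances()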
* self.cols for i in range(self.rows)]\n\n def returnMatrix(self):\n return self.matrix\n \n ## second constructor\n @classmethod\n def float(cls,m):\n rows = len(m)\n cols = len(m[0])\n return cls(rows,cols)\n \n @classmethod\n def initializedVector(cls,vector):\n vlen = len(vector)\n m = cls(vlen,1)\n for i in range(0,vlen):\n m.matrix[i][0] = vector[i]\n return m\n \n def dot(self,n):\n result = []\n result = Matrix(self.rows,n.cols)\n \n if self.cols == n.rows :\n for i in range(self.rows) :\n for j in range(n.cols):\n sum = 0 \n for k in range(self.cols):\n sum += self.matrix[i][k]*n.matrix[k][j]\n result.matrix[i][j] = sum\n return result\n \n def randomize(self):\n for i in range(self.rows):\n for j in range(self.cols):\n self.matrix[i][j] = random_uniform(-1,1)\n \n def matrixToVector(self):\n arr = []\n for i in range(self.rows):\n for j in range(self.cols):\n arr.append(self.matrix[i][j])\n return arr\n \n def addBias(self):\n n = Matrix(self.rows+1,1)\n for i in range(self.rows):\n n.matrix[i][0] = self.matrix[i][0]\n n.matrix[self.rows][0] = 1.\n return n\n \n def activate(self):\n n = Matrix(self.rows,self.cols)\n for i in range(self.rows):\n for j in range(self.cols):\n n.matrix[i][j] = self.relu(self.matrix[i][j])\n return n\n \n @staticmethod\n def relu(x):\n return max(0,x)\n \n def mutate(self,mutationRate):\n for i in range(self.rows) :\n for j in range(self.cols) :\n rand = random_uniform(1)\n if rand < mutationRate :\n self.matrix[i][j] += random_gaussian()/5\n \n if self.matrix[i][j] > 1 :\n self.matrix[i][j] = 1\n if self.matrix[i][j] < -1 :\n self.matrix[i][j] = -1\n \n def crossover(self,partner):\n child = Matrix(self.rows,self.cols)\n \n randR = floor(random_uniform(self.rows))\n randC = floor(random_uniform(self.cols))\n \n for i in range(self.rows):\n for j in range(self.cols):\n if i < randR or (i == randR and j <= randC) :\n child.matrix[i][j] = self.matrix[i][j]\n else:\n child.matrix[i][j] = partner.matrix[i][j];\n return child\n \n def clone(self):\n clone = Matrix(self.rows,self.cols)\n for i in range(self.rows):\n for j in range(self.cols):\n clone.matrix[i][j] = self.matrix[i][j]\n return clone\n \n\n \n\n\n\n \n \n \n \n \n \n \n \n \n\n \n","repo_name":"ElirazO/IronDomeAI","sub_path":"Matrix.py","file_name":"Matrix.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"75285391289","text":"from django.urls import path\nfrom . 
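Matrix.crossover above is a single-point crossover in row-major order: the parent's entries up to a random cut (inclusive), the partner's entries after it. A NumPy sketch of the same semantics, offered as an equivalent rather than the project's code:

import numpy as np

def single_point_crossover(a: np.ndarray, b: np.ndarray, rng=None) -> np.ndarray:
    # The cut index is inclusive, mirroring the (i < randR) or
    # (i == randR and j <= randC) test in Matrix.crossover.
    rng = rng or np.random.default_rng()
    cut = int(rng.integers(a.size))
    flat = np.concatenate([a.ravel()[:cut + 1], b.ravel()[cut + 1:]])
    return flat.reshape(a.shape)

child = single_point_crossover(np.zeros((3, 3)), np.ones((3, 3)))
assert child.shape == (3, 3)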
import views\n\napp_name = 'front'\n\nurlpatterns = [\n path('', views.index, name='index'),\n path(r'login/', views.login, name='login'),\n path('work/', views.work, name='work'),\n path('refreshlog/', views.refresh_log, name='refresh_log'),\n path('logout/', views.logout, name='logout'),\n path('connect/admin/', views.connect_admin, name='connect_admin'),\n]","repo_name":"bopopescu/refreshHuaweiCdn","sub_path":"front/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3758257464","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\nimport random\n\ndef upper_confidence_bound(file):\n dataset = pd.read_csv(file)\n d = 10\n N = 10000\n ads_selected = []\n numbers_of_selections = [0] * d\n sums_of_rewards = [0] * d\n total_reward = 0\n for n in range(0, N):\n ad = 0\n max_upper_bound = 0\n for i in range(0, d):\n if (numbers_of_selections[i] > 0):\n upper_bound = (sums_of_rewards[i] / numbers_of_selections[i]) + \\\n (math.sqrt(3/2 * math.log(n + 1) / numbers_of_selections[i]))\n else:\n upper_bound = 1e400\n if upper_bound > max_upper_bound:\n max_upper_bound = upper_bound\n ad = i\n ads_selected.append(ad)\n numbers_of_selections[ad] += 1\n reward = dataset.values[n, ad]\n sums_of_rewards[ad] += reward\n total_reward += reward\n\n # Visualizing results\n plt.hist(ads_selected)\n plt.title('Histogram of Ad Selections')\n plt.xlabel('Ads')\n plt.ylabel('Number of times each ad selected')\n plt.savefig('Images/UCB.png')\n plt.show()\n\ndef thompson_sampling(file):\n dataset = pd.read_csv(file)\n\n # Implementing Thompson Sampling\n d = 10\n N = 10000\n ads_selected = []\n number_of_rewards1 = [0] * d\n number_of_rewards0 = [0] * d\n total_rewards = 0\n for n in range(0, N):\n ad = 0\n max_random = 0\n for i in range(0, d):\n random_beta = random.betavariate(number_of_rewards1[i] + 1, number_of_rewards0[i] + 1)\n if random_beta > max_random:\n max_random = random_beta\n ad = i\n ads_selected.append(ad)\n reward = dataset.values[n, ad]\n if reward == 1:\n number_of_rewards1[ad] += 1\n else:\n number_of_rewards0[ad] += 1\n total_rewards += reward\n\n # Visualize Histogram of results\n plt.hist(ads_selected)\n plt.title('Histogram of Ad Selections')\n plt.xlabel('Ads')\n plt.ylabel('Number of times each ad selected')\n plt.savefig('Images/Thompson_Sampling.png')\n plt.show()","repo_name":"jmgccp4eva/machinelearningaipython","sub_path":"Reinforcement_Learning.py","file_name":"Reinforcement_Learning.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42163718402","text":"\"\"\"\nThis example illustrates how to manually use a temporary array manager (if you must).\n\"\"\"\n\nimport numpy\nfrom reikna.cluda import dtypes, any_api\nfrom reikna.cluda.tempalloc import ZeroOffsetManager\n\n\napi = any_api()\nthr = api.Thread.create()\n\n\ndef demo_array_dependencies():\n\n # ZeroOffsetManager attempts to pack temporary allocations\n # in a collection of real allocations with minimal total size.\n # All the virtual allocations start at the beginning of the real allocations.\n\n # Create a manager that will try to minimize the total size of real allocations\n # every time a temporary allocation occurs, or a temporary array is freed.\n # Note that this may involve re-pointing a temporary array to a different part of memory,\n # so all 
of the data in it is lost.\n temp_manager = ZeroOffsetManager(thr, pack_on_alloc=True, pack_on_free=True)\n\n # Alternatively one can pass `False` to these keywords and call `.pack()` manually.\n # This can be useful if a lot of allocations are happening in a specific place at once.\n\n # Create two arrays that do not depend on each other.\n # This means the manager will allocate a single (200, int32) real array,\n # and point both `a1` and `a2` to its beginning.\n a1 = temp_manager.array(100, numpy.int32)\n a2 = temp_manager.array(200, numpy.int32)\n\n # You can see that the total size of virtual arrays is 1200,\n # but the total size of real arrays is only 800 (the size of the larger array).\n print(\"Allocated a1 = (100, int32) and a2 = (200, int32)\")\n print(temp_manager._statistics())\n\n # Now we allocate a dependent array.\n # This means that the real memory `a3` points to cannot intersect with that of `a1`.\n # If we could point temporary arrays at any address within real allocations,\n # we could fit it into the second half of the existing real allocation.\n # But `ZeroOffsetManager` cannot do that, so it has to create another allocation.\n a3 = temp_manager.array(100, numpy.int32, dependencies=[a1])\n\n print(\"Allocated a3 = (100, int32) depending on a1\")\n print(temp_manager._statistics())\n\n # Now that we deallocated `a1`, `a3` can now fit in the same real allocation as `a2`,\n # so one of the real allocations will be removed.\n del a1\n\n print(\"Freed a1\")\n print(temp_manager._statistics())\n\n\nclass MyComputation:\n\n def __init__(self, temp_manager):\n self.temp_array = temp_manager.array(100, numpy.int32)\n\n # The magic property containing temporary arrays used\n self.__tempalloc__ = [self.temp_array]\n\n def __call__(self, array1, array2):\n # a sequence of kernel calls using `self.temp_array` to store some intermediate results\n pass\n\n\ndef demo_object_dependencies():\n\n temp_manager = ZeroOffsetManager(thr, pack_on_alloc=True, pack_on_free=True)\n\n # A `MyComputation` instance creates a temporary array for internal usage\n comp = MyComputation(temp_manager)\n\n print(\"MyComputation created\")\n print(temp_manager._statistics())\n\n # Create another temporary array whose usage does not intersect with `MyComputation` usage.\n # This means that if `comp` is called, the contents of `a1` may be rewritten.\n a1 = temp_manager.array(100, numpy.int32)\n\n # It is put in the same real allocation as the temporary array of `comp`.\n print(\"Allocated a1 = (100, int32)\")\n print(temp_manager._statistics())\n\n # Now let's say we want to put the result of `comp` call somewhere.\n # This means we want to make sure it does not occupy the same memory\n # as any of the temporary arrays in `comp`, so we are passing `comp` as a dependency.\n # It will pick up whatever `comp` declared in its `__tempalloc__` attribute.\n result = temp_manager.array(100, numpy.int32, dependencies=[comp])\n\n # You can see that a new real allocation was created to host the result.\n print(\"Allocated result = (100, int32)\")\n print(temp_manager._statistics())\n\n\nif __name__ == '__main__':\n demo_array_dependencies()\n demo_object_dependencies()\n","repo_name":"fjarri/reikna","sub_path":"examples/demo_tempalloc.py","file_name":"demo_tempalloc.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","stars":163,"dataset":"github-code","pt":"77"} +{"seq_id":"69995453688","text":"from django.db import models\nfrom django.conf import settings\nfrom 
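As the comment above notes, packing can also be deferred and triggered manually, which helps when many temporaries are allocated in one burst. A sketch under the same reikna API as the demo:

import numpy
from reikna.cluda import any_api
from reikna.cluda.tempalloc import ZeroOffsetManager

thr = any_api().Thread.create()
# Deferred packing: allocate freely, then repack real memory once.
temp_manager = ZeroOffsetManager(thr, pack_on_alloc=False, pack_on_free=False)
a1 = temp_manager.array(100, numpy.int32)
a2 = temp_manager.array(200, numpy.int32, dependencies=[a1])
temp_manager.pack()  # one repacking pass instead of one per alloc/free
print(temp_manager._statistics())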
django.utils import timezone\n\n# Create your models here.\n\n\nclass User(models.Model):\n FAMILY_ROLE = [('엄마', '엄마'), ('아빠', '아빠'),]\n username = models.CharField(max_length=10, unique=True)\n role = models.CharField(max_length=5, choices=FAMILY_ROLE)\n \n def __str__(self):\n return self.username\n\n\nclass SetLocation(models.Model):\n user_id = models.OneToOneField('User', on_delete=models.CASCADE)\n homeX = models.FloatField()\n homeY = models.FloatField()\n companyX = models.FloatField()\n companyY = models.FloatField()\n\n\nclass Location(models.Model):\n user_id = models.ForeignKey('User', on_delete=models.CASCADE)\n geoX = models.FloatField()\n geoY = models.FloatField()\n timeStamp = models.DateTimeField(auto_now_add=True)\n onHomeRoad = models.IntegerField(default=0)\n onCompanyRoad = models.IntegerField(default=0)\n\n\nclass Alert(models.Model):\n user_id = models.ForeignKey('User', on_delete=models.CASCADE)\n alertType = models.IntegerField()\n timeStamp = models.DateTimeField(auto_now_add=True)\n","repo_name":"sseonnn/FAFA","sub_path":"Back-End/FAFA/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22212128210","text":"import time\nimport numpy as np\nimport torch\nfrom torch.nn.functional import binary_cross_entropy_with_logits\nfrom torch.utils.data import DataLoader, RandomSampler\nfrom rmp_nav.common.utils import save_model, load_model, module_grad_stats\nimport tabulate\nimport os\n\n\ndef _load_weights(model_file, nets, net_opts):\n state = load_model(os.path.dirname(model_file),\n os.path.basename(model_file), load_to_cpu=True)\n epoch = int(state['epoch'])\n\n for name, net in nets.items():\n net.load_state_dict(state['nets'][name])\n\n for name, opt in net_opts.items():\n opt.load_state_dict(state['optims'][name])\n # Move the parameters stored in the optimizer into gpu\n for opt_state in opt.state.values():\n for k, v in opt_state.items():\n if torch.is_tensor(v):\n opt_state[k] = v.to(device='cuda')\n return epoch\n\n\ndef _save_model(nets, net_opts, epoch, global_args, model_file):\n state = {\n 'epoch': epoch,\n 'global_args': global_args,\n 'optims': {\n name: opt.state_dict() for name, opt in net_opts.items()\n },\n 'nets': {\n name: net.state_dict() for name, net in nets.items()\n }\n }\n save_model(state, epoch, '', model_file)\n\n\ndef train_multiframedst(nets, net_opts, dataset, vis, global_args):\n (\n model_file,\n max_epochs,\n batch_size,\n n_worker,\n log_interval,\n vis_interval,\n save_interval,\n train_device,\n resume,\n weight_loss,\n weight_loss_min_clip,\n model_variant,\n proximity_label,\n heading_diff_label\n ) = [global_args[_] for _ in ['model_file',\n 'max_epochs',\n 'batch_size',\n 'n_dataset_worker',\n 'log_interval',\n 'vis_interval',\n 'save_interval',\n 'train_device',\n 'resume',\n 'weight_loss',\n 'weight_loss_min_clip',\n 'model_variant',\n 'proximity_label',\n 'heading_diff_label']]\n\n epoch = 0\n if resume:\n epoch = _load_weights(model_file, nets, net_opts)\n torch.manual_seed(231239 + epoch)\n print('loaded saved state. 
epoch: %d' % epoch)\n\n # FIXME: hack to mitigate the bug in torch 1.1.0's schedulers\n if epoch <= 1:\n last_epoch = epoch - 1\n else:\n last_epoch = epoch - 2\n\n net_scheds = {\n name: torch.optim.lr_scheduler.StepLR(\n opt,\n step_size=global_args['lr_decay_epoch'],\n gamma=global_args['lr_decay_rate'],\n last_epoch=last_epoch)\n for name, opt in net_opts.items()\n }\n\n n_samples = global_args['samples_per_epoch']\n\n while True:\n print('===== epoch %d =====' % epoch)\n\n sampler = RandomSampler(dataset, True, n_samples)\n\n loader = DataLoader(dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=n_worker,\n pin_memory=True,\n drop_last=True)\n\n last_log_time = time.time()\n\n for idx, (batch_src_imgs, batch_dst_imgs, batch_waypoints, batch_extras) in enumerate(loader):\n for _, opt in net_opts.items():\n opt.zero_grad()\n\n if idx % vis_interval == 0:\n imgs = []\n for i in range(3):\n src_img = batch_src_imgs[i].data.numpy()\n dst_imgs = batch_dst_imgs[i].data.numpy()\n imgs.append(src_img[None])\n imgs.append(dst_imgs)\n imgs = np.concatenate(imgs, axis=0)\n vis.images(imgs, nrow=(dst_imgs.shape[0] + 1),\n win='batch_imgs', opts={'title': 'src-dst'})\n\n batch_src_imgs = batch_src_imgs.to(device=train_device, non_blocking=True)\n batch_dst_imgs = batch_dst_imgs.to(device=train_device, non_blocking=True)\n batch_waypoints = batch_waypoints.to(device=train_device, non_blocking=True)\n\n for k, v in batch_extras.items():\n batch_extras[k] = v.to(device=train_device, non_blocking=True)\n\n batch_size, win_size, c, h, w = batch_dst_imgs.size()\n\n if model_variant == 'attention':\n src_features = nets['img_encoder'](batch_src_imgs)\n dst_features = nets['img_encoder'](batch_dst_imgs.view(-1, c, h, w)).view(\n batch_size, win_size, -1) # batch_size x win_size x dim\n\n # FIXME: disabled attention temporarily\n # dst_terminal_features = dst_features[:, -1, :]\n # attention = nets['attention_encoder'](torch.cat([src_features,\n # dst_terminal_features], dim=1))\n dst_temporal_features = nets['seq_encoder'](dst_features)\n final_features = torch.cat([src_features, dst_temporal_features], dim=1)\n pred_waypoints = nets['wp_regressor'](final_features)\n\n elif model_variant == 'concat_early':\n src_features = nets['img_encoder'](batch_src_imgs)\n dst_features = nets['img_encoder'](batch_dst_imgs.view(-1, c, h, w)).view(\n batch_size, win_size, -1) # batch_size x win_size x dim\n\n src_dst_features = torch.cat([src_features.unsqueeze(1).expand_as(dst_features),\n dst_features], dim=-1)\n temporal_features = nets['seq_encoder'](src_dst_features)\n pred_waypoints = nets['wp_regressor'](temporal_features)\n\n elif model_variant == 'future':\n src_features = nets['img_encoder'](batch_src_imgs)\n dst_features = nets['img_encoder'](batch_dst_imgs.view(-1, c, h, w)).view(\n batch_size, win_size, -1) # batch_size x win_size x dim\n\n win_size = dst_features.size(1) // 2\n\n past_features = dst_features[:, :win_size + 1]\n future_features = dst_features[:, win_size:]\n\n past_temporal_features = nets['seq_encoder'](past_features)\n future_temporal_features = nets['seq_encoder'](future_features)\n\n final_features = torch.cat([src_features,\n past_temporal_features,\n future_temporal_features], dim=1)\n pred_waypoints = nets['wp_regressor'](final_features)\n\n elif model_variant == 'future_stack':\n img_stack = torch.cat([batch_src_imgs.unsqueeze(1), batch_dst_imgs], dim=1)\n features = nets['stack_encoder'](img_stack)\n pred_waypoints = nets['wp_regressor'](features)\n\n elif 
model_variant == 'future_stack_v2':\n # Only stack dst images.\n src_features = nets['img_encoder'](batch_src_imgs)\n dst_features = nets['stack_encoder'](batch_dst_imgs)\n features = torch.cat([src_features, dst_features], dim=-1)\n pred_waypoints = nets['wp_regressor'](features)\n\n elif model_variant == 'future_pair':\n batch_src_imgs2 = batch_src_imgs.unsqueeze(1).expand_as(batch_dst_imgs).contiguous()\n pair_features = nets['img_pair_encoder'](\n batch_src_imgs2.view(batch_size * win_size, c, h, w),\n batch_dst_imgs.view(batch_size * win_size, c, h, w)).view(batch_size, -1)\n pred_waypoints = nets['wp_regressor'](pair_features)\n if proximity_label:\n pred_proximity = nets['proximity_regressor'](pair_features)\n if heading_diff_label:\n pred_heading_diff = nets['heading_diff_regressor'](pair_features)\n\n elif model_variant == 'future_pair_conv':\n batch_src_imgs2 = batch_src_imgs.unsqueeze(1).expand_as(batch_dst_imgs).contiguous()\n pair_features = nets['img_pair_encoder'](\n batch_src_imgs2.view(batch_size * win_size, c, h, w),\n batch_dst_imgs.view(batch_size * win_size, c, h, w)).view(batch_size, win_size, -1)\n conv_feature = nets['conv_encoder'](pair_features.transpose(1, 2))\n pred_waypoints = nets['wp_regressor'](conv_feature)\n if proximity_label:\n pred_proximity = nets['proximity_regressor'](conv_feature)\n if heading_diff_label:\n pred_heading_diff = nets['heading_diff_regressor'](conv_feature)\n\n elif model_variant == 'future_pair_featurized':\n src_features = nets['img_encoder'](batch_src_imgs)\n dst_features = nets['img_encoder'](batch_dst_imgs.view(\n batch_size * win_size, c, h, w)).view(batch_size, win_size, -1)\n src_features = src_features.unsqueeze(1).expand_as(dst_features).contiguous()\n pair_features = nets['feature_pair_encoder'](\n src_features.view(batch_size * win_size, -1),\n dst_features.view(batch_size * win_size, -1)).view(batch_size, -1)\n pred_waypoints = nets['wp_regressor'](pair_features)\n\n elif model_variant == 'future_pair_featurized_v2':\n src_features = nets['src_img_encoder'](batch_src_imgs)\n dst_features = nets['dst_img_encoder'](batch_dst_imgs.view(\n batch_size * win_size, c, h, w)).view(batch_size, win_size, -1)\n src_features = src_features.unsqueeze(1).expand_as(dst_features).contiguous()\n pair_features = nets['feature_pair_encoder'](\n src_features.view(batch_size * win_size, -1),\n dst_features.view(batch_size * win_size, -1)).view(batch_size, -1)\n pred_waypoints = nets['wp_regressor'](pair_features)\n\n elif model_variant == 'raw_control':\n batch_src_imgs2 = batch_src_imgs.unsqueeze(1).expand_as(batch_dst_imgs).contiguous()\n pair_features = nets['img_pair_encoder'](\n batch_src_imgs2.view(batch_size * win_size, c, h, w),\n batch_dst_imgs.view(batch_size * win_size, c, h, w)).view(batch_size, win_size, -1)\n conv_feature = nets['conv_encoder'](pair_features.transpose(1, 2))\n\n velocity = batch_extras['velocity'].to(device=train_device, non_blocking=True)\n angular_vel = batch_extras['angular_vel'].to(device=train_device, non_blocking=True)\n\n all_features = torch.cat([conv_feature, velocity, angular_vel], dim=-1)\n\n # Note that pred_waypoints here are actually raw controls.\n pred_waypoints = nets['wp_regressor'](all_features)\n\n if proximity_label:\n pred_proximity = nets['proximity_regressor'](conv_feature)\n\n if heading_diff_label:\n pred_heading_diff = nets['heading_diff_regressor'](conv_feature)\n\n else:\n raise RuntimeError('Unknown model variant %s' % model_variant)\n\n l2_loss = torch.sum(torch.pow(pred_waypoints - 
batch_waypoints, 2), dim=1)\n if weight_loss:\n l2_loss *= 1.0 / torch.max(batch_waypoints.norm(p=2, dim=1),\n batch_waypoints.new_tensor(weight_loss_min_clip))\n loss = torch.mean(l2_loss)\n if proximity_label:\n assert pred_proximity.size() == batch_extras['proximity'].size()\n proximity_loss = binary_cross_entropy_with_logits(pred_proximity,\n batch_extras['proximity'])\n loss += proximity_loss\n\n if heading_diff_label:\n assert pred_heading_diff.size() == batch_extras['heading_diff'].size()\n heading_diff_loss = torch.mean(torch.sum(torch.pow(\n pred_heading_diff - batch_extras['heading_diff'], 2), dim=1))\n loss += heading_diff_loss\n\n loss.backward()\n\n for _, opt in net_opts.items():\n opt.step()\n\n if idx % log_interval == 0:\n print('epoch %d batch time %.2f sec loss: %6.2f' % (\n epoch, (time.time() - last_log_time) / log_interval, loss.item()))\n print('learning rate:\\n%s' % tabulate.tabulate([\n (name, opt.param_groups[0]['lr']) for name, opt in net_opts.items()]))\n for name, net in nets.items():\n print('%s grad:\\n%s' % (name, module_grad_stats(net)))\n\n vis.line(X=np.array([epoch * n_samples + idx * batch_size]),\n Y=np.array([loss.item()]),\n win='loss', update='append', opts={'title': 'loss'})\n\n if proximity_label:\n def format(l):\n return '(' + ','.join(['%.2f' % _ for _ in l]) + ')'\n print('proximity:\\n%s' % tabulate.tabulate([\n ['pred'] + [format(_) for _ in torch.sigmoid(pred_proximity[:10]).tolist()],\n ['gt'] + [format(_) for _ in batch_extras['proximity'][:10].tolist()]\n ]))\n vis.line(X=np.array([epoch * n_samples + idx * batch_size]),\n Y=np.array([proximity_loss.item()]),\n win='proximity loss', update='append',\n opts={'title': 'proximity loss'})\n\n if heading_diff_label:\n def format(l):\n return '(' + ','.join(['%.2f' % _ for _ in l]) + ')'\n print('heading_diff:\\n%s' % tabulate.tabulate([\n ['pred'] + [format(_) for _ in pred_heading_diff[:10].tolist()],\n ['gt'] + [format(_) for _ in batch_extras['heading_diff'][:10].tolist()]\n ]))\n vis.line(X=np.array([epoch * n_samples + idx * batch_size]),\n Y=np.array([heading_diff_loss.item()]),\n win='heading_diff loss', update='append',\n opts={'title': 'heading diff loss'})\n\n last_log_time = time.time()\n vis.save([vis.env])\n\n for _, sched in net_scheds.items():\n sched.step()\n\n epoch += 1\n if epoch > max_epochs:\n break\n\n if epoch % save_interval == 0:\n print('saving model...')\n _save_model(nets, net_opts, epoch, global_args, model_file)\n","repo_name":"xymeng/rmp_nav","sub_path":"topological_nav/controller/train_fixture.py","file_name":"train_fixture.py","file_ext":"py","file_size_in_byte":15154,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"77"} +{"seq_id":"15257555545","text":"from flask import Blueprint, render_template, render_template_string, request, flash, jsonify\nfrom flask_login import login_required, current_user\nfrom .models import Note, User\nfrom . 
import db\nfrom datetime import datetime\nviews = Blueprint('views', __name__)\nimport json\n\n@views.route('/', methods=['POST','GET'])\n@login_required\ndef home():\n    if request.method == 'POST':\n        note = request.form.get('note')\n\n        if len(note) < 1:\n            flash('Note is too short!', category='error')\n        else:\n            now = datetime.now()\n            new_note = Note(data=note, date=now, user_id=current_user.id)\n            db.session.add(new_note)\n            db.session.commit()\n            flash('Note added!', category='success')\n\n    return render_template('home.html', user=current_user)\n\n@views.route('/admin', methods=['GET', 'POST'])\n@login_required\ndef admin():\n    if request.method == 'GET':\n        users = User.query.all()\n        notes = Note.query.all()\n\n    return render_template('admin.html', user=current_user, users=users, notes=notes)\n\n@views.route('/delete-note', methods=['POST'])\ndef delete_note():\n    note = json.loads(request.data)\n    noteId = note['noteId']\n    note = Note.query.get(noteId)\n    if note:\n        if note.user_id == current_user.id: # WE CAN DELETE THE NOTES FROM THIS IF-BLOCK\n            db.session.delete(note)\n            db.session.commit()\n    \n    return jsonify({}) \n\n@views.route('/delete-user', methods=['POST'])\ndef delete_user():\n    user = json.loads(request.data)\n    userId = user['userId']\n    user = User.query.get(userId)\n    if user:\n        db.session.delete(user)\n        db.session.commit()\n    return jsonify({})\n\n@views.route('/delete-note-admin', methods=['POST'])\ndef delete_note_admin():\n    note = json.loads(request.data)\n    noteId = note['noteId']\n    note = Note.query.get(noteId)\n    if note: \n        db.session.delete(note)\n        db.session.commit()\n    \n    return jsonify({}) \n\n@views.route('/user', methods=['GET'])\ndef user():\n    username = request.args.get('username', default = current_user.first_name)\n\n    template = ''' \n    {% extends \"base.html\" %} {% block title %}User panel{% endblock %}\n    \n    {%block content%}\n    <h1>User panel</h1>\n    <p>My name is: ''' + username + '''</p>\n    <p>My email address is: {{user.email}}</p>
\n \n \n {%endblock%}\n '''\n\n return render_template_string(template, user = current_user)","repo_name":"Dongo9/COD-2022","sub_path":"Worst backend ever/website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5493057363","text":"# try to build a lstm network\r\nimport numpy as np\r\nimport torch.nn as nn\r\nimport torch\r\nfrom torch.autograd import Variable\r\nimport torch.optim as optim\r\nfrom torch.optim import lr_scheduler\r\nfrom MyDNNDecoder import MyDNNDecoder\r\nfrom MySalEncoder import MySalEncoder\r\nimport torch.nn.utils as utils\r\nimport skimage\r\nimport skimage.io\r\nfrom skimage.segmentation import slic\r\nfrom skimage.util import img_as_float\r\nfrom skimage import transform,data\r\nfrom torchvision import datasets,transforms\r\nimport networkx as nx\r\nimport scipy.spatial.distance\r\nimport scipy.signal\r\nimport math\r\nimport copy\r\nimport os\r\nfrom PIL import Image\r\nfrom network import resnet34\r\nimport time\r\nfrom TrainDataset import TrainDataset\r\nfrom TestDataset import TestDataset\r\nfrom ValDataset import ValDataset\r\nimport pandas as pd\r\nfrom torch.utils.data import DataLoader\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable # torch 中 Variable 模块\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\r\n\r\nfile_w_dir = 'data/DUT/DUTSal'\r\n\r\ndata_transforms = transforms.Compose([\r\n # transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[1, 1, 1])\r\n ])\r\n\r\n\r\ndef S(x1, x2, geodesic,sigma_clr=10):\r\n return math.exp(-pow(geodesic[x1, x2], 2)/(2*sigma_clr*sigma_clr))\r\n\r\n\r\ndef compute_saliency_cost(smoothness, w_bg, wCtr):\r\n n = len(w_bg)\r\n A = np.zeros((n, n))\r\n b = np.zeros((n))\r\n for x in range(0,n):\r\n A[x,x] = 2 * w_bg[x] + 2 * (wCtr[x])\r\n b[x] = 2 * wCtr[x]\r\n for y in range(0, n):\r\n A[x, x] += 2 * smoothness[x, y]\r\n A[x, y] -= 2 * smoothness[x, y]\r\n x = np.linalg.solve(A, b)\r\n return x\r\n\r\n\r\ndef path_length(path, G):\r\n dist = 0.0\r\n for i in range(1,len(path)):\r\n dist += G[path[i - 1]][path[i]]['weight']\r\n return dist\r\n\r\n\r\ndef make_graph(grid):\r\n # get unique labels\r\n vertices = np.unique(grid)\r\n # map unique labels to [1,...,num_labels]\r\n reverse_dict = dict(zip(vertices,np.arange(len(vertices))))\r\n grid = np.array([reverse_dict[x] for x in grid.flat]).reshape(grid.shape)\r\n\r\n # create edges\r\n down = np.c_[grid[:-1, :].ravel(), grid[1:, :].ravel()]\r\n right = np.c_[grid[:, :-1].ravel(), grid[:, 1:].ravel()]\r\n all_edges = np.vstack([right, down])\r\n all_edges = all_edges[all_edges[:, 0] != all_edges[:, 1], :]\r\n all_edges = np.sort(all_edges, axis=1)\r\n num_vertices = len(vertices)\r\n edge_hash = all_edges[:,0] + num_vertices * all_edges[:, 1]\r\n # find unique connections\r\n edges = np.unique(edge_hash)\r\n # undo hashing\r\n edges = [[vertices[x%num_vertices], vertices[int(x/num_vertices)]] for x in edges]\r\n\r\n return vertices, edges\r\n\r\n\r\n# def encoder_opt(encoder_output, DATA_MAX_LEN, vertices_batch, edges_batch, boundary_batch, centers_batch,\r\n# max_dist_batch):\r\n#\r\n# train_batchsize = encoder_output.data.shape[0]\r\n# OPTW_batch = np.zeros((train_batchsize, DATA_MAX_LEN))\r\n# for ii in range(train_batchsize):\r\n# vertices = vertices_batch[ii]\r\n# edges = edges_batch[ii]\r\n# boundary = boundary_batch[ii]\r\n# centers = centers_batch[ii]\r\n# max_dist = 
max_dist_batch[ii]\r\n#\r\n# features = encoder_output.data[ii, :, :]\r\n#\r\n# G = nx.Graph()\r\n# #buid the graph\r\n# for edge in edges:\r\n# pt1 = edge[0]\r\n# pt2 = edge[1]\r\n# mm1 = features[pt1, :]\r\n# mm2 = features[pt2, :]\r\n#\r\n# color_distance = scipy.spatial.distance.euclidean(mm1,mm2)\r\n# color_distance = np.sqrt(np.sum(np.square(color_distance)))\r\n# #color_distance = np.linalg.norm(mm1,mm2)\r\n# G.add_edge(pt1, pt2, weight=color_distance)\r\n#\r\n# #add a new edge in graph if edges are both on boundary\r\n# for v1 in vertices:\r\n# if boundary[v1] == 1:\r\n# for v2 in vertices:\r\n# if boundary[v2] == 1:\r\n# #color_distance = tf.reduce_sum(tf.sqrt(tf.square(features[v1] - features[v2])), 0)\r\n# color_distance = scipy.spatial.distance.euclidean(features[v1],features[v2])\r\n# color_distance = np.sqrt(np.sum(np.square(color_distance)))\r\n# G.add_edge(v1, v2, weight=color_distance)\r\n#\r\n# geodesic = np.zeros((len(vertices), len(vertices)), dtype=float)\r\n# spatial = np.zeros((len(vertices), len(vertices)), dtype=float)\r\n# smoothness = np.zeros((len(vertices), len(vertices)), dtype=float)\r\n# adjacency = np.zeros((len(vertices), len(vertices)), dtype=float)\r\n#\r\n# sigma_clr = 10.0\r\n# sigma_bndcon = 1.0\r\n# sigma_spa = 0.25\r\n# mu = 0.1\r\n# all_shortest_paths_color = nx.shortest_path(G, source=None, target=None, weight='weight')\r\n#\r\n# for v1 in vertices:\r\n# for v2 in vertices:\r\n# if v1 == v2:\r\n# geodesic[v1, v2] = 0\r\n# spatial[v1, v2] = 0\r\n# smoothness[v1, v2] = 0\r\n# else:\r\n# geodesic[v1, v2] = path_length(all_shortest_paths_color[v1][v2], G)\r\n# spatial[v1, v2] = scipy.spatial.distance.euclidean(centers[v1], centers[v2]) / max_dist\r\n# smoothness[v1, v2] = math.exp(-(geodesic[v1, v2] * geodesic[v1, v2])/(2.0*sigma_clr*sigma_clr)) + mu\r\n#\r\n# for edge in edges:\r\n# pt1 = edge[0]\r\n# pt2 = edge[1]\r\n# adjacency[pt1, pt2] = 1\r\n# adjacency[pt2, pt1] = 1\r\n#\r\n# for v1 in vertices:\r\n# for v2 in vertices:\r\n# smoothness[v1, v2] = adjacency[v1, v2] * smoothness[v1, v2]\r\n#\r\n# area = dict()\r\n# len_bnd = dict()\r\n# bnd_con = dict()\r\n# w_bg = dict()\r\n# ctr = dict()\r\n# wCtr = dict()\r\n#\r\n# for v1 in vertices:\r\n# area[v1] = 0\r\n# len_bnd[v1] = 0\r\n# ctr[v1] = 0\r\n# for v2 in vertices:\r\n# d_app = geodesic[v1, v2]\r\n# d_spa = spatial[v1, v2]\r\n# w_spa = math.exp(- (d_spa * d_spa)/(2.0*sigma_spa*sigma_spa))\r\n# area_i = S(v1, v2, geodesic)\r\n# area[v1] += area_i\r\n# len_bnd[v1] += area_i * boundary[v2]\r\n# ctr[v1] += d_app * w_spa\r\n# bnd_con[v1] = len_bnd[v1] / math.sqrt(area[v1])\r\n# w_bg[v1] = 1.0 - math.exp(- (bnd_con[v1]*bnd_con[v1])/(2*sigma_bndcon*sigma_bndcon))\r\n#\r\n# for v1 in vertices:\r\n# wCtr[v1] = 0\r\n# for v2 in vertices:\r\n# d_app = geodesic[v1, v2]\r\n# d_spa = spatial[v1, v2]\r\n# w_spa = math.exp(- (d_spa*d_spa)/(2.0*sigma_spa*sigma_spa))\r\n# wCtr[v1] += d_app * w_spa * w_bg[v2]\r\n#\r\n# # normalise value for wCtr\r\n# min_value = min(wCtr.values())\r\n# max_value = max(wCtr.values())\r\n#\r\n# for v in vertices:\r\n# wCtr[v] = (wCtr[v] - min_value)/(max_value - min_value)\r\n#\r\n# r_opt_w = Variable(torch.FloatTensor(compute_saliency_cost(smoothness, w_bg, wCtr)))\r\n#\r\n# OPTW_batch[ii, :r_opt_w.shape[0]] = r_opt_w\r\n#\r\n# return OPTW_batch\r\n\r\n\r\ndef prepare_image_loader(img, gt):\r\n\r\n segments_slic = slic(img.cpu(), n_segments=160, compactness=1000, sigma=1, enforce_connectivity=1)\r\n\r\n nrows, ncols = segments_slic.shape\r\n max_dist = math.sqrt(nrows * nrows + 
ncols * ncols)\r\n\r\n    grid = segments_slic\r\n\r\n    (vertices, edges) = make_graph(grid)\r\n\r\n    gridx, gridy = np.mgrid[:grid.shape[0], :grid.shape[1]]\r\n\r\n    centers = dict()\r\n    colors = dict()\r\n    colors_rgb = dict()\r\n    distances = dict()\r\n    boundary = dict()\r\n    roi = []\r\n\r\n    for v in vertices:\r\n        # centers[v] = [gridy[grid == v].mean(), gridx[grid == v].mean()]\r\n\r\n        x_pix = gridx[grid == v]\r\n        y_pix = gridy[grid == v]\r\n\r\n        # if np.any(x_pix == 0) or np.any(y_pix == 0) or np.any(x_pix == nrows - 1) or np.any(y_pix == ncols - 1):\r\n        #     boundary[v] = 1\r\n        # else:\r\n        #     boundary[v] = 0\r\n\r\n        min_h_grid = min(x_pix)\r\n        max_h_grid = max(x_pix)\r\n        min_w_grid = min(y_pix)\r\n        max_w_grid = max(y_pix)\r\n        roi.append([min_h_grid, min_w_grid, max_h_grid, max_w_grid])\r\n\r\n        # if np.any(x_pix == nrows - 1): # sign as boundary\r\n        #     roi.append([0, 0, 0, 0])\r\n\r\n        # if v < 135:\r\n        #     for vi in range(134-v):\r\n        #         roi.append([0, 0, 0, 0])\r\n\r\n    roi = np.array(roi)\r\n    nnn = roi.shape[0]\r\n    if nnn < 180:\r\n        roi = roi.tolist()\r\n        for vi in range(180-nnn):\r\n            roi.append([0, 0, 0, 0])\r\n        roi = np.array(roi)\r\n\r\n    gt_pxl = []\r\n    gt_np = gt.cpu().numpy()\r\n    if len(gt.shape) == 3: # got a grayscale image\r\n        gt_np = skimage.color.rgb2gray(gt_np)\r\n    if gt_np.shape[0] != grid.shape[0] or gt_np.shape[1] != grid.shape[1]:\r\n        gt_np = transform.resize(gt_np, grid.shape)\r\n    for v in vertices:\r\n        gt_pxl.append(np.mean(gt_np[grid == v], axis=0))\r\n\r\n    nn = vertices.shape[0]\r\n    if nn < 180:\r\n        for vi in range(180 - nn):\r\n            gt_pxl.append(0)\r\n\r\n    gt_pxl = np.rint(np.array(gt_pxl))\r\n\r\n    # normalization =======================\r\n    img = data_transforms(img)\r\n    #================================\r\n\r\n    img_rgb = img.permute(2, 0, 1).unsqueeze_(0).float().cuda()\r\n\r\n    return img_rgb, gt_pxl, vertices, edges, boundary, centers, max_dist, grid, roi\r\n\r\n\r\ndef prepare_image4test_loader(img):\r\n\r\n    img_np = img.cpu().numpy()\r\n    segments_slic = slic(img.cpu(), n_segments=160, compactness=10, sigma=1, enforce_connectivity=1)\r\n    img_superpixels = []\r\n\r\n    nrows, ncols = segments_slic.shape\r\n    max_dist = math.sqrt(nrows * nrows + ncols * ncols)\r\n\r\n    grid = segments_slic\r\n\r\n    (vertices, edges) = make_graph(grid)\r\n\r\n    gridx, gridy = np.mgrid[:grid.shape[0], :grid.shape[1]]\r\n    centers = dict()\r\n    colors = dict()\r\n    colors_rgb = dict()\r\n    boundary = dict()\r\n    roi = []\r\n\r\n    for v in vertices:\r\n        centers[v] = [gridy[grid == v].mean(), gridx[grid == v].mean()]\r\n        colors[v] = np.mean(img_np[grid == v], axis=0)\r\n\r\n        x_pix = gridx[grid == v]\r\n        y_pix = gridy[grid == v]\r\n\r\n        if np.any(x_pix == 0) or np.any(y_pix == 0) or np.any(x_pix == nrows - 1) or np.any(y_pix == ncols - 1):\r\n            boundary[v] = 1\r\n        else:\r\n            boundary[v] = 0\r\n\r\n        min_h_grid = min(x_pix)\r\n        max_h_grid = max(x_pix)\r\n        min_w_grid = min(y_pix)\r\n        max_w_grid = max(y_pix)\r\n        roi.append([min_h_grid, min_w_grid, max_h_grid, max_w_grid])\r\n\r\n        colors_rgb[v] = np.mean(img_np[grid == v], axis=0)\r\n\r\n        if np.any(x_pix == nrows - 1): # sign as boundary\r\n            roi.append([0, 0, 0, 0])\r\n\r\n\r\n        # if v < 135:\r\n        #     for vi in range(134-v):\r\n        #         roi.append([0, 0, 0, 0])\r\n\r\n    roi = np.array(roi)\r\n    nnn = roi.shape[0]\r\n    if nnn < 180:\r\n        roi = roi.tolist()\r\n        for vi in range(180-nnn):\r\n            roi.append([0, 0, 0, 0])\r\n        roi = np.array(roi)\r\n\r\n\r\n    img = data_transforms(img)\r\n    img_rgb = img.permute(2, 0, 1).unsqueeze_(0).float().cuda()\r\n    roi = np.array(roi)\r\n\r\n    return img_rgb, 
img_superpixels, grid, vertices, edges, boundary, centers, max_dist, roi\r\n\r\n\r\ntrain_batchsize = 16\r\nval_batchsize = 16\r\ntest_batchsize = 1\r\n#######################################################\r\nworkers = 0\r\ntrain_data_list = pd.read_csv('data/label_dut.csv')\r\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\ntrain_data = TrainDataset(train_data_list,\r\n transform=transforms.Compose([\r\n transforms.Resize((224, 224)),\r\n # transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n # normalize,\r\n ]))\r\ntrain_loader = DataLoader(train_data, batch_size=train_batchsize, shuffle=True, pin_memory=True, num_workers=workers)\r\n\r\nval_data_list = pd.read_csv('data/val_msra.csv')\r\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\nval_data = ValDataset(val_data_list,\r\n transform=transforms.Compose([\r\n transforms.Resize((224, 224)),\r\n transforms.ToTensor(),\r\n # normalize,\r\n ]))\r\nval_loader = DataLoader(val_data, batch_size=val_batchsize, shuffle=True, pin_memory=True, num_workers=workers)\r\n\r\n\r\ntest_data_list = pd.read_csv('data/test.csv')\r\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\ntest_data = TestDataset(test_data_list,\r\n transform=transforms.Compose([\r\n transforms.Resize((224, 224)),\r\n transforms.ToTensor(),\r\n # normalize,\r\n ]))\r\ntest_loader = DataLoader(test_data, batch_size=test_batchsize, shuffle=True, pin_memory=True, num_workers=workers)\r\n\r\n######################################################\r\n\r\nnum_epochs = 50 # <---160\r\n\r\nEncoderModel = MySalEncoder()\r\nDecoderModel = MyDNNDecoder()\r\n\r\n#load the previous best parameters\r\n# checkpoint_encoder = torch.load('data/en_check_params.pkl')\r\n# EncoderModel.load_state_dict(checkpoint_encoder)\r\n# checkpoint_decoder = torch.load('data/de_check_params.pkl')\r\n# DecoderModel.load_state_dict(checkpoint_decoder)\r\n\r\n# for name, param in EncoderModel.named_parameters():\r\n# if 'bias' in name:\r\n# nn.init.constant_(param, 0.0)\r\n# elif 'weight' in name:\r\n# nn.init.xavier_normal_(param)\r\n# for name, param in DecoderModel.named_parameters():\r\n# if 'bias' in name:\r\n# nn.init.constant_(param, 0.0)\r\n# elif 'weight' in name:\r\n# nn.init.xavier_normal_(param)\r\n\r\nEncoderModel.cuda()\r\nDecoderModel.cuda()\r\n\r\n# DecoderModel = nn.DataParallel(DecoderModel)\r\n\r\ncriterion = nn.BCELoss() #SmoothL1Loss BCELoss\r\n\r\n\r\nencoder_optimizer = optim.Adam(EncoderModel.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\r\ndecoder_optimizer = optim.Adam(DecoderModel.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\r\n\r\n# encoder_optimizer = optim.RMSprop(EncoderModel.parameters(), lr=0.01, eps=1e-08, weight_decay=0)\r\n# decoder_optimizer = optim.RMSprop(DecoderModel.parameters(), lr=0.01, eps=1e-08, weight_decay=0)\r\n\r\n\r\nencoder_scheduler = lr_scheduler.ReduceLROnPlateau(encoder_optimizer, 'min', patience=6, factor=0.5, min_lr=0.000001)\r\ndecoder_scheduler = lr_scheduler.ReduceLROnPlateau(decoder_optimizer, 'min', patience=6, factor=0.5, min_lr=0.000001)\r\n\r\nen_best_model_wts = copy.deepcopy(EncoderModel.state_dict())\r\nbest_loss = 1000\r\nprint_inteval = 30\r\nnotimproveNum = 0\r\nclip = 5\r\n\r\ntensor = torch.randn(180, 180).cuda()\r\n\r\nfor epoch in range(num_epochs):\r\n print('Epoch {}/{}'.format(epoch+1, num_epochs))\r\n print('-' * 10)\r\n if notimproveNum > 55:\r\n 
print('Valloss do not improve at {} epochs,so break'.format(notimproveNum))\r\n break\r\n for phase in ['train', 'val']:\r\n if phase == 'train':\r\n #scheduler.step()\r\n EncoderModel.train() # Set model to training mode\r\n DecoderModel.train()\r\n EncoderModel.batch_size = train_batchsize\r\n loader = train_loader\r\n\r\n else:\r\n EncoderModel.eval() # Set model to evaluate mode\r\n DecoderModel.eval()\r\n EncoderModel.batch_size = val_batchsize\r\n loader = val_loader\r\n\r\n running_loss = 0.0\r\n pxl_num = 180\r\n # filename = os.listdir(Pic_dir)\r\n num_batch = 1\r\n for ii, (images, target) in enumerate(loader):\r\n time_start = time.time()\r\n image_var = torch.as_tensor(images).cuda()\r\n gt_val = torch.as_tensor(target).cuda()\r\n if image_var.size(0) != train_batchsize:\r\n break;\r\n\r\n # DATA = np.zeros((train_batchsize, pxl_num, 3, 32, 32))\r\n LABEL = np.zeros((train_batchsize, pxl_num))\r\n MASK = np.zeros((train_batchsize, pxl_num))\r\n DATA_LENS = np.zeros(train_batchsize)\r\n vertices_batch = dict()\r\n edges_batch = dict()\r\n boundary_batch = dict()\r\n centers_batch = dict()\r\n max_dist_batch = dict()\r\n img_batch = []\r\n roi_batch = []\r\n\r\n for k in range(train_batchsize):\r\n # print(gt_val.size())\r\n img_rgb, v2, v3, v4, v5, v6, v7, v8, roi = prepare_image_loader(image_var[k, :, :, :].permute(1,2,0), gt_val[k, :, :, :].permute(1,2,0))\r\n vertices_batch[k], edges_batch[k], boundary_batch[k], centers_batch[k], max_dist_batch[k] = v3, v4, v5, v6, v7\r\n LABEL[k, :v2.shape[0]] = v2\r\n MASK[k, :v2.shape[0]] = np.ones(v2.shape[0])\r\n img_batch.append(img_rgb)\r\n roi_batch.append(roi)\r\n DATA_LENS[k] = v2.shape[0]\r\n img_batch = torch.cat(img_batch)\r\n roi_batch = np.array(roi_batch)\r\n\r\n DATA_MAX_LEN = int(max(DATA_LENS))\r\n\r\n LABEL = torch.from_numpy(LABEL).float().cuda()\r\n # LABEL = Variable(LABEL)\r\n\r\n MASK = torch.from_numpy(MASK).float()\r\n MASK = Variable(MASK.cuda())\r\n\r\n # EncoderModel.train()\r\n # DecoderModel.train()\r\n EncoderModel.zero_grad()\r\n encoder_outputs = EncoderModel(img_batch, roi_batch)\r\n target_length_col = encoder_outputs.data.shape[1]\r\n\r\n # opt_w = encoder_opt(encoder_outputs, DATA_MAX_LEN, vertices_batch, edges_batch, boundary_batch, centers_batch, max_dist_batch)\r\n #\r\n # encoder_outputs = encoder_outputs.data.cpu().numpy()\r\n # for i in range(train_batchsize):\r\n # opt_wex = opt_w[i, :]\r\n # for j in range(target_length_col-1):\r\n # opt_wex = np.column_stack((opt_wex, opt_w[i, :]))\r\n # encoder_outputs[i, :, :] = encoder_outputs[i, :, :] * opt_wex\r\n #\r\n # encoder_outputs = Variable(torch.from_numpy(encoder_outputs).float())\r\n\r\n #-----decoder process\r\n DecoderModel.zero_grad()\r\n decoder_output, out_bg = DecoderModel(encoder_outputs, LABEL)\r\n\r\n variable = Variable(tensor, requires_grad=True)\r\n variable = variable.squeeze(0)\r\n U, S, V = torch.svd(variable)\r\n S1=torch.zeros(180).cuda()\r\n sval_nums = 32\r\n S1[0:sval_nums]=S[0:sval_nums]\r\n variable = torch.mm(U[:, 0:sval_nums], torch.mm(S1.diag(), V[0:sval_nums,:].t()).t())\r\n variable = variable.unsqueeze(0)\r\n loss2 = torch.norm(out_bg - variable.matmul(out_bg))/(1024)\r\n\r\n # target_length = encoder_outputs.data.shape[1]\r\n # decoder_output = []\r\n # for current_index in range(target_length):\r\n # decoder_output.append(DecoderModel(encoder_outputs.cuda(), current_index))\r\n # decoder_output = torch.stack(decoder_output).permute(1,0,2).reshape(train_batchsize*target_length, 1)\r\n\r\n # loss using low rank or 
not\r\n # total_loss = criterion(decoder_output*MASK, LABEL*MASK)\r\n total_loss = criterion(decoder_output, LABEL) + loss2\r\n # total_loss = criterion(decoder_output, LABEL)\r\n\r\n #-----end of decoder process\r\n if phase == 'train':\r\n total_loss.backward()\r\n utils.clip_grad_norm_(EncoderModel.parameters(), clip)\r\n utils.clip_grad_norm_(DecoderModel.parameters(), clip)\r\n encoder_optimizer.step()\r\n decoder_optimizer.step()\r\n\r\n variable = variable.squeeze(0)\r\n U, S, V = torch.svd(variable)\r\n S1 = torch.zeros(180).cuda()\r\n S1[0:sval_nums] = S[0:sval_nums]\r\n variable = torch.mm(U[:, 0:sval_nums], torch.mm(S1.diag(), V[0:sval_nums, :].t()).t())\r\n variable = variable.unsqueeze(0)\r\n\r\n if ii % print_inteval == 0:\r\n print('{}: {} Average_BatchLoss: {:.4f} '.format(ii, phase, total_loss.data))\r\n\r\n en_eachbatch_model_wts = copy.deepcopy(EncoderModel.state_dict())\r\n de_eachbatch_model_wts = copy.deepcopy(DecoderModel.state_dict())\r\n\r\n time_end = time.time()\r\n if ii % print_inteval == 0:\r\n print('cost {:.1f} secs'.format(time_end - time_start))\r\n\r\n else:\r\n time_end = time.time()\r\n if ii % print_inteval == 0:\r\n print('{}: {} Average_BatchLoss: {:.4f}: '.format(ii, phase, total_loss.data))\r\n print('cost {:.1f} secs'.format(time_end - time_start))\r\n running_loss += total_loss.data\r\n num_batch = ii+1\r\n\r\n epoch_loss = running_loss/num_batch\r\n\r\n if phase == 'val':\r\n # print('num_batch'.format(num_batch))\r\n en_former_lr = encoder_optimizer.param_groups[0]['lr']\r\n encoder_scheduler.step(epoch_loss)\r\n en_current_lr = encoder_optimizer.param_groups[0]['lr']\r\n\r\n de_former_lr = decoder_optimizer.param_groups[0]['lr']\r\n decoder_scheduler.step(epoch_loss)\r\n de_current_lr = decoder_optimizer.param_groups[0]['lr']\r\n\r\n #writer.add_scalar('Epoch_VALLoss', epoch_loss, epoch)\r\n print('Encoder learning rate is {}'.format(encoder_optimizer.param_groups[0]['lr']))\r\n print('Decoder learning rate is {}'.format(decoder_optimizer.param_groups[0]['lr']))\r\n\r\n if epoch_loss < best_loss:\r\n best_loss = epoch_loss\r\n en_best_model_wts = copy.deepcopy(EncoderModel.state_dict())\r\n de_best_model_wts = copy.deepcopy(DecoderModel.state_dict())\r\n print('BestLoss: {:.4f} is Epoch{} '.format(best_loss, epoch+1))\r\n notimproveNum = 0\r\n else:\r\n notimproveNum = notimproveNum + 1\r\n\r\n torch.save(EncoderModel.state_dict(), 'data/en_check_paramsBCE.pkl')\r\n torch.save(DecoderModel.state_dict(), 'data/de_check_paramsBCE.pkl')\r\n\r\n print('{} EpochLoss: {} '.format(phase, epoch_loss))\r\n\r\n# EncoderModel.load_state_dict(en_best_model_wts)\r\n# torch.save(EncoderModel.state_dict(), 'data/en_best_params.pkl')\r\n# DecoderModel.load_state_dict(de_best_model_wts)\r\n# torch.save(DecoderModel.state_dict(), 'data/de_best_params.pkl')\r\n\r\n\r\n\r\n# ##------------evaluate----------------------------------------------##\r\nPic_save_dir = 'data/DUT_SalBCE'\r\n#load the parameters\r\n# checkpoint_encoder = torch.load('data/en_check_params.pkl')\r\n# EncoderModel.load_state_dict(checkpoint_encoder)\r\n# checkpoint_decoder = torch.load('data/de_check_params.pkl')\r\n# DecoderModel.load_state_dict(checkpoint_decoder)\r\n\r\nfor ii, (images, filename) in enumerate(test_loader):\r\n\r\n image_var = torch.tensor(images).squeeze().cuda() #async=True\r\n\r\n img_rgb, DATA, grid, vertices, edges, boundary, centers, max_dist, roi = prepare_image4test_loader(image_var.permute(1,2,0))\r\n\r\n EncoderModel.eval()\r\n roi_batch = []\r\n 
roi_batch.append(roi)\r\n    roi_batch = np.array(roi_batch)\r\n    encoder_outputs = EncoderModel(img_rgb, roi_batch) # .unsqueeze(0)\r\n    target_length = encoder_outputs.data.shape[1]\r\n    target_length_col = encoder_outputs.data.shape[2]\r\n    vertices_batch = dict()\r\n    edges_batch = dict()\r\n    boundary_batch = dict()\r\n    centers_batch = dict()\r\n    max_dist_batch = dict()\r\n    vertices_batch[0] = vertices\r\n    edges_batch[0] = edges\r\n    boundary_batch[0] = boundary\r\n    centers_batch[0] = centers\r\n    max_dist_batch[0] = max_dist\r\n\r\n    sal_img = grid\r\n    DecoderModel.eval()\r\n    Label = None\r\n    decoder_output, out_bg = DecoderModel(encoder_outputs, Label)\r\n\r\n    for current_index in range(target_length):\r\n        mm = decoder_output[current_index]\r\n        sal_img[grid == current_index] = mm.detach().cpu()*255\r\n    b = np.array(sal_img)\r\n    b = b.astype(np.uint8)\r\n    xx = Image.fromarray(b)\r\n    print(filename[0])\r\n    xx.save(Pic_save_dir+'/'+filename[0])\r\n    # -----end of decoder process","repo_name":"cvcoding/EDNet","sub_path":"EDNet/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":24687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"40122613692","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('report', views.report, name='report'),\n    path('report1', views.report_revised, name='report1'),\n    path('report2', views.report_switch_between_items, name='report2')\n]\n\n\n\n","repo_name":"sisjuy/code_fin_bert","sub_path":"fin_bert_web/catalog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"40860303040","text":"from django.test import TestCase\n\nfrom ..models import Group, Post, User\n\n\nclass PostModelTest(TestCase):\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        cls.user = User.objects.create_user(username='author')\n        cls.group = Group.objects.create(\n            title='Тестовая группа',\n            slug='Тестовый слаг',\n            description='Тестовое описание',\n        )\n        cls.post = Post.objects.create(\n            author=cls.user,\n            text='Тестовый пост',\n        )\n\n    def test_models_have_correct_object_names(self):\n        \"\"\"Check that __str__ works correctly for the models.\"\"\"\n        group = PostModelTest.group\n        post = PostModelTest.post\n        expected_object_names = (\n            (group, group.title),\n            (post, post.text),\n        )\n        for obj, expected_object_name in expected_object_names:\n            with self.subTest():\n                self.assertEqual(\n                    expected_object_name, str(obj))\n","repo_name":"Kolupanov/hw05_final","sub_path":"yatube/posts/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"7839124722","text":"\"\"\"Writes the_page to /tmp/44\"\"\"\nimport urllib2\nurl = 'https://api.github.com/search/repositories?q=language:python&sort=stars&order=desc'\n\nrequest_headers = {\n    'User-Agent': 'Holberton_School',\n    'Authorization': 'token 3e0df28fcee55ed9d9bfbfda999ac5428e1ab96c'\n}\n\nreq = urllib2.Request(url, headers=request_headers)\nresponse = urllib2.urlopen(req)\nthe_page = response.read()\n\ntarget = open('/tmp/44', 
'w')\ntarget.truncate()\ntarget.write(the_page)\ntarget.close()\n","repo_name":"johndspence/holbertonschool_higher_level_programming","sub_path":"python_intro_2/write_them_to_disc.py","file_name":"write_them_to_disc.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"18498214793","text":"import tensorflow as tf\nimport tensorflow_datasets as tfds\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport io\n\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.preprocessing.text import Tokenizer\n\n\nimdb, info = tfds.load(\"imdb_reviews\", with_info=True, as_supervised=True)\n\ntraining_data, testing_data = imdb['train'], imdb['test']\n\ntraining_sentences = []\ntesting_sentence = []\n\ntesting_labels = []\ntraining_labels = []\n\nprint(len(training_data))\nprint(len(testing_data))\n\nfor s, l in training_data:\n    training_sentences.append(s.numpy().decode('utf8'))\n    training_labels.append(l.numpy())\n\nfor s, l in testing_data:\n    testing_sentence.append(s.numpy().decode('utf8'))\n    testing_labels.append(l.numpy())\n\ntesting_labels_final = np.array(testing_labels)\ntraining_labels_final = np.array(training_labels)\n\n\nVOCAB_SIZE = 10000\nOOV_TOKEN = \"<OOV>\"\nMAX_LEN = 120\nTRUNCATE = 'post'\nEMBEDDED_DIM = 16\n\ntokenizer = Tokenizer(num_words=VOCAB_SIZE, oov_token=OOV_TOKEN)\ntokenizer.fit_on_texts(training_sentences)\nword_index = tokenizer.word_index\n\nreverse_word_index = dict((value, key) for key, value in word_index.items())\n\nsequences = tokenizer.texts_to_sequences(training_sentences)\npadded = pad_sequences(sequences=sequences, maxlen=MAX_LEN, truncating=TRUNCATE)\n\ntest_sequences = tokenizer.texts_to_sequences(testing_sentence)\ntest_padded = pad_sequences(sequences=test_sequences, maxlen=MAX_LEN, truncating=TRUNCATE)\n\n# model\n\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Embedding(VOCAB_SIZE, EMBEDDED_DIM, input_length=MAX_LEN),\n    tf.keras.layers.Flatten(),\n    tf.keras.layers.Dense(512, activation=tf.nn.relu),\n    tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\n\nmodel.compile(optimizer='adam', loss=tf.keras.losses.binary_crossentropy, metrics=['accuracy'])\n\nhistory = model.fit(\n    padded,\n    training_labels_final,\n    epochs=10,\n    validation_data=(test_padded,testing_labels_final),\n    verbose=1\n)\n\nmodel.summary()\n\ntrain_accuracy = history.history['accuracy']\ntrain_loss = history.history['loss']\n\nval_accuracy = history.history['val_accuracy']\nval_loss = history.history['val_loss']\n\nepochs = range(len(train_accuracy))\nplt.plot(epochs, train_accuracy)\nplt.plot(epochs, val_accuracy)\nplt.figure()\n\nplt.plot(epochs, train_loss)\nplt.plot(epochs, val_loss)\nplt.figure()\nplt.show()\n\n#weights\ne = model.layers[0]\nweights = e.get_weights()[0]\n\n# generate files for projector tensorflow\n\nout_v = io.open(\"vec.tsv\", \"w\", encoding='utf-8')\nout_m = io.open(\"met.tsv\", \"w\", encoding=\"utf-8\")\n\nfor word_num in range(1, VOCAB_SIZE):\n    word = reverse_word_index[word_num]\n    embeddings = weights[word_num]\n    out_m.write(word + \"\\n\")\n    out_v.write('\\t'.join([str(x) for x in embeddings]) + \"\\n\")\nout_v.close()\nout_m.close()\n\nprint(\"Done\")\n\n\n\n\n\n\n","repo_name":"praveenwork/ml","sub_path":"Python/nlp/imdb.py","file_name":"imdb.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"29318564949","text":"#Contestador de celular\nnumero = int(input())\nhora = int(input())\nrespuesta = str(0)\n\nif (hora>=0) and (hora<=7):\n print(CONTESTAR)\nif hora<14:\n respuesta = 1>0\n if (respuesta == True) and (numero%1000==909):\n print(\"CONTESTAR\")\n else:\n print(\"NO CONTESTAR\")\nif (hora>=17) and (hora<=19):\n respuesta = 1>0\n if (respuesta==True) and (numero//10000):\n print(\"NO CONTESTAR\")\n else:\n print(\"CONTESTAR\")\nif hora>19:\n print(\"NO CONTESTAR\")","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej2/hito1_ej2_2ae96bebb0d2da59a9737c3e50bc5a00.py","file_name":"hito1_ej2_2ae96bebb0d2da59a9737c3e50bc5a00.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32637879682","text":"from pico2d import *\nimport game_framework\nimport game_world\n\nfrom mario import Mario\nfrom background import Background\nfrom mushroom import Mushroom\n\nbackground = None\nchar = None\nenemy = None\n\ndef handle_events():\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif (event.type, event.key) == (SDL_KEYDOWN, SDLK_ESCAPE):\n game_framework.quit()\n else:\n char.handle_event(event)\n\n# 초기화\ndef enter():\n global char, background, running, enemy\n char = Mario()\n enemy = Mushroom()\n background = Background()\n game_world.add_object(background, 0)\n game_world.add_object(char, 1)\n game_world.add_object(enemy, 2)\n\n\ndef exit():\n game_world.clear()\n\n\ndef update():\n for game_object in game_world.all_objects():\n game_object.update()\n\ndef draw_world():\n for game_object in game_world.all_objects():\n game_object.draw()\n\n\ndef draw():\n clear_canvas()\n draw_world()\n update_canvas()\n\ndef pause():\n pass\n\ndef resume():\n pass\n\n\n\n\ndef test_self():\n import play_state\n\n pico2d.open_canvas()\n game_framework.run(play_state)\n pico2d.clear_canvas()\n\nif __name__ == '__main__':\n test_self()\n","repo_name":"JHKimy/2020180048_2DGP_PROJECT","sub_path":"수정전/play_state.py","file_name":"play_state.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20145853136","text":"\"\"\"\r\n\r\nThis python demo is created to make \"Multi-class_Weather_Dataset_for_Image_Classification\" dataset\r\n\r\neasy to be processed by my demo.\r\n\r\nimage_dir format:\r\n\r\n-JPEGImage\r\n -classname+index.jpg\r\n -.....\r\ncsv format:\r\n\r\nindex filename filepath label\r\n0 ... ... ...\r\n1 ... ... ...\r\n2 ... ... ...\r\n3 ... ... 
...\r\n......\r\n\r\n\"\"\"\r\nimport os\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\n# 写一个函数方便把classname和index分开\r\ndef split_classname(filename):\r\n num_start = 0\r\n for i in range(len(filename)):\r\n if ord(filename[i]) >= 48 and ord(filename[i]) <= 57:\r\n num_start = i\r\n break\r\n return filename[:num_start]\r\n\r\n\r\n# 生成classes.txt文件\r\ndef generate_classes_txt():\r\n image_path = './JPEGImage'\r\n classes = []\r\n for name in os.listdir(image_path):\r\n filename = name[:-4]\r\n classes.append(split_classname(filename))\r\n classes = list(set(classes))\r\n with open('../classes.txt', 'w') as f:\r\n content = \"\"\r\n for i, classes_name in enumerate(classes):\r\n content = content + \"{} {}\\n\".format(i, classes_name)\r\n f.write(content)\r\n\r\n\r\ndef read_classes():\r\n path = '../classes.txt'\r\n content = \"\"\r\n with open(path, 'r') as f:\r\n content = f.read()\r\n content = content.split(\"\\n\")[:-1] # 最后一个是空列表,不需要\r\n output = []\r\n for item in content:\r\n item_list = item.split(\" \")\r\n output.append(item_list[1]) # list\r\n return output\r\n\r\n\r\ndef multi_weather_csv(image_path,csv_path):\r\n if os.path.exists('../classes.txt') == False:\r\n generate_classes_txt()\r\n classes = read_classes()\r\n infomation_array = [] # shape=(n,3)\r\n for name in os.listdir(image_path):\r\n filename = name[:-4]\r\n path = image_path + '/{}'.format(name)\r\n class_name = split_classname(filename)\r\n infomation_array.append([filename, path, classes.index(class_name)])\r\n info_arr = np.array(infomation_array)\r\n col = ['filename', 'filepath', 'label']\r\n df = pd.DataFrame(info_arr, columns=col)\r\n df.to_csv(csv_path, encoding='utf-8')\r\n\r\n\r\nmulti_weather_csv()\r\n","repo_name":"Alexisxty/PytorchImageClassify-master","sub_path":"data/folder/Multi-class_Weather_Dataset_for_Image_Classification/demo/csv_generator.py","file_name":"csv_generator.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"1425903836","text":"import PySimpleGUI as sg\nfrom random import randint\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nMAX_ROWS = 20\nMAX_COL = 50\nbox_size = 1\n# board = [[randint(0, 1) for j in range(MAX_COL)] for i in range(MAX_ROWS)]\nimg_list = np.zeros((MAX_ROWS, MAX_COL), dtype=np.uint8)\nlayout = [[[\n sg.Button('',\n size=(box_size, box_size),\n button_color=('white'),\n key=(i, j),\n pad=(0, 0)) for j in range(MAX_COL)\n] for i in range(MAX_ROWS)], [sg.Button('変換', key='check', expand_x=True)]]\n\nwindow = sg.Window('Minesweeper', layout)\n\nb_c = 'white'\nwhile True:\n event, values = window.read()\n if event in (sg.WIN_CLOSED, 'Exit'):\n break\n # window[(row, col)].update('New text') # To change a button's text, use this pattern\n # For this example, change the text of the button to the board's value and turn color black\n\n if event == 'check':\n for i in range(MAX_ROWS):\n for j in range(MAX_COL):\n if img_list[i][j] == 0:\n img_list[i][j] = 255\n else:\n img_list[i][j] = 0\n print(img_list)\n # plt.imshow(img_list,\n # cmap='gray',\n # vmin=0,\n # vmax=255,\n # interpolation='none')\n # plt.show()\n plt.imsave('C:\\\\Users\\\\Atsushi\\\\Pictures\\\\map\\\\buf.png',\n img_list) #拡張子を.pngとかに変えてもちゃんと保存してくれる。\n\n im_gray = np.array(\n Image.open('C:\\\\Users\\\\Atsushi\\\\Pictures\\\\map\\\\buf.png').convert(\n 'L'))\n pil_img_gray = Image.fromarray(im_gray)\n 
pil_img_gray.save('C:\\\\Users\\\\Atsushi\\\\Pictures\\\\map\\\\map.pgm')\n\n # img_list = np.array(save_list)\n # print(\"OK\")\n # pil_img_gray = Image.fromarray(img_list)\n # print(pil_img_gray.mode)\n # # L\n # pil_img_gray.save('C:\\\\Users\\\\Atsushi\\\\Pictures\\\\map\\\\a.pgm')\n break\n\n position = list(event)\n print(position)\n if img_list[position[0]][position[1]] == 1:\n b_c = 'white'\n img_list[position[0]][position[1]] = 0\n else:\n b_c = 'black'\n img_list[position[0]][position[1]] = 1\n window[event].update('', button_color=(b_c))\n\nwindow.close()","repo_name":"Odake-Atsushi/RosMapGenerator","sub_path":"dot/pazzle.py","file_name":"pazzle.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28108446922","text":"class Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n index=0\n seen=set()\n for i in range(len(nums)):\n if nums[i] not in seen:\n seen.add(nums[i])\n nums[index]=nums[i]\n index+=1\n return index","repo_name":"neeraj027/LeetCode","sub_path":"RemoveDuplicatesfromSortedArray..py","file_name":"RemoveDuplicatesfromSortedArray..py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13165779257","text":"import numpy as np\nfrom scipy.misc import imsave\n\nconfig = {}\nconfig[\"epsilon\"] = 1e-7\n\nEPSILON = config[\"epsilon\"]\n\n\ndef min_max_normalization(x, l_range = 0.0, r_range = 1.0, axis = None):\n\n\tassert (l_range < r_range), \"l_range must be less than r_range\"\n\n\tmin_x = np.amin(x, axis = axis, keepdims = True)\n\tmax_x = np.amax(x, axis = axis, keepdims = True)\n\n\tout = (x - min_x) / np.maximum(max_x - min_x, EPSILON)\n\tout = ((r_range - l_range) * out) + l_range\n\n\treturn out\n\ndef min_max_normalization_multiple_images(x, l_range = 0.0, r_range = 1.0, axis = None):\n\n\tif isinstance(axis, tuple):\n\t\tfor a in axis:\n\t\t\tassert (a > 0), \"Axis is out of bound or First axis represent different images.\"\n\telse:\n\t\tassert (axis > 0), \"Axis is out of bound or First axis represent different images.\"\n\n\tout = np.zeros_like(x)\n\n\tfor i in range(x.shape[0]):\n\t\tout[i] = min_max_normalization(x[i], l_range = l_range, r_range = r_range, axis = axis - 1)\n\n\treturn out\n\ndef mean_std_normalization(x, mean = 0.0, stddev = 1.0, axis = None):\n\tout = None\n\n\tmean_x = np.mean(x, axis = axis, keepdims = True)\n\tstd_x = np.std(x, axis = axis, keepdims = True)\n\n\tout = (x - mean_x) / np.maximum(std_x, EPSILON)\n\tout = (out * stddev) + mean\n\n\treturn out\n\ndef mean_std_normalization_multiple_images(x, mean = 0.0, stddev = 1.0, axis = None):\n\n\tif isinstance(axis, tuple):\n\t\tfor a in axis:\n\t\t\tassert (a > 0), \"Axis is out of bound or First axis represent different images.\"\n\telse:\n\t\tassert (axis > 0), \"Axis is out of bound or First axis represent different images.\"\n\n\tout = np.zeros_like(x)\n\n\tfor i in range(x.shape[0]):\n\t\tout[i] = mean_std_normalization(x[i], mean = mean, stddev = stddev, axis = axis - 1)\n\n\treturn out\n\ndef clip_pixel_value(x, l_bound = 0.0, r_bound = 1.0):\n\treturn np.clip(x, a_min = l_bound, a_max = r_bound)\n\ndef save_image(image, path, fmt = None):\n\tis_success = True\n\n\tassert (len(image.shape) in [2, 3]), \"image argument must be 2-D or 3-D\"\n\tif len(image.shape) == 3:\n\t\tassert (image.shape[-1] in [3, 4]), \"if image argument is 3-D then last dimension must be 3 
or 4\"\n\n\tif not os.path.exists(path):\n\t\ttry:\n\t\t\tos.mkdir(os.path.dirname(path))\n\t\texcept:\n\t\t\traise(\"Error in making directory. Try with sudo.\")\n\n\ttry:\n\t\timsave(path, image, format = fmt)\n\texcept:\n\t\tis_success = False\n\n\treturn is_success\n\ndef save_images(images, dirpath, file_name, fmt = None):\n\timages_saved = 0\n\n\tassert (images.shape[0] < len(file_name)), \"Please provide enough image file names.\"\n\n\tfor i in range(images.shape[0]):\n\t\tif save_image(images[i], path = os.path.join(dirpath, file_name[i]), fmt = fmt):\n\t\t\timages_saved += 1\n\n\treturn images_saved\n","repo_name":"BhagyeshVikani/ImagesHelper","sub_path":"images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"74427626807","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport pickle\n\nfrom Reader import Reader\nfrom Preprocessing import nltkSentenceSplit, nltkTokenize\nfrom embeddings.Embeddings import Embeddings, writeEmbeddingsPickle, readEmbeddingsPickle\nfrom embeddings.Vocabulary import createVocabularyFile, createFasttextModel\n\n\ndef runEmbeddingCreationPipeline(settings):\n\n createVocabulary(settings)\n createEmbeddingModels(settings)\n for corpus in (\"train\", \"test\"):\n createEmbeddingsPickle(settings, corpus)\n\ndef createVocabulary(settings):\n if not os.path.exists(settings[\"embeddings\"][\"vocabulary_path\"]):\n sentenceList = list()\n for corpus in (\"train\", \"test\"):\n reader = Reader(dataSettings=settings, corpus=corpus)\n filesRead = reader.loadDataSet()\n for fileName in filesRead:\n sentences = nltkSentenceSplit(filesRead[fileName], verbose=False)\n sentenceList.extend(sentence for sentence in sentences)\n createVocabularyFile(sentenceList, settings[\"embeddings\"][\"vocabulary_path\"], verbose=False)\n\ndef createEmbeddingModels(settings):\n # Create smaller biowordvec embedding models\n if not (os.path.exists(settings[\"embeddings\"][\"biowordvec_original\"]) or os.path.exists(settings[\"embeddings\"][\"biowordvec_normalized\"])):\n print(\"just testing\")\n createFasttextModel(settings[\"embeddings\"][\"vocabulary_path\"], settings[\"embeddings\"][\"wordvec_path\"],\n settings[\"embeddings\"][\"biowordvec_original\"], settings[\"embeddings\"][\"biowordvec_normalized\"])\n\ndef createEmbeddingsPickle(settings, corpus):\n if corpus == \"train\": picklePath = settings[\"embeddings\"][\"train_embeddings_pickle\"]\n elif corpus == \"test\": picklePath = settings[\"embeddings\"][\"test_embeddings_pickle\"]\n\n tokenizedSentenceList = list()\n if not os.path.exists(picklePath):\n reader = Reader(dataSettings=settings, corpus=corpus)\n filesRead = reader.loadDataSet()\n for fileName in filesRead:\n sentences = nltkSentenceSplit(filesRead[fileName], verbose=False)\n for sentence in sentences:\n sentence = nltkTokenize(sentence)\n tokenizedSentenceList.extend([sentence])\n\n embeddings = Embeddings(settings[\"embeddings\"][\"biowordvec_original\"], settings[\"embeddings\"][\"biowordvec_normalized\"],\n int(settings[\"embeddings\"][\"wordvec_size\"]))\n\n embeddingsVec = embeddings.wordvec_concat(tokenizedSentenceList)\n writeEmbeddingsPickle(embeddingsVec, picklePath)\n print(\"Created pickle file {}\".format(picklePath))\n\n\ndef createSentencesFile(settings):\n if not os.path.exists(settings[\"embeddings\"][\"sentences_path\"]):\n sentenceList = list()\n for corpus in (\"train\", \"test\"):\n reader = 
Reader(dataSettings=settings, corpus=corpus)\n filesRead = reader.loadDataSet()\n for fileName in filesRead:\n sentences = nltkSentenceSplit(filesRead[fileName], verbose=False)\n sentenceList.extend(nltkTokenize(sentence) for sentence in sentences)\n\n with open(settings[\"embeddings\"][\"sentences_path\"], 'wb') as pickle_handle:\n pickle.dump(sentenceList, pickle_handle, protocol=4)","repo_name":"ieeta-pt/PatientFM","sub_path":"src/embeddings/Pipeline.py","file_name":"Pipeline.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"19236867227","text":"# -*-coding:utf-8 -*-\n\"\"\"\nGiven two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.\n\nNote:\n\nThe number of elements initialized in nums1 and nums2 are m and n respectively.\nYou may assume that nums1 has enough space (size that is greater or equal to m + n) to hold additional elements from nums2.\nExample:\n\nInput:\nnums1 = [1,2,3,0,0,0], m = 3\nnums2 = [2,5,6], n = 3\n\nOutput: [1,2,2,3,5,6]\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n if m == 0:\n nums1[:] = nums2\n elif n == 0:\n pass\n else:\n nums1_index = nums2_index = 0\n while nums1_index < m + n:\n if nums1[nums1_index] < nums2[nums2_index]:\n nums1_index = nums1_index + 1\n else:\n nums1.insert(nums1_index, nums2[nums2_index])\n nums1_index = nums1_index + 1\n nums2_index = nums2_index + 1\n\n if nums1_index == m + nums2_index:\n nums1[nums1_index:] = nums2[nums2_index:]\n break\n elif nums2_index > n - 1:\n nums1[:] = nums1[0:m + n]\n break\n\n\nif __name__ == '__main__':\n solution = Solution()\n\n nums1_1 = [1, 2, 3]\n m_1 = 3\n nums2_1 = []\n n_1 = 0\n solution.merge(nums1_1, m_1, nums2_1, n_1)\n print(nums1_1)\n\n nums1_2 = [1, 2, 3, 0, 0, 0]\n m_2 = 3\n nums2_2 = [2, 5, 6]\n n_2 = 3\n solution.merge(nums1_2, m_2, nums2_2, n_2)\n print(nums1_2)\n\n nums1_3 = [0]\n m_3 = 0\n nums2_3 = [1]\n n_3 = 1\n solution.merge(nums1_3, m_3, nums2_3, n_3)\n print(nums1_3)\n\n nums1_4 = [1, 0]\n m_4 = 1\n nums2_4 = [2]\n n_4 = 1\n solution.merge(nums1_4, m_4, nums2_4, n_4)\n print(nums1_4)\n\n nums1_5 = [2, 0]\n m_5 = 1\n nums2_5 = [1]\n n_5 = 1\n solution.merge(nums1_5, m_5, nums2_5, n_5)\n print(nums1_5)\n\n nums1_6 = [4,5,6,0,0,0]\n m_6 = 3\n nums2_6 = [1,2,3]\n n_6 = 3\n solution.merge(nums1_6, m_6, nums2_6, n_6)\n print(nums1_6)\n\n","repo_name":"thaisday/Leecode","sub_path":"088_MergeSortedArray.py","file_name":"088_MergeSortedArray.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72862035448","text":"__author__ = 'Stuart Gordon Reid'\n\nfrom AssetSimulator import AssetSimulator\nfrom Barebones import BarebonesOptimizer\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport matplotlib\nimport Portfolio\nimport pandas\nimport numpy\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\n\ndef plot_paths(r, sim):\n \"\"\"\n This method plots a number of asset paths generated using Geometric Brownian Motion\n \"\"\"\n asset_prices = sim.asset_prices(500, returns=r)\n for p in asset_prices:\n plt.plot(p)\n plt.show()\n\n\ndef plot_results(results, labels, ylabel, path):\n plt.figure(figsize=(10.5, 6))\n plt.style.use(\"grayscaleb\")\n plt.ylabel(ylabel)\n 
plt.xlabel(\"Iterations\")\n linestyles = ['--', ':', '-.', '-']\n linewidth = [3.0, 2, 2, 2, 1.5]\n if len(results) == 5:\n for i in range(len(results)):\n plt.plot(results[i], label=labels[i], linestyle=linestyles[i % 4], linewidth=linewidth[i % 5])\n else:\n for i in range(len(results) + 1):\n if i == 0:\n plt.plot([], linestyle=' ')\n else:\n plt.plot(results[i - 1], label=labels[i - 1], linestyle=linestyles[i % 4], linewidth=linewidth[i % 5])\n plt.legend(loc=\"best\")\n plt.savefig(path)\n plt.cla()\n\n\ndef three_dimensional_landscape(returns, corr_m, size, c_e=1.0, c_b=1.0, m_e=1.0, m_b=1.0):\n \"\"\"\n This method plots the fitness landscape for each three methods in three dimensions (surface plot)\n \"\"\"\n step = float(1 / size)\n x_axis = numpy.arange(0.0, 1.0, step)\n y_axis = numpy.arange(0.0, 1.0, step)\n z_axis_nochange = numpy.zeros(shape=(size, size))\n z_axis_repaired = numpy.zeros(shape=(size, size))\n z_axis_penaltym = numpy.zeros(shape=(size, size))\n z_axis_lagrange = numpy.zeros(shape=(size, size))\n for i in range(size):\n for j in range(size):\n x, y = x_axis[i], y_axis[j]\n p = Portfolio.Portfolio(returns, corr_m, numpy.array([x, y]))\n z_axis_nochange[i][j] = p.min_objective()\n z_axis_penaltym[i][j] = p.penalty_objective(c_e, c_b)\n z_axis_lagrange[i][j] = p.lagrange_objective(c_e, c_b, m_e, m_b)\n z_axis_repaired[i][j] = p.repair_objective()\n x_axis, y_axis = numpy.meshgrid(x_axis, y_axis)\n plot_surface(x_axis, y_axis, z_axis_nochange, \"N\")\n plot_surface(x_axis, y_axis, z_axis_repaired, \"R\")\n plot_surface(x_axis, y_axis, z_axis_penaltym, \"P\")\n plot_surface(x_axis, y_axis, z_axis_lagrange, \"L\")\n\n\ndef plot_surface(X, Y, Z, label):\n \"\"\"\n This method actually plots and shows the three dimensional surface\n \"\"\"\n fig = plt.figure(figsize=(15, 10))\n ax = fig.gca(projection='3d')\n surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=0)\n ax.zaxis.set_major_locator(LinearLocator(10))\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n\n fig.colorbar(surf, shrink=0.5, aspect=5)\n '''\n for ii in range(0, 360, 90):\n ax.view_init(elev=10., azim=ii)\n plt.savefig(\"Views/View II \" + label + \" \" + str(ii) + \".png\")\n '''\n plt.show()\n\n\ndef runner_all(n, sigma, delta, mu, time, iterations, simulations, path, ce, cb, le, lb):\n print(\"Experiment\", path, \"starting\")\n asset_simulator = AssetSimulator(delta, sigma, mu, time)\n\n Portfolio.memoizer = {}\n none, penalty, lagrange, repair, preserve, ss = [], [], [], [], [], 30\n none_ve, penalty_ve, lagrange_ve, repair_ve, preserve_ve = [], [], [], [], []\n none_vb, penalty_vb, lagrange_vb, repair_vb, preserve_vb = [], [], [], [], []\n\n for i in range(simulations):\n print(\"Simulation\", i, \"starting\")\n asset_returns = asset_simulator.assets_returns(n)\n corr = pandas.DataFrame(asset_returns).transpose().corr()\n # three_dimensional_landscape(asset_returns, corr, 100)\n\n none_opt = BarebonesOptimizer(ss, asset_returns, corr)\n result, violation_e, violation_b = none_opt.optimize_none(iterations + 1, ce, cb, le, lb)\n none_ve.append(violation_e)\n none_vb.append(violation_b)\n none.append(result)\n print(\"\\tAlgorithm 1 Done\")\n\n lagrange_opt = BarebonesOptimizer(ss, asset_returns, corr)\n result, violation_e, violation_b = lagrange_opt.optimize_penalty(iterations + 1, ce, cb, le, lb)\n penalty_ve.append(violation_e)\n penalty_vb.append(violation_b)\n penalty.append(result)\n print(\"\\tAlgorithm 2 Done\")\n\n lagrange_opt = BarebonesOptimizer(ss, asset_returns, 
corr)\n result, violation_e, violation_b = lagrange_opt.optimize_lagrange(iterations + 1, ce, cb, le, lb)\n lagrange_ve.append(violation_e)\n lagrange_vb.append(violation_b)\n lagrange.append(result)\n print(\"\\tAlgorithm 3 Done\")\n\n repair_opt = BarebonesOptimizer(ss, asset_returns, corr)\n result, violation_e, violation_b = repair_opt.optimize_repair(iterations + 1, ce, cb, le, lb)\n repair_ve.append(violation_e)\n repair_vb.append(violation_b)\n repair.append(result)\n print(\"\\tAlgorithm 4 Done\")\n\n preserve_opt = BarebonesOptimizer(ss, asset_returns, corr)\n result, violation_e, violation_b = preserve_opt.optimize_preserving(iterations + 1, ce, cb, le, lb)\n preserve_ve.append(violation_e)\n preserve_vb.append(violation_b)\n preserve.append(result)\n print(\"\\tAlgorithm 5 Done\")\n\n n_r, n_ve, n_vb = pandas.DataFrame(none), pandas.DataFrame(none_ve), pandas.DataFrame(none_vb)\n r_r, r_ve, r_vb = pandas.DataFrame(repair), pandas.DataFrame(repair_ve), pandas.DataFrame(repair_vb)\n p_r, p_ve, p_vb = pandas.DataFrame(preserve), pandas.DataFrame(preserve_ve), pandas.DataFrame(preserve_vb)\n pr_r, pr_ve, pr_vb = pandas.DataFrame(penalty), pandas.DataFrame(penalty_ve), pandas.DataFrame(penalty_vb)\n l_r, l_ve, l_vb = pandas.DataFrame(lagrange), pandas.DataFrame(lagrange_ve), pandas.DataFrame(lagrange_vb)\n\n n_r.to_csv(path + \"/None Fitness.csv\")\n n_ve.to_csv(path + \"/None Equality.csv\")\n n_vb.to_csv(path + \"/None Boundary.csv\")\n\n r_r.to_csv(path + \"/Repair Fitness.csv\")\n r_ve.to_csv(path + \"/Repair Equality.csv\")\n r_vb.to_csv(path + \"/Repair Boundary.csv\")\n\n p_r.to_csv(path + \"/Preserve Fitness.csv\")\n p_ve.to_csv(path + \"/Preserve Equality.csv\")\n p_vb.to_csv(path + \"/Preserve Boundary.csv\")\n\n pr_r.to_csv(path + \"/Penalty Fitness.csv\")\n pr_ve.to_csv(path + \"/Penalty Equality.csv\")\n pr_vb.to_csv(path + \"/Penalty Boundary.csv\")\n\n l_r.to_csv(path + \"/Lagrangian Fitness.csv\")\n l_ve.to_csv(path + \"/Lagrangian Equality.csv\")\n l_vb.to_csv(path + \"/Lagrangian Boundary.csv\")\n\n plot_results([n_r.mean(), r_r.mean(), pr_r.mean(), l_r.mean(), p_r.mean()],\n [\"A1 (No Method)\", \"A2 (Particle Repair Method)\", \"A3 (Penalty Function Method)\",\n \"A4 (Augmented Lagrangian Method)\", \"A5 (Preserving Feasibility Method)\"],\n \"Average Global Best Fitness f()\", path + \"/1 Fitness\")\n\n plot_results([r_r.mean(), pr_r.mean(), l_r.mean(), p_r.mean()],\n [\"A2 (Particle Repair Method)\", \"A3 (Penalty Function Method)\",\n \"A4 (Augmented Lagrangian Method)\", \"A5 (Preserving Feasibility Method)\"],\n \"Average Global Best Fitness f()\", path + \"/1 Fitness Ex None\")\n\n plot_results([n_ve.mean(), r_ve.mean(), pr_ve.mean(), l_ve.mean(), p_ve.mean()],\n [\"A1 (No Method)\", \"A2 (Particle Repair Method)\", \"A3 (Penalty Function Method)\",\n \"A4 (Augmented Lagrangian Method)\", \"A5 (Preserving Feasibility Method)\"],\n \"Average Global Best Equality Constraint Violation, C_E()\", path + \"/2 Equality Violations\")\n\n plot_results([r_ve.mean(), pr_ve.mean(), l_ve.mean(), p_ve.mean()],\n [\"A2 (Particle Repair Method)\", \"A3 (Penalty Function Method)\",\n \"A4 (Augmented Lagrangian Method)\", \"A5 (Preserving Feasibility Method)\"],\n \"Average Global Best Equality Constraint Violation, C_E()\", path + \"/2 Equality Violations Ex None\")\n\n plot_results([n_vb.mean(), r_vb.mean(), pr_vb.mean(), l_vb.mean(), p_vb.mean()],\n [\"A1 (No Method)\", \"A2 (Particle Repair Method)\", \"A3 (Penalty Function Method)\",\n \"A4 (Augmented Lagrangian 
Method)\", \"A5 (Preserving Feasibility Method)\"],\n \"Average Global Best Boundary Constraint Violation, C_B()\", path + \"/3 Boundary Violations\")\n\n plot_results([r_vb.mean(), pr_vb.mean(), l_vb.mean(), p_vb.mean()],\n [\"A2 (Particle Repair Method)\", \"A3 (Penalty Function Method)\",\n \"A4 (Augmented Lagrangian Method)\", \"A5 (Preserving Feasibility Method)\"],\n \"Average Global Best Boundary Constraint Violation, C_B()\", path + \"/3 Boundary Violations Ex None\")\n\n plot_results([n_r.std(), r_r.std(), pr_r.std(), l_r.std(), p_r.std()],\n [\"A1 (No Method)\", \"A2 (Particle Repair Method)\", \"A3 (Penalty Function Method)\",\n \"A4 (Augmented Lagrangian Method)\", \"A5 (Preserving Feasibility Method)\"],\n \"Average Global Best Fitness Standard Deviation f()\", path + \"/4 Fitness Stdev\")\n\n plot_results([r_r.std(), pr_r.std(), l_r.std(), p_r.std()],\n [\"A2 (Particle Repair Method)\", \"A3 (Penalty Function Method)\",\n \"A4 (Augmented Lagrangian Method)\", \"A5 (Preserving Feasibility Method)\"],\n \"Average Global Best Fitness Standard Deviation f()\", path + \"/4 Fitness Stdev Ex None\")\n\n\ndef surface_plotter(n, sigma, delta, mu, time, c_e, c_b, m_e, m_b):\n asset_simulator = AssetSimulator(delta, sigma, mu, time)\n asset_returns = asset_simulator.assets_returns(n)\n corr = pandas.DataFrame(asset_returns).transpose().corr()\n three_dimensional_landscape(asset_returns, corr, 200, c_e, c_b, m_e, m_b)\n\n\ndef run():\n matplotlib.rc('font', family='Arial')\n coeff_e, coeff_b, lagrange_e, lagrange_b = 2.0, 2.0, 0.5, 0.5\n runner_all(4, 0.125, float(1 / 252), 0.08, 500, 80, 60, \"Results (A)\", coeff_e, coeff_b, lagrange_e, lagrange_b)\n runner_all(8, 0.125, float(1 / 252), 0.08, 500, 80, 60, \"Results (B)\", coeff_e, coeff_b, lagrange_e, lagrange_b)\n runner_all(16, 0.125, float(1 / 252), 0.08, 500, 80, 60, \"Results (C)\", coeff_e, coeff_b, lagrange_e, lagrange_b)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"malliwi88/SimplexProjectors","sub_path":"Python/v0.1/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":10392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"40425951499","text":"from PyQt4 import QtGui, QtCore\n\nfrom library.InDocTable import CInDocTableView\nfrom models.RCTableModel import CRCLocItemDelegate, CRCLocItemFieldDelegate\n\nclass CInDocTableViewModifyPopup(CInDocTableView):\n def addPopupAction(self, action):\n self.popupMenu().addAction(action)\n\n def initPopupAction(self, act, objectName, actName, slot):\n act = QtGui.QAction(actName, self)\n act.setObjectName(objectName)\n self.connect(act, QtCore.SIGNAL('triggered()'), slot)\n self.addPopupAction(act)\n return act\n\n\n\nclass CRCTableFieldsView(CInDocTableViewModifyPopup):\n def __init__(self, parent):\n CInDocTableView.__init__(self, parent)\n self.setItemDelegate(CRCLocItemFieldDelegate(self))\n self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)\n self.setDragDropMode(QtGui.QAbstractItemView.InternalMove)\n self._actDelete = None\n self._actEdit = None\n\n self.addPopupDeleteCol()\n\n def setColSize(self):\n self.horizontalHeader().setStretchLastSection(False)\n self.horizontalHeader().setResizeMode(0, QtGui.QHeaderView.Stretch)\n\n def addPopupDeleteCol(self):\n self._actDelete = self.initPopupAction(self._actDelete, 'actDelete', u'Удалить', self.on_actDelete_triggered)\n\n def addPopupEdit(self):\n self._actEdit = self.initPopupAction(self._actEdit, 'actEdit', u'Изменить', 
self.on_actEdit_triggered)\n\n    def on_actDelete_triggered(self):\n        row = self.currentIndex().row()\n        self.model().deleteItem(row)\n\n    def on_actEdit_triggered(self):\n        self.model().setExtededEditMode(self.currentIndex().row())\n\nclass CRCTableParamsView(CInDocTableViewModifyPopup):\n    def __init__(self, parent):\n        CInDocTableView.__init__(self, parent)\n        self._actDelete = None\n\n        self.addPopupDeleteCol()\n\n    def addPopupDeleteCol(self):\n        self._actDelete = self.initPopupAction(self._actDelete, 'actDelete', u'Удалить', self.on_actDelete_triggered)\n\n    def on_actDelete_triggered(self):\n        row = self.currentIndex().row()\n        self.model().deleteItem(row)\n\nclass CRCTableCapView(CInDocTableViewModifyPopup):\n    def __init__(self, parent):\n        CInDocTableView.__init__(self, parent)\n        self.setItemDelegate(CRCLocItemDelegate(self))\n        self.buffer = []\n        self.verticalHeader().show()\n        self.verticalHeader().setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\n        self.horizontalHeader().setSelectionBehavior(QtGui.QAbstractItemView.SelectColumns)\n        self.horizontalHeader().setSortIndicatorShown(False)\n        self.horizontalHeader().setStretchLastSection(False)\n\n        self._actAddColBeforeCurrent = None\n        self._actAddColAfterCurrent = None\n        self._actDeleteCol = None\n        self._actAddRowBeforeCurrent = None\n        self._actAddRowAfterCurrent = None\n        self._actDeleteRow = None\n        self._actSpan = None\n        self._actClearCurrentSpan = None\n        self._actAddGroupRow = None\n        self._actDeleteGroupRow = None\n\n        self.addPopupAddColBeforeCurrent()\n        self.addPopupAddColAfterCurrent()\n        self.addPopupDeleteCol()\n        self.addPopupAddRowBeforeCurrent()\n        self.addPopupAddRowAfterCurrent()\n        self.addPopupDeleteRow()\n        self.addPopupSpan()\n        self.addPopupClearCurrentSpan()\n        self.addPopupAddGroupRow()\n        self.addPopupDeleteGroupRow()\n\n    def addPopupAddColBeforeCurrent(self):\n        self._actAddColBeforeCurrent = self.initPopupAction(self._actAddColBeforeCurrent, 'actAddColBeforeCurrent', u'Вставить столбец до', self.on_actAddColBeforeCurrent_triggered)\n\n    def addPopupAddColAfterCurrent(self):\n        self._actAddColAfterCurrent = self.initPopupAction(self._actAddColAfterCurrent, 'actAddColAfterCurrent', u'Вставить столбец после', self.on_actAddColAfterCurrent_triggered)\n\n    def addPopupDeleteCol(self):\n        self._actDeleteCol = self.initPopupAction(self._actDeleteCol, 'actDeleteCol', u'Удалить колонку', self.on_actDeleteCol_triggered)\n\n    def addPopupAddRowBeforeCurrent(self):\n        self._actAddRowBeforeCurrent = self.initPopupAction(self._actAddRowBeforeCurrent, 'actAddRowBeforeCurrent', u'Вставить строку до', self.on_actAddRowBeforeCurrent_triggered)\n\n    def addPopupAddRowAfterCurrent(self):\n        self._actAddRowAfterCurrent = self.initPopupAction(self._actAddRowAfterCurrent, 'actAddRowAfterCurrent', u'Вставить строку после', self.on_actAddRowAfterCurrent_triggered)\n\n    def addPopupDeleteRow(self):\n        self._actDeleteRow = self.initPopupAction(self._actDeleteRow, 'actDeleteRow', u'Удалить строку', self.on_actDeleteRow_triggered)\n\n    def addPopupSpan(self):\n        self._actSpan = self.initPopupAction(self._actSpan, 'actSpan', u'Объединить ячейки', self.on_actSpan_triggered)\n\n    def addPopupClearCurrentSpan(self):\n        self._actClearCurrentSpan = self.initPopupAction(self._actClearCurrentSpan, 'actClearCurrentSpan', u'Разделить ячейки', self.on_actClearCurrentSpan_triggered)\n\n    def addPopupAddGroupRow(self):\n        self._actAddGroupRow = self.initPopupAction(self._actAddGroupRow, 'actAddGroupRow', u'Добавить группировку', self.on_actAddGroupRow_triggered)\n\n    def 
addPopupDeleteGroupRow(self):\n self._actDeleteGroupRow = self.initPopupAction(self._actDeleteGroupRow, 'actDeleteGroupRow', u'Удалить группировку', self.on_actDeleteGroupRow_triggered)\n\n def on_actAddColBeforeCurrent_triggered(self):\n row, column, rowCount, columnCount = self.getSelectionRectangle()\n self.model().addColumn(column)\n self.model().reset()\n self.resizeColumnsToContents()\n\n def on_actAddColAfterCurrent_triggered(self):\n row, column, rowCount, columnCount = self.getSelectionRectangle()\n self.model().addColumn(column + columnCount)\n self.model().reset()\n self.resizeColumnsToContents()\n\n def on_actDeleteCol_triggered(self):\n row, column, rowCount, columnCount = self.getSelectionRectangle()\n self.model().deleteColumn(column + 1)\n self.model().reset()\n self.resizeColumnsToContents()\n\n def on_actAddRowBeforeCurrent_triggered(self):\n row, column, rowCount, columnCount = self.getSelectionRectangle()\n self.model().addRow(row)\n self.model().reset()\n self.resizeColumnsToContents()\n\n def on_actAddRowAfterCurrent_triggered(self):\n row, column, rowCount, columnCount = self.getSelectionRectangle()\n self.model().addRow(row + rowCount)\n self.model().reset()\n self.resizeColumnsToContents()\n\n def on_actDeleteRow_triggered(self):\n indexes = self.selectionModel().selectedRows()\n for index in indexes:\n row = index.row()\n self.model().deleteRow(row)\n self.model().reset()\n self.resizeColumnsToContents()\n\n def on_actSpan_triggered(self):\n row, column, rowCount, columnCount = self.getSelectionRectangle()\n\n self.setSpan(row, column, rowCount, columnCount)\n\n index = self.model().createIndex(row, column)\n item = self.model().getItem(index)\n item.setRowSpan(rowCount)\n item.setColumnSpan(columnCount)\n self.model().setItem(index, item)\n\n def on_actClearCurrentSpan_triggered(self):\n indexes = self.selectionModel().selectedIndexes()\n for index in indexes:\n item = self.model().getItem(index)\n item.setRowSpan(1)\n item.setColumnSpan(1)\n self.model().setItem(index, item)\n self.spanUpdate()\n\n def on_actAddGroupRow_triggered(self):\n self.model().addGroupRow()\n self.model().reset()\n self.resizeColumnsToContents()\n\n def on_actDeleteGroupRow_triggered(self):\n row = self.currentIndex().row()\n self.model().deleteGroupRow(row)\n self.model().reset()\n self.resizeColumnsToContents()\n\n def on_popupMenu_aboutToShow(self):\n row = self.getSelectionRectangle()[0]\n checkSelectionEqCurrentSpan = self.checkSelectionEqCurrentSpan()\n checkSelectionContainsFieldRow = self.checkSelectionContainsFieldRow()\n checkSelectionContainsGroupRow = self.checkSelectionContainsGroupRow()\n multiSelection = bool(len(self.selectionModel().selectedIndexes()) > 1)\n singleSelection = bool(len(self.selectionModel().selectedIndexes()) == 1)\n selectedColumns = bool(len(self.selectionModel().selectedColumns()))\n selectedRows = bool(len(self.selectionModel().selectedRows()))\n selectedOneRow = self.checkSelectedOneRow()\n fieldRow = bool(row == self.model()._fieldRow)\n groupRow = self.model().isGroupRow(row)\n capRow = not fieldRow and not groupRow\n\n if self._actSpan:\n self._actSpan.setEnabled(multiSelection and not checkSelectionEqCurrentSpan\n and not checkSelectionContainsFieldRow\n and (not checkSelectionContainsGroupRow or (selectedOneRow and bool(groupRow))))\n if self._actClearCurrentSpan:\n self._actClearCurrentSpan.setEnabled(multiSelection and checkSelectionEqCurrentSpan)\n if self._actDeleteCol:\n self._actDeleteCol.setEnabled(singleSelection)\n if 
self._actDeleteRow:\n self._actDeleteRow.setEnabled(selectedRows and not fieldRow)\n if self._actAddRowAfterCurrent:\n self._actAddRowAfterCurrent.setEnabled((singleSelection or checkSelectionEqCurrentSpan) and capRow)\n if self._actAddColBeforeCurrent:\n self._actAddColBeforeCurrent.setEnabled(singleSelection or checkSelectionEqCurrentSpan)\n if self._actAddColAfterCurrent:\n self._actAddColAfterCurrent.setEnabled(singleSelection or checkSelectionEqCurrentSpan)\n if self._actAddRowBeforeCurrent:\n self._actAddRowBeforeCurrent.setEnabled((singleSelection or checkSelectionEqCurrentSpan) and capRow)\n if self._actAddGroupRow:\n self._actAddGroupRow.setEnabled(True)\n if self._actDeleteGroupRow:\n self._actDeleteGroupRow.setEnabled(bool(groupRow) and singleSelection)\n\n def checkSelectionEqCurrentSpan(self):\n row, column, rowCount, columnCount = self.getSelectionRectangle()\n index = self.model().createIndex(row, column)\n item = self.model().getItem(index)\n if item and (rowCount == item.rowSpan()) and (columnCount == item.columnSpan()):\n return True\n return False\n\n def checkSelectionContainsFieldRow(self):\n row, column, rowCount, columnCount = self.getSelectionRectangle()\n fieldRow = self.model()._fieldRow\n if fieldRow < row + rowCount and fieldRow >= row:\n return True\n return False\n\n def checkSelectionContainsGroupRow(self):\n row, column, rowCount, columnCount = self.getSelectionRectangle()\n for idx in range(row, row + rowCount):\n if self.model().isGroupRow(idx):\n return True\n return False\n\n def checkSelectedOneRow(self):\n row, column, rowCount, columnCount = self.getSelectionRectangle()\n return bool(rowCount == 1)\n\n def getSelectionRectangle(self):\n indexes = self.selectionModel().selectedIndexes()\n if not indexes:\n return 0, 0, 0, 0\n rows = [index.row() for index in indexes]\n columns = [index.column() for index in indexes]\n minRow = min(rows)\n minColumn = min(columns)\n rowCount = max(rows) - min(rows) + 1\n columnCount = max(columns) - min(columns) + 1\n return minRow, minColumn, rowCount, columnCount\n\n def spanUpdate(self):\n self.clearSpans()\n for row, items in enumerate(self.model().items()):\n for col, item in items.items():\n self.setSpan(row, col, item.rowSpan(), item.columnSpan())\n\n def copy(self):\n self.buffer = {}\n indexes = self.selectionModel().selectedIndexes()\n row, column, rowCount, columnCount = self.getSelectionRectangle()\n for index in indexes:\n self.buffer.setdefault(index.row() - row, {})[index.column() - column] = self.model()._items[index.row()][index.column()]\n self.buffer\n\n def paste(self):\n curRow, curColumn, rowCount, columnCount = self.getSelectionRectangle()\n for row, rowItems in self.buffer.items():\n for column, item in rowItems.items():\n newRow = curRow + row\n newColumn = curColumn + column\n if newRow < self.model().rowCount() and newColumn < self.model().columnCount():\n self.pasteCell(newRow, newColumn, item)\n self.model().reset()\n self.resizeColumnsToContents()\n\n\n def pasteCell(self, row, column, item):\n oldItem = self.model().getItemEx(row, column)\n if item._type.startswith('g') != oldItem._type.startswith('g') and item._type != oldItem._type:\n return\n oldItem._alignment = item._alignment\n oldItem._bold = item._bold\n oldItem._name = item._name\n oldItem._columnSpan = item._columnSpan\n oldItem._rowSpan = item._rowSpan\n oldItem._value = item._value\n oldItem._readOnly = item._readOnly\n\n def keyPressEvent(self, event):\n key = event.key()\n text = unicode(event.text())\n if 
event.matches(QtGui.QKeySequence.Copy):\n            event.ignore()\n            self.copy()\n        elif event.matches(QtGui.QKeySequence.Paste):\n            event.ignore()\n            self.paste()\n        else:\n            CInDocTableViewModifyPopup.keyPressEvent(self, event)","repo_name":"dio4/vista_1","sub_path":"Reports/ReportsConstructor/RCTableView.py","file_name":"RCTableView.py","file_ext":"py","file_size_in_byte":13868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"73790403767","text":"from util import average_around, thresholding_algo\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\n\ndef moving_window_SLR(f, window_size=100):\n    models = []\n    x = np.arange(window_size).reshape((-1, 1))\n    for left in np.asarray(range(len(f) // window_size)) * window_size:\n        slr = LinearRegression()\n        slr.fit(x, f[left:left + window_size])\n        models.append({'slope': slr.coef_,\n                       'intercept': slr.intercept_,\n                       'score': slr.score(x, f[left:left + window_size])})\n    return models\n\n\ndef top_finder(f, window_size=100):\n    models = moving_window_SLR(f, window_size=window_size)\n    slopes = np.asarray([model['slope'] for model in models])\n    ignore_from = (len(slopes) // 5) * 4\n    peak = np.argmax(slopes[:ignore_from])\n    threshold = max(slopes) / 5\n    dips = []\n    in_dip = False\n    for index, slope in enumerate(slopes[peak:ignore_from]):\n        if not in_dip:\n            if slope < threshold:\n                in_dip = True\n                dips.append([index + peak])\n        else:\n            if slope >= threshold:\n                in_dip = False\n            else:\n                # extend the dip currently being tracked\n                dips[-1].append(index + peak)\n    # dip starts are window indices; scale by window_size to map back to samples\n    return int(dips[np.argmax([len(dip) for dip in dips])][0] * window_size - window_size / 2)\n\n#def top_finder_2(f,\n\n\ndef get_first_trough_index(f, last=False, debug=False):\n    \"\"\" Tries to find stationary/return point of trace including pulling and \n    
Looks at standard deviation of a running mean and signals at\n abrupt drops.\n \"\"\"\n stds = []\n for i in range(25, len(f) - 25):\n std = average_around(f, i, half_n=25)[\"std\"]\n if last:\n stds.insert(0, std)\n else:\n stds.append(std)\n\n div = 4\n peaksign = thresholding_algo(stds, int(len(f) / div), 4., 0)[\"signals\"]\n while min(peaksign) > -1:\n div = div + 1\n peaksign = thresholding_algo(stds, int(len(f) / div), 4., 0)[\"signals\"]\n if debug:\n print(div)\n if last:\n print(len(f) - np.arange(25, len(stds) + 25)[peaksign <= -1][0])\n else:\n print(np.arange(25, len(stds) + 25)[peaksign <= -1][0])\n if last:\n return len(f) - np.arange(25, len(stds) + 25)[peaksign <= -1][0]\n return np.arange(25, len(stds) + 25)[peaksign <= -1][0]\n\n\ndef find_transitions(y: np.ndarray, noise_estimation_window: tuple = None):\n \"\"\" Tries to find unfolding events by looking for negative outliers in\n force change that exceed by a factor of background noise.\n Thanks goes out to Christopher Battle for providing the original code.\n \"\"\"\n EPS = 1e-4 # SNR stabilization factor\n\n # Magic numbers\n SNR_SCALE_FACTOR = 10\n MIN_OUTLIER_FACTOR = 1.5\n MAX_OUTLIER_FACTOR = 4.5\n MIN_PERCENTILE = 10\n\n # Get noise estimation window\n if noise_estimation_window is None:\n end_slice = max(int(len(y)/10), 3)\n s = slice(0, end_slice)\n else:\n s = slice(*noise_estimation_window)\n\n # Calculate outlier threshold\n snr = (y.max() - y.min()) / (y[s].std() + EPS)\n outlier_factor = min(max(snr/SNR_SCALE_FACTOR, MIN_OUTLIER_FACTOR),\n MAX_OUTLIER_FACTOR)\n\n # Find outliers that deviate below the threshold (since force transitions are always negative in slope)\n dy = np.diff(y)\n low_percentile = np.nanpercentile(dy, MIN_PERCENTILE)\n median_low_diff = np.nanmedian(dy) - low_percentile\n outlier_threshold = low_percentile - outlier_factor * median_low_diff\n\n where = np.where(dy < outlier_threshold)[0]\n if len(where) > 1:\n for i in reversed(range(1, len(where))):\n if where[i] - where[i - 1] <= 5: # 5 is arbitrary guess\n where = np.delete(where, i)\n\n return where, outlier_threshold\n\n\ndef plot_events(fdcurves):\n \"\"\" Constructs a plot for each member of fdcurves which highlights events of\n interest and targets for fitting.\n \"\"\"\n plt.figure(figsize=(8, 24))\n i = 1\n for key, val in fdcurves.items():\n fdata = val['force_data']\n unfolds = list(val['unfolds'])\n unfolds.insert(0, 0)\n legs = val['legs']\n top = val['top']\n plt.subplot(len(fdcurves), 1, i)\n plt.plot(np.arange(len(fdata)), fdata, c='tab:blue')\n for j in range(1, len(unfolds)):\n #plt.plot(np.arange(unfolds[j-1]+5, unfolds[j]),\n #fdata[unfolds[j-1]+5:unfolds[j]])\n plt.plot(np.arange(unfolds[j], unfolds[j]+5),\n fdata[unfolds[j]:unfolds[j]+5], c='tab:orange')\n\n for leg in legs:\n plt.plot(np.arange(len(fdata))[leg],\n fdata[leg], c='tab:green')\n plt.plot(np.arange(top[0], top[1]), fdata[top[0]:top[1]], c='tab:red')\n\n i += 1\n\ndef spline_residuals(y, k=3, s=1000):\n \"\"\" Exaggerate unfolding events in data `y` by subtracting a polynomial\n spline fit (`scipy.interpolate.UnivariateSpline`), returning the residuals.\n \n # Arguments:\n - y: array of timeseries data\n - k: degree of polynomial. defaults to 3, i.e. 
cubic\n - s: smoothing factor.\n \"\"\"\n from scipy.interpolate import UnivariateSpline\n x = np.arange(len(y))\n spline = UnivariateSpline(x,y,k=k,s=s)\n\n return y - spline(x)\n\n","repo_name":"kaveeken/tweez-CV","sub_path":"event_finding.py","file_name":"event_finding.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24625214236","text":"\nimport gevent\nfrom gevent import monkey\nimport requests\nfrom bs4 import BeautifulSoup\nimport multiprocessing\nimport time\nimport os\n\n\n \ndef create_url(base_url):\n count = 0\n urls = []\n while count >= 0 and count <= 250:\n page_url = base_url + '?start=' + str(count) + '&filter='\n count += 25\n urls.append(page_url)\n\n return urls\n \ndef run(url):\n header = {'Referer': 'https://www.douban.com/',\n 'User-Agent':\n 'ozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}\n res = requests.get(url, headers=header)\n soup = BeautifulSoup(res.text, 'html.parser')\n for contents in soup.select('.info'):\n if contents.select('.hd') != []:\n titles = ''.join(contents.select('.hd')[0].text.split())\n # print(titles)\n if contents.select('.bd p') != []:\n peoples = contents.select('.bd p')[0]\n name = peoples.contents[0].strip()\n addrs = peoples.contents[2].strip()\n # print(name)\n # print(addrs)\n score = contents.select('.bd .star .rating_num')[0].text\n numbers = contents.select('.bd .star span')[3].text # .contents[6]\n # print (score)\n # print(numbers)\n if contents.select('.bd .quote .inq') != []:\n message = contents.select('.bd .quote .inq')[0].text\n # print(message)\n\n content = [titles, name, addrs,\n score, numbers, message]\n\n # with open('C:\\\\Users\\\\fred\\\\Desktop\\\\douban.txt', 'a', encoding='utf-8') as file:\n # for each in content:\n # file.write(each)\n\n # file.write('\\n')\n # file.write('\\n')\n # file.write('\\n')\n # print()\n for each in content:\n print(each)\n print('\\n')\n print('-------------------------')\n print('\\n\\n')\n\n\ndef main():\n\n # st = time.time()\n monkey.patch_all()\n\n base = 'https://movie.douban.com/top250'\n urls = create_url(base)\n gens = []\n for each in urls:\n g = gevent.spawn(run, each)\n gens.append(g)\n\n gevent.joinall(gens)\n\n # end = time.time()\n\n # print(end - st)\n\nif __name__ == '__main__':\n main()","repo_name":"yangjp22/web-scraping-projs","sub_path":"multi_tasks/concurrent_douban.py","file_name":"concurrent_douban.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"16610302737","text":"from Rdf_thing import Rdf_thing\r\nfrom utlis import xstr, obj_print, print_and_write\r\n\r\nglobal_skills = []\r\n\r\nglobal_companies = []\r\n\r\nglobal_schools = []\r\n\r\n\r\nclass Member(Rdf_thing):\r\n def __init__(self, id, url, about):\r\n super().__init__(id)\r\n self.URL = url\r\n self.about = about\r\n if self.URL:\r\n self.URL = xstr(self.URL.strip())\r\n if self.about:\r\n self.about = xstr(self.about.strip())\r\n self.about = self.about.replace(\"\\n\", \" \")\r\n self.about = self.about.replace(\"\\r\", \" \")\r\n self.about = self.about.replace('\"', \" \")\r\n\r\n\r\nclass MyPerson(Member):\r\n def __init__(self, id, url, about, location, firstName, secondName):\r\n super().__init__(id, url, about)\r\n self.first_name = firstName\r\n self.second_name = secondName\r\n self.location = location\r\n\r\n 
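# Relationship collections, populated later through the add_* helper methods below.\r\n        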
self.working_experiences = []\r\n self.education_experiences = []\r\n self.skills = []\r\n self.interests = []\r\n\r\n def add_working_experience(self,exp):\r\n self.working_experiences.append(exp)\r\n\r\n def add_education_experience(self,exp):\r\n self.education_experiences.append(exp)\r\n\r\n def add_skill(self, skill):\r\n self.skills.append(skill)\r\n\r\n def add_interest(self,interest):\r\n self.interests.append(interest)\r\n\r\n def get_rdf_id(self):\r\n return 'lkn:person' + xstr(self.id)\r\n\r\n def print_rdf_info(self):\r\n rdf_id = self.get_rdf_id()\r\n print_and_write(rdf_id + \" rdf:type \" + \"lkn:Person.\")\r\n if self.first_name:\r\n print_and_write(rdf_id + \" lkn:firstName \" + obj_print(self.first_name))\r\n if self.second_name:\r\n print_and_write(rdf_id + \" lkn:secondName \" + obj_print(self.second_name))\r\n if self.location:\r\n print_and_write(rdf_id + \" lkn:location \" + obj_print(self.location))\r\n if self.URL:\r\n print_and_write(rdf_id + \" lkn:URL \" + obj_print(self.URL))\r\n if self.about:\r\n print_and_write(rdf_id + \" lkn:about \" + obj_print(self.about))\r\n for exp in self.working_experiences:\r\n exp.print_rdf_info()\r\n print_and_write(rdf_id + \" lkn:hasWorkingExperience \" + exp.get_rdf_id()+ \".\")\r\n for edu in self.education_experiences:\r\n edu.print_rdf_info()\r\n print_and_write(rdf_id + \" lkn:hasEducationExperience \" + edu.get_rdf_id()+ \".\")\r\n for skill in self.skills:\r\n skill.print_rdf_info()\r\n print_and_write(rdf_id + \" lkn:hasSkill \" + skill.get_rdf_id()+ \".\")\r\n for interest in self.interests:\r\n interest.print_rdf_info()\r\n print_and_write(rdf_id + \" lkn:hasInterest \" + interest.get_rdf_id()+ \".\")\r\n\r\n\r\nclass Place(Member):\r\n def __init__(self, id, url, about, placeName, website, phone, industry, companySize, headquarter, type, founded, speciality):\r\n super().__init__(id, url, about)\r\n\r\n self.placeName = placeName\r\n self.website = website\r\n self.phone = phone\r\n self.industry = industry\r\n self.companySize = companySize\r\n self.headquarter = headquarter\r\n self.type = type\r\n self.founded = founded\r\n self.speciality = speciality\r\n\r\n if placeName:\r\n self.placeName = xstr( self.placeName.strip())\r\n if website:\r\n self.website = xstr(self.website.strip())\r\n if phone:\r\n self.phone = xstr(self.phone.strip())\r\n self.phone = self.phone.replace(\"\\n\", \" \")\r\n self.phone = self.phone.replace(\"\\r\", \" \")\r\n if industry:\r\n self.industry = xstr(self.industry.strip())\r\n if companySize:\r\n self.companySize = xstr(self.companySize.strip())\r\n if headquarter:\r\n self.headquarter = xstr(self.headquarter.strip())\r\n if type:\r\n self.type = xstr(self.type.strip())\r\n if founded:\r\n self.founded = xstr(self.founded.strip())\r\n if speciality:\r\n self.speciality = xstr(self.speciality.strip())\r\n\r\n def print_rdf_info(self):\r\n rdf_id = self.get_rdf_id()\r\n if self.website:\r\n print_and_write(rdf_id + \" lkn:website \" + obj_print(self.website))\r\n if self.phone:\r\n print_and_write(rdf_id + \" lkn:phone \" + obj_print(self.phone))\r\n if self.companySize:\r\n print_and_write(rdf_id + \" lkn:companySize \" + obj_print(self.companySize))\r\n if self.about:\r\n print_and_write(rdf_id + \" lkn:about \" + obj_print(self.about))\r\n if self.industry:\r\n print_and_write(rdf_id + \" lkn:industry \" + obj_print(self.industry))\r\n if self.headquarter:\r\n print_and_write(rdf_id + \" lkn:headquarter \" + obj_print(self.headquarter))\r\n if self.type:\r\n print_and_write(rdf_id 
+ \" lkn:type \" + obj_print(self.type))\r\n if self.founded:\r\n print_and_write(rdf_id + \" lkn:founded \" + obj_print(self.founded))\r\n if self.speciality:\r\n print_and_write(rdf_id + \" lkn:speciality \" + obj_print(self.speciality))\r\n\r\n\r\nclass Company(Place):\r\n def __init__(self, id, url, about, place_name, website, phone, industry, companySize, headquarter, type, founded, speciality):\r\n super().__init__(id, url, about, place_name, website, phone, industry, companySize, headquarter, type, founded, speciality)\r\n\r\n def print_rdf_info(self):\r\n\r\n if self.placeName not in [s.placeName for s in global_companies]:\r\n global_companies.append(self)\r\n print_and_write(self.get_rdf_id() + \" rdf:type \" + \"lkn:Company.\")\r\n if self.placeName:\r\n print_and_write(self.get_rdf_id() + \" lkn:placeName \" + obj_print(self.placeName))\r\n super().print_rdf_info()\r\n else:\r\n for company in global_companies:\r\n if company.placeName == self.placeName:\r\n self.id = company.id\r\n\r\n def get_rdf_id(self):\r\n return 'lkn:company' + xstr(self.id)\r\n\r\n\r\nclass School(Place):\r\n def __init__(self, id, url, about, place_name, website, phone, industry, companySize, headquarter, type, founded, speciality):\r\n super().__init__(id, url, about, place_name, website, phone, industry, companySize, headquarter, type, founded, speciality)\r\n\r\n def print_rdf_info(self):\r\n if self.placeName not in [s.placeName for s in global_schools]:\r\n global_schools.append(self)\r\n print_and_write(self.get_rdf_id() + \" rdf:type \" + \"lkn:School.\")\r\n if self.placeName:\r\n print_and_write(self.get_rdf_id() + \" lkn:placeName \" + obj_print(self.placeName))\r\n super().print_rdf_info()\r\n else:\r\n for school in global_schools:\r\n if school.placeName == self.placeName:\r\n self.id = school.id\r\n\r\n\r\n def get_rdf_id(self):\r\n return 'lkn:school' + xstr(self.id)","repo_name":"divanoLetto/LinkedinRDF","sub_path":"project/Member.py","file_name":"Member.py","file_ext":"py","file_size_in_byte":6989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24545902272","text":"from typing import Dict, Any, List\n\n\n# quick & simple replacement for the pyjq basics of attribute chained together to search in a dict\n# because pyjq does not install properly on Windows\ndef check_dict_against_attributes(dict_or_final: Dict[str, Any] | Any, parts: List[str]):\n if not parts:\n return True, dict_or_final\n else:\n if not isinstance(dict_or_final, dict) or parts[0] not in dict_or_final:\n return False, parts[0]\n else:\n return check_dict_against_attributes(dict_or_final[parts[0]], parts[1:])\n\n\ndef check_dict_against_attributes_string(dict_or_final: Dict[str, Any] | Any, attributes_string: str):\n split = attributes_string.split('.')\n return check_dict_against_attributes(dict_or_final, split[1:] if split[0] == '' else split)\n\ndef set_dict_against_attributes_string(dict_or_final: Dict[str, Any] | Any, attributes_string: str, value: Any):\n split = attributes_string.split('.')\n it = dict_or_final\n for s in (split[1:] if split[0] == '' else split)[:-1]: # the latest one is the value to set\n it = it.setdefault(s, {})\n it[split[-1]] = value\n","repo_name":"Alkanoor/core","sub_path":"core/core99_misc/fakejq/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70855204410","text":"import os.path as 
osp\n\nimport cv2\n\nfrom applications.common.path_global import md5_name\n\n\ndef median_blur(src_dir, save_dir, names):\n    temps = list()\n    for name in names:\n        Gn = cv2.imread(osp.join(src_dir, name))\n        Gf = cv2.medianBlur(Gn, 3)\n        new_name = md5_name(name)\n        cv2.imwrite(osp.join(save_dir, new_name), Gf)\n        temps.append(new_name)\n    return temps\n","repo_name":"PaddleCV-SIG/GeoView","sub_path":"backend/applications/image_processing/median_blur.py","file_name":"median_blur.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"77"}
+{"seq_id":"11993609074","text":"from discord.ext import commands as cmd\nimport discord\nfrom datetime import datetime\nimport pytz\nfrom prettytable import PrettyTable\n\nfrom utils import formatter, helpers, checks\n\n\nclass Users(cmd.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    # Cog-level global check; discord.py invokes `bot_check` before every command.\n    async def bot_check(self, ctx):\n        entry = await ctx.db.users.find_one({\"id\": ctx.author.id, \"blacklist.state\": True})\n        if entry is None:\n            return True\n\n        raise cmd.CommandError(\"Sorry, **you are blacklisted**.\\n\\n\"\n                               f\"**Reason**: {entry['blacklist']['reason']}\")\n\n    @cmd.group(aliases=[\"bl\"], hidden=True, invoke_without_command=True)\n    @checks.has_role_on_support_guild(\"Staff\")\n    async def blacklist(self, ctx):\n        table = PrettyTable()\n        table.field_names = [\"User\", \"Reason\", \"Admin\", \"Timestamp\"]\n\n        blacklist = ctx.db.users.find({\"blacklist.state\": True})\n        async for entry in blacklist:\n            user = await self.bot.fetch_user(entry[\"_id\"])\n            admin = await self.bot.fetch_user(entry[\"blacklist\"][\"admin\"])\n\n            table.add_row([\n                f\"{user} ({user.id})\",\n                entry[\"blacklist\"][\"reason\"],\n                f\"{admin} ({entry['blacklist']['admin']})\",\n                helpers.datetime_to_string(entry[\"blacklist\"][\"timestamp\"])\n            ])\n\n        pages = formatter.paginate(str(table))\n        for page in pages:\n            await ctx.send(f\"```diff\\n{page}```\")\n\n    @blacklist.command()\n    @checks.has_role_on_support_guild(\"Admin\")\n    async def add(self, ctx, user: discord.User, *, reason):\n        await ctx.db.users.update_one({\"id\": user.id}, {\"$set\": {\n            \"_id\": user.id,\n            \"blacklist\": {\n                \"state\": True,\n                \"reason\": reason,\n                \"admin\": ctx.author.id,\n                \"timestamp\": datetime.now(pytz.utc)\n            }\n        }}, upsert=True)\n        await ctx.send(**ctx.em(f\"Successfully **blacklisted** the user **{str(user)}** (<@{user.id}>).\", type=\"success\"))\n\n    @blacklist.command(aliases=[\"rm\", \"remove\", \"del\"])\n    @checks.has_role_on_support_guild(\"Admin\")\n    async def delete(self, ctx, user: discord.User):\n        await ctx.db.users.update_one({\"_id\": user.id}, {\"$set\": {\"blacklist\": {\"state\": False}}})\n        await ctx.send(**ctx.em(f\"Successfully **removed** the user **{str(user)}** (<@{user.id}>) from the **blacklist**.\", type=\"success\"))\n\n\ndef setup(bot):\n    bot.add_cog(Users(bot))\n","repo_name":"julianborghuis/test","sub_path":"xenon/cogs/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"37795064292","text":"\"\"\"Python Exercise 067: Write a program that shows the multiplication table of several numbers, one at a time,\n    for each value entered by the user. The program is interrupted when the requested number is negative. \"\"\"\nwhile True:\n    try:\n        n = int(input('Quer ver uma tabuada de qual número? 
(número negativo para sair): '))\n except:\n print('Caracter inválido!')\n continue\n if n < 0:\n break\n for c in range(0,11):\n if n >= 0:\n print(f' {n} X {c:2} = {n*c}')\n\n\n","repo_name":"Matheusfarmaceutico/Exercicios-Python","sub_path":"Exercícios do Guanabara sendo refeitos em 2022/Revisaoguanabara/ex67.py","file_name":"ex67.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26458659114","text":"from buildingblocks.utils import overrides\nfrom buildingblocks.paeAutomationLog import PaeAutomationLog\nfrom workstates.CrTestState.crTestState import CrTestState\nfrom buildingblocks.utils import RESULTS\nimport buildingblocks.utils as util\nimport os\n\nclass ArchiveDataState(CrTestState):\n def _movDataFile(self, source, target):\n try:\n os.rename(source, target)\n except Exception as e:\n print(str(e))\n @overrides(CrTestState)\n def DoWork(self):\n try:\n PaeAutomationLog().Close()\n for d in ['log', 'report']:\n self._createDirectory('{0}/{1}'.format(cwd, r'archive/'), d)\n\n for key in self._parentWorkThread.TestResultsDictionary.keys():\n resultFolder = r'{0}/{1}'.format(cwd, r'archive/results')\n for g in [RESULTS.FAILED, RESULTS.SKIPPED]:\n if self._createDirectory(resultFolder, key):\n for k, v in self._parentWorkThread.TestResultsDictionary.iteritems():\n ids = [x for x in v if x[x.keys()[0]] == g]\n if len(ids) > 0:\n filename = '{0}/{1}/{2}_{3}.txt'.format(resultFolder,\n key,\n g.value,\n util.GetCurrentTimestamp('%Y-%m-%d-%H-%M-%S'))\n outFile = open(filename, 'w')\n for id in ids:\n outFile.writelines(id)\n outFile.flush()\n pass\n\n files = os.listdir(cwd)\n for l in [x for x in files if x.endswith('.log')]:\n source = os.path.join(cwd, l)\n target = os.path.join(cwd, r'archive/log/{0}_{1}.log'.format(l.split('.')[0], util.GetCurrentTimestamp('%Y-%m-%d-%H-%M-%S')))\n self._movDataFile(source, target)\n for l in [x for x in files if x.endswith('.html')]:\n source = os.path.join(cwd, l)\n target = os.path.join(cwd, r'archive/report/{0}'.format(l))\n self._movDataFile(source, target)\n\n except Exception as e:\n print(str(e))\n\n def _createDirectory(self, parentfolder, folder):\n try:\n path = os.path.join(parentfolder, folder)\n if not os.path.exists(path):\n os.makedirs(path)\n return True\n except IOError as e:\n self._logger.error(str(e))\n return False","repo_name":"lyh3/automation","sub_path":"crHealthCheck/buildingblocks/crHealthCheck/workstates/archiveDataState.py","file_name":"archiveDataState.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21655373597","text":"import numpy as np\r\nimport torch.nn.functional as F\r\nimport torch.nn as nn\r\nfrom benchmark_suite import *\r\n# Define a simple neural network\r\nclass Net1(nn.Module):\r\n def __init__(self):\r\n super(Net1, self).__init__()\r\n self.fc1 = nn.Linear(10, 5)\r\n self.fc2 = nn.Linear(5, 2)\r\n\r\n def forward(self, x):\r\n x = F.relu(self.fc1(x))\r\n x = self.fc2(x)\r\n return x\r\n\r\n# Define another simple neural network\r\nclass Net2(nn.Module):\r\n def __init__(self):\r\n super(Net2, self).__init__()\r\n self.fc1 = nn.Linear(10, 3)\r\n self.fc2 = nn.Linear(3, 1)\r\n\r\n def forward(self, x):\r\n x = F.relu(self.fc1(x))\r\n x = self.fc2(x)\r\n return x.squeeze() # Add this line to match the output dimension with the label dimension\r\n\r\n\r\n# Create inputs and labels\r\nnp.random.seed(0) 
# Set random seed for reproducibility\r\ninputs = [np.random.rand(10) for i in range(100)]\r\nlabels = [[np.sum(input)] for input in inputs] # Labels are the sum of input values, wrapped in a list\r\n\r\n\r\n\r\n# Define evaluation metrics\r\nmetrics = [nn.MSELoss(), nn.L1Loss()]\r\n\r\n# Create neural networks and test them using the benchmark function\r\nnet1 = Net1()\r\nnet2 = Net2()\r\nmetric_values1 = benchmark([net1, net2], inputs, labels, metrics)\r\n\r\n# Plot the benchmark results and save the plots\r\nplot_benchmark(metric_values1, metrics)","repo_name":"HarshitGupta29/Benchmarking-InstantNGP","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34263134534","text":"import os\nimport shutil\nimport uuid\nimport json\nfrom urllib.parse import quote\nfrom datetime import datetime, timedelta\nfrom dateutil import tz, parser\n\ntry:\n from PIL import Image, ImageFont, ImageDraw\n import io\n ImageOverlay = True\nexcept:\n ImageOverlay = False\n\nimport xbmcvfs\nimport xbmc\nimport xbmcaddon\nimport xbmcgui\n\nAddon = xbmcaddon.Addon(\"plugin.video.emby-next-gen\")\nKodiVersion = xbmc.getInfoLabel(\"System.BuildVersionCode\").split(\".\")\n\nif int(KodiVersion[1]) > 20:\n KodiMajorVersion = str(int(KodiVersion[0]) + 1)\nelse:\n KodiMajorVersion = KodiVersion[0]\n\nPluginId = \"plugin.video.emby-next-gen\"\naddon_version = Addon.getAddonInfo('version')\naddon_name = Addon.getAddonInfo('name')\nicon = \"\"\nCustomDialogParameters = (Addon.getAddonInfo('path'), \"default\", \"1080i\")\nEmbyServers = {}\nMinimumVersion = \"8.2.0\"\nrefreshskin = False\ndevice_name = \"Kodi\"\nxspplaylists = False\nanimateicon = True\nTranscodeFormatVideo = \"\"\nTranscodeFormatAudio = \"\"\nvideoBitrate = 0\naudioBitrate = 0\nresumeJumpBack = 0\ndisplayMessage = 0\nnewvideotime = 1\nnewmusictime = 1\nstartupDelay = 0\nbackupPath = \"\"\nenablehttp2 = False\nMinimumSetup = \"\"\nlimitIndex = 5\nautoclose = 5\nmaxnodeitems = \"25\"\ndeviceName = \"Kodi\"\nuseDirectPaths = False\nmenuOptions = False\nnewContent = False\nrestartMsg = False\nconnectMsg = False\nenableDeleteByKodiEvent = False\naddUsersHidden = False\nenableContextDelete = False\nverifyFreeSpace = True\nenableContext = False\ntranscode_h264 = False\ntranscode_hevc = False\ntranscode_av1 = False\ntranscode_vp8 = False\ntranscode_vp9 = False\ntranscode_wmv3 = False\ntranscode_mpeg4 = False\ntranscode_mpeg2video = False\ntranscode_mjpeg = False\ntranscode_msmpeg4v3 = False\ntranscode_aac = False\ntranscode_mp3 = False\ntranscode_mp2 = False\ntranscode_dts = False\ntranscode_ac3 = False\ntranscode_eac3 = False\ntranscode_pcm_mulaw = False\ntranscode_pcm_s24le = False\ntranscode_vorbis = False\ntranscode_wmav2 = False\ntranscode_ac4 = False\ntranscode_livetv_video = False\ntranscode_livetv_audio = False\ntranscode_select_audiostream = False\nskipintroembuarydesign = False\nenableCinemaMovies = False\nenableCinemaEpisodes = False\nenableSkipIntro = False\nenableSkipCredits = False\naskSkipIntro = False\naskSkipCredits = False\naskCinema = False\nlocalTrailers = False\nTrailers = False\nofferDelete = False\ndeleteTV = False\ndeleteMovies = False\nenableCoverArt = False\ncompressArt = False\ngetDateCreated = False\ngetGenres = False\ngetStudios = False\ngetTaglines = False\ngetOverview = False\ngetProductionLocations = False\ngetCast = False\ndeviceNameOpt = False\nartworkcacheenable = True\ndevice_id = 
\"\"\nsyncdate = \"\"\nsynctime = \"\"\nsyncduringplayback = False\nusekodiworkaroundswidget = False\nusekodiworkaroundsepisodebookmarks = True\nusepathsubstitution = False\nuniquepeoplemovies = False\nuniquepeopletvshows = False\nuniquepeopleepisodes = False\nuniquepeoplemusicvideos = True\nbusyMsg = True\nwebsocketenabled = True\nremotecontrol_force_clients = True\nremotecontrol_client_control = True\nremotecontrol_sync_clients = True\nremotecontrol_wait_clients = 30\nremotecontrol_drift = 200\nremotecontrol_auto_ack = False\nremotecontrol_resync_clients = False\nremotecontrol_resync_time = 10\nremotecontrol_keep_clients = False\nwatchtogeter_start_delay = 20\ncompressArtLevel = 100\nArtworkLimitations = False\nArtworkLimitationPrimary = 50\nArtworkLimitationArt = 50\nArtworkLimitationBanner = 30\nArtworkLimitationDisc = 30\nArtworkLimitationLogo = 30\nArtworkLimitationThumb = 40\nArtworkLimitationBackdrop = 100\nArtworkLimitationChapter = 20\ncurltimeouts = 120\nFolderAddonUserdata = f\"special://profile/addon_data/{PluginId}/\"\nFolderEmbyTemp = f\"special://profile/addon_data/{PluginId}/temp/\"\nFolderAddonUserdataLibrary = f\"special://profile/addon_data/{PluginId}/library/\"\nFolderUserdataThumbnails = \"special://profile/Thumbnails/\"\nSystemShutdown = False\nSyncPause = {} # keys: playing, kodi_sleep, embyserverID, , kodi_rw, priority (thread with higher priorit needs access)\nWidgetRefresh = False\nWidgetRefreshAudio = False\nDialog = xbmcgui.Dialog()\nXbmcPlayer = xbmc.Player() # Init Player\nWizardCompleted = True\nAssignEpisodePostersToTVShowPoster = False\nPluginStarted = False\nsslverify = False\nProgressBar = [xbmcgui.DialogProgressBG(), 0, False, False] # obj, Counter, Open, Init in progress\nAddonModePath = \"http://127.0.0.1:57342/\"\nTranslationsCached = {}\nPlaylists = (xbmc.PlayList(0), xbmc.PlayList(1))\nScreenResolution = (1920, 1080)\nHTTPQueryDoublesFilter = {}\n\ndef refresh_widgets():\n xbmc.log(\"EMBY.helper.utils: Refresh widgets initialized\", 1) # LOGINFO\n\n if not WidgetRefresh:\n xbmc.log(\"EMBY.helper.utils: Refresh widgets started\", 1) # LOGINFO\n globals()[\"WidgetRefresh\"] = True\n SendJson('{\"jsonrpc\":\"2.0\",\"method\":\"VideoLibrary.Scan\",\"params\":{\"showdialogs\":false,\"directory\":\"widget_refresh_trigger\"},\"id\":1}')\n\ndef SendJson(JsonString, ForceBreak=False):\n LogSend = False\n Ret = {}\n\n for Index in range(70): # retry -> timout 25 seconds\n Ret = xbmc.executeJSONRPC(JsonString)\n\n if not Ret: # Valid but not correct Kodi return value -> Kodi bug\n return True\n\n Ret = json.loads(Ret)\n\n if not Ret.get(\"error\", False):\n return Ret\n\n if ForceBreak:\n return False\n\n if not LogSend:\n xbmc.log(f\"Emby.helper.utils: Json error, retry: {JsonString}\", 2) # LOGWARNING\n LogSend = True\n\n if Index < 50: # 5 seconds rapidly\n if sleep(0.1):\n return {}\n else: # after 5 seconds delay cycle by 1 second for the last 20 seconds\n if sleep(1):\n return {}\n\n xbmc.log(f\"Emby.helper.utils: Json error, timeout: {Ret} / {JsonString}\", 3) # LOGERROR\n return {}\n\ndef image_overlay(ImageTag, ServerId, EmbyID, ImageType, ImageIndex, OverlayText):\n xbmc.log(f\"EMBY.helper.utils: Add image text overlay: {EmbyID}\", 1) # LOGINFO\n\n if ImageTag == \"noimage\":\n BinaryData = noimagejpg\n else:\n BinaryData, _, _ = EmbyServers[ServerId].API.get_Image_Binary(EmbyID, ImageType, ImageIndex, ImageTag)\n\n if not BinaryData:\n BinaryData = noimagejpg\n\n if not ImageOverlay:\n return BinaryData\n\n img = 
Image.open(io.BytesIO(BinaryData))\n ImageWidth, ImageHeight = img.size\n draw = ImageDraw.Draw(img, \"RGBA\")\n BoxY = int(ImageHeight * 0.9)\n BorderSize = int(ImageHeight * 0.01)\n fontsize = 1\n font = ImageFont.truetype(FontPath, 1)\n\n #Use longest possible text to determine font width\n ImageWidthMod = ImageHeight / 3 * 4\n\n while font.getsize(\"Title Sequence\")[0] < 0.80 * ImageWidthMod and font.getsize(\"Title Sequence\")[1] < 0.80 * BoxY:\n fontsize += 1\n font = ImageFont.truetype(FontPath, fontsize)\n\n FontSizeY = font.getsize(OverlayText)[1]\n draw.rectangle((-BorderSize, BoxY - FontSizeY, ImageWidth + BorderSize, BoxY), fill=(0, 0, 0, 127), outline=\"white\", width=BorderSize)\n draw.text(xy=(ImageWidth / 2, BoxY - FontSizeY / 2), text=OverlayText, fill=\"#FFFFFF\", font=font, anchor=\"mm\", align=\"center\")\n imgByteArr = io.BytesIO()\n img.save(imgByteArr, format=img.format)\n return imgByteArr.getvalue()\n\ndef restart_kodi():\n xbmc.log(\"EMBY.helper.utils: Restart Kodi\", 1) # LOGINFO\n globals()[\"SystemShutdown\"] = True\n xbmc.executebuiltin('RestartApp')\n\ndef sleep(Seconds):\n for _ in range(int(Seconds * 10)):\n if SystemShutdown:\n return True\n\n xbmc.sleep(100)\n\n return False\n\ndef progress_open(Header):\n while ProgressBar[3]:\n sleep(1)\n\n globals()[\"ProgressBar\"][1] += 1\n\n if ProgressBar[1] == 1:\n globals()[\"ProgressBar\"][3] = True\n globals()[\"ProgressBar\"][0].create(Translate(33199), Header)\n globals()[\"ProgressBar\"][3] = False\n globals()[\"ProgressBar\"][2] = True\n\n xbmc.log(f\"EMBY.helper.utils: Progress Bar open: {ProgressBar[1]}\", 1) # LOGINFO\n\ndef progress_close():\n while ProgressBar[3]:\n sleep(1)\n\n globals()[\"ProgressBar\"][1] -= 1\n\n if ProgressBar[1] == 0:\n globals()[\"ProgressBar\"][3] = True\n globals()[\"ProgressBar\"][0].close()\n globals()[\"ProgressBar\"][2] = False\n globals()[\"ProgressBar\"][3] = False\n\n xbmc.log(f\"EMBY.helper.utils: Progress Bar close: {ProgressBar[1]}\", 1) # LOGINFO\n\ndef progress_update(Progress, Heading, Message):\n if ProgressBar[2]:\n ProgressBar[0].update(Progress, heading=Heading, message=Message)\n\n# Delete objects from kodi cache\ndef delFolder(path, Pattern=\"\"):\n xbmc.log(\"EMBY.helper.utils: --[ delete folder ]\", 0) # LOGDEBUG\n dirs, files = listDir(path)\n SelectedDirs = ()\n\n if not Pattern:\n SelectedDirs = dirs\n else:\n for Dir in dirs:\n if Pattern in Dir:\n SelectedDirs += (Dir,)\n\n delete_recursive(path, SelectedDirs)\n\n for Filename in files:\n if Pattern in Filename:\n delFile(f\"{path}{Filename}\")\n\n if path:\n rmFolder(path)\n\n xbmc.log(f\"EMBY.helper.utils: DELETE {path}\", 2) # LOGWARNING\n\n# Delete files and dirs recursively\ndef delete_recursive(path, dirs):\n for directory in dirs:\n dirs2, files = listDir(f\"{path}{directory}\")\n\n for Filename in files:\n delFile(f\"{path}{directory}/{Filename}\")\n\n delete_recursive(f\"{path}{directory}\", dirs2)\n rmFolder(f\"{path}{directory}\")\n\ndef rmFolder(Path):\n Path = translatePath(Path)\n\n if os.path.isdir(Path):\n try:\n os.rmdir(Path)\n except Exception as Error:\n xbmc.log(f\"EMBY.helper.utils: Delete folder issue: {Error} / {Path}\", 3) # LOGERROR\n\ndef mkDir(Path):\n Path = translatePath(Path)\n\n if not os.path.isdir(Path):\n os.mkdir(Path)\n\ndef delFile(Path):\n Path = translatePath(Path)\n\n if os.path.isfile(Path):\n os.remove(Path)\n\ndef copyFile(SourcePath, DestinationPath):\n SourcePath = translatePath(SourcePath)\n DestinationPath = translatePath(DestinationPath)\n\n if 
checkFileExists(DestinationPath):\n xbmc.log(f\"EMBY.helper.utils: copy: File exists: {SourcePath} to {DestinationPath}\", 0) # LOGDEBUG\n return\n\n try:\n shutil.copy(SourcePath, DestinationPath)\n xbmc.log(f\"EMBY.helper.utils: copy: {SourcePath} to {DestinationPath}\", 0) # LOGDEBUG\n except Exception as Error:\n xbmc.log(f\"EMBY.helper.utils: copy issue: {SourcePath} to {DestinationPath} -> {Error}\", 3) # LOGERROR\n\ndef readFileBinary(Path):\n Path = translatePath(Path)\n\n if os.path.isfile(Path):\n with open(Path, \"rb\") as infile:\n data = infile.read()\n\n return data\n\n return b\"\"\n\ndef readFileString(Path):\n Path = translatePath(Path)\n\n if os.path.isfile(Path):\n with open(Path, \"rb\") as infile:\n data = infile.read()\n\n return data.decode('utf-8')\n\n return \"\"\n\ndef writeFileString(Path, Data):\n Data = Data.encode('utf-8')\n Path = translatePath(Path)\n\n with open(Path, \"wb\") as outfile:\n outfile.write(Data)\n\ndef getFreeSpace(Path):\n if verifyFreeSpace:\n try:\n Path = translatePath(Path)\n space = os.statvfs(Path)\n free = space.f_bavail * space.f_frsize / 1024\n # total = space.f_blocks * space.f_frsize / 1024\n return free\n except Exception as Error: # not supported by Windows\n xbmc.log(f\"EMBY.helper.utils: getFreeSpace: {Error}\", 2) # LOGWARNING\n return 9999999\n else:\n return 9999999\n\ndef writeFileBinary(Path, Data):\n Path = translatePath(Path)\n\n with open(Path, \"wb\") as outfile:\n outfile.write(Data)\n\ndef checkFileExists(Path):\n Path = translatePath(Path)\n\n if os.path.isfile(Path):\n return True\n\n return False\n\ndef checkFolderExists(Path):\n Path = translatePath(Path)\n\n if os.path.isdir(Path):\n return True\n\n return False\n\ndef listDir(Path):\n Files = ()\n Folders = ()\n Path = translatePath(Path)\n\n if os.path.isdir(Path):\n for FilesFolders in os.listdir(Path):\n FilesFoldersPath = os.path.join(Path, FilesFolders)\n\n if os.path.isdir(FilesFoldersPath):\n FilesFolders = os.path.join(FilesFolders, b\"\") # add trailing / or \\\n Folders += (FilesFolders.decode('utf-8'),)\n else:\n Files += (FilesFolders.decode('utf-8'),)\n\n return Folders, Files\n\ndef translatePath(Data):\n Path = xbmcvfs.translatePath(Data)\n Path = Path.encode('utf-8')\n return Path\n\ndef currenttime():\n return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n\ndef currenttime_kodi_format():\n return datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\ndef currenttime_kodi_format_and_unixtime():\n Current = datetime.now()\n KodiFormat = Current.strftime('%Y-%m-%d %H:%M:%S')\n UnixTime = int(datetime.timestamp(Current))\n return KodiFormat, UnixTime\n\ndef unixtimeInMicroseconds():\n Current = datetime.now()\n UnixTime = int(datetime.timestamp(Current))\n return UnixTime + Current.microsecond / 1000000\n\n# Remove all emby playlists\ndef delete_playlists():\n SearchFolders = ['special://profile/playlists/video/', 'special://profile/playlists/music/']\n\n for SearchFolder in SearchFolders:\n _, files = listDir(SearchFolder)\n\n for Filename in files:\n if Filename.startswith('emby'):\n delFile(f\"{SearchFolder}{Filename}\")\n\n# Remove all nodes\ndef delete_nodes():\n delFolder(\"special://profile/library/video/\", \"emby_\")\n delFolder(\"special://profile/library/music/\", \"emby_\")\n mkDir(\"special://profile/library/video/\")\n mkDir(\"special://profile/library/music/\")\n\n# Convert the local datetime to gmt\ndef convert_to_gmt(local_time):\n if not local_time:\n return \"\"\n\n if isinstance(local_time, str):\n local_time = 
parser.parse(local_time.encode('utf-8'))\n utc_zone = tz.tzutc()\n local_zone = tz.tzlocal()\n local_time = local_time.replace(tzinfo=local_zone)\n utc_time = local_time.astimezone(utc_zone)\n return utc_time.strftime('%Y-%m-%dT%H:%M:%SZ')\n\n return \"\"\n\n# Convert the gmt datetime to local\ndef convert_to_local(date, DateOnly=False, YearOnly=False):\n if not date or str(date) == \"0\":\n return \"0\"\n\n if isinstance(date, int):\n date = str(date)\n\n if isinstance(date, str):\n date = parser.parse(date.encode('utf-8'))\n\n if not date.tzname():\n date = date.replace(tzinfo=tz.tzutc())\n\n timestamp = (date - datetime(1970, 1, 1, tzinfo=tz.tzutc())).total_seconds()\n\n try:\n if timestamp >= 0:\n timestamp = datetime.fromtimestamp(timestamp)\n else:\n timestamp = datetime(1970, 1, 1) + timedelta(seconds=int(timestamp))\n except Exception as Error:\n xbmc.log(f\"EMBY.helper.utils: invalid timestamp: {Error}\", 2) # LOGWARNING\n return \"0\"\n\n if timestamp.year < 1900:\n xbmc.log(f\"EMBY.helper.utils: invalid timestamp < 1900: {timestamp.year}\", 2) # LOGWARNING\n return \"0\"\n\n if DateOnly:\n return timestamp.strftime('%Y-%m-%d')\n\n if YearOnly:\n return int(timestamp.strftime('%Y'))\n\n return timestamp.strftime('%Y-%m-%d %H:%M:%S')\n\ndef Translate(Id):\n if Id in TranslationsCached:\n return TranslationsCached[Id]\n\n result = Addon.getLocalizedString(Id)\n\n if not result:\n result = xbmc.getLocalizedString(Id)\n\n globals()['TranslationsCached'][Id] = result\n return result\n\ndef PathToFilenameReplaceSpecialCharecters(Path):\n Pos = Path.rfind(\"/\")\n\n if Pos == -1: # Windows\n Pos = Path.rfind(\"\\\\\")\n\n Path = Path[Pos + 1:]\n Filename = quote(Path)\n\n while Filename.find(\"%\") != -1:\n Pos = Filename.find(\"%\")\n Filename = Filename.replace(Filename[Pos:Pos + 3], \"_\")\n\n return Filename\n\ndef SizeToText(size):\n suffixes = ['B', 'KB', 'MB', 'GB', 'TB']\n suffixIndex = 0\n\n while size > 1024 and suffixIndex < 4:\n suffixIndex += 1\n size /= 1024.0\n\n return f\"{size:.1f}{suffixes[suffixIndex]}\"\n\n# Copy folder content from one to another\ndef copytree(path, dest):\n dirs, files = listDir(path)\n mkDir(dest)\n\n if dirs:\n copy_recursive(path, dirs, dest)\n\n for Filename in files:\n CopyFile = f\"{path}{Filename}\"\n\n if CopyFile.endswith('.pyo'):\n continue\n\n copyFile(CopyFile, f\"{dest}{Filename}\")\n\n xbmc.log(f\"EMBY.helper.utils: Copied {path}\", 1) # LOGINFO\n\ndef copy_recursive(path, dirs, dest):\n for directory in dirs:\n dirs_dir = f\"{path}{directory}\"\n dest_dir = f\"{dest}{directory}\"\n mkDir(dest_dir)\n dirs2, files = listDir(dirs_dir)\n\n if dirs2:\n copy_recursive(dirs_dir, dirs2, dest_dir)\n\n for Filename in files:\n CopyFile = f\"{dirs_dir}{Filename}\"\n\n if CopyFile.endswith('.pyo'):\n continue\n\n copyFile(CopyFile, f\"{dest_dir}{Filename}\")\n\ndef get_device_id(reset):\n if device_id:\n return\n\n mkDir(FolderAddonUserdata)\n emby_guid = f\"{FolderAddonUserdata}emby_guid\"\n globals()[\"device_id\"] = readFileString(emby_guid)\n\n if not device_id or reset:\n xbmc.log(\"EMBY.helper.utils: Generating a new GUID\", 1) # LOGINFO\n globals()[\"device_id\"] = str(uuid.uuid4())\n writeFileString(emby_guid, device_id)\n\n if reset: # delete login data -> force new login\n _, files = listDir(FolderAddonUserdata)\n\n for Filename in files:\n if Filename.startswith('servers_'):\n delFile(f\"{FolderAddonUserdata}{Filename}\")\n\n xbmc.log(f\"EMBY.helper.utils: device_id loaded: {device_id}\", 1) # LOGINFO\n\n# Kodi Settings\ndef 
InitSettings():\n load_settings('TranscodeFormatVideo')\n load_settings('TranscodeFormatAudio')\n load_settings('videoBitrate')\n load_settings('audioBitrate')\n load_settings('resumeJumpBack')\n load_settings('autoclose')\n load_settings('displayMessage')\n load_settings('newvideotime')\n load_settings('newmusictime')\n load_settings('startupDelay')\n load_settings('backupPath')\n load_settings('MinimumSetup')\n load_settings('limitIndex')\n load_settings('deviceName')\n load_settings('useDirectPaths')\n load_settings('syncdate')\n load_settings('synctime')\n load_settings('maxnodeitems')\n load_settings('remotecontrol_wait_clients')\n load_settings('watchtogeter_start_delay')\n load_settings('remotecontrol_drift')\n load_settings('remotecontrol_resync_time')\n load_settings('compressArtLevel')\n load_settings('ArtworkLimitationPrimary')\n load_settings('ArtworkLimitationArt')\n load_settings('ArtworkLimitationBanner')\n load_settings('ArtworkLimitationDisc')\n load_settings('ArtworkLimitationLogo')\n load_settings('ArtworkLimitationThumb')\n load_settings('ArtworkLimitationBackdrop')\n load_settings('ArtworkLimitationChapter')\n load_settings('curltimeouts')\n load_settings_bool('ArtworkLimitations')\n load_settings_bool('sslverify')\n load_settings_bool('syncduringplayback')\n load_settings_bool('usekodiworkaroundswidget')\n load_settings_bool('usekodiworkaroundsepisodebookmarks')\n load_settings_bool('refreshskin')\n load_settings_bool('animateicon')\n load_settings_bool('enablehttp2')\n load_settings_bool('menuOptions')\n load_settings_bool('xspplaylists')\n load_settings_bool('newContent')\n load_settings_bool('restartMsg')\n load_settings_bool('connectMsg')\n load_settings_bool('addUsersHidden')\n load_settings_bool('enableContextDelete')\n load_settings_bool('enableContext')\n load_settings_bool('transcode_h264')\n load_settings_bool('transcode_hevc')\n load_settings_bool('transcode_av1')\n load_settings_bool('transcode_vp8')\n load_settings_bool('transcode_vp9')\n load_settings_bool('transcode_wmv3')\n load_settings_bool('transcode_mpeg4')\n load_settings_bool('transcode_mpeg2video')\n load_settings_bool('transcode_mjpeg')\n load_settings_bool('transcode_msmpeg4v3')\n load_settings_bool('transcode_aac')\n load_settings_bool('transcode_mp3')\n load_settings_bool('transcode_mp2')\n load_settings_bool('transcode_dts')\n load_settings_bool('transcode_ac3')\n load_settings_bool('transcode_eac3')\n load_settings_bool('transcode_pcm_mulaw')\n load_settings_bool('transcode_pcm_s24le')\n load_settings_bool('transcode_vorbis')\n load_settings_bool('transcode_wmav2')\n load_settings_bool('transcode_ac4')\n load_settings_bool('transcode_livetv_video')\n load_settings_bool('transcode_livetv_audio')\n load_settings_bool('transcode_select_audiostream')\n load_settings_bool('enableCinemaMovies')\n load_settings_bool('enableCinemaEpisodes')\n load_settings_bool('askCinema')\n load_settings_bool('localTrailers')\n load_settings_bool('Trailers')\n load_settings_bool('offerDelete')\n load_settings_bool('deleteTV')\n load_settings_bool('deleteMovies')\n load_settings_bool('enableCoverArt')\n load_settings_bool('compressArt')\n load_settings_bool('getDateCreated')\n load_settings_bool('getGenres')\n load_settings_bool('getStudios')\n load_settings_bool('getTaglines')\n load_settings_bool('getOverview')\n load_settings_bool('getProductionLocations')\n load_settings_bool('getCast')\n load_settings_bool('deviceNameOpt')\n load_settings_bool('useDirectPaths')\n 
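# A minimal standalone sketch of the load_settings / load_settings_bool pattern
# used above: each setting lands in module globals under its own name. The dict
# below stands in for Kodi's Addon settings store; its contents are made up.
_SETTINGS_STORE = {"limitIndex": "25", "sslverify": "true"}

def get_setting(name):
    # Stand-in for Addon.getSetting(); Kodi always hands back strings.
    return _SETTINGS_STORE.get(name, "")

def load_settings(name):
    # String settings are stored under their own name in globals().
    globals()[name] = get_setting(name)

def load_settings_bool(name):
    # Booleans are persisted as the strings "true"/"false".
    globals()[name] = get_setting(name) == "true"

load_settings("limitIndex")
load_settings_bool("sslverify")
print(limitIndex, sslverify)  # -> 25 True (limitIndex stays a string until cast)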
load_settings_bool('enableDeleteByKodiEvent')\n load_settings_bool('enableSkipIntro')\n load_settings_bool('enableSkipCredits')\n load_settings_bool('askSkipIntro')\n load_settings_bool('askSkipCredits')\n load_settings_bool('skipintroembuarydesign')\n load_settings_bool('busyMsg')\n load_settings_bool('AssignEpisodePostersToTVShowPoster')\n load_settings_bool('WizardCompleted')\n load_settings_bool('verifyFreeSpace')\n load_settings_bool('usepathsubstitution')\n load_settings_bool('uniquepeoplemovies')\n load_settings_bool('uniquepeopletvshows')\n load_settings_bool('uniquepeopleepisodes')\n load_settings_bool('uniquepeoplemusicvideos')\n load_settings_bool('remotecontrol_force_clients')\n load_settings_bool('remotecontrol_client_control')\n load_settings_bool('remotecontrol_sync_clients')\n load_settings_bool('remotecontrol_auto_ack')\n load_settings_bool('remotecontrol_resync_clients')\n load_settings_bool('remotecontrol_keep_clients')\n load_settings_bool('websocketenabled')\n load_settings_bool('WidgetRefreshAudio')\n\n if ArtworkLimitations:\n globals()[\"ScreenResolution\"] = (int(xbmc.getInfoLabel('System.ScreenWidth')), int(xbmc.getInfoLabel('System.ScreenHeight')))\n xbmc.log(f\"EMBY.helper.utils: Screen resolution: {ScreenResolution}\", 1) # LOGINFO\n\n if usepathsubstitution:\n globals()[\"AddonModePath\"] = \"/emby_addon_mode/\"\n else:\n globals()[\"AddonModePath\"] = \"http://127.0.0.1:57342/\"\n\n if not deviceNameOpt:\n globals()[\"device_name\"] = xbmc.getInfoLabel('System.FriendlyName')\n else:\n globals()[\"device_name\"] = deviceName.replace(\"/\", \"_\")\n\n if not device_name:\n globals()[\"device_name\"] = \"Kodi\"\n else:\n globals()[\"device_name\"] = quote(device_name) # url encode\n\n ToggleIcon = []\n\n if animateicon:\n if icon and icon != \"special://home/addons/plugin.video.emby-next-gen/resources/icon-animated.gif\":\n ToggleIcon = [\"resources/icon.png\", \"resources/icon-animated.gif\"]\n\n globals()[\"icon\"] = \"special://home/addons/plugin.video.emby-next-gen/resources/icon-animated.gif\"\n else:\n if icon and icon != \"special://home/addons/plugin.video.emby-next-gen/resources/icon.png\":\n ToggleIcon = [\"resources/icon-animated.gif\", \"resources/icon.png\"]\n\n globals()[\"icon\"] = \"special://home/addons/plugin.video.emby-next-gen/resources/icon.png\"\n\n if ToggleIcon:\n xbmc.log(\"EMBY.helper.utils: Toggle icon\", 1) # LOGINFO\n AddonXml = readFileString(\"special://home/addons/plugin.video.emby-next-gen/addon.xml\")\n AddonXml = AddonXml.replace(ToggleIcon[0], ToggleIcon[1])\n writeFileString(\"special://home/addons/plugin.video.emby-next-gen/addon.xml\", AddonXml)\n\n # Change type to integer\n globals().update({\"limitIndex\": int(limitIndex), \"startupDelay\": int(startupDelay), \"videoBitrate\": int(videoBitrate), \"audioBitrate\": int(audioBitrate), \"remotecontrol_wait_clients\": int(remotecontrol_wait_clients), \"remotecontrol_drift\": int(remotecontrol_drift), \"remotecontrol_resync_time\": int(remotecontrol_resync_time)})\n\ndef set_syncdate(timestamp):\n TimeStamp = parser.parse(timestamp.encode('utf-8'))\n set_settings(\"syncdate\", TimeStamp.strftime('%Y-%m-%d'))\n set_settings(\"synctime\", TimeStamp.strftime('%H:%M'))\n\ndef load_settings_bool(setting):\n value = Addon.getSetting(setting)\n\n if value == \"true\":\n globals()[setting] = True\n else:\n globals()[setting] = False\n\ndef load_settings(setting):\n value = Addon.getSetting(setting)\n globals()[setting] = value\n\ndef set_settings(setting, value):\n globals()[setting] 
= value\n Addon.setSetting(setting, value)\n\ndef set_settings_bool(setting, value):\n globals()[setting] = value\n\n if value:\n Addon.setSetting(setting, \"true\")\n else:\n Addon.setSetting(setting, \"false\")\n\ndef nodesreset():\n delete_nodes()\n\n for EmbyServer in list(EmbyServers.values()):\n EmbyServer.Views.update_nodes()\n\nmkDir(FolderAddonUserdata)\nmkDir(FolderEmbyTemp)\nmkDir(FolderUserdataThumbnails)\nmkDir(FolderAddonUserdataLibrary)\nInitSettings()\nget_device_id(False)\nDatabaseFiles = {'texture': \"\", 'texture-version': 0, 'music': \"\", 'music-version': 0, 'video': \"\", 'video-version': 0, 'epg': \"\", 'epg-version': 0, 'tv': \"\", 'tv-version': 0}\n_, FolderDatabasefiles = listDir(\"special://profile/Database/\")\nFontPath = translatePath(\"special://home/addons/plugin.video.emby-next-gen/resources/font/LiberationSans-Bold.ttf\")\nnoimagejpg = readFileBinary(\"special://home/addons/plugin.video.emby-next-gen/resources/noimage.jpg\")\n\nfor FolderDatabaseFilename in FolderDatabasefiles:\n if not FolderDatabaseFilename.endswith('-wal') and not FolderDatabaseFilename.endswith('-shm') and not FolderDatabaseFilename.endswith('db-journal'):\n if FolderDatabaseFilename.startswith('Textures'):\n Version = int(''.join(i for i in FolderDatabaseFilename if i.isdigit()))\n\n if Version > DatabaseFiles['texture-version']:\n DatabaseFiles['texture'] = translatePath(f\"special://profile/Database/{FolderDatabaseFilename}\")\n DatabaseFiles['texture-version'] = Version\n elif FolderDatabaseFilename.startswith('MyMusic'):\n Version = int(''.join(i for i in FolderDatabaseFilename if i.isdigit()))\n\n if Version > DatabaseFiles['music-version']:\n DatabaseFiles['music'] = translatePath(f\"special://profile/Database/{FolderDatabaseFilename}\")\n DatabaseFiles['music-version'] = Version\n elif FolderDatabaseFilename.startswith('MyVideos'):\n Version = int(''.join(i for i in FolderDatabaseFilename if i.isdigit()))\n\n if Version > DatabaseFiles['video-version']:\n DatabaseFiles['video'] = translatePath(f\"special://profile/Database/{FolderDatabaseFilename}\")\n DatabaseFiles['video-version'] = Version\n elif FolderDatabaseFilename.startswith('Epg'):\n Version = int(''.join(i for i in FolderDatabaseFilename if i.isdigit()))\n\n if Version > DatabaseFiles['epg-version']:\n DatabaseFiles['epg'] = translatePath(f\"special://profile/Database/{FolderDatabaseFilename}\")\n DatabaseFiles['epg-version'] = Version\n elif FolderDatabaseFilename.startswith('TV'):\n Version = int(''.join(i for i in FolderDatabaseFilename if i.isdigit()))\n\n if Version > DatabaseFiles['tv-version']:\n DatabaseFiles['tv'] = translatePath(f\"special://profile/Database/{FolderDatabaseFilename}\")\n DatabaseFiles['tv-version'] = Version\n\nif not artworkcacheenable: # reset if Kodi crashed during artwork cache\n set_settings_bool('artworkcacheenable', True)\n","repo_name":"MediaBrowser/plugin.video.emby","sub_path":"helper/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":28205,"program_lang":"python","lang":"en","doc_type":"code","stars":278,"dataset":"github-code","pt":"77"} +{"seq_id":"22517620747","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n\n def reverseList(self, head: ListNode) -> ListNode:\n '''\n Reverse the linked list\n '''\n prev = None\n curr = head\n while curr is not None:\n nxt = curr.next\n curr.next = prev\n prev = curr\n curr = nxt\n\n 
return prev\n\n def reorderList(self, head: ListNode) -> None:\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n if head is None:\n return\n\n # Find the middle node (for an even number of nodes return the end of the first half, e.g. for 1,2,3,4 return 2, not 3)\n slow = fast = head\n while fast.next and fast.next.next: # returning 2 instead of 3 is controlled here\n slow = slow.next\n fast = fast.next.next\n mid_node = slow\n\n # Reverse the second half of the list\n rear_node_start = mid_node.next\n mid_node.next = None # note: cut the list into two halves here, otherwise things get strange\n mid_node = self.reverseList(rear_node_start)\n\n # merge\n f = head\n r = mid_node\n while f and r:\n next_f = f.next\n next_r = r.next\n f.next = r\n r.next = next_f\n f = next_f\n r = next_r","repo_name":"ftakanashi/JobProjects","sub_path":"LeetCode/143.重排链表/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"40310953999","text":"import supp\nimport tensorflow as tf\nimport ML_functions as ml\nimport array as arr\nimport datetime\nfrom keras.preprocessing import sequence\nfrom keras.callbacks import LambdaCallback\nfrom keras import optimizers\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import classification_report\nimport csv\nimport ManipuleraData as mani\nfrom sklearn.metrics import confusion_matrix\n\n# GPU Tester\nfrom tensorflow.python.client import device_lib\n\nprint(device_lib.list_local_devices())\n\nvector_size = 52\nstarttime = datetime.datetime.now()\ninput_file = \"ArenSwipeNext1\"\n\n\ninput_button = ['AlexButton1.csv', 'AlexButton5.csv', 'AndreasButton1.csv', 'AndreasButton2.csv', 'AndreasButton3.csv', 'AndreasButton4.csv', 'AndreasButton5.csv', 'ArenButton1.csv', 'ArenButton2.csv', 'ArenButton3.csv', 'JohanButton1.csv', 'JohanButton2.csv', 'JuliaButton1.csv', 'LinusButton1.csv', 'MartinButton1.csv', 'MatildaButton1.csv']\n\ninput_swipenext = ['AlexSwipeNext1.csv', 'AlexSwipeNext5.csv', 'AndreasSwipeNext1.csv', 'AndreasSwipeNext2.csv', 'AndreasSwipeNext3.csv', 'AndreasSwipeNext4.csv', 'AndreasSwipeNext5.csv', 'ArenSwipeNext1.csv', 'ArenSwipeNext2.csv', 'ArenSwipeNext3.csv', 'JohanSwipeNext1.csv', 'JohanSwipeNext2.csv', 'JuliaSwipeNext1.csv', 'LinusSwipeNext.csv', 'LinusSwipeNext1.csv', 'MartinSwipeNext1.csv', 'MatildaSwipeNext1.csv']\n\ninput_swipeprev = ['AlexSwipePrev1.csv', 'ArenSwipePrev1.csv', 'JohanSwipePrev1.csv', 'JohanSwipePrev2.csv', 'JuliaSwipePrev1.csv', 'LinusSwipePrev.csv', 'LinusSwipePrev1.csv', 'MartinSwipePrev1.csv', 'MatildaSwipePrev.csv', 'MatildaSwipePrev1.csv']\n\ninput_slideup = ['AlexSlideUp1.csv', 'AlexSlideUp5.csv', 'AndreaSlideUp1.csv', 'AndreasSlideUp1.csv', 'AndreasSlideUp2.csv', 'AndreasSlideUp3.csv', 'AndreasSlideUp4.csv', 'AndreasSlideUp5.csv', 'ArenSlideUp1.csv', 'ArenSlideUp2.csv', 'ArenSlideUp3.csv', 'JohanSlideUp2.csv', 'JuliaSlideUp1.csv', 'LindaSlideUp1.csv', 'LinusSlideUp1.csv', 'MartinSlideUp1.csv', 'MatildaSlideUp1.csv']\n\ninput_slidedown = ['AlexSlideDown1.csv', 'ArenSlideDown1.csv', 'JohanSlideDown1.csv', 'JohanSlideDown2.csv', 'JuliaSlideDown1.csv', 'JuliaSlideDown2.csv', 'LinusSlideDown1.csv', \"MartinSlideDown1'.csv\", 'MartinSlideDown1.csv', 'MatildaSlideDown1.csv']\n\ninput_flop = ['AlexFlop1.csv', 'ArenFlop1.csv', 'JohanFlop1.csv', 'JohanFlop2.csv', 'JuliaFlop1.csv', 'LinusFlop1.csv', 'MartinFlop1.csv', 'MatildaFlop1.csv']\n\ninput_background = [\"GoodBackground1.csv\", \"GoodBackground2.csv\"]\n\n\ninput_files = input_button + input_swipenext + input_background + input_swipeprev + \\\n input_slideup + input_slidedown + 
input_flop\n\ninput_folder = \"ProcessedData\"\nart_folder = \"TranslatedData\"\n\n# Number of categories\noutputs = 7\n\n# training hyperparameters\n\nepochs = 500\ntime_steps = 10\nbatch_size = 10\nlearning_rate = 0.00025\ndecay = 2.5/(10**6)\n\ntraining_ratio = 0.7\n\n# used in both models\nlstm_output = 20\nstateful = True\n\n# only used in combined model\nnum_filters = 64\nkernel_size = 5\n\nrepeats = 1\n\n# for saving the model and weights\nexport = True\nmodelSaveFile = f'ts{time_steps}bs{batch_size}lstmout{lstm_output}st{stateful}lr{learning_rate}.json'\nweightSaveFile = f'ts{time_steps}bs{batch_size}lstmout{lstm_output}st{stateful}lr{learning_rate}.h5'\n\n# Model loading data\nload = False\nmodelFile = \"ts10bs10lstmout20stTruelr0.00025.json\"\nweightFile = \"ts10bs10lstmout20stTruelr0.00025.h5\"\n\n# optimizers\n# adam standard: (lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\noptadam = optimizers.adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=decay, amsgrad=False)\n\n# rmsprop standard: (lr=0.001, rho=0.9, epsilon=None, decay=0.0)\noptprop = optimizers.rmsprop(lr=learning_rate, rho=0.9, epsilon=None, decay=decay)\n\nrunopt = optadam\n\n# saves plot\nplot = True\nplotFile = f'Plots\\\\ts{time_steps}bs{batch_size}lstmout{lstm_output}st{stateful}lr{learning_rate}.svg'\n\n# saves Result\nresultFile = \"results.csv\"\n\n\ndata_norm, means, maxs = ml.load_zero_mean_normalize_data_folder(input_folder)\n\ndata = supp.shuffle_gestures(data_norm)\n\n# art_data = ml.load_folder(art_folder)\n# art_background = ml.load_data(\"GoodBackground1.csv\")\n\n# art_data = supp.shuffle_gestures(np.concatenate\n# ([art_data, art_background], axis=0))\n\nx_train, x_test, y_train, y_test = ml.split_data(data, vector_size, outputs,\n training_ratio)\n\n# art_x, _, art_y, _ = ml.split_data(art_data, vector_size, outputs, 1)\n\n# x_train = np.concatenate([x_train, art_x], axis=0)\n# y_train = np.concatenate([y_train, art_y], axis=0)\n\nx_train = x_train[:len(x_train) // 1000 * 1000 + time_steps]\nx_test = x_test[:len(x_test) // 1000 * 1000 + time_steps]\ny_train = y_train[:len(y_train) // 1000 * 1000 + time_steps]\ny_test = y_test[:len(y_test) // 1000 * 1000 + time_steps]\n\n\nprint(ml.count_gestures(y_train))\nprint(ml.count_gestures(y_test))\n\nprint(f'{len(x_train)}, {len(x_test)}, {len(y_train)}, {len(y_test)}')\n\n\ntrain_seq = sequence.TimeseriesGenerator(x_train, y_train, length=time_steps, batch_size=batch_size)\ntest_seq = sequence.TimeseriesGenerator(x_test, y_test, length=time_steps, batch_size=batch_size)\n\n\nseqtest = []\n\n\nfor i in range(repeats):\n\n if load:\n model = ml.loadModel(modelFile, weightFile)\n else:\n model = ml.build_lstm(time_steps, vector_size, outputs, batch_size, lstm_output, stateful)\n # model = ml.build_clstm(time_steps, vector_size, outputs, num_filters, kernel_size, lstm_output)\n # model = ml.build_crrr(time_steps, vector_size, outputs, num_filters, batch_size, kernel_size, lstm_output, stateful)\n\n model.compile(loss='categorical_crossentropy',\n optimizer=runopt,\n metrics=['accuracy'])\n\n history = model.fit_generator(train_seq,\n callbacks=[LambdaCallback(\n on_epoch_begin=lambda epoch, logs: print('Repeats', i + 1, '/', repeats))],\n epochs=epochs,\n validation_data=test_seq)\n\n seqtest.append(model.evaluate_generator(test_seq))\n\n predictions = model.predict_generator(test_seq)\n predictions = np.argmax(predictions, axis=1)\n cm = confusion_matrix(np.argmax(y_test[time_steps:], axis=1), predictions)\n 
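# The TimeseriesGenerator calls above pair each window of `length` consecutive
# samples with the label of the sample that follows the window. A dependency-free
# sketch of that slicing, assuming the generator's defaults (sampling_rate=1,
# stride=1); the toy arrays below are made up.
import numpy as np

def timeseries_windows(x, y, length, batch_size):
    # Yield (batch_x, batch_y) pairs: window x[i:i+length] -> target y[i+length].
    starts = list(range(0, len(x) - length))
    for b in range(0, len(starts), batch_size):
        idx = starts[b:b + batch_size]
        bx = np.stack([x[i:i + length] for i in idx])
        by = np.stack([y[i + length] for i in idx])
        yield bx, by

x = np.arange(20).reshape(10, 2).astype(float)   # 10 timesteps, 2 features
y = np.eye(2)[np.arange(10) % 2]                 # toy one-hot labels
for bx, by in timeseries_windows(x, y, length=3, batch_size=4):
    print(bx.shape, by.shape)   # (4, 3, 2) (4, 2), then a final partial batch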
print(cm)\n print()\n print()\n\n cm = ml.cm_to_percentage(cm)\n print(cm)\n with open(\"ConfusionMatrix_dropout.csv\", 'w', newline='') as cm_file:\n writer = csv.writer(cm_file)\n for row in cm:\n writer.writerow(row)\n\n plt.subplot(2, 1, 1)\n plt.plot(history.history['loss'], color='blue')\n plt.plot(history.history['val_loss'], color='orange')\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n\n plt.subplot(2, 1, 2)\n plt.plot(history.history['acc'], color='blue')\n plt.plot(history.history['val_acc'], color='orange')\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n\n if plot:\n plt.tight_layout()\n plt.savefig(plotFile, format='svg')\n\n\n if export:\n json_model = model.to_json()\n with open(\"Model\\\\\" + modelSaveFile, 'w') as file:\n file.write(json_model)\n model.save_weights(\"Model\\\\\" + weightSaveFile)\n\nplt.show()\nwith open(resultFile, 'w') as file:\n writer = csv.writer(file)\n for row in seqtest:\n writer.writerow(row)\n\nml.sum_print(starttime, repeats, seqtest)\n\n#pyplot.plot(history['train'], color='blue')\n#pyplot.plot(history['test'], color='orange')\n#print('%d) TrainRMSE=%f, TestRMSE=%f' % (i, history['train'].iloc[-1], history['test'].iloc[-1]))\n","repo_name":"thomjohs/Kandidat","sub_path":"ML_tester.py","file_name":"ML_tester.py","file_ext":"py","file_size_in_byte":7758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37889725279","text":"import random\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom network.english import EnglishLSTM, EnglishCNN\nfrom network.en_rythm import EnglishRythmLSTM\nfrom network.japanese import JapaneseLSTM\nfrom network.ja_rythm import JapaneseRythmLSTM\nfrom util.data import DataManager\nimport sys\n\nfrom chainer import Variable\nfrom chainer import optimizers\nfrom chainer import serializers\nimport chainer.functions as F\n\n\n###\n# English -> LSTM\n###\ndef train(params):\n en_model = EnglishLSTM(len(params['en_list']))\n en_rythm_model = EnglishRythmLSTM(len(params['en_rythm_list']))\n ja_model = JapaneseLSTM(len(params['ja_list']))\n ja_rythm_model = JapaneseRythmLSTM(len(params['ja_rythm_list']))\n data = {\n # 'english': en_model.get_train_data(params['english'], params['batch_size']), # parallel\n # 'en_rythm': en_model.get_train_data(params['en_rythm'], params['batch_size'])\n 'english' : params['english'],\n 'en_rythm': params['en_rythm'],\n 'japanese' : params['japanese'],\n 'ja_rythm': params['ja_rythm'],\n }\n\n # Use Adam as the optimization algorithm\n optimizer = [\n optimizers.Adam().setup(en_model),\n optimizers.Adam().setup(en_rythm_model),\n optimizers.Adam().setup(ja_model),\n optimizers.Adam().setup(ja_rythm_model),\n ]\n\n loss_list = []\n step = []\n for epoch in range(params['epoch_num']):\n print(\"epoch: %d\" % (epoch+1))\n loss = 0.0\n # Train on the English lyrics\n en_model.reset()\n for index, (en_phrase, en_rythm_phrase, ja_phrase, ja_rythm_phrase) in enumerate(zip(data['english'], data['en_rythm'], data['japanese'], data['ja_rythm'])):\n # Reset the state when the song changes\n if len(en_phrase) == 0:\n en_model.reset()\n en_rythm_model.reset()\n continue\n # if len(en_rythm_phrase) == 0:\n # en_rythm_model.reset()\n # continue\n # if len(ja_phrase) == 0:\n # ja_model.reset()\n # continue\n # English lyrics\n for word in en_phrase:\n y_en = en_model.forward(word, params['en_list'])\n # English rhythm\n for rythm in en_rythm_phrase:\n 
y_en_rythm = en_rythm_model.forward(rythm, params['en_rythm_list'])\n\n # Sum the two outputs\n h = y_en + y_en_rythm\n\n # Predict the first Japanese word from h\n tx = Variable(np.array([params['ja_list'][ja_phrase[0]]], dtype=np.int32))\n loss += F.softmax_cross_entropy(ja_model.predict(h), tx)\n # Generate the Japanese words from the summed output\n for index, word in enumerate(ja_phrase):\n y_ja = ja_model.forward(word, params['ja_list'])\n if word != '':\n tx = Variable(np.array([params['ja_list'][ja_phrase[index+1]]], dtype=np.int32))\n # print(y_ja, tx)\n loss += F.softmax_cross_entropy(y_ja, tx)\n\n # Predict the first Japanese rhythm from h\n tx = Variable(np.array([params['ja_rythm_list'][ja_rythm_phrase[0]]], dtype=np.int32))\n loss += F.softmax_cross_entropy(ja_rythm_model.predict(h), tx)\n # Generate the Japanese rhythm from the summed output\n for index, rythm in enumerate(ja_rythm_phrase):\n y_ja_rythm = ja_rythm_model.forward(rythm, params['ja_rythm_list'])\n if rythm != '':\n tx = Variable(np.array([params['ja_rythm_list'][ja_rythm_phrase[index+1]]], dtype=np.int32))\n # print(y_ja, tx)\n loss += F.softmax_cross_entropy(y_ja_rythm, tx)\n # print(ja_model.l1.upward.W.grad)\n en_model.cleargrads()\n en_rythm_model.cleargrads()\n ja_model.cleargrads()\n ja_rythm_model.cleargrads()\n\n loss.backward()\n loss.unchain_backward()\n ja_model.reset()\n ja_rythm_model.reset()\n for opt in optimizer:\n opt.update()\n\n # Visualize the loss\n step.append(epoch+1)\n loss_list.append(loss.data)\n\n print(loss)\n # Save the models\n serializers.save_hdf5('models/en_model_' + str(params['epoch_num']), en_model)\n serializers.save_hdf5('models/en_rythm_model_' + str(params['epoch_num']), en_rythm_model)\n serializers.save_hdf5('models/ja_model_' + str(params['epoch_num']), ja_model)\n serializers.save_hdf5('models/ja_rythm_model_' + str(params['epoch_num']), ja_rythm_model)\n\n # Loss graph of the training process\n plt.plot(step, loss_list)\n plt.title(\"Training Data\")\n plt.xlabel(\"step\")\n plt.ylabel(\"loss\")\n plt.grid(True)\n plt.show()\n\ndef predict(params, filename):\n en_model = EnglishLSTM(len(params['en_list']))\n en_rythm_model = EnglishRythmLSTM(len(params['en_rythm_list']))\n ja_model = JapaneseLSTM(len(params['ja_list']))\n ja_rythm_model = JapaneseRythmLSTM(len(params['ja_rythm_list']))\n\n serializers.load_hdf5('models/en_model_' + str(params['epoch_num']), en_model)\n serializers.load_hdf5('models/en_rythm_model_' + str(params['epoch_num']), en_rythm_model)\n serializers.load_hdf5('models/ja_model_' + str(params['epoch_num']), ja_model)\n serializers.load_hdf5('models/ja_rythm_model_' + str(params['epoch_num']), ja_rythm_model)\n x1 = [\n 'are',\n 'you',\n 'going',\n 'to',\n 'scarborough',\n 'fair',\n '?',\n ''\n ]\n x2 = [\n '48',\n '24',\n '24',\n '24',\n '24',\n '36',\n '12',\n '24',\n '72',\n '',\n ]\n arr = [k for k in params['ja_list']]\n arr2 = [k for k in params['ja_rythm_list']]\n ja_y = \"\"\n ja_rythm_y = \"\"\n while((ja_y != '') and (ja_rythm_y != '')):\n for x in x1:\n y1 = en_model.forward(x, params['en_list'])\n for x in x2:\n y2 = en_rythm_model.forward(x, params['en_rythm_list'])\n\n h = y1 + y2\n\n # Predict the first word from h\n y3 = ja_model.predict(h)\n\n prob = F.softmax(y3.data).data\n prob = prob.argmax(axis=1)\n prob = int(prob)\n ja_y = arr[prob]\n # ja_y = str(np.random.choice(arr, p = prob[0]))\n print(ja_y)\n\n while(ja_y != ''):\n y3 = ja_model.forward(ja_y, params['ja_list'])\n prob = F.softmax(y3.data).data\n prob = prob.argmax(axis=1)\n prob = int(prob)\n ja_y = arr[prob]\n # ja_y = str(np.random.choice(arr, p = prob[0]))\n print(ja_y)\n\n\n # Predict the first rhythm from h\n y4 = ja_rythm_model.predict(h)\n\n prob = 
F.softmax(y4.data).data\n ja_rythm_y = str(np.random.choice(arr2, p = prob[0]))\n print(ja_rythm_y)\n\n while(ja_rythm_y != ''):\n y4 = ja_rythm_model.forward(ja_rythm_y, params['ja_rythm_list'])\n prob = F.softmax(y4.data).data\n ja_rythm_y = str(np.random.choice(arr2, p = prob[0]))\n print(ja_rythm_y)\n\ndef get_data_arr(filename):\n file = open(filename)\n line = file.read()\n line = line.strip()\n file.close()\n return line.split(\"\\n\")\n\nif __name__==\"__main__\":\n data_manager = DataManager()\n epoch_num = 350\n # read data\n en_data = get_data_arr(\"./data/english.txt\")\n en_rythm = get_data_arr(\"./data/en_rythm.txt\")\n ja_data = get_data_arr(\"./data/japanese.txt\")\n ja_rythm = get_data_arr(\"./data/ja_rythm.txt\")\n\n params = {\n 'epoch_num': epoch_num,\n 'english': data_manager.get_splite_list(en_data[:]),\n 'en_rythm': data_manager.get_splite_list(en_rythm[:]),\n 'japanese': data_manager.get_splite_list(ja_data[:]),\n 'ja_rythm': data_manager.get_splite_list(ja_rythm[:]),\n 'batch_size': 5,\n 'en_list': data_manager.get_word_list(en_data[:]),\n 'en_rythm_list': data_manager.get_word_list(en_rythm[:]),\n 'ja_list': data_manager.get_word_list(ja_data[:]),\n 'ja_rythm_list': data_manager.get_word_list(ja_rythm[:]),\n }\n\n # train(params)\n predict(params, 'models/model_3.npz')\n","repo_name":"yanoooooo/translation_ML","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41332836576","text":" \n # JUAN DIEGO RIVERA MENESES \n# FICHA: 2274935\n\nimport math\n# SIMPLE WHILE ITERATION \nn = 2\nwhile n<=10:\n print(n)\n n += 1\nprint(\"Ciclo terminado\") \n \n\n# WHILE LOOP \nnum=int(input(\"Please insert a number\"))\n\nwhile num <0:\n print(f\"this number is negative, insert a positive\")\n num=int(input(\"Please insert a number again\"))\nelse:\n print(f\"This number is valid\") \n\n\n\n# WHILE LOOP WITH A SQUARE ROOT OPERATION\nnumber= int(input(\"Insert a number:\"))\n\nwhile number<0:\n print(\"Alert, the number must be positive\")\n number= int(input(\"Insert number again:\"))\n\nprint(f\"\\nYour square root is : {(math.sqrt(number)):.2f}\")\n# :.2f --> prints only 2 decimal places of the result \n","repo_name":"DiegoRiveraDev97/pythonexercises","sub_path":"videos_youtube/ciclo while.py","file_name":"ciclo while.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33591151008","text":"from ogrencii import Ogrenci\n\nfrom sınıf import Ogretmen\n\nögrenciList = []\nögretmenList = []\n\n\nwhile(1):\n\n grup = int(input(\"öğrenci için 1,ogretmen icin 2 tikla: \" ))\n islem = int(input(\"Yapacağiniz islemi seciniz(1 = ekleme,2=listeleme): \"))\n name = input(\"name: \")\n major = input(\"major: \")\n\n \n\n\n\n\n def ekleme():\n if grup == 1:\n ogrenci = Ogrenci(name,major)\n ögrenciList.append(ogrenci)\n elif grup == 2 :\n ogretmen = Ogretmen(name,major)\n ögretmenList.append(ogretmen)\n\n\n def listele():\n if grup == 1:\n for i in range(len(ögrenciList)):\n print(ögrenciList[i].name , ögrenciList[i].major)\n \n\n ekleme() \n listele()\n\n \n\n\n\n \n\n\n\n\n\n\n\n\n\n","repo_name":"sercanulasss/etiyaakademi","sub_path":"pair1104/sınıflistele.py","file_name":"sınıflistele.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
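# The predict() routine above mixes two decoding strategies: argmax for the next
# word and np.random.choice over the softmax probabilities for the rhythm. A
# self-contained sketch of both; the vocabulary and logits below are made up.
import numpy as np

def softmax(logits):
    # Numerically stable softmax over a 1-D array of logits.
    z = logits - logits.max()
    e = np.exp(z)
    return e / e.sum()

vocab = ["<eol>", "sora", "umi", "kaze"]
logits = np.array([0.1, 2.0, 1.5, -0.5])
probs = softmax(logits)

greedy = vocab[int(np.argmax(probs))]             # deterministic: most likely token
sampled = str(np.random.choice(vocab, p=probs))   # stochastic: varied output
print(greedy, sampled, probs.round(3))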
+{"seq_id":"21998639117","text":"from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\nfrom users.views import GetTokenView, SignUpView, UserViewSet\n\nfrom .views import (CategoryViewSet, CommentViewSet, GenreViewSet,\n ReviewViewSet, TitleViewSet)\n\nrouter = DefaultRouter()\n\nrouter.register('categories', CategoryViewSet, basename='categories')\nrouter.register('genres', GenreViewSet, basename='genres')\nrouter.register(r'titles', TitleViewSet, basename='titles')\nrouter.register(r'titles/(?P\\d+)/reviews',\n ReviewViewSet, basename='reviews')\nrouter.register(r'titles/(?P\\d+)/reviews/(?P\\d+)'\n r'/comments', CommentViewSet, basename='comments')\nrouter.register(r'users', UserViewSet, basename='users')\n\nurlpatterns = [\n path('v1/', include(router.urls)),\n path('v1/auth/signup/', SignUpView.as_view()),\n path('v1/auth/token/', GetTokenView.as_view()),\n]\n","repo_name":"DostovaK/api_yamdb","sub_path":"api_yamdb/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"26278464088","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 7 15:28:00 2020\r\n\r\n@author: Frank\r\n\"\"\"\r\n\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql import functions as func\r\nfrom pyspark.sql.types import StructType, StructField, IntegerType, LongType\r\nimport codecs\r\n\r\ndef loadMovieNames():\r\n movieNames = {}\r\n # CHANGE THIS TO THE PATH TO YOUR u.ITEM FILE:\r\n with codecs.open(\"E:/SparkCourse/ml-100k/u.ITEM\", \"r\", encoding='ISO-8859-1', errors='ignore') as f:\r\n for line in f:\r\n fields = line.split('|')\r\n movieNames[int(fields[0])] = fields[1]\r\n return movieNames\r\n\r\nspark = SparkSession.builder.appName(\"PopularMovies\").getOrCreate()\r\n\r\nnameDict = spark.sparkContext.broadcast(loadMovieNames())\r\n\r\n# Create schema when reading u.data\r\nschema = StructType([ \\\r\n StructField(\"userID\", IntegerType(), True), \\\r\n StructField(\"movieID\", IntegerType(), True), \\\r\n StructField(\"rating\", IntegerType(), True), \\\r\n StructField(\"timestamp\", LongType(), True)])\r\n\r\n# Load up movie data as dataframe\r\nmoviesDF = spark.read.option(\"sep\", \"\\t\").schema(schema).csv(\"file:///SparkCourse/ml-100k/u.data\")\r\n\r\nmovieCounts = moviesDF.groupBy(\"movieID\").count()\r\n\r\n# Create a user-defined function to look up movie names from our broadcasted dictionary\r\ndef lookupName(movieID):\r\n return nameDict.value[movieID]\r\n\r\nlookupNameUDF = func.udf(lookupName)\r\n\r\n# Add a movieTitle column using our new udf\r\nmoviesWithNames = movieCounts.withColumn(\"movieTitle\", lookupNameUDF(func.col(\"movieID\")))\r\n\r\n# Sort the results\r\nsortedMoviesWithNames = moviesWithNames.orderBy(func.desc(\"count\"))\r\n\r\n# Grab the top 10\r\nsortedMoviesWithNames.show(10, False)\r\n\r\n# Stop the session\r\nspark.stop()\r\n","repo_name":"ShubhamGupta505/Spark","sub_path":"Advance_Spark/popular-movies-nice-dataframe.py","file_name":"popular-movies-nice-dataframe.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"37665126425","text":"nota_1 = float(input(\"Digite a primeira nota: \"))\nnota_2 = float(input(\"Digite a segunda nota: \"))\nmedia = round((nota_1 + nota_2)/ 2, 1)\nprint (\"As notas foram = \", nota_1, \"e\", nota_2)\nprint (\"A média é = \", round(media, 1))\nif(media >= 
9.0):\n conceito = \"A\"\nelif ((media >= 7.5) and (media < 9)):\n conceito = \"B\"\nelif ((media >= 6) and (media < 7.5)):\n conceito = \"C\"\nelif ((media >= 4) and (media < 6)):\n conceito = \"D\"\nelse:\n conceito = \"E\"\n\nprint (\"O conceito = \"+ conceito)\n\nif (media >= 6):\n print (\"O aluno está Aprovado.\")\nelse:\n print (\"O aluno está Reprovado.\")\n","repo_name":"andreFatec4/ProgBD2","sub_path":"aula2ExProp2.py","file_name":"aula2ExProp2.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11933296578","text":"#João Vitor Dias Ximenez - 9351203\r\n#Tarefa 1 - NEU - Data Science\r\n\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\n\r\ndef main():\r\n train = pd.read_csv('train.csv')\r\n resposta = 10\r\n while resposta != 0:\r\n resposta = int(input(\"Boa tarde, digite o número do exercício que quer resolver\\n1-Exercicio 1\\n2-Exercicio 2\\n3-Exercicio 3\\n4-Exercicio 4\\nResposta: \"))\r\n if resposta == 1:\r\n print('Tarefa 1:')\r\n print(\"O gráfico de barras é o melhor, pois mostra a variação dos valores com o tempo. Como o resultado de vendas não esta necessariamente ligado ao resultado das vendas anteriores, um grafico de dispersão ou curva não se adequaria\")\r\n train['Media'] = train['Weekly_Sales']*(0.022) #divisão de valores para se obter a média\r\n train.groupby(by='Date').agg({'Media':'sum'} ).plot.bar(legend='', fontsize=3)\r\n plt.title('Vendas totais por semana')\r\n plt.xlabel('Data')\r\n plt.ylabel('Vendas por semana')\r\n plt.xticks(rotation=45)\r\n plt.show()\r\n \r\n\r\n \r\n \r\n if resposta == 2: \r\n print('Tarefa 2')\r\n train2 = train.groupby(by='Store').agg({'Weekly_Sales':'sum'})\r\n train2 = train2.sort_values(by='Weekly_Sales',ascending=False)\r\n\r\n train2.head(10).plot.bar(legend='')\r\n plt.title('Melhores lojas')\r\n plt.xlabel('Lojas')\r\n plt.ylabel('Vendas no periodo')\r\n plt.xticks(rotation=45)\r\n plt.show()\r\n\r\n train2 = train2.sort_values(by='Weekly_Sales',ascending=False).reset_index()\r\n\r\n\r\n \r\n array = []\r\n for i in range(10):\r\n array.append(train2['Store'][i])\r\n print(\"\\n Lojas que tiveram maior performance: \")\r\n print(array)\r\n print(\"\\n Para evitar a poluição dos dados, o grafico de cada uma das lojas pode ser visto na função abaixo, separadamente\")\r\n graf = int(input('de 1 a 10, qual grafico gostaria de ver? '))\r\n while graf > 0 and graf < 11:\r\n train.groupby(by='Date').agg({'Weekly_Sales':'sum'}).plot.bar(legend='')\r\n filtro = train['Store'] == train2['Store'][graf-1]\r\n train3 = train[filtro].groupby(by='Date').agg({'Weekly_Sales':'sum'}).plot.bar(legend='')\r\n plt.xlabel('Data')\r\n plt.ylabel('Vendas por semana')\r\n plt.xticks(rotation=45)\r\n plt.show()\r\n graf = int(input('de 1 a 10, qual grafico gostaria de ver? 
')) \r\n \r\n\r\n\r\n if resposta == 3:\r\n\r\n print('Tarefa 3')\r\n train2 = train.groupby(by='Store').agg({'Weekly_Sales':'sum'})\r\n train2 = train2.sort_values(by='Weekly_Sales',ascending=True)\r\n\r\n train2.head(10).plot.bar(legend='')\r\n plt.title('Melhores lojas')\r\n plt.xlabel('Lojas')\r\n plt.ylabel('Vendas no periodo')\r\n plt.xticks(rotation=45)\r\n plt.show()\r\n\r\n train2 = train2.sort_values(by='Weekly_Sales',ascending=False).reset_index()\r\n\r\n\r\n \r\n array = []\r\n for i in range(10):\r\n array.append(train2['Store'][i])\r\n print(\"\\n Lojas que tiveram pior performance: \")\r\n print(array)\r\n print(\"\\n Para evitar a poluição dos dados, o grafico de cada uma das lojas pode ser visto na função abaixo, separadamente\")\r\n graf = int(input('de 1 a 10, qual grafico gostaria de ver? '))\r\n while graf > 0 and graf < 11:\r\n train.groupby(by='Date').agg({'Weekly_Sales':'sum'})\r\n filtro = train['Store'] == train2['Store'][graf-1]\r\n train3 = train[filtro].groupby(by='Date').agg({'Weekly_Sales':'sum'}).plot.bar(legend='')\r\n plt.xlabel('Data')\r\n plt.ylabel('Vendas por semana')\r\n plt.xticks(rotation=45)\r\n plt.show()\r\n graf = int(input('de 1 a 10, qual grafico gostaria de ver? '))\r\n\r\n\r\n\r\n print('Tarefa 3:')\r\n df2 = train.groupby(by='Date').mean()\r\n plt.bar(df2.index,df2['Weekly_Sales'],label='Vendas Totais por data')\r\n plt.xticks(rotation=45)\r\n plt.title('Vendas totais por semana')\r\n plt.legend()\r\n plt.show()\r\n\r\n print(\"Comparação entre os graficos de maior e menos performance: \")\r\n train2 = train.groupby(by='Store').agg({'Weekly_Sales':'sum'})\r\n train2 = train2.sort_values(by='Weekly_Sales',ascending=False)\r\n train3 = train.groupby(by='Store').agg({'Weekly_Sales':'sum'})\r\n train3 = train3.sort_values(by='Weekly_Sales',ascending=True)\r\n df2 = train2.head(10)\r\n df3 = train3.head(10)\r\n plt.bar(df2.index,df2['Weekly_Sales'],label='Melhores')\r\n plt.bar(df3.index,df3['Weekly_Sales'],label='Piores')\r\n\r\n \r\n plt.title('Melhores e piores lojas')\r\n plt.xlabel('Lojas')\r\n plt.ylabel('Vendas no periodo')\r\n plt.xticks(rotation=0)\r\n plt.legend()\r\n plt.show()\r\n\r\n \r\n \r\n\r\n if resposta == 4:\r\n \r\n\r\n filtro = train['IsHoliday'] == False\r\n filtro2 = train['IsHoliday'] == True\r\n train3 = train[filtro].groupby(by='Date').agg({'Weekly_Sales':'sum'})\r\n train4 = train[filtro2].groupby(by='Date').agg({'Weekly_Sales':'sum'})\r\n train5 = train.groupby(by='IsHoliday').agg({'Weekly_Sales':'sum'}).reset_index()\r\n \r\n cferiado = train5['Weekly_Sales'][0]/train3.shape[0]\r\n sferiado = train5['Weekly_Sales'][1]/train4.shape[0]\r\n print('Media por semana sem feriado', sferiado)\r\n print('Media por semana com feriado', cferiado,train4.shape[0])\r\n print(cferiado/sferiado*100,'%')\r\n plt.bar(['Com Feriado','Sem Feriado'],[cferiado,sferiado])\r\n print('A princípio, os feriados afetam positivamente nas vendas')\r\n train3 = train[filtro].groupby(by='Date').agg({'Weekly_Sales':'median'})\r\n train4 = train[filtro2].groupby(by='Date').agg({'Weekly_Sales':'median'})\r\n print('Mediana por semana sem feriado', train5['Weekly_Sales'][0])\r\n print('Mediana por semana com feriado',train5['Weekly_Sales'][1] )\r\n\r\n \r\n plt.show()\r\n \r\n df2 = train3\r\n df3 = train4\r\n plt.bar(df3.index,df3['Weekly_Sales'],label='Sem Feriado')\r\n plt.bar(df2.index,df2['Weekly_Sales'],label='Feriado')\r\n\r\n \r\n plt.title('Melhores e piores lojas')\r\n plt.xlabel('Lojas')\r\n plt.ylabel('Vendas no periodo')\r\n 
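# The tarefa blocks above repeat one pipeline: groupby -> agg -> sort_values ->
# head(10) -> plot.bar. A compact standalone version on toy data; the column
# names mirror what the script expects from train.csv (an assumption about its
# schema), and the values are invented.
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({
    "Store": [1, 1, 2, 2, 3, 3],
    "Date": ["2010-02-05", "2010-02-12"] * 3,
    "Weekly_Sales": [100.0, 120.0, 300.0, 280.0, 50.0, 60.0],
})

top = (df.groupby("Store")
         .agg({"Weekly_Sales": "sum"})
         .sort_values(by="Weekly_Sales", ascending=False)
         .head(2))          # head(10) in the original; 2 stores suffice here
print(top)

top.plot.bar(legend=False)
plt.title("Best stores")
plt.ylabel("Sales over the period")
plt.tight_layout()
plt.show()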
plt.xticks(rotation=45)\r\n plt.legend()\r\n plt.show()\r\n \r\n \r\n \r\n \r\n \r\n\r\n\r\n \r\n #filtro = train.groupby(by=['IsHoliday'].agg({'Weekly_Sales':'sum'})\r\n #train2 = train2.sort_values(by='Weekly_Sales',ascending=True)\r\n\r\n print('Tarefa 4:')\r\n\r\n \r\n \r\n \r\n \r\nmain()\r\n\r\n","repo_name":"jvximenez/CursoNEU","sub_path":"JoaoXimenez- Tarefa 1 Data science.py","file_name":"JoaoXimenez- Tarefa 1 Data science.py","file_ext":"py","file_size_in_byte":7389,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25364297183","text":"from typing import List, cast\nfrom exponent_server_sdk import PushClient, PushMessage\nfrom i18n import t\n\nfrom backend.dal import user as user_dal\n\n\ndef notify_mobile(recipients: List[str], title: str, message: str):\n message += t('notifications.details')\n for user_email in recipients:\n user_devices = cast(List[str], user_dal.get_attributes(\n user_email, ['devices_to_notify']).get('devices_to_notify', []))\n for device_token in user_devices:\n PushClient().publish(\n PushMessage(\n body=message,\n sound='default',\n title=title,\n to=device_token,\n )\n )\n","repo_name":"tom-vanbraband-sonarsource/integrates","sub_path":"django-apps/integrates-back/backend/utils/notifications.py","file_name":"notifications.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25768483058","text":"from sys import argv, stderr\nfrom struct import unpack\nfrom os import path\nfrom collections import namedtuple\nfrom traceback import print_exc\n\nSIZEOFINT = 4\n\n\nclass IMX_IMG_Reader:\n imgfile = None\n ivt = None\n dcd_header = None\n Header = None\n dcd_cmdseq = []\n\n def __init__(self, imgpath):\n self.imgfile = open(imgpath, 'rb')\n self.Header = namedtuple('Header', 'tag length version')\n self.WriteCmd = namedtuple('WriteCmd', 'bytes mask set oplist')\n self.WriteOp = namedtuple('WriteOp', 'address value')\n\n\n def __header_get(self):\n data = self.imgfile.read(SIZEOFINT)\n return self.Header._make(unpack('>BHB', data))\n\n\n def ivt_check(self):\n if not self.ivt:\n self.ivt_read()\n\n if not getattr(self.ivt, 'tag') == 0xd1:\n print(\"IVT incorrect header tag (0x%x vs 0xd1)\" %\n getattr(self.ivt, 'tag'))\n return False\n\n if not getattr(self.ivt, 'length') == 32:\n print(\"IVT incorrect header length (0x%x vs 0x20)\" %\n getattr(self.ivt, 'length'))\n return False\n\n version = getattr(self.ivt, 'version')\n if version != 0x40 and version != 0x41:\n print(\"IVT incorrect version field (0x%x)\" % version)\n return False\n\n if getattr(self.ivt, 'reserved1') != 0:\n print(\"IVT incorrect reserved field 1 (0x%x)\" %\n getattr(self.ivt, 'reserved1'))\n return False\n if getattr(self.ivt, 'reserved2') != 0:\n print(\"IVT incorrect reserved field 2 (0x%x)\" %\n getattr(self.ivt, 'reserved2'))\n return False\n\n if version == 0x40:\n print(\"Image version 0x40\")\n elif version == 0x41:\n print(\"Image version 0x41\")\n\n return True\n\n\n def ivt_read(self):\n self.imgfile.seek(0, 0)\n Ivt = namedtuple('IVT', 'tag length version entry, reserved1, dcd, ' \\\n 'bootdata, selfaddr, csf, reserved2')\n header = self.__header_get()\n data = self.imgfile.read(SIZEOFINT * 7)\n self.ivt = Ivt._make(header + unpack(\"II', data[pos:pos + 8]))\n cmdlist.append(cmd)\n pos += 8\n\n param = getattr(header, 'version')\n nbbytes = param & 0x7\n maskbit = param & (1 << 3)\n setbit = param & (1 << 4)\n if 
nbbytes != 1 and nbbytes != 2 and nbbytes != 4:\n print(\"Write cmd: Invalid number of bytes\")\n return\n\n cmd = self.WriteCmd(nbbytes, maskbit, setbit, cmdlist)\n self.dcd_cmdseq.append(cmd)\n\n\n def __dcd_cmd_check(self, header):\n print(\"Command check unmanaged yet\")\n return None\n\n\n def __dcd_cmd_nop(self, header):\n print(\"Command nop unmanaged yet\")\n return None\n\n\n def __dcd_cmd_unlock(self, header):\n print(\"Command unlock unmanaged yet\")\n return None\n\n\n def dcd_read(self):\n if not self.ivt:\n self.ivt_read()\n self.dcd_cmdseq = []\n\n offset = getattr(self.ivt, 'dcd') - getattr(self.ivt, 'selfaddr')\n self.imgfile.seek(offset, 0)\n self.dcd_header = self.__header_get()\n\n length = getattr(self.dcd_header, 'length')\n # We already read 4 bytes, the header\n length -= SIZEOFINT\n pos = 0\n while length > 0:\n cmd_header = self.__header_get()\n tag = getattr(cmd_header, 'tag')\n # DCD command tags (HABv4): 0xCC write, 0xCF check, 0xC0 NOP, 0xB2 unlock\n if tag == 0xCC:\n self.__dcd_cmd_write(cmd_header)\n elif tag == 0xCF:\n self.__dcd_cmd_check(cmd_header)\n elif tag == 0xC0:\n self.__dcd_cmd_nop(cmd_header)\n elif tag == 0xB2:\n self.__dcd_cmd_unlock(cmd_header)\n else:\n print(\"Unknown command\", hex(tag))\n length -= getattr(cmd_header, 'length')\n return self.dcd_cmdseq\n\n\n def __dcd_cmd_write_dump(self, cmd):\n nbbytes = getattr(cmd, 'bytes')\n setbit = getattr(cmd, 'set')\n maskbit = getattr(cmd, 'mask')\n print(\"%d-bytes write sequence (%d %d)\" %\n (getattr(cmd, 'bytes'), setbit, maskbit))\n if not maskbit:\n op = '='\n elif setbit:\n op = '|='\n else:\n op = '~='\n for cmdop in getattr(cmd, 'oplist'):\n address = getattr(cmdop, \"address\")\n address &= (1 << (nbbytes * 8)) - 1\n print(\"*0x%08x %s 0x%08x\" % (address, op,\n getattr(cmdop, \"value\")))\n\n\n def dcd_dump(self):\n for cmd in self.dcd_cmdseq:\n if isinstance(cmd, self.WriteCmd):\n try:\n self.__dcd_cmd_write_dump(cmd)\n except:\n print(\"Error in commands:\")\n print_exc(2)\n\n\n def __dcd_cmd_write_dump2tcl(self, cmd):\n nbbytes = getattr(cmd, 'bytes')\n setbit = getattr(cmd, 'set')\n maskbit = getattr(cmd, 'mask')\n\n if maskbit:\n print(\"Operation not managed yet\", file=stderr)\n return\n\n for cmdop in getattr(cmd, 'oplist'):\n address = getattr(cmdop, \"address\")\n address &= (1 << (nbbytes * 8)) - 1\n value = getattr(cmdop, \"value\")\n print(\"mww phys 0x%08x 0x%08x\" % (address, value))\n\n\n def dcd_dump2tcl(self):\n for cmd in self.dcd_cmdseq:\n if isinstance(cmd, self.WriteCmd):\n try:\n self.__dcd_cmd_write_dump2tcl(cmd)\n except:\n print(\"Error in commands:\")\n print_exc(2)\n\n\ndef main(argv):\n if len(argv) < 2:\n print(\"Missing file argument\")\n return -1\n\n try:\n reader = IMX_IMG_Reader(argv[1])\n reader.ivt_read()\n reader.dcd_read()\n reader.dcd_dump2tcl()\n except IOError as e:\n print(\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n\nmain(argv)\n","repo_name":"JimmyDurandWesolowski/env-xvisor","sub_path":"scripts/imxheader2tcl.py","file_name":"imxheader2tcl.py","file_ext":"py","file_size_in_byte":7042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"44671936205","text":"# --- Day 11: Monkey in the Middle ---\nfrom pprint import pprint\nfrom numpy import prod\nfrom tqdm import trange\nfrom copy import deepcopy\n\nmonkeys = {}\nwith open(\"example.txt\") as fp:\n for monkey in fp.read().split(\"\\n\\n\"):\n a, b, c, d, e, f = monkey.splitlines()\n i = 
int(e.split()[-1])\n false = int(f.split()[-1])\n monkeys[i] = {\n \"items\": items,\n \"op\": op,\n \"divisor\": divisor,\n True: true,\n False: false,\n \"inspections\": 0,\n }\n\n\nmonkeys_one = deepcopy(monkeys)\nfor _ in trange(20):\n for monkey in monkeys_one.values():\n monkey[\"inspections\"] += len(monkey[\"items\"])\n for old in monkey[\"items\"].copy():\n exec(monkey[\"op\"])\n new //= 3\n monkeys_one[monkey[new % monkey[\"divisor\"] == 0]][\"items\"].append(new)\n monkey[\"items\"].remove(old)\n\nmonkeys_two = deepcopy(monkeys)\nfor _ in trange(20):\n for monkey in monkeys_two.values():\n monkey[\"inspections\"] += len(monkey[\"items\"])\n for old in monkey[\"items\"].copy():\n exec(monkey[\"op\"])\n monkeys_two[monkey[new % monkey[\"divisor\"] == 0]][\"items\"].append(new)\n monkey[\"items\"].remove(old)\n\n\nfor k, v in monkeys_one.items():\n print(\"Monkey\", k, \":\", v[\"inspections\"])\n\npart_one = prod(sorted([val[\"inspections\"] for val in monkeys_one.values()])[-2:])\nprint(part_one)\n","repo_name":"lbreede/advent-of-code","sub_path":"python/2022/day/11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"29323323438","text":"#!/usr/bin/env python3\n\nimport argparse, codecs, hashlib, os, sys # do not use any other imports/libraries\nfrom pyasn1.codec.der import decoder, encoder\n\n# took 6 hours (please specify here how much time your solution required)\n\n# parse arguments\nparser = argparse.ArgumentParser(description='issue TLS server certificate based on CSR', add_help=False)\nparser.add_argument(\"CA_cert_file\", help=\"CA certificate (in PEM or DER form)\")\nparser.add_argument(\"CA_private_key_file\", help=\"CA private key (in PEM or DER form)\")\nparser.add_argument(\"csr_file\", help=\"CSR file (in PEM or DER form)\")\nparser.add_argument(\"output_cert_file\", help=\"File to store certificate (in PEM form)\")\nargs = parser.parse_args()\n\ndef nb(i, length=False):\n # converts integer to bytes\n b = b''\n if length==False:\n length = (i.bit_length()+7)//8\n for _ in range(length):\n b = bytes([i & 0xff]) + b\n i >>= 8\n return b\n\ndef bn(b):\n # converts bytes to integer\n i = 0\n for byte in b:\n i <<= 8\n i |= byte\n return i\n\n#==== ASN1 encoder start ====\ndef string_to_bytes(string):\n corresponding_integers = map(lambda x: ord(x), string)\n return bytes(corresponding_integers)\n\ndef asn1_len(bs):\n number_of_bytes = len(bs)\n if number_of_bytes == 0:\n return bytes([0])\n length_bytes = nb(number_of_bytes) #bytes encoding the number of value bytes\n if number_of_bytes > 127:\n #we want 1 as the most significant bit, and the rest of the bits as is\n first_byte = len(length_bytes) | 0b10000000 \n result = [first_byte]\n for b in length_bytes:\n result.append(b)\n return bytes(result)\n else:\n return bytes(length_bytes)\n\ndef asn1_boolean(bool):\n if bool:\n bool = bytes([0xff])\n else:\n bool = bytes([0x00])\n return bytes([0x01]) + asn1_len(bool) + bool\n\ndef asn1_null():\n return bytes([5,0]) \n\ndef asn1_integer(i):\n result = bytes([2]) #universal, primitive, tag 2 is 0b00000010 which is 2 in base 10\n value_bytes = bytes([0]) if i == 0 else nb(i)\n if (value_bytes[0] >> 7) == 1:\n value_bytes = bytes([0]) + value_bytes\n result += asn1_len(value_bytes)\n result += value_bytes\n return result\n \n\ndef asn1_bitstring(octets):\n result = bytes([3]) + asn1_len(octets + b'\\x00') + b'\\x00' + octets\n return result\n 
\ndef asn1_octetstring(octets):\n return bytes([4]) + asn1_len(octets) + octets\n\ndef get_7bit_integers_from_int(int_value):\n int_array = []\n while int_value > 0:\n int_array.insert(0,int_value & 0b1111111)\n int_value = int_value >> 7\n return int_array\n\ndef asn1_objectidentifier(oid):\n if oid == []:\n return bytes([6, 0])\n id_byte = bytes([6]) #universal, primitive, tag 6 is 0b000000110 which is 6 base 10\n first_element = oid[0] if len(oid) > 0 else 0\n second_element = oid[1] if len(oid) > 1 else 0\n first_value_byte = bytes([40*first_element + second_element])\n other_bytes = b''\n\n if len(oid) > 2:\n for i in range(2, len(oid)):\n int_array = get_7bit_integers_from_int(oid[i])\n for i in range(0,len(int_array)-1):\n int_array[i] = int_array[i] | 0b10000000 #each element except the last should have leftmost bit at 1 \n other_bytes = other_bytes + bytes(int_array)\n length_byte = asn1_len(first_value_byte + other_bytes)\n return id_byte + length_byte + first_value_byte + other_bytes\n \n\ndef asn1_sequence(der):\n return bytes([0b00110000]) + asn1_len(der) + der\n\ndef asn1_set(der):\n return bytes([0b00110001]) + asn1_len(der) + der\n\ndef asn1_printablestring(string):\n value_bytes = string_to_bytes(string)\n return bytes([0b00010011]) + asn1_len(value_bytes) + value_bytes\n\ndef asn1_utctime(time):\n value_bytes = string_to_bytes(time)\n return bytes([23]) + asn1_len(value_bytes) + value_bytes\n\ndef asn1_tag_explicit(der, tag):\n first_byte = bytes([0b10100000 | tag])\n length_bytes = asn1_len(der)\n return first_byte + length_bytes + der\n\ndef encode_digest_info(obj_id, digest):\n return asn1_sequence(asn1_sequence(asn1_objectidentifier(obj_id) + asn1_null()) + asn1_octetstring(digest))\n#==== ASN1 encoder end ====\n\ndef _encode_subject_public_key_info(obj_id, n, e):\n return asn1_sequence(\n asn1_sequence(asn1_objectidentifier(obj_id) + asn1_null()) +\n asn1_bitstring(\n asn1_sequence(\n asn1_integer(n) + asn1_integer(e)\n )\n )\n )\n\ndef _encode_algorithm_identifier():\n return asn1_sequence(\n asn1_objectidentifier([1,2,840,113549,1,1,11]) + asn1_null()\n )\n\ndef _encode_key_usage():\n return asn1_sequence(\n asn1_objectidentifier([2,5,29,15]) +\n asn1_boolean(True) +\n asn1_octetstring(\n asn1_bitstring(bytes([1 << 7]))\n )\n )\n\ndef _encode_extended_key_usage():\n return asn1_sequence(\n asn1_objectidentifier([2,5,29,37]) +\n asn1_boolean(True) +\n asn1_octetstring(\n asn1_sequence(\n asn1_objectidentifier([1,3,6,1,5,5,7,3,1])\n )\n )\n )\n\ndef _encode_basic_constraints():\n return asn1_sequence(\n asn1_objectidentifier([2,5,29,19]) +\n asn1_boolean(True) +\n asn1_octetstring(\n asn1_sequence(\n asn1_boolean(False)\n )\n )\n )\n\ndef _encode_validity():\n start = \"210324000000Z\"\n end = \"220324000000Z\"\n return asn1_sequence(\n asn1_utctime(start) + asn1_utctime(end)\n )\n\ndef _bitstring_to_int(bitstring):\n int_value = 0\n len_bitstring = len(bitstring)\n for bit_index in range(0,len(bitstring)):\n bit_value = 1 if bitstring[bit_index] == \"1\" else 0\n int_value += (bit_value << (len_bitstring-1-bit_index))\n return int_value\n\ndef _bitstring_to_bytes(bitstring):\n index = 0\n len_bitstring = len(bitstring)\n result = b''\n while index < len_bitstring:\n current_byte = bitstring[index:index+8]\n byte_int = _bitstring_to_int(current_byte)\n result = result + bytes([byte_int])\n index += 8\n return result\n\ndef pem_to_der(content):\n # converts PEM content (if it is PEM) to DER\n if content[:2] == b'--':\n content = content.replace(b\"-----BEGIN 
CERTIFICATE REQUEST-----\", b\"\")\n content = content.replace(b\"-----END CERTIFICATE REQUEST-----\", b\"\")\n content = content.replace(b\"-----BEGIN CERTIFICATE-----\", b\"\")\n content = content.replace(b\"-----END CERTIFICATE-----\", b\"\")\n content = content.replace(b\"-----BEGIN PUBLIC KEY-----\", b\"\")\n content = content.replace(b\"-----END PUBLIC KEY-----\", b\"\")\n content = content.replace(b\"-----BEGIN RSA PRIVATE KEY-----\", b\"\")\n content = content.replace(b\"-----END RSA PRIVATE KEY-----\", b\"\")\n content = codecs.decode(content, 'base64')\n return content\n\ndef get_privkey(filename):\n file_content = open(filename, 'rb').read()\n decoded_der = decoder.decode(pem_to_der(file_content))\n n = int(decoded_der[0][1])\n e = int(decoded_der[0][2])\n d = int(decoded_der[0][3])\n return n,e,d\n\ndef pkcsv15pad_sign(plaintext, n):\n padded_plaintext = b'\\x00\\x01'\n n_bytes = nb(n)\n padding_length = len(n_bytes) - len(plaintext) - 3 #3 is for the default padding bytes 0x0001 and 0x00\n if len(n_bytes) - len(plaintext) < 3:\n print('[+] Halt: plaintext must be at least 3 bytes smaller than modulus')\n exit(1)\n padding = b'\\xff' * padding_length\n return padded_plaintext + padding + b'\\x00' + plaintext\n\ndef digestinfo_der(m):\n sha256 = hashlib.sha256()\n index = 0\n bytes = m[index:index+512]\n while bytes:\n sha256.update(bytes)\n index+=512\n bytes = m[index:index+512]\n digest = sha256.digest()\n der = encode_digest_info([2,16,840,1,101,3,4,2,1], digest)\n return der\n\n\ndef sign(m, keyfile):\n digest_info = digestinfo_der(m)\n n, e, d = get_privkey(keyfile)\n padded = pkcsv15pad_sign(digest_info, n)\n padded_int = bn(padded)\n signature = pow(padded_int, d, n)\n modulus_byte_length = len(nb(n))\n signature_bytes = nb(signature, modulus_byte_length)\n return signature_bytes\n\n\ndef get_subject_cn(csr_der):\n entries = csr_der[0][0][1]\n for e in entries:\n if str(e[0][0]) == \"2.5.4.3\":\n return e[0][1] \n\ndef get_subjectPublicKeyInfo(csr_der):\n bitstring = csr_der[0][0][2][1]\n bytes_representation = _bitstring_to_bytes(str(bitstring))\n decoded = decoder.decode(bytes_representation)\n return int(decoded[0][0]),int(decoded[0][1])\n\ndef get_subjectName(cert_der):\n return encoder.encode(decoder.decode(cert_der)[0][0][5])\n\ndef issue_certificate(private_key_file, issuer, subject, pubkey):\n CERTIFICATE_HEADER = \"-----BEGIN CERTIFICATE-----\\n\"\n CERTIFICATE_FOOTER = \"-----END CERTIFICATE-----\\n\"\n\n n, e, d = get_privkey(private_key_file)\n version = asn1_tag_explicit(asn1_integer(2), 0)\n serial_number = asn1_integer(666)\n signature = _encode_algorithm_identifier()\n subject_public_key_info_der = _encode_subject_public_key_info([1,2,840,113549,1,1,1],n,e)\n extensions = asn1_tag_explicit(asn1_sequence(\n _encode_key_usage() +\n _encode_extended_key_usage() +\n _encode_basic_constraints() \n ), 3)\n tbs_certificate = asn1_sequence(\n version + \n serial_number + \n signature + \n issuer + \n _encode_validity() +\n subject +\n subject_public_key_info_der\n + extensions\n )\n tbs_certificate_signature = sign(tbs_certificate,private_key_file) \n signature_der = asn1_bitstring(tbs_certificate_signature)\n der = asn1_sequence(tbs_certificate + _encode_algorithm_identifier() + signature_der)\n open('test.cert.der', 'wb').write(der)\n base64_bytes = codecs.encode(der, 'base64')\n base64_message = base64_bytes.decode('ascii')\n pem = CERTIFICATE_HEADER + base64_message + CERTIFICATE_FOOTER\n return pem\n\n# obtain subject's CN from CSR\ncsr_der = 
\n# obtain subject's CN from CSR\ncsr_der = decoder.decode(pem_to_der(open(args.csr_file, 'rb').read()))\nsubject_cn_text = get_subject_cn(csr_der)\n\nprint(\"[+] Issuing certificate for \\\"%s\\\"\" % (subject_cn_text))\n\n# obtain subjectPublicKeyInfo from CSR\npubkey = get_subjectPublicKeyInfo(csr_der)\n\n# construct subject name DN for end-entity's certificate\nsubject = asn1_sequence(asn1_set(asn1_sequence(asn1_objectidentifier([2,5,4,3]) + asn1_printablestring(subject_cn_text))))\n\n# get subject name DN from CA certificate\nCAcert = pem_to_der(open(args.CA_cert_file, 'rb').read())\nCAsubject = get_subjectName(CAcert)\n\n# issue certificate\ncert_pem = issue_certificate(args.CA_private_key_file, CAsubject, subject, pubkey)\nopen(args.output_cert_file, 'w').write(cert_pem)","repo_name":"La-Buse/applied_cryptography","sub_path":"07/issue_cert.py","file_name":"issue_cert.py","file_ext":"py","file_size_in_byte":10773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10063918855","text":"import wikipedia\nfrom flask import Flask, session\nfrom flask import request\nimport mysql.connector\nimport json\nimport requests\nimport goslate\n\napp = Flask(__name__)\n\n#API to get the first sentence of wikipedia articles, their titles and their links\n@app.route(\"/titleandarticle/term=<term>/\")\ndef titleandarticle (term):\n    results = []\n    try:\n        #search for term\n        results = wikipedia.search(term)\n        #check no disambiguation error will occur\n        wikipedia.summary(term)\n    except Exception as e:\n        #if disambiguation error occurs take the error message\n        results_disambiguation = str(e)\n        #extract the disambiguations given in message and add these to results\n        results_disambiguation = results_disambiguation.split(\"\\n\")\n        results_disambiguation.pop()\n        results_disambiguation.pop(0)\n        results = results_disambiguation + results\n    #remove unwanted disambiguation articles (lists of disambiguations of a word);\n    #filter into a new list instead of popping from the list while iterating over it\n    results = [r for r in results if \"(disambiguation)\" not in r]\n\n    dict_list = []\n    for result in results:\n        try:\n            #extract only one sentence\n            article = wikipedia.summary(result, sentences = 1)\n            link = \"https://en.wikipedia.org/wiki/\"+result.replace(\" \",\"_\")\n            #add to list\n            dict_list.append({\"title\" : result, \"text\" : article, \"link\" : link})\n        except:\n            #if error occurred, take the suggestion if available\n            suggestion = wikipedia.suggest(result)\n            if suggestion:\n                article = wikipedia.summary(suggestion, sentences = 1)\n                link = \"https://en.wikipedia.org/wiki/\"+suggestion.replace(\" \",\"_\")\n                #add to list\n                dict_list.append({\"title\" : result, \"text\" : article, \"link\" : link})\n\n    return json.dumps(dict_list)\n\n#get definitions of term\n@app.route(\"/definitions/term=<term>/\")\ndef definitions(term):\n    #URL for wordnik API\n    API_URL = \"http://api.wordnik.com:80/v4/word.json/\"+term.lower()+\"/definitions?limit=20&includeRelated=true&useCanonical=false&includeTags=false&api_key=a2a73e7b926c924fad7001ca3111acd55af2ffabf50eb4ae5\"\n\n    data = requests.get(API_URL)\n\n    #convert to python list\n    data_list = data.json()\n    definitions_list = []\n    #extract text from response\n    for definition in data_list:\n        definitions_list.append(definition[\"text\"])\n    return json.dumps(definitions_list)\n\n#get translation of term, from one language to another\n@app.route(\"/translate/term=<term>/langfrom=<langfrom>/langto=<langto>/\")\ndef translate(term, langfrom, langto):\n    #use goslate API to get translation\n    gs = goslate.Goslate()\n    translation =(gs.translate(term, source_language 
= langfrom, target_language = langto))\n return translation\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=5001)\n","repo_name":"ben-graves/F454-Web-App","sub_path":"apis/wikitest.py","file_name":"wikitest.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3017149011","text":"from datetime import date\n\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.orm import Session\n\nfrom app.api.dependencies import get_db\nfrom app.crud import task, week, day, relationship_collectors\nfrom app.schemas import TaskSchema, TaskUpdate, TaskCreate, DaySchema, WeekSchema\nfrom app.services.dependencies_annotations import CurrentVerifiedUser\n\nrouter = APIRouter()\n\n\n@router.put(\"/{pk}\", response_model=TaskSchema)\nasync def update_task(\n pk: int,\n task_obj: TaskUpdate,\n owner: CurrentVerifiedUser,\n db: Session = Depends(get_db),\n):\n task_to_update = await task.get(db, pk=pk)\n if not task_to_update:\n raise HTTPException(status_code=404, detail=\"Task not found\")\n return await task.update(db=db, obj_in=task_obj, obj_db=task_to_update)\n\n\n@router.delete(\"/{pk}\", response_model=TaskSchema)\nasync def delete_task(\n pk: int,\n owner: CurrentVerifiedUser,\n db: Session = Depends(get_db),\n):\n deleted_task = await task.delete(db, pk=pk)\n if not deleted_task:\n raise HTTPException(status_code=404, detail=\"Instance not found\")\n return deleted_task\n\n\n@router.get(\"/{pk}\", response_model=TaskSchema)\nasync def get_task(\n pk: int,\n owner: CurrentVerifiedUser,\n db: Session = Depends(get_db),\n):\n task_obj = await task.get(db, pk=pk)\n if not task_obj:\n raise HTTPException(status_code=404, detail=\"Task not found\")\n return task_obj\n\n\n@router.post(\"/{day_pk}\", response_model=TaskSchema)\nasync def add_task(\n task_in: TaskCreate,\n day_pk: int,\n owner: CurrentVerifiedUser,\n db: Session = Depends(get_db),\n):\n day_obj = await day.get_day_with_owner(db, pk=day_pk, owner_id=owner.pk)\n if not day_obj:\n raise HTTPException(status_code=404, detail=\"Day not found\")\n return await task.create_task_with_day(db=db, day_id=day_pk, task_obj=task_in)\n\n\n@router.get(\"/day/{pk}\", response_model=DaySchema)\nasync def get_tasks_for_day(\n pk: int,\n owner: CurrentVerifiedUser,\n db: AsyncSession = Depends(get_db),\n):\n day_obj = await day.get_day_with_owner(db, pk=pk, owner_id=owner.pk)\n\n if not day_obj:\n raise HTTPException(status_code=404, detail=\"Day not found\")\n\n return day_obj\n\n\n@router.get(\"/week/{week_start}\", response_model=WeekSchema)\nasync def get_tasks_for_week(\n week_start: date,\n owner: CurrentVerifiedUser,\n db: AsyncSession = Depends(get_db),\n):\n current_week = await week.get_week_with_owner(db=db, start_day=week_start, owner_id=owner.pk)\n tasks = []\n if current_week.week_days:\n weekdays = current_week.week_days\n tasks = relationship_collectors.collect_tasks(weekdays)\n else:\n weekdays = await day.create_days_for_week(db, current_week.pk, week_start)\n return WeekSchema(start_day=current_week.start_day, week_days=weekdays, tasks=tasks)\n","repo_name":"Axeratos/TODOListApp","sub_path":"backend/app/api/api_routes/endpoints/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10529089751","text":"import random\nfrom functools import 
partial\nimport numpy as np\nimport torch\nimport math\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import RandomSampler\nfrom torch.utils.data.dataloader import _InfiniteConstantSampler\n\nfrom data.build import build_dataset\nfrom data.collate import COLLATE_FN\nfrom utils import distributed as du\n\nimport webdataset as wds\n\n\ndef construct_loader(cfg, split):\n    \"\"\"\n    Constructs the data loader for the given dataset.\n    Args:\n        cfg (CfgNode): configs. Details can be found in\n            slowfast/config/defaults.py\n        split (str): the split of the data loader. Options include `train`,\n            `val`, and `test`.\n    \"\"\"\n    collate_fn = None\n    if split in [\"train\"]:\n        dataset_name = cfg.TRAIN.DATASET\n        batch_size = int(cfg.TRAIN.BATCH_SIZE / cfg.SOLVER.GRADIENT_ACCUMULATION_STEPS)\n        batch_size = int(batch_size / du.get_world_size())\n        drop_last = True\n        length = int(cfg.TRAIN.DATASET_SIZE / du.get_world_size())\n        nominal = int(length / batch_size)\n    elif split in [\"val\"]:\n        dataset_name = cfg.VAL.DATASET\n        batch_size = int(cfg.TRAIN.BATCH_SIZE / du.get_world_size())\n        drop_last = False\n        length = int(cfg.VAL.DATASET_SIZE / du.get_world_size())\n        nominal = int(length / batch_size)\n    elif split in [\"test\"]:\n        dataset_name = cfg.TEST.DATASET\n        batch_size = int(cfg.TEST.BATCH_SIZE / du.get_world_size())\n        drop_last = False\n        length = math.ceil(cfg.TEST.DATASET_SIZE / du.get_world_size())\n        nominal = math.ceil(length / batch_size)\n\n    # Construct the dataset\n    dataset = build_dataset(dataset_name, cfg, split)\n    if dataset_name == \"KineticsSounds\":\n        collate_fn = COLLATE_FN[\"kinetics\"]\n\n    # Create a loader\n    if cfg.DATA_LOADER.NUM_WORKERS > 0:\n        loader = wds.MultiDataset(\n            dataset,\n            workers=cfg.DATA_LOADER.NUM_WORKERS,\n            nominal=nominal,\n            pin_memory=cfg.DATA_LOADER.PIN_MEMORY,\n        )\n        if split in [\"train\"]:\n            loader = loader.shuffle(batch_size)\n        loader = loader.batched(batch_size)\n    else:\n        loader = torch.utils.data.DataLoader(\n            dataset,\n            batch_size=batch_size,\n            num_workers=cfg.DATA_LOADER.NUM_WORKERS,\n            pin_memory=cfg.DATA_LOADER.PIN_MEMORY,\n            drop_last=drop_last,\n            collate_fn=collate_fn,\n        )\n\n    return loader\n\n\ndef shuffle_dataset(loader, cur_epoch):\n    \"\"\"\n    Shuffles the data.\n    Args:\n        loader (loader): data loader to perform shuffle.\n        cur_epoch (int): number of the current epoch.\n    \"\"\"\n    if not isinstance(loader, (wds.MultiDataset, )):\n        assert isinstance(\n            loader.sampler, (RandomSampler, DistributedSampler, _InfiniteConstantSampler)\n        ), \"Sampler type '{}' not supported\".format(type(loader.sampler))\n        # RandomSampler handles shuffling automatically\n        if isinstance(loader.sampler, DistributedSampler):\n            # DistributedSampler shuffles data based on epoch\n            loader.sampler.set_epoch(cur_epoch)\n","repo_name":"sangho-vision/wds_example","sub_path":"data/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
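The loader record above sizes everything per process: the global batch is divided by the gradient-accumulation steps and the distributed world size, and the resulting `nominal` value tells `wds.MultiDataset` how many batches one epoch should yield. The same arithmetic in isolation, with made-up configuration values:

```python
# Made-up numbers illustrating construct_loader's sizing math for "train".
world_size = 4          # stand-in for du.get_world_size()
grad_accum = 2          # cfg.SOLVER.GRADIENT_ACCUMULATION_STEPS
global_batch = 256      # cfg.TRAIN.BATCH_SIZE
dataset_size = 240000   # cfg.TRAIN.DATASET_SIZE

batch_size = int(global_batch / grad_accum / world_size)  # 32 samples per process
length = int(dataset_size / world_size)                   # 60000 samples per process
nominal = int(length / batch_size)                        # 1875 steps per epoch
assert (batch_size, nominal) == (32, 1875)
```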
+{"seq_id":"41545263125","text":"import os\nimport requests\nimport pandas as pd\nfrom koketData import KoketData\nfrom bs4 import BeautifulSoup\nimport sys\nsys.setrecursionlimit(30000)\n\n\ndef parse_sitemap(sitemap_url):\n    headers = {\n        \"User-agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36\"}\n    url_list = []\n    soup = BeautifulSoup(requests.get(\n        sitemap_url, headers=headers).text, 'lxml')\n    for loc in soup.select('url > loc'):\n        url = loc.text\n        url_list.append(url)\n    return url_list\n\n\ndef collect_recipes_list(url_list):\n    recipe_dict_list = []\n    for url in url_list:\n        recipe_object = KoketData(url)\n        recipe_object.extract()\n        recipe_dict_list.append(recipe_object.recipe_dict)\n    return recipe_dict_list\n\n\ndef collect_recipes():\n    current_urls = parse_sitemap(\"https://www.koket.se/sitemap.xml\")\n    if(len(current_urls) == 0):\n        print(\"Sitemap could not be parsed\")\n        return\n\n    recipes = pd.read_csv(\"recipe_data_final.csv\")\n    print(len(recipes))\n    # Keep only recipes on the site currently\n    recipes = recipes[recipes['url'].isin(current_urls)]\n    print(len(recipes))\n    recipes.to_csv(\"recipe_data_final.csv\", index=False)\n    old_collected_urls = recipes['url'].tolist()\n    new_urls = list(set(current_urls).difference(set(old_collected_urls)))\n    print(f\"{len(new_urls)} new recipes\")\n    chunksize = 20\n    for ind in range(0, len(new_urls), chunksize):\n        recipes_list = collect_recipes_list(new_urls[ind:ind + chunksize])\n\n        pd.DataFrame(recipes_list, columns=recipes.columns).to_csv(\"recipe_data_final.csv\", index=False,\n                                                                   header=not os.path.exists(\"recipe_data_final.csv\"), mode='a')\n        print(f\"{ind+chunksize}\\\\{len(new_urls)}\")\n\n\nif __name__ == \"__main__\":\n    collect_recipes()\n","repo_name":"JoachimNilsson/Recipe-Recommendation-App","sub_path":"collectRecipes.py","file_name":"collectRecipes.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35561078474","text":"import time\nimport datetime\nimport paramiko as paramiko\nimport configparser\nimport keyring\nimport argparse\n\nCONFIGCOMMAND = \"conf t\"\nWRITECOMMAND = \"save\"\nTIMEOUTREGULARCOMMANDSEC = 1\nTIMEOUTLONGCOMMANDSEC = 3\n\ndef domassconfig(PGFG, logfile, WriteToLog,timeout_intercmd):\n    for confcmd in PGFG:\n        chan.send(confcmd + '\\n')\n        time.sleep(int(timeout_intercmd))\n        ret = chan.recv(99999)\n        rstr1 = ret.decode('utf-8')\n        if DEBUG:\n            print(rstr1)\n        if WriteToLog == 1:\n            logfile.write(rstr1)\n\n\nif __name__ == '__main__':\n    WriteToLog = 0\n    DEBUG = False\n    parcer = argparse.ArgumentParser(description=\"MOXA EDS-510/518 relay on/off\")\n    parcer.add_argument('-i', type=str, help=\"switch IP\", required=True)\n    parcer.add_argument('-c', type=str, help=\"file with commands\", required=True)\n    parcer.add_argument('-l', type=str, help=\"write log\")\n    parcer.add_argument('-u', type=str, help=\"Username - overrides the username from the config file\")\n    parcer.add_argument('-p', type=str, help=\"Password - overrides the password from the OS keyring\")\n    parcer.add_argument('-f', type=str, default=\"configmoxa.txt\", help=\"config - default 'configmoxa.txt'\")\n    args = parcer.parse_args()\n\n    now = datetime.datetime.now()\n\n    cpsw = configparser.ConfigParser()\n\n    cpcmd = configparser.ConfigParser()\n    cpcmd.read(args.c)\n\n    if args.u != None:\n        if args.p == None:\n            print(\"Please provide password\")\n            exit(1)\n        USER = args.u\n        PASSWORD = args.p\n    else:\n        configname = 'configmoxa.txt'\n        cp = configparser.ConfigParser()\n        cp.read(args.f)\n\n        USER = cp.get('access', 'username')\n        KEYCHAINNAME = cp.get('access', 'keychainname')\n\n        # Read this user's password from the OS keyring\n        PASSWORD = keyring.get_password(KEYCHAINNAME, USER)\n\n    HOST = args.i\n    logfilename = None\n    logtofile = None\n\n    if args.l != None:\n        WriteToLog = 1\n        logfilename = 'sessionlog.txt'\n        logtofile = open(logfilename, 'a')\n\n    cmdparam = cpcmd.get('configcmd', 'ccmd')\n    
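The relay script expects the switch credential to already live in the OS keyring under the config file's `keychainname`/`username` pair. A hypothetical one-time setup sketch; the service and user names are placeholders that must match the `[access]` section of the config:

```python
# Hypothetical one-time credential setup for relayonoff.py; 'moxa-switches'
# and 'admin' are made-up values that must match the config's [access] section.
import getpass
import keyring

service = "moxa-switches"   # [access] keychainname
user = "admin"              # [access] username
keyring.set_password(service, user, getpass.getpass("Switch password: "))
assert keyring.get_password(service, user) is not None
```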
cmdstr = cpcmd.get('cmd', cmdparam)\n    CONFCOMMAND = cmdstr.split('\\n')\n\n    timeout_intercmd = cpcmd.get('timesettings', 'timeout')\n\n    if DEBUG:\n        print(CONFCOMMAND)\n\n    client = paramiko.SSHClient()\n    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n    # apply command to each switch\n    try:\n        client.connect(HOST, username=USER, password=PASSWORD)\n        if DEBUG:\n            print(\"connected\")\n        if WriteToLog == 1:\n            logtofile.write((HOST + ' ' + 'connected' + '\\r\\n'))\n        chan = client.invoke_shell()\n        time.sleep(TIMEOUTREGULARCOMMANDSEC)\n        chan.send('term len 0\\n')\n        time.sleep(TIMEOUTREGULARCOMMANDSEC)\n\n        output = chan.recv(99999)\n        if DEBUG:\n            print(output.decode('utf-8'))\n        if WriteToLog == 1:\n            logtofile.write(output.decode('utf-8'))\n\n        chan.send(CONFIGCOMMAND + '\\n')\n        time.sleep(TIMEOUTREGULARCOMMANDSEC)\n        ret = chan.recv(99999)\n\n        if DEBUG:\n            print(ret.decode('utf-8'))\n\n        domassconfig(CONFCOMMAND, logtofile, WriteToLog, timeout_intercmd) #pass the open log file object, not the file name\n\n        chan.send('exit\\n')\n        time.sleep(TIMEOUTREGULARCOMMANDSEC)\n        chan.send(WRITECOMMAND + '\\n')\n        time.sleep(TIMEOUTLONGCOMMANDSEC)\n        ret = chan.recv(99999)\n\n        if DEBUG:\n            print(ret.decode('utf-8'))\n        if WriteToLog == 1:\n            logtofile.write(ret.decode('utf-8'))\n        client.close()\n        if WriteToLog == 1:\n            logtofile.write((HOST + ' ' + 'disconnected' + '\\r\\n'))\n    except Exception as e:\n        print(e)\n        if WriteToLog == 1:\n            logtofile.write((HOST + ' ' + str(e) + '\\r\\n'))\nif WriteToLog == 1:\n    logtofile.close()\n","repo_name":"OlegPowerC/moxarelaycontrol","sub_path":"relayonoff.py","file_name":"relayonoff.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5542357038","text":"# -*- coding:utf-8 -*-\nimport pymysql\nimport re\n\ndef connection():\n    conn = pymysql.connect(\n        host='localhost',\n        user='root',\n        password='admin',\n        db='circuit',\n        charset='utf8mb4',\n        cursorclass=pymysql.cursors.DictCursor\n    )\n    return conn\n\ndef get_job():\n    conn = connection()\n    cursor = conn.cursor()\n    sql_str = 'SELECT * FROM `job_shanghai_clean` '\n    cursor.execute(sql_str)\n    results = cursor.fetchall()\n    cursor.close()\n    conn.close()\n    return results\n\ndef update_data(job_id,main_mess,job_class):\n    conn = connection()\n    cursor = conn.cursor()\n    sql = \"update `job_shanghai_clean_tiqu` set job_class = '{}',job_zhuanye='{}' where job_id ='{}'\".format(job_class , main_mess,job_id)\n    print(sql)\n    cursor.execute(sql)\n    conn.commit()\n    cursor.close()\n    conn.close()\n\ndef find_chinese(file):\n    pattern = re.compile(r'[^\\u4e00-\\u9fa5]')\n    chinese = re.sub(pattern, '', file)\n    # print(chinese)\n    return chinese\n\ndef main():\n    results = get_job()\n    for rs in results:\n        job_detail = str(rs['job_detail']).strip()\n        job_id = str(rs['job_id'])\n        detail_list = job_detail.split('\\n')\n        l = len(detail_list)\n        # job_class = re.findall('职能类别:(.+?)关键字',str(job_detail ))\n        # print(detail_list)\n        main_mess = ''\n        job_class = ''\n        class_check = False\n        for i in range(l-1,0,-1):\n            d = detail_list[i]\n            if '职能类别' in d:\n                job_class = d[5:]\n                break\n        d_2_list = re.split('[;,。 ]', job_detail)\n        for d2 in d_2_list:\n            if '专业' in d2:\n                main_mess = main_mess + d2\n        main_mess_str = find_chinese(main_mess)\n\n        print(job_id,main_mess_str, job_class)\n        try:\n            update_data(job_id,main_mess_str,job_class)\n        except Exception as e:\n            continue\n        # break\n\nif __name__ == '__main__':\n    
main()","repo_name":"SherryLee725/circuit_talent_needs","sub_path":"get_job_class.py","file_name":"get_job_class.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"29005182586","text":"#!/usr/bin/python3\n\n\ndef list_division(my_list_1, my_list_2, list_length):\n result = []\n for i in range(list_length):\n try:\n if i >= len(my_list_1) or i >= len(my_list_2):\n raise IndexError(\"Out of range\")\n\n value_1 = my_list_1[i]\n value_2 = my_list_2[i]\n\n if not (isinstance(value_1, (int, float))\n and isinstance(value_2, (int, float))):\n raise ValueError(\"wrong type\")\n\n if value_2 == 0:\n raise ZeroDivisionError(\"division by 0\")\n\n result.append(value_1 / value_2)\n except ZeroDivisionError:\n print(\"division by 0\")\n result.append(0)\n except ValueError:\n print(\"wrong type\")\n result.append(0)\n except IndexError:\n print(\"out of range\")\n result.append(0)\n finally:\n pass\n\n return result\n","repo_name":"EljonesA/alx-higher_level_programming","sub_path":"0x05-python-exceptions/4-list_division.py","file_name":"4-list_division.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17607566556","text":"import unittest\n\nimport pymongo.errors\nimport pymongo.mongo_replica_set_client\nfrom nose.plugins.skip import SkipTest\nfrom tornado import iostream\nfrom tornado.testing import gen_test\n\nimport motor\nfrom test import host, port, MotorReplicaSetTestBase, assert_raises, MotorTest\n\n\nclass MotorReplicaSetTest(MotorReplicaSetTestBase):\n @gen_test\n def test_replica_set_client(self):\n cx = motor.MotorReplicaSetClient(\n '%s:%s' % (host, port), replicaSet=self.name, io_loop=self.io_loop)\n\n self.assertEqual(cx, (yield cx.open()))\n self.assertTrue(isinstance(\n cx.delegate._MongoReplicaSetClient__monitor,\n motor.MotorReplicaSetMonitor))\n\n self.assertEqual(\n self.io_loop,\n cx.delegate._MongoReplicaSetClient__monitor.io_loop)\n\n @gen_test\n def test_open_callback(self):\n cx = motor.MotorReplicaSetClient(\n '%s:%s' % (host, port), replicaSet=self.name, io_loop=self.io_loop)\n yield self.check_optional_callback(cx.open)\n cx.close()\n\n def test_io_loop(self):\n with assert_raises(TypeError):\n motor.MotorReplicaSetClient(\n '%s:%s' % (host, port), replicaSet=self.name, io_loop='foo')\n\n @gen_test\n def test_auto_reconnect_exception_when_read_preference_is_secondary(self):\n old_write = iostream.IOStream.write\n iostream.IOStream.write = lambda self, data: self.close()\n\n try:\n cursor = self.rsc.pymongo_test.test_collection.find(\n read_preference=pymongo.ReadPreference.SECONDARY)\n\n with assert_raises(pymongo.errors.AutoReconnect):\n yield cursor.fetch_next\n finally:\n iostream.IOStream.write = old_write\n\n\nclass TestReplicaSetClientAgainstStandalone(MotorTest):\n \"\"\"This is a funny beast -- we want to run tests for MotorReplicaSetClient\n but only if the database at DB_IP and DB_PORT is a standalone.\n \"\"\"\n def setUp(self):\n super(TestReplicaSetClientAgainstStandalone, self).setUp()\n response = self.sync_cx.admin.command('ismaster')\n if 'setName' in response:\n raise SkipTest(\n \"Connected to a replica set, not a standalone mongod\")\n\n @gen_test\n def test_connect(self):\n with self.assertRaises(pymongo.errors.ConnectionFailure):\n yield motor.MotorReplicaSetClient(\n '%s:%s' % (host, port), replicaSet='anything',\n 
connectTimeoutMS=600).test.test.find_one()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"chinyue/motor","sub_path":"test/test_motor_replica_set.py","file_name":"test_motor_replica_set.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"5003017306","text":"import os\nimport io\nimport sys\nimport json\nimport time\nimport telepot\nimport requests\nfrom telepot.loop import MessageLoop\nimport logging\nimport logging.handlers\n\n#####################################################################################################################################################\n# CONSTANTS #\n#####################################################################################################################################################\n\nCONFIGURATION_FILE_PATH = \"configuration.json\" # Configuration file name.\n\nLOG_LEVEL = logging.INFO # Logging level.\nLOG_DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\" # Date-time format used in all log files.\nLOG_FILE_NAME = \"ipbot.log\" # Bot log file name.\nLOG_FILE_ENCODING = \"utf-8\" # Encoding used for log files.\nLOG_FILE_MAX_BYTES = 32 * 1024 * 1024 # Maximum number of bytes a single log file can take up.\nLOG_FILE_BACKUP_COUNT = 5 # Number of log file backups to maintain.\n\n#####################################################################################################################################################\n# LOGGING #\n#####################################################################################################################################################\n\n# Create basic configuration for logging. This will make the root logger write to stdout:\nlogging.basicConfig()\n\n# Create the log formatter:\nLOG_FORMATTER = logging.Formatter(\"[{asctime}] [{levelname}] {name}: {message}\", LOG_DATE_FORMAT, style='{')\n\n# Create the log stream handler:\nLOG_STREAM_HANDLER = logging.StreamHandler()\nLOG_STREAM_HANDLER.setLevel(LOG_LEVEL)\nLOG_STREAM_HANDLER.setFormatter(LOG_FORMATTER)\n\n# Create the log file handler:\nLOG_FILE_HANDLER = logging.handlers.RotatingFileHandler(\n filename = LOG_FILE_NAME,\n encoding = LOG_FILE_ENCODING,\n maxBytes = LOG_FILE_MAX_BYTES,\n backupCount = LOG_FILE_BACKUP_COUNT\n)\nLOG_FILE_HANDLER.setFormatter(LOG_FORMATTER)\n\n# configure_logger\n# logger_name: Name of the logger.\n# This method creates and configures a logger, then finally returns it. \ndef configure_logger(logger_name):\n logger = logging.getLogger(logger_name)\n logger.setLevel(LOG_LEVEL)\n for handler in logger.handlers:\n logger.removeHandler(handler)\n logger.addHandler(LOG_STREAM_HANDLER)\n logger.addHandler(LOG_FILE_HANDLER)\n return logger\n\n# Configure the root logger:\nLOGGER = configure_logger(\"root\")\n\n#####################################################################################################################################################\n# FILESYSTEM LOGIC #\n#####################################################################################################################################################\n\n# file_exists\n# path: Path to the target file.\n# This method returns true if the file path specified is a readable file that exists.\ndef file_exists(path):\n return os.path.isfile(path) and os.access(path, os.R_OK)\n\n# read_json\n# path: Path to the JSON file.\n# This method returns the JSON object read from the target file path. 
None is returned if the read operation failed.\ndef read_json(path):\n    # Try to read the JSON data from the specified path:\n    try:\n        # Open the file in read mode:\n        with open(path, \"r\") as file:\n            # Try to load the JSON data from the target file:\n            data = json.load(file)\n            # Log the read operation to the root logger:\n            LOGGER.info(f\"Read JSON data at `{path}`.\")\n            # Return the read JSON object:\n            return data\n    # Catch any exception that occurs during the read operation:\n    except Exception as exception:\n        # Log the exception with an error message:\n        LOGGER.exception(exception)\n        LOGGER.error(f\"Failed to read JSON data from `{path}`.\")\n        # Return None because nothing was read:\n        return None\n\n# write_json\n# path: Path to write the JSON file to.\n# data: JSON file data to write to the file.\n# Writes JSON data to a JSON file and returns either true or false depending on if the operation was successful.\ndef write_json(path, data):\n    # Try to write the JSON data to the specified path:\n    try:\n        # Open the target file in write mode:\n        with open(path, \"w\") as file:\n            # Dump the JSON data to the target file (json.dump takes the object first, then the file):\n            json.dump(data, file, indent = 4)\n            # Log the write operation to the root logger:\n            LOGGER.info(f\"Wrote JSON data to `{path}`.\")\n            # Return True since the write operation was successful:\n            return True\n    # Catch any exception that occurs during the write operation:\n    except Exception as exception:\n        # Log the exception with an error message:\n        LOGGER.exception(exception)\n        LOGGER.error(f\"Failed to write JSON data to `{path}`\")\n        # Return False since the write operation was unsuccessful:\n        return False\n\n#####################################################################################################################################################\n#                                                              JSON CONFIGURATION                                                                   #\n#####################################################################################################################################################\n
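A quick round-trip of the two helpers above, assuming they are in scope; the scratch file name is made up for the test:

```python
# Smoke test for read_json/write_json; 'scratch_config.json' is a made-up path.
settings = {"bot_token": "token", "admin_username": "alice", "admin_chat_id": 42}
assert write_json("scratch_config.json", settings) is True
assert read_json("scratch_config.json") == settings   # data survives the round trip
assert read_json("no_such_file.json") is None         # failures map to None
```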
\n# read_field\n# key: Name of the key within the dictionary.\n# dictionary: Dictionary to read from.\n# Reads and returns the value of a field. If the value cannot be read, None is returned.\ndef read_field(key, dictionary):\n    try:\n        value = dictionary[key] # read from the dictionary argument, not the global configuration\n        if value != None:\n            LOGGER.info(f\"Read value of `{key}`.\")\n            return value\n    except Exception as exception:\n        LOGGER.exception(exception)\n        LOGGER.error(f\"Failed to read value of `{key}`.\")\n    return None\n\n# Try read configuration file:\nif file_exists(CONFIGURATION_FILE_PATH):\n    # Read JSON configuration:\n    configuration = read_json(CONFIGURATION_FILE_PATH)\n    # Check if configuration file was read successfully:\n    if configuration != None:\n        # Read configuration fields:\n        bot_token = read_field(\"bot_token\", configuration)\n        admin_username = read_field(\"admin_username\", configuration)\n        admin_chat_id = read_field(\"admin_chat_id\", configuration)\n        if bot_token == None or admin_username == None or admin_chat_id == None:\n            LOGGER.error(\"Failed to read JSON configuration.\")\n            print(\"Either repair the existing JSON configuration, or delete it and re-run this script.\")\n            sys.exit(1)\n        # Log read successful operation:\n        LOGGER.info(\"Successfully read JSON configuration.\")\n    # Read operation was not successful:\n    else:\n        LOGGER.error(\"Failed to read JSON configuration file.\")\n        sys.exit(1)\n\n# Configuration file does not exist:\nelse:\n    LOGGER.warning(\"No configuration file found, creating one...\")\n    write_json(\n        CONFIGURATION_FILE_PATH,\n        {\n            \"bot_token\": \"bot token here\",\n            \"admin_username\": \"Telegram username here\",\n            \"admin_chat_id\": 12345\n        }\n    )\n    print(\"Please edit the configuration file and re-run this script.\")\n    sys.exit(0)\n\n#####################################################################################################################################################\n#                                                               GLOBAL VARIABLES                                                                    #\n#####################################################################################################################################################\n\nlast_ip = None # Last public IP address recorded for the host machine.\n
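`check_ip` further below implements a poll-compare-notify loop around the global `last_ip`, only advancing the stored value once the notification actually went out. The same pattern in isolation, with hypothetical stand-ins for `get_ip`/`send_message`:

```python
# Minimal sketch of the change-detection step; fetch() and notify() are
# hypothetical stand-ins for get_ip() and send_message().
def check_once(fetch, notify, last):
    current = fetch()
    if current is None:
        return last                       # fetch failed: keep the old value
    if current != last and not notify(current):
        return last                       # notification failed: retry next poll
    return current

last = None
last = check_once(lambda: "1.2.3.4", lambda ip: True, last)
assert last == "1.2.3.4"
```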
\n#####################################################################################################################################################\n#                                                                  IP FEATURES                                                                      #\n#####################################################################################################################################################\n\n# get_ip\n# Gets and returns the public ip address of the host machine. If this method fails to get the host machine IP address, None is returned; otherwise,\n# the body of the response is returned as a UTF-8 string.\ndef get_ip():\n    try:\n        # Query the public IP address of the host machine:\n        response = requests.get(\"https://api.ipify.org\", verify=False, timeout=10.0)\n        # Validate the response status code is \"200 OK\":\n        if response.status_code == 200:\n            # Decode the response body:\n            response_body = response.content.decode(\"utf8\")\n            # Log response:\n            LOGGER.info(f\"Response from `https://api.ipify.org`: `{response_body}`.\")\n            # Return the response body:\n            return response_body\n        # The response code is not \"200 OK\":\n        else:\n            # Log that something has gone wrong:\n            LOGGER.error(f\"Failed to get IP address (response.status_code: `{response.status_code}`).\")\n            # Return None:\n            return None\n    except Exception as exception:\n        # Log that an unexpected exception occurred while trying to obtain the host machine public IP address:\n        LOGGER.exception(exception)\n        LOGGER.error(\"An unexpected exception occurred while trying to obtain the public IP address of the host machine.\")\n        # Return None:\n        return None\n\n# check_ip\n# Checks the public IP address of the host machine and attempts to send the new IP address into the admin_chat_id\ndef check_ip():\n    global last_ip\n    # Get the current public IP address of the host machine:\n    current_ip = get_ip()\n    # Check if IP address is None:\n    if current_ip == None:\n        LOGGER.error(\"Failed to update IP address.\")\n    # Check if the current IP of the host machine has changed:\n    elif current_ip != last_ip:\n        # Construct a message containing the new IP addresses:\n        message = f\"New public IP address detected: `{current_ip}`.\"\n        # Print the message to the console:\n        LOGGER.info(message)\n        # Send the new IP address to the target chat ID:\n        update_ip = True\n        if admin_chat_id != None:\n            update_ip = send_message(admin_chat_id, message)\n        # Update last IP address:\n        if update_ip:\n            last_ip = current_ip\n\n# Get the current IP address:\n#last_ip = get_ip()\n\n#####################################################################################################################################################\n#                                                                 TELEGRAM BOT                                                                      #\n#####################################################################################################################################################\n\n# send_message\n# Sends a message from the Telegram bot.\ndef send_message(chat_id, message):\n    global bot\n    try:\n        bot.sendMessage(chat_id, message)\n        LOGGER.info(f\"Sent message: `{message}` to chat ID: `{chat_id}`.\")\n        return True\n    except Exception as exception:\n        LOGGER.exception(exception)\n        LOGGER.error(\"An unexpected exception occurred while trying to send a message.\")\n        return False\n\n# telepot_handle\n# Telepot message loop handle.\ndef telepot_handle(msg):\n    try:\n        # Get basic information about the incoming message:\n        content_type, chat_type, chat_id = telepot.glance(msg)\n        chat_username = msg[\"from\"][\"username\"]\n        message = msg[\"text\"]\n        LOGGER.info(f\"Received `{chat_type} {content_type}` message from `{chat_username}` (chat_id: `{chat_id}`): `{message}`.\")\n        # must be text, private chat, and by specified user\n        if (content_type == 'text') and (chat_type == 'private') and (chat_username == admin_username) and (chat_id == admin_chat_id):\n            # Get public ip address:\n            ip = get_ip()\n            # Check if IP is none:\n            if ip == None:\n                send_message(chat_id, \"Failed to obtain IP address.\")\n            # Send IP address to user:\n            else:\n                send_message(chat_id, f\"IP: 
`{ip}`.\")\n #bot.sendMessage(chat_id, str(chat_id))\n else: # Assume anyone else who is communicating with the bot is not authorized\n # Log this:\n LOGGER.warning(f\"Received message from unauthorized user: `{chat_username}` (chat_id: `{chat_id}`).\")\n # Send message:\n send_message(chat_id, \"You are not authorized to interact with this bot.\")\n except Exception as exception:\n # Log exception:\n LOGGER.exception(exception)\n LOGGER.error(\"An unexpected exception occurred while responding to a chat callback.\")\n\n# Start Telegram bot:\nbot = telepot.Bot(bot_token)\nMessageLoop(bot, telepot_handle).run_as_thread()\nLOGGER.info(\"Telegram bot started and is listening for messages.\")\n\n#####################################################################################################################################################\n# MAIN LOOP #\n#####################################################################################################################################################\n\n# Check for change in IP address:\nwhile True:\n try:\n check_ip()\n except Exception as exception:\n LOGGER.exception(exception)\n LOGGER.error(\"An unexpected exception occurred while checking the public IP address of the host machine.\")\n time.sleep(60)\n","repo_name":"alexjthomson1882/Telegram-IP-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"22163719034","text":"# Name: Pranesh Shrestha\n# Course: ECE-4332\n# Assignment: 1\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\ndf = pd.read_excel('proj1Dataset.xlsx')\n#sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis')\nmean_value = df['Horsepower'].mean(skipna=True)\ndf.fillna(mean_value, inplace=True) # fills Nan with mean of the Horsepower\n\n# closed-form solution\ndf['just ones'] = 1\n# Normalizes the data ranging from 0 to 1\nweight_norm = df['Weight'].max()\nhorsepower_norm = df['Horsepower'].max()\ndf['Weight'] = df[['Weight']] / weight_norm\ndf['Horsepower'] = df[['Horsepower']] / horsepower_norm\n\nx = np.array(df['Weight'])\nX = np.array(df[['Weight', 'just ones']]) # Design Matrix\nY = np.array(df['Horsepower']) # Target Matrx\n\nweight_matrix = (np.linalg.inv(np.transpose(X)@X)\n )@np.transpose(X)@Y # Closed Form Equation\nprint(weight_matrix.shape)\npredict1 = X@weight_matrix # Prediction\nplt.figure(figsize=(10.5, 5.6))\nplt.subplot(1, 2, 1)\nplt.scatter(x=x*weight_norm, y=Y*horsepower_norm)\nplt.plot(x*weight_norm, predict1*horsepower_norm, color='purple')\nplt.xlabel('Weight')\nplt.ylabel('Horsepower')\nplt.title('Closed form')\n\n# Gradient Descent Method\n\n# assumed weight and counter is initialized within function\n\n\ndef gradient_(g_weight=np.array([0.1, 0.2]), counter=0):\n g_weight = g_weight - 0.001 * 2 * \\\n (np.transpose(g_weight)@np.transpose(X)@X - np.transpose(Y)@X)\n counter = counter + 1\n if counter == 300:\n return g_weight\n return gradient_(g_weight, counter)\n\n\n# for calculating weight from gradient descent method (Iterative method)\nfinal_gradient = gradient_()\npredict2 = X@final_gradient\nplt.subplot(1, 2, 2)\nplt.scatter(x=x*weight_norm, y=Y*horsepower_norm)\nplt.plot(x*weight_norm, predict2*horsepower_norm, color='purple')\nplt.xlabel('Weight')\nplt.ylabel('Horsepower')\nplt.title('Gradient Descent 
Method')\n\nplt.show()\n","repo_name":"shresthapranesh/Machine-Learning","sub_path":"Project1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"23909472545","text":"#!/usr/bin/env python3\n#script to raise the volume on a tiling wm\nfrom os import system\n\nVOL = \"amixer sget Master | grep 'Left:' | awk -F'[][]' '{ print $2 }' > vol\"\nsystem(VOL)\nVOL = open('vol','r').read().replace('\\n','')\n#print(\"vol=[\"+VOL+\"]\")\n\nif (VOL!=\"0%\"):\n\tsystem(\"amixer -q sset 'Master' 0%\")\nelse:\n\tsystem(\"amixer -q sset 'Master' 50%\")\nsystem(\"aplay ~/Musica/notifica-mini.wav -q\")\n\n","repo_name":"Sbatushe/void-setup","sub_path":"polybar/scripts/vol_mute.py","file_name":"vol_mute.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"73414539450","text":"# Huffman.py\n# Succinct Huffman encoder with canonical code output\n# based on https://github.com/adamldoyle/Huffman\n# Written by https://github.com/simondotm 2019\n# https://github.com/simondotm/lz4enc-python\n\nfrom heapq import *\nimport array\nimport argparse\nimport os\nimport sys\nfrom collections import defaultdict\n\n# Notes about this implementation:\n# 1) It does not support EOF huffman codes. This makes it simpler for use with 8-bit/byte based alphabets.\n#    Instead we transmit the unpacked size as an indicator for how many symbols exist in the file. We also transmit the number of padding bits.\n# 2) We only support huffman code sizes up to and including MAX_CODE_BIT_LENGTH (20) bits in length.\n# 3) Intended for use on small files (ie. < 10Mb), since much of the code uses in-memory manipulation. \n# 4) It is binary byte based rather than text based\n# 5) It generates a canonical code table, and emits a header as follows:\n#    [4 bytes][Uncompressed data size]\n#    [1 byte][Number of symbols Ns in symbol table, 0 means 256]\n#    [1 byte][Number of entries Nb in the bitlength table]\n#    [Nb bytes][bit length table]\n#    [Ns bytes][symbol table]\n#    [Data...]\n# 6) See decode() for example parsing\n#\n# TODO: add a peek table\n\nif sys.version_info[0] > 2:\n    print(\"Python 2 only\")\n    sys.exit()\n\n\nclass Huffman:\n\n    MAX_CODE_BIT_LENGTH = 20 # change this if you need to check the codes are within a specific bit length range\n    MAX_SYMBOLS = 256 # just for clarity of code. \n    VERBOSE = False\n\n    def __init__(self):\n        self.key = {}\n        self.rKey = {}\n        self.table_bitlengths = []\n        self.table_symbols = []\n\n    def build(self, phrase):\n        self.setFrequency(phrase)\n        self.buildTree()\n        self.buildKey()\n        self.buildCanonical() # convert tree to canonical codes. 
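`buildCanonical` (below) replaces the tree-derived codes with canonical ones: entries are sorted by (bitlength, symbol), and each code is the previous code plus one, left-shifted whenever the bit length grows. The assignment rule in isolation, on made-up (bitlength, symbol) pairs:

```python
# Canonical code assignment on made-up (bitlength, symbol) pairs, following
# the same rule buildCanonical applies.
pairs = [(2, 65), (2, 66), (3, 67), (3, 68)]   # already sorted
code, codes = 0, {}
for i, (bits, sym) in enumerate(pairs):
    codes[sym] = format(code, '0%db' % bits)   # zero-padded binary string
    code += 1
    if i + 1 < len(pairs):
        code <<= pairs[i + 1][0] - bits        # widen when the length increases
# codes == {65: '00', 66: '01', 67: '100', 68: '101'}
```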
\n\n    def setFrequency(self, phrase):\n        self.frequency = defaultdict(int)\n        for c in phrase:\n            self.frequency[c] += 1\n    \n\n    def buildTree(self):\n        self.heap = [[v, k] for k, v in self.frequency.iteritems()]\n        heapify(self.heap)\n        while len(self.heap) > 1:\n            left, right = heappop(self.heap), heappop(self.heap)\n            heappush(self.heap, [left[0] + right[0], left, right])\n\n    def buildKey(self, root=None, code=''):\n        if root is None:\n            self.buildKey(self.heap[0])\n            for k,v in self.key.iteritems():\n                self.rKey[v] = k\n        elif len(root) == 2:\n            self.key[root[1]] = code\n        else:\n            self.buildKey(root[1], code+'0')\n            self.buildKey(root[2], code+'1')\n\n    # replace the previously calculated huffman tree codes with canonical codes\n    def buildCanonical(self):\n\n        # convert the tree to an array of (bitlength, symbol) tuples\n        ktable = []\n        for n in range(self.MAX_SYMBOLS):\n            if n in self.key:\n                ktable.append( (len(self.key[n]), n ) )\n\n        # sort them into bitlength then symbol order\n        ktable.sort( key=lambda x: (x[0], x[1]) )\n\n        # get bit range\n        minbits = ktable[0][0]\n        maxbits = ktable[-1][0]\n        # make sure our codes comply with the length constraints\n        assert minbits > 0\n        assert maxbits <= self.MAX_CODE_BIT_LENGTH\n\n        # now we build the canonical codes, replacing the previously calculated codes as we go.\n        bitlength = ktable[0][0] # start with smallest code length, always the first entry since sort\n        code = 0\n        numsymbols = len(ktable)\n        for n in range(numsymbols):\n            k = ktable[n] # tuple (bitlength, symbol)\n            bitlength = k[0]\n            codestring = format(code, '0' + str(bitlength) + 'b') # convert the code to a binary format string, leading zeros set to bitlength \n            self.key[k[1]] = codestring\n            code = (code + 1) \n            if n < (numsymbols - 1):\n                code <<= ( ktable[n+1][0] - bitlength )\n            if self.VERBOSE:\n                print(\"code=\" + str(n) + \", bitlength=\" + str(k[0]) + \", symbol=\" + str(k[1]) + \", code=\" + codestring + \", check=\" + str(len(codestring)==bitlength))\n\n        # build the tables needed for decoding \n        # - a sorted array where array[n] is the number of symbols with bitlength n\n        # - an array of the symbols, in sorted ascending order \n        # create a local table for the sorted bitlengths and tables\n        self.table_bitlengths = [0] * (self.MAX_CODE_BIT_LENGTH+1)\n        self.table_symbols = []\n        for k in ktable:\n            self.table_bitlengths[k[0]] += 1\n            self.table_symbols.append(k[1])\n\n        if self.VERBOSE:\n            print(\"decoder tables (size=\" + str(len(self.table_bitlengths)+len(self.table_symbols)) + \")\")\n            print(self.table_bitlengths)\n            print(self.table_symbols)\n\n\n\n    def addHeader(self, src_data, cmp_data, wastedBits = 0):\n\n        block = bytearray()\n\n        # emit table header for the decoder\n        # 4 byte header, representing:\n        #   4 bytes unpacked size with top 3 bits being number of wasted bits in the stream. \n        #   this informs the decoder of the size of the uncompressed stream (ie. number of symbols to decode) and how many bits were wasted\n        data_size = len(src_data)\n        block.append( data_size & 255 )\n        block.append( (data_size >> 8) & 255 )\n        block.append( (data_size >> 16) & 255 )\n        block.append( ((data_size >> 24) & 31) | ((wastedBits & 7) << 5) ) # top 3 bits carry the wasted-bit count that decode() reads back\n\n        # 1 byte symbol count\n        # Note: this could be alternatively calculated as the sum of the non-zero bitlengths. \n        block.append( (len(self.table_symbols) & 255) ) # size of symbol table (0 means 256) \n        \n        # emit N bytes for the code bit lengths (ie. 
the number of symbols that have a code of the given bit length)\n assert len(self.table_bitlengths) == (self.MAX_CODE_BIT_LENGTH+1)\n\n mincodelen = 65536\n maxcodelen = 0\n for v in self.key:\n codelen = len(self.key[v])\n mincodelen = min(mincodelen, codelen)\n maxcodelen = max(maxcodelen, codelen)\n\n #print(\" codes from \" + str(mincodelen) + \" to \" + str(maxcodelen) + \" bits in length\")\n # make sure our codes comply with the length constraint\n #assert maxcodelen <= self.MAX_CODE_BIT_LENGTH\n\n # We exploit the fact that no codes have a bit length of zero, so we use that field to transmit how long the bit length table is (in bytes)\n # This way we have a variable length header, and transmit the minimum amount of header data.\n self.table_bitlengths[0] = maxcodelen #len(self.table_symbols)\n for n in range(maxcodelen+1):\n block.append(self.table_bitlengths[n])\n\n # emit N bytes for the symbols table\n for n in self.table_symbols:\n block.append(n & 255)\n\n block += cmp_data\n return block\n\n # Huffman compress the given bytearray 'phrase' using the tree calculated by build()\n # Returns a bytearray() of the encoded data, with optional header data\n def encode(self, phrase, header = True):\n\n output = bytearray()\n\n # huffman encode and transmit the data stream\n currentbyte = 0 # The accumulated bits for the current byte, always in the range [0x00, 0xFF]\n numbitsfilled = 0 # Number of accumulated bits in the current byte, always between 0 and 7 (inclusive)\n\n sz = 0\n # for each symbol in the input data, fetch the assigned code and emit it to the output bitstream\n fastcount = 0\n bitsize_to_count = 8\n for c in phrase:\n k = self.key[c]\n sz += len(k)\n if len(k) <= bitsize_to_count:\n fastcount += 1\n for b in k:\n bit = int(b)\n assert bit == 0 or bit == 1\n currentbyte = (currentbyte << 1) | bit\n numbitsfilled += 1\n if numbitsfilled == 8: # full byte, flush to output\n output.append(currentbyte)\n currentbyte = 0\n numbitsfilled = 0 \n\n if self.VERBOSE:\n print(\" \" + str(fastcount) + \" of \" + str(len(phrase)) + \" symbols were \" + str(bitsize_to_count) + \" bits or less in size (\" + str(fastcount*100/len(phrase)) + \"%)\")\n\n # align to byte. we could emit code >7 bits in length to prevent decoder finding a spurious code at the end, but its likely\n # some data sets may contain codes <7 bits. 
Easier to just pad wasted bytes.\n wastedbits = (8 - numbitsfilled) & 7\n while (numbitsfilled < 8) and wastedbits:\n currentbyte = (currentbyte << 1) | 1\n numbitsfilled += 1\n output.append(currentbyte)\n\n # add headers if required.\n if header:\n output = self.addHeader(phrase, output, wastedBits = wastedbits)\n\n if header:\n # test decode\n self.decode(output, phrase)\n\n return output\n\n # test decoder\n def decode(self, data, source):\n\n # read the header\n if self.VERBOSE:\n print(\"Checking data...\")\n\n # get the unpacked size - this tells us how many symbols to decode\n unpacked_size = data[0] + (data[1]<<8) + (data[2]<<16) + ((data[3] & 31)<<24) # uncompressed size\n wastedbits = data[3] >> 5\n \n symbol_table_size = data[4] # fetch the number of symbols in the symbol table\n length_table_size = data[5] + 1 # fetch the number of entries in the bit length table (+1 because we include zero)\n\n # interpret 0 as 256\n if symbol_table_size == 0:\n symbol_table_size = 256\n\n length_table = data[5:5+length_table_size]\n symbol_table = data[5+length_table_size:5+length_table_size+symbol_table_size]\n\n # decode the stream\n currentbyte = 5 + length_table_size + symbol_table_size\n\n output = bytearray()\n\n bitbuffer = 0\n numbitsbuffered = 0\n code = 0\n code_size = 0\n\n firstCodeWithNumBits = 0\n startIndexForCurrentNumBits = 0\n\n sourceindex = 0\n unpacked = 0\n while unpacked < unpacked_size:\n\n # keep the bitbuffer going\n if numbitsbuffered == 0:\n # we're out of data, so any wip codes are invalid due to byte padding.\n bitbuffer = data[currentbyte]\n currentbyte += 1\n numbitsbuffered += 8\n\n # get a bit\n bit = (bitbuffer & 128) >> 7\n bitbuffer <<= 1\n numbitsbuffered -= 1\n\n # build code\n code = (code << 1) | bit\n code_size += 1\n\n # how many canonical codes have this many bits\n assert code_size <= self.MAX_CODE_BIT_LENGTH\n numCodes = length_table[code_size]\n\n # if input code so far is within the range of the first code with the current number of bits, it's a match\n indexForCurrentNumBits = code - firstCodeWithNumBits\n if indexForCurrentNumBits < numCodes:\n code = startIndexForCurrentNumBits + indexForCurrentNumBits\n\n symbol = symbol_table[code]\n output.append(symbol)\n expected = source[sourceindex]\n assert symbol == expected\n sourceindex += 1\n\n code = 0\n code_size = 0\n\n firstCodeWithNumBits = 0\n startIndexForCurrentNumBits = 0 \n\n unpacked += 1 \n\n else:\n # otherwise, move to the next bit length\n firstCodeWithNumBits = (firstCodeWithNumBits + numCodes) << 1\n startIndexForCurrentNumBits += numCodes\n\n assert len(output) == len(source)\n assert output == source\n\n if self.VERBOSE:\n print(\" Test decode OK.\")\n\n\n\n# Determine if running as a script\nif __name__ == '__main__':\n\n print(\"Huffman.py : Canonical Huffman compressor\")\n print(\"Written in 2019 by Simon M, https://github.com/simondotm/\")\n print(\"\")\n\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)\n\n parser.add_argument(\"input\", help=\"read from file [input]\")\n parser.add_argument(\"output\", help=\"output to file [output]\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"Enable verbose mode\", action=\"store_true\")\n args = parser.parse_args()\n\n\n src = args.input\n dst = args.output\n if dst == None:\n dst = src + \".lz4\"\n\n # check for missing files\n if not os.path.isfile(src):\n print(\"ERROR: File '\" + src + \"' not found\")\n sys.exit()\n\n # load the file\n src_data = bytearray(open(src, 
\"rb\").read())\n\n huffman = Huffman()\n huffman.VERBOSE = args.verbose\n huffman.build(src_data)\n\n dst_data = huffman.encode( src_data, header = True ) \n\n open(dst, \"wb\").write(dst_data)\n\n src_size = len(src_data)\n dst_size = len(dst_data)\n if src_size == 0:\n ratio = 0\n else:\n ratio = 100 - (int)((dst_size*100 / src_size))\n\n print(\" Compressed '\" + src + \"', \" + str(src_size) + \" into \" + str(dst_size) + \" bytes => \" + str(ratio) + \"%\")\n","repo_name":"simondotm/vgm-packer","sub_path":"modules/huffman.py","file_name":"huffman.py","file_ext":"py","file_size_in_byte":13284,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"77"} +{"seq_id":"8151404177","text":"from collections import *\nfor _ in range(int(input())):\n n=int(input())\n a=[int(i) for i in input().split()]\n k=int(input())\n d=defaultdict(lambda:0)\n for e in a:\n d[e] += 1\n a=set(a)\n if k==0:\n c=0\n for ke,v in d.items():\n if v>1:\n c+=1\n print(c)\n else:\n for e in a:\n d[e]=1\n visited=defaultdict(lambda:1)\n c=0\n for e in a:\n if visited[e]:\n visited[e]=0\n if d[e+k]:\n c+=1\n print(c)\n","repo_name":"nikhil7737/python-programs","sub_path":"pairs_with_diff_k.py","file_name":"pairs_with_diff_k.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6669407856","text":"# 스택 수열\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\ncount = 1\narr = []\nstack = []\nresult = []\n\nfor i in range(n):\n num = int(input())\n if num >= count:\n for j in range(count, num+1):\n stack.append(j)\n result.append(\"+\")\n count = num + 1\n elif stack[-1] != num:\n print(\"NO\")\n break\n result.append(\"-\")\n stack.pop()\n\nelse:\n print(\"\\n\".join(result))","repo_name":"surpmh/algorithms","sub_path":"BaekJoon/스택/5_1874.py","file_name":"5_1874.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10919188212","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 6 08:28:02 2022\n\n@author: tedwards\n\"\"\"\n\n\"\"\"\nThe aim of this part of the project is to test if Hubie Brown's theory can be exteneded to looking at how teams defend. \nAll of the shots against a certain team will be recorded by noting their Hubie value and make/miss status. \nAll of the data for each team will be amalgamated into a single dataframe for an exploratory analysis in RStudio. 
\n\"\"\"\n\n\"\"\"\nIn order for this data to be useful for predictions, we need to record the wins and losses along with the Hubie values per game \n\"\"\"\n\n#from basketball_reference_scraper.teams import get_roster_stats\nfrom basketball_reference_scraper.shot_charts import get_shot_chart\nfrom basketball_reference_scraper.seasons import get_schedule, get_standings\n#import matplotlib.pyplot as plt\nfrom BasketballConstants import Constant\nimport re\nfrom helper_hubie import get_abbreviation, classify_hubie\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport time\n\nseason_endyear = 2023\n#Currently, an error is being thrown——Length mismatch:\nlatest_schedule = pd.DataFrame({\n 'DATE': [],\n 'VISITOR': [],\n 'VISITOR_PTS': [],\n 'HOME': [],\n 'HOME_PTS': []\n \n })\nhubie_stats = pd.DataFrame({\n 'Team_Name': [],\n 'Zone_1_Makes': [],\n 'Zone_1_Attempts': [],\n 'Zone_2_Makes': [],\n 'Zone_2_Attempts': [],\n 'Zone_3_Makes': [],\n 'Zone_3_Attempts': [],\n 'Zone_4_Makes': [],\n 'Zone_4_Attempts': [],\n 'Date':[],\n 'Win_or_Lose': []\n })\n\nlatest_schedule = get_schedule(season_endyear, playoffs=False)\nlatest_standings = get_standings(date=None)\neast_teams = latest_standings['EASTERN_CONF']['TEAM']\nwest_teams = latest_standings['WESTERN_CONF']['TEAM']\n\n\nfor z in range(0, len(east_teams)):\n curr_team_games = latest_schedule[((latest_schedule.VISITOR == east_teams[z]) | (latest_schedule.HOME == east_teams[z])) & (latest_schedule.DATE < (datetime.now()-timedelta(days=4)))]\n curr_team = get_abbreviation(east_teams[z].upper())\n print(curr_team)\n \n for index, row in curr_team_games.iterrows():\n zone_one_makes, zone_one_attempts, zone_two_makes, zone_two_attempts = (0,0,0,0)\n zone_three_makes, zone_three_attempts, zone_four_makes, zone_four_attempts = (0,0,0,0)\n win_lose = 0 #0 will be used to denote a loss\n date = curr_team_games.DATE[index].date()\n #date = date.date()\n print(date)\n \n home_team = get_abbreviation(curr_team_games.HOME[index].upper())\n away_team = get_abbreviation(curr_team_games.VISITOR[index].upper())\n home_team_pts = curr_team_games.HOME_PTS[index]\n away_team_pts = curr_team_games.VISITOR_PTS[index]\n \n curr_game_shot_chart = get_shot_chart(date, home_team, away_team)\n if(curr_game_shot_chart == None): print(\"Curr Game Shot Chart is Empty\")\n time.sleep(5)\n \n #Here we want to access the shot chart for the opposing team, not the shot chart for the team we currently are on in our loop.\n #This is accomplished by switching from == to != in our assignment of shot_chart\n if(home_team != curr_team):\n shot_chart = curr_game_shot_chart[home_team]\n #Here we need to check if the home_team which is not the team being examined won the game\n if(home_team_pts < away_team_pts): win_lose = 1\n \n else:\n shot_chart = curr_game_shot_chart[away_team]\n if(home_team_pts > away_team_pts): win_lose = 1\n \n for w in range(0, len(shot_chart)):\n x_loc = re.findall('[\\d]*[.][\\d]+', shot_chart.loc[w, 'x'])\n y_loc = re.findall('[\\d]*[.][\\d]+', shot_chart.loc[w, 'y'])\n \n hubie_value = classify_hubie(((Constant.Y_MAX) - float(x_loc[0]) -1), float(y_loc[0]) + 1)\n \n if(hubie_value == 1):\n zone_one_attempts+=1\n if(shot_chart.loc[w, 'MAKE_MISS'] == 'MAKE'): zone_one_makes+=1\n elif(hubie_value == 2):\n zone_two_attempts+=1\n if(shot_chart.loc[w, 'MAKE_MISS'] == 'MAKE'): zone_two_makes+=1\n elif(hubie_value == 3):\n zone_three_attempts+=1\n if(shot_chart.loc[w, 'MAKE_MISS'] == 'MAKE'): zone_three_makes+=1\n else:\n zone_four_attempts+=1\n 
if(shot_chart.loc[w, 'MAKE_MISS'] == 'MAKE'): zone_four_makes+=1\n        final_row = [curr_team, zone_one_makes, zone_one_attempts, zone_two_makes, zone_two_attempts, zone_three_makes, zone_three_attempts, zone_four_makes, zone_four_attempts, date, win_lose]\n        hubie_stats.loc[len(hubie_stats.index)] = final_row\n        \n","repo_name":"terrelledwards/Hubie_Brown_Theory_Test","sub_path":"HubieBrownTeam.py","file_name":"HubieBrownTeam.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42592711464","text":"import logging\nimport traceback\n\nfrom services.pdf_document_service import PDFDocumentService\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef run(event, context):\n    try:\n        PDFDocumentService.update_air_table()\n    except Exception:\n        logger.error(traceback.format_exc())\n","repo_name":"ACAPSproject/SOPHIA_v.0.1.1","sub_path":"routers/air_table_updater.py","file_name":"air_table_updater.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38084742274","text":"import sys\nsys.stdin = open(\"/Users/seokkyuhong/dev/python/algorithm/divide/input.txt\",\"r\")\n\nn, m = map(int, input().split()) \n\narr = []\ndef go():\n    if len(arr) == m: # 3. if arr has length m, print it and return\n        print(' '.join(map(str,arr)))\n        return\n    \n    for i in range(1, n+1): # 1. the numbers to print run from 1 to n\n        arr.append(i) # 2. e.g. 1 goes in first, then recurse with that 1 as the prefix\n        go()\n        arr.pop() # 4. once the recursion returns, pop the last value and let the for loop continue\n\ngo()","repo_name":"SeokKyuHong/algorithm_study","sub_path":"divide/15651.py","file_name":"15651.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1733421897","text":"file=open(\"rhyme.txt\",\"r+\")\ndict1={}\ni=0\nlines=file.readlines()\nfor line in lines:\n    dict1[i]=line.strip()\n    i=i+1\nprint(dict1)\nl=0\nstr1=str()\nfor k in dict1:\n    str1=str1+dict1[k]+\" \"\nstr1=str1.lower()\n#print(str1)\nfor k in dict1:\n    dict1[k]=dict1[k].split()\n    l=l+len(dict1[k])\nprint(\"Number of words in the file are:\",l)\ndef word_count(str):\n    counts=dict()\n    words=str.split()\n    for word in words:\n        if word in counts:\n            counts[word]+=1\n        else:\n            counts[word]=1\n    return counts\nprint(\"Unique occurrences are:\")\nprint(word_count(str1))\nfile=open(\"words.txt\",\"w\",1)\nfile.write(\"Number of words in the file are 29\")\nfile.write(\"\"\"Unique occurrences are:\n{'a': 1, 'fun': 1, 'horse': 1, 'open': 1, 'it': 1, 'in': 1, 'sleigh': 1, 'is': 1, 'all': 2, 'oh': 1, 'the': 2, 'one': 1, 'to': 1, 'ride': 1, 'jingle': 6, 'way': 2, 'what': 1, 'bells': 4}\"\"\")\n\n","repo_name":"NishkarshRaj/Programming-in-Python","sub_path":"Module 1 Assignments/assignment34.py","file_name":"assignment34.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"38245308424","text":"from django.test import TestCase\nfrom rest_framework.test import APITestCase\nfrom groupsessions.models import GroupSession\nfrom rest_framework.authtoken.models import Token\nimport os\n\nclass TestGroupSessions(APITestCase):\n\n\tfixtures = ['test_fixtures.json']\n\n\tdef setUp(self):\n\t\ttoken = Token.objects.get(user__username='DeerDoe')\n\t\tself.client.credentials(HTTP_AUTHORIZATION='Token ' + 
token.key)\n\t\tself.client.content_type = 'application/json'\n\n\tdef test_create_public_session_valid_info(self):\n\t\tdata = {\n\t\t\t'title': 'Rap Session Title',\n\t\t\t'is_private': False,\n\t\t\t'clip': ''\n\n\t\t}\n\t\tres = self.client.post(\n\t\t\t'/sessions/',\n\t\t\tdata=data\n\t\t)\n\t\tself.assertEqual(res.status_code, 201)\n\t\tself.assertIsNotNone(res.data['session'])\n\n\tdef test_create_session_create_crowd(self):\n\t\tdata = {\n\t\t\t'title': 'Group Session',\n\t\t\t'use_existing_crowd': False,\n\t\t\t'crowd_title': 'Crowd Title Wooooh',\n\t\t\t'crowd_members': ['WhoAmI', 'Superrhymes']\n\t\t}\n\t\tres = self.client.post(\n\t\t\t'/sessions/',\n\t\t\tdata = data\n\t\t)\n\t\tself.assertEqual(res.status_code, 201)\n\t\tself.assertIsNotNone(res.data['session'])\n\t\tself.assertEqual(len(res.data['session']['crowd']['members']), 3)\n\t\tself.assertEqual(res.data['session']['crowd']['title'], 'Crowd Title Wooooh')\n\n\n\tdef test_get_sessions(self):\n\t\tres = self.client.get('/sessions/')\n\t\tself.assertEqual(res.status_code, 200)\n\t\tself.assertEqual(res.data['sessions'][0]['title'], 'Rap Sesh')\n\t\tself.assertEqual(res.data['sessions'][0]['comments'][0]['text'], 'This is a comment')\n\n\t# def test_upload_file(self):\n\t# \tpath = os.path.dirname(__file__)\n\t# \tpath = os.path.join(path, 'test_upload.mp4')\n\t# \tf = open(path, 'rb')\n\t# \tdata = {\n\t# \t\t'clip': f,\n\t# \t\t'session': 1,\n\t# \t\t'duration': 7\n\t# \t}\n\t# \tres = self.client.post(\n\t# \t\t'/sessions/addclip/',\n\t# \t\tdata=data\n\t# \t)\n\t# \tf.close()\n\t# \tself.assertEqual(res.status_code, 200)\n\n\tdef test_get_comments(self):\n\t\tdata = {'session': 1}\n\t\tres = self.client.get('/sessions/comments/1/')\n\t\tself.assertEqual(res.status_code, 200)\n\t\tself.assertIsNotNone(res.data['comments'])\n\t\tself.assertEqual(len(res.data['comments']), 2)\n\n\tdef test_add_comment_to_session(self):\n\t\tdata = {\n\t\t\t'session': 1,\n\t\t\t'comment_text': 'This was pretty tight...'\n\t\t}\n\t\tres = self.client.post(\n\t\t\t'/sessions/comments/',\n\t\t\tdata = data\n\t\t)\n\t\tself.assertEqual(res.status_code, 200)\n\t\tself.assertIsNotNone(res.data['comment'])","repo_name":"mikeparisstuff/rapchat-django","sub_path":"groupsessions/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27448802321","text":"N = int(input())\nA = [int(_) for _ in input().split()]\n\ni, j, k = 1, 2, 3\nB, C, D, E = A[0], A[1], A[2], sum(A[3:])\n\nresult = 10 ** 20\n\nwhile 1:\n\n while 1: \n newB, newC = B + A[i], C - A[i]\n if abs(B - C) < abs(newB - newC):\n break\n i += 1\n B, C = newB, newC\n\n while 1: \n newD, newE = D + A[k], E - A[k]\n if abs(D - E) < abs(newD - newE):\n break\n k += 1\n D, E = newD, newE\n\n r = max(B, C, D, E) - min(B, C, D, E)\n result = min(result, r)\n\n C, D = C + A[j], D - A[j]\n j += 1\n if j == N - 1:\n break\n \nprint(result)\n","repo_name":"hirosuzuki/procon","sub_path":"atcoder/abc102/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70549983608","text":"import datetime as dt\nimport gzip\n\nfrom bs4 import BeautifulSoup\n\nimport numpy as np\nimport pandas as pd\n\nfrom watchcbb.scrape.common import get_html\n\n\nclass SportsRefScrape:\n \"\"\"Class to perform various web-scraping routines from sports-reference.com/cbb\"\"\"\n\n def 
__init__(self):\n pass\n\n def get_gid(self, date, t1, t2):\n \"\"\" Return unique game id, with date and alphabetized teams, like 2020-02-15_indiana_purdue\" \"\"\"\n tnames = sorted([t1, t2])\n return \"{0}_{1}_{2}\".format(date,tnames[0],tnames[1])\n\n\n def get_team_list(self, season=2020):\n \"\"\" Return a list of all teams in D-I for a given season \"\"\"\n\n teams_url = f\"http://www.sports-reference.com/cbb/seasons/{season}-school-stats.html\"\n teams_html = get_html(teams_url)\n teams_soup = BeautifulSoup(teams_html, \"html.parser\")\n teams = []\n table = teams_soup.find(\"table\", id=\"basic_school_stats\").find(\"tbody\")\n for td in table.find_all(\"td\", {\"data-stat\":\"school_name\"}):\n team = td.find(\"a\")[\"href\"].split(\"/\")[3]\n teams.append(team)\n\n return teams\n\n\n def get_game_data(self, season, fout=None, overwrite=False, gids=None, teams=None, startdate=None, enddate=None, verbose=False):\n \"\"\"Retrieve individual game statistics for a set of teams in a given season\n \n Parameters:\n season: year of the season (i.e. 2020 for 2019-20 season)\n fout: file to write output CSV to (None to not write to file)\n overwrite: True to overwrite file, False to append to it (taking care to avoid duplicates)\n gids: optional list of gids to get. If not None, this overrides anything in teams, startdate, enddate\n teams: list of team IDs (from sports-reference) to retrive games for.\n If None, use all teams in D-I for the given season\n startdate: date to start retrieving games, defaults to beginning of season\n enddate: date to end retrieving games, defaults to full season\n verbose: print extra info\n\n Returns: list of comma-separated strings, as would be written into the lines of a CSV\n \"\"\"\n\n if teams is not None:\n if gids is not None:\n raise Exception(\"Only one of gids, teams can be non-null\")\n else:\n if gids is None:\n teams = self.get_team_list(season)\n\n gids_to_get = None\n if gids is not None:\n gids_to_get = gids\n teams = [gid.split(\"_\")[1] for gid in gids]\n teams = list(set(teams))\n \n gids = {}\n lines = {}\n rows = []\n\n # if we want to update the game file, record everything in the old file\n if fout is not None and overwrite==False:\n for line in open(fout).readlines()[1:]:\n sp = line.strip().split(\",\")\n date = sp[1]\n gid = self.get_gid(date,sp[3], sp[5])\n if date not in gids.keys():\n gids[date] = []\n lines[date] = []\n lines[date].append(line)\n gids[date].append(gid)\n\n stats = [\"pts\",\"fg\",\"fga\",\"fg3\",\"fg3a\",\"ft\",\"fta\",\"orb\",\"trb\",\"ast\",\"stl\",\"blk\",\"tov\",\"pf\"]\n for team in teams:\n if verbose:\n print(\"Getting games for \"+team+\"...\")\n\n url = f\"http://www.sports-reference.com/cbb/schools/{team}/{season}-gamelogs.html\"\n html = get_html(url)\n soup = BeautifulSoup(html, \"html.parser\")\n\n # this page only for \"game type\" (reg season, conf tourney, etc.) 
If before March, guaranteed Reg Season\n if enddate==None or enddate.month >= 2:\n url2 = \"http://www.sports-reference.com/cbb/schools/{0}/{1}-schedule.html\".format(team,season)\n html2 = get_html(url2)\n soup2 = BeautifulSoup(html2, \"html.parser\")\n\n table = soup.find(\"table\", id=\"sgl-basic\").find(\"tbody\")\n for tr in table.find_all(\"tr\"):\n if tr.get(\"id\") == None:\n continue\n\n date = tr.find(\"td\", {\"data-stat\":\"date_game\"})\n if date.find(\"a\") != None:\n date = date.find(\"a\").string\n else:\n continue\n opp = tr.find(\"td\", {\"data-stat\":\"opp_id\"})\n\n if startdate!=None and startdate > dt.date(*[int(x) for x in date.split(\"-\")]):\n continue \n\n if enddate!=None and enddate < dt.date(*[int(x) for x in date.split(\"-\")]):\n continue \n\n if opp.find(\"a\")==None:\n continue\n opp = opp.find(\"a\")[\"href\"].split(\"/\")[3]\n gid = self.get_gid(date, team, opp)\n\n if gids_to_get is not None and gid not in gids_to_get:\n continue\n\n datem1day = str(dt.date(*[int(x) for x in date.split(\"-\")]) - dt.timedelta(1))\n gidm1day = self.get_gid(datem1day, team, opp)\n if date not in gids.keys():\n gids[date] = []\n lines[date] = [] \n if gid in gids[date] or (datem1day in gids.keys() and gidm1day in gids[datem1day]):\n continue\n else:\n gids[date].append(gid)\n\n if enddate==None or enddate.month >= 2:\n gtype = soup2.find(\"td\",{\"csk\":date}).find_parent(\"tr\").find(\"td\",{\"data-stat\":\"game_type\"}).string\n else:\n gtype = \"REG\"\n if gtype == \"REG\":\n gtype = \"RG\"\n if gtype == \"CTOURN\":\n gtype = \"CT\"\n\n loc = tr.find(\"td\", {\"data-stat\":\"game_location\"}).string\n if loc==None: loc=\"H\"\n elif loc==\"@\": loc=\"A\"\n elif loc==\"N\": loc=\"N\"\n else:\n raise Exception(loc)\n\n numot = tr.find(\"td\", {\"data-stat\":\"game_result\"})\n if numot.find(\"small\") != None:\n numot = int(numot.find(\"small\").string.split(\"(\")[1].split()[0])\n else:\n numot = 0\n\n statdict = {}\n opp_statdict = {}\n getint = lambda x: (0 if x is None else int(x))\n for stat in stats:\n statdict[stat] = getint(tr.find(\"td\",{\"data-stat\":stat}).string)\n opp_statdict[stat] = getint(tr.find(\"td\",{\"data-stat\":\"opp_\"+stat}).string)\n\n if statdict[\"pts\"] > opp_statdict[\"pts\"]:\n wd, ld = statdict, opp_statdict\n wteam, lteam = team, opp\n else:\n wd, ld = opp_statdict, statdict\n wteam, lteam = opp, team\n if loc==\"H\": loc=\"A\"\n elif loc==\"A\": loc=\"H\"\n\n rowvals = [season,date,gtype,wteam,wd[\"pts\"],lteam,ld[\"pts\"],loc,numot,\n wd[\"fg\"],wd[\"fga\"],wd[\"fg3\"],wd[\"fg3a\"],wd[\"ft\"],wd[\"fta\"],wd[\"orb\"],\n wd[\"trb\"]-wd[\"orb\"],wd[\"ast\"],wd[\"tov\"],wd[\"stl\"],wd[\"blk\"],wd[\"pf\"],\n ld[\"fg\"],ld[\"fga\"],ld[\"fg3\"],ld[\"fg3a\"],ld[\"ft\"],ld[\"fta\"],ld[\"orb\"],\n ld[\"trb\"]-ld[\"orb\"],ld[\"ast\"],ld[\"tov\"],ld[\"stl\"],ld[\"blk\"],ld[\"pf\"]\n ]\n rows.append(rowvals)\n\n string = \",\".join([str(x) for x in rowvals]) + '\\n'\n\n lines[date].append(string)\n\n colnames = [\"Season\",\"Date\",\"Type\",\"WTeamID\",\"WScore\",\"LTeamID\",\"LScore\",\"WLoc\",\"NumOT\",\n \"WFGM\",\"WFGA\",\"WFGM3\",\"WFGA3\",\"WFTM\",\"WFTA\",\"WOR\",\"WDR\",\"WAst\",\"WTO\",\"WStl\",\n \"WBlk\",\"WPF\",\"LFGM\",\"LFGA\",\"LFGM3\",\"LFGA3\",\"LFTM\",\"LFTA\",\"LOR\",\"LDR\",\"LAst\",\n \"LTO\",\"LStl\",\"LBlk\",\"LPF\"\n ]\n if fout:\n fout = open(fout, 'w')\n fout.write(\",\".join(colnames)+'\\n')\n for date in sorted(gids.keys()):\n for s in lines[date]:\n fout.write(s)\n fout.close()\n\n return pd.DataFrame(rows, 
columns=colnames)\n\n\n def get_gids_on_date(self, startdate, enddate=None):\n \"\"\"\n Return gids of all games between startdate and enddate (inclusive)\n If enddate is None, use only startdate\n \"\"\"\n\n if enddate is None:\n enddate = startdate\n\n gids = []\n date = startdate\n while date <= enddate:\n url = f'https://www.sports-reference.com/cbb/boxscores/index.cgi?month={date.month:02d}&day={date.day:02d}&year={date.year}'\n html = str(get_html(url))\n if html.find(\"No games found\") > -1:\n date += dt.timedelta(1)\n continue\n\n soup = BeautifulSoup(html, 'html.parser')\n for table in soup.find_all('table', {'class':'teams'}):\n td = table.find_all(\"tr\")[0].find(\"td\")\n a = td.find(\"a\")\n if a==None: # usually a non-DI team\n continue\n if not a.has_attr(\"href\"):\n continue\n t1 = td.find(\"a\")[\"href\"].split(\"/\")[3]\n td = table.find_all(\"tr\")[1].find(\"td\")\n a = td.find(\"a\")\n if a==None: # usually a non-DI team\n continue\n if not a.has_attr(\"href\"):\n continue\n t2 = td.find(\"a\")[\"href\"].split(\"/\")[3]\n \n gids.append(self.get_gid(date, t1, t2))\n\n date += dt.timedelta(1)\n\n return gids\n\n\n def get_ap_rankings(self, season):\n \"\"\"\n Given the season, return a dictionary where the keys are dates \n and values are length-25 lists giving the rankings 1-25 on that date.\n \"\"\"\n\n url = f'https://www.sports-reference.com/cbb/seasons/{season}-polls.html'\n html = get_html(url)\n soup = BeautifulSoup(html, 'html.parser')\n\n table = soup.find('table', {'id':'ap-polls'})\n\n # get the poll dates\n polls = {}\n date_row = table.find('thead').find_all('tr')[2]\n for th in date_row.find_all('th')[2:]:\n s = th.string\n if s==\"Pre\":\n date = dt.date(season-1,10,1)\n elif s==\"Final\":\n date = dt.date(season,5,1)\n else:\n month = int(s.split('/')[0])\n day = int(s.split('/')[1])\n year = season\n if month > 7:\n year -= 1\n date = dt.date(year,month,day)\n polls[date] = [[] for i in range(25)]\n\n sorted_dates = sorted(polls.keys())\n\n for tr in table.find('tbody').find_all('tr'):\n tds = tr.find_all('td')\n if len(tds)==0:\n continue\n tid = tr.find('th').find('a').get('href').split('/')[3]\n for date, td in zip(sorted_dates,tds[1:]):\n if td.string is not None and td.string != \"\":\n idx = int(td.string)-1\n polls[date][idx].append(tid)\n \n for date in polls:\n if sum(len(teams) for teams in polls[date]) < 25:\n raise Exception(f'Less than 25 teams for date {date}')\n\n return polls\n\n def get_roster_info(self, season, teams=None, stats=[\"MP\",\"WS\"], use_adv=True, est_file=None, fout=None, out_type=\"df\"):\n \"\"\"Get player IDs and statistics for a given season for every team in teams\"\"\"\n\n if teams==None:\n teams = self.get_team_list(season)\n\n data = {'team_id':[], 'players':[]}\n for stat in stats:\n data[stat] = []\n\n if est_file:\n est_rosters = pd.read_pickle(est_file, compression='gzip').set_index('team_id')\n est_rosters = est_rosters.to_dict(orient='index')\n\n for tid in teams:\n print(f\"Getting roster for {tid}\")\n\n url = f\"https://www.sports-reference.com/cbb/schools/{tid}/{season}.html\"\n html = str(get_html(url))\n\n tablestart = html.find('\",tablestart)\n if use_adv:\n tablestartAdv = html.find('
\",tablestartAdv)\n htmlAdv = html[tablestartAdv:tableendAdv+8]\n soupAdv = BeautifulSoup(htmlAdv, \"html.parser\")\n tableAdv = soupAdv.find(\"table\", {\"id\":\"advanced\"})\n\n html = html[tablestart:tableend+8]\n soup = BeautifulSoup(html, \"html.parser\")\n \n table = soup.find(\"table\", {\"id\":\"roster\"})\n if use_adv:\n tableAdv = soupAdv.find(\"table\", {\"id\":\"advanced\"})\n\n data['team_id'].append(tid)\n if table is None:\n print(\" School not found for year {0}! Using estimated roster\".format(season))\n for c in ['players'] + stats:\n data[c].append(est_rosters[tid][c])\n\n continue\n \n for s in ['players']+stats:\n data[s].append([])\n \n for tr in table.find(\"tbody\").find_all(\"tr\"): \n player = tr.find(\"th\",{\"data-stat\":\"player\"}).find(\"a\")[\"href\"].split(\"/\")[3].split(\".\")[0]\n data['players'][-1].append(player)\n\n if use_adv:\n for tr in tableAdv.find(\"tbody\").find_all(\"tr\"):\n for stat in stats:\n x = tr.find(\"td\",{\"data-stat\":stat.lower()}).string\n data[stat][-1].append(float(x) if x is not None else 0.0)\n else:\n for stat in stats:\n for p in data['players'][-1]:\n data[stat][-1].append(0.0)\n\n if fout:\n if out_type == \"df\":\n df = pd.DataFrame(data, columns=['team_id','players']+stats)\n df.to_pickle(fout, compression='gzip')\n\n return data\n\n\nif __name__==\"__main__\":\n \n sr = SportsRefScrape()\n\n # sr.get_game_data(2020, fout=\"../scratch/test.csv\", overwrite=True, teams=['purdue'], verbose=True)\n\n sr.get_roster_info(2021, teams=['purdue', 'princeton'], use_adv=False, est_file='../../data/rosters/estimated_rosters/2021.pkl.gz', fout='test.pkl.gz')\n\n # for gid in sr.get_gids_on_date(dt.date(2020,2,15), dt.date(2020,2,16)):\n # print(gid)\n \n # gids = sr.get_gids_on_date(dt.date(2020,2,16), dt.date(2020,2,16))\n # print(sr.get_game_data(2020, gids=gids, verbose=True))\n","repo_name":"bjmarsh/WatchCBB","sub_path":"watchcbb/scrape/SportsRefScrape.py","file_name":"SportsRefScrape.py","file_ext":"py","file_size_in_byte":14838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43159039916","text":"# -*- coding: utf-8 -*-\r\n# -----------------------------------------------------------------------------\r\n# author: Timo Wicki\r\n# date: 25.03.2022\r\n#\r\n# GIS-Daten in die Template SWMM-Eingabedatei (.inp) importieren:\r\n# Für die Simulation eines Modells benötigt es eine SWMM-Input-Datei. Dabei handelt es sich um ein strukturiertes \r\n# Textformat (.inp) mit allen Angaben (Berechnungsoptionen, Niederschlagsganglinie, Knoten, Haltungen, Teileinzugsgebiete u. a.) \r\n# die für die Simulation benötigt werden. Es wird eine SWMM-Input-Datei benötigt, in welcher Berechnungsoptionen und die \r\n# Niederschlagsganglinie enthalten ist. Das Skript importiert die GIS-Datensätze Knoten, Haltungen und Teileinzugsgebiete \r\n# in die Template-Dateie, um eine vollständige Datei für die Simulation zu generieren. 
Anschliessend werden die erstellten Modelle mit \r\n# der Software SWMM ausgeführt (aktuell im Skirpt auskommentiert -> Simulation besser in SWMM-Software ausführen).\r\n#\r\n# Die SWMM-Objekte EVAPORATION, RAINGAGES, MAP, REPORT, STORAGE, DWF, CURVES, ORIFICES, WEIRS, LOSSES, TIMESERIES, \r\n# TAGS, SYMBOLS, LABELS sind noch nicht berücksichtigt und müssten bei Bedarf in der SWMM-Software erstellt werden.\r\n# Bei den SWMM-Objekten OUTFALLS und PUMPS werden nicht alle Felder berücksichtigt.\r\n# -----------------------------------------------------------------------------\r\n\"\"\"gisswmm2swmm\"\"\"\r\nimport os, sys, time, json, shutil, re\r\nimport arcpy\r\nimport swmmio\r\nfrom swmm_api import swmm5_run\r\nimport pandas as pd\r\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '0_BasicFunctions'))\r\nimport logging_functions as lf\r\n\r\ndef coords_to_list(coords):\r\n \"\"\"Konvertiert einen String mit Koordinaten zu einer Liste aus Koordinatenpaaren\r\n\r\n Required:\r\n coords -- String mit Koordinaten nach folgendem Schema: '[(x,y),(x,y),...]' z. B. '[(2664547.6, 1210716.7), (2664545.4, 1210718.5)]'\r\n\r\n Return:\r\n coords_list -- Liste mit Koordinatenpaar-Listen [x,y]\r\n \"\"\"\r\n # Eckige Klammern entfernen\r\n coords = coords.replace(\"[\",\"\").replace(\"]\",\"\")\r\n # String bei Klammern und Komma auftrennen\r\n cs = re.split('\\(|\\)|,', coords)\r\n coords_list = []\r\n idx = 0\r\n xy_check = \"x\" \r\n for val in cs:\r\n if val in [\"(\",\",\",\")\",\"\",\" \"]:\r\n continue\r\n else:\r\n if xy_check == \"x\":\r\n coords_list.append([float(val)])\r\n xy_check = \"y\"\r\n elif xy_check == \"y\":\r\n coords_list[idx].append(float(val))\r\n xy_check = \"x\"\r\n idx += 1\r\n\r\n return coords_list\r\n\r\n\r\n# Main module: Input-Daten aufbereiten und Funktionen aufrufen\r\ndef main(out_node, out_link, out_subcatchment, template_swmm_file, sim_nr):\r\n \"\"\"Input-Daten aufbereiten und Funktionen für die Konvertierung der GIS Feature-Klassen (node, link, subcatchment)\r\n in das SWMM-Datenformat (.inp) aufrufen.\r\n\r\n Required:\r\n out_node -- Name der Feature-Klasse mit den Schächten (ohne Postfix)\r\n out_link -- Name der Feature-Klasse mit den Haltungen (ohne Postfix)\r\n out_subcatchment -- Name der Feature-Klasse mit den Teileinzugsgebieten (ohne Postfix)\r\n template_swmm_file -- Template .inp-Datei die alle Angaben ausser der Bauwerke enthält\r\n sim_nr -- Wird als Postfix für Log-Dateinamen und Feature-Klassen verwendet\r\n \"\"\"\r\n # Pfad zur Output SWMM-Datei\r\n in_path, in_name = os.path.split(template_swmm_file)\r\n out_path = os.path.join(in_path, sim_nr)\r\n out_name = in_name.split(\".inp\")[0] + \"_\" + sim_nr + '.inp'\r\n\r\n # Ordner mit SWMM-Dateien erstellen\r\n if not os.path.isdir(out_path):\r\n os.mkdir(out_path)\r\n\r\n swmm_out_file = os.path.join(out_path, out_name)\r\n\r\n # swmmio Objekt erstellen\r\n mymodel = swmmio.Model(template_swmm_file)\r\n\r\n ## Nodes hinzufügen (STORAGE, DWF,.. 
noch nicht berücksichtigt)\r\n # Dataframes laden\r\n junctions = mymodel.inp.junctions\r\n outfalls = mymodel.inp.outfalls\r\n coordinates = mymodel.inp.coordinates\r\n # Name dataframe index (swmmio)\r\n junctions.index.name = \"Name\"\r\n coordinates.index.name = \"Name\"\r\n outfalls.index.name = \"Name\"\r\n # GISSWMM-Felder definieren (erstes Feld -> Index, zweites Feld -> Typ)\r\n node_fields_gis = [\"Name\", \"SWMM_TYPE\", \"InvertElev\", \"InitDepth\", \"MaxDepth\", \"SurchargeDepth\", \"PondedArea\", \"OutfallType\", \"coords\", \"tag\"]\r\n # Mapping GISSWMM-Feld:swmmio-Feld für junction\r\n junction_fields = {\"InvertElev\":\"InvertElev\", \"MaxDepth\":\"MaxDepth\", \"InitDepth\":\"InitDepth\", \"SurchargeDepth\":\"SurchargeDepth\", \"PondedArea\":\"PondedArea\"}\r\n # Mapping GISSWMM-Feld:swmmio-Feld für outfall \r\n outfall_fields = {\"InvertElev\":\"InvertElev\", \"OutfallType\":\"OutfallType\"}\r\n # Daten aus GIS-Datensatz extrahieren\r\n with arcpy.da.SearchCursor(out_node, node_fields_gis) as cursor:\r\n for row in cursor:\r\n for ii, val in enumerate(row):\r\n in_field = node_fields_gis[ii]\r\n if row[1] in [\"INLET\", \"JUNCTION\"] and in_field in list(junction_fields.keys()):\r\n junctions.loc[row[0], junction_fields[in_field]]= val\r\n elif row[1] == \"OUTFALL\" and in_field in list(outfall_fields.keys()):\r\n outfalls.loc[row[0], outfall_fields[in_field]] = val\r\n elif in_field == \"coords\":\r\n coordinates.loc[row[0]] = coords_to_list(val)[0]\r\n #elif in_field == \"tag\":\r\n # Modell aktualisieren\r\n mymodel.inp.junctions = junctions\r\n mymodel.inp.outfalls = outfalls\r\n mymodel.inp.coordinates = coordinates\r\n\r\n ## Links hinzufügen (ORIFICES, WEIRS, LOSSES noch nicht berücksichtigt)\r\n conduits = mymodel.inp.conduits\r\n pumps = mymodel.inp.pumps\r\n xsections = mymodel.inp.xsections\r\n vertices = mymodel.inp.vertices\r\n # Name dataframe index (swmmio)\r\n conduits.index.name = \"Name\"\r\n pumps.index.name = \"Name\"\r\n xsections.index.name = \"Link\"\r\n vertices.index.name = \"Link\"\r\n # GISSWMM-Felder definieren (erstes Feld -> Index, zweites Feld -> Typ)\r\n link_fields_gis = [\"Name\", \"SWMM_TYPE\", \"InletNode\", \"OutletNode\", \"Length\", \"Roughness\", \"InOffset\", \"OutOffset\", \r\n \"InitFlow\", \"MaxFlow\", \"ShapeType\", \"Geom1\", \"Geom2\", \"Geom3\" , \"Geom4\", \"Barrels\", \"coords\"]\r\n # Mapping GISSWMM-Feld:swmmio-Feld für conduit (muss evtl. 
je nach SWMM-Version angepasst werden)\r\n conduit_fields = {\"InletNode\":\"InletNode\", \"OutletNode\":\"OutletNode\", \"Length\":\"Length\", \"Roughness\":\"Roughness\", \r\n \"InOffset\": \"InOffset\", \"OutOffset\":\"OutOffset\", \"InitFlow\":\"InitFlow\", \"MaxFlow\":\"MaxFlow\"}\r\n # Mapping GISSWMM-Feld:swmmio-Feld für pump\r\n pump_fields = {\"InletNode\":\"InletNode\", \"OutletNode\":\"OutletNode\"} # PumpCurve, InitStatus, StartupDepth, ShutoffDepth nicht berücksichtigt\r\n # Mapping GISSWMM-Feld:swmmio-Feld für xsection\r\n xsections_fields = {\"ShapeType\":\"Shape\", \"Geom1\":\"Geom1\", \"Geom2\":\"Geom2\", \"Geom3\":\"Geom3\", \"Geom4\":\"Geom4\", \"Barrels\":\"Barrels\"} # Geom3, Geom4, Barrels nicht berücksichtigt\r\n # Daten aus GIS-Datensatz extrahieren\r\n with arcpy.da.SearchCursor(out_link, link_fields_gis) as cursor:\r\n for row in cursor:\r\n for ii, val in enumerate(row):\r\n in_field = link_fields_gis[ii]\r\n if row[1] == \"CONDUIT\" and in_field in list(conduit_fields.keys()):\r\n conduits.loc[row[0], conduit_fields[in_field]] = val\r\n elif row[1] == \"PUMP\" and in_field in list(pump_fields.keys()):\r\n pumps.loc[row[0], pump_fields[in_field]] = val\r\n elif in_field in list(xsections_fields.keys()):\r\n xsections.loc[row[0], xsections_fields[in_field]] = val\r\n elif in_field == \"coords\":\r\n coords_list = coords_to_list(val)\r\n for coords in coords_list:\r\n # temp dataframe\r\n df = pd.DataFrame({\"X\":coords[0],\"Y\":coords[1]}, index = [row[0]] )\r\n df.index.name = \"Link\"\r\n vertices = vertices.append(df)\r\n #elif in_field == \"tag\":\r\n\r\n # Modell aktualisieren\r\n mymodel.inp.conduits = conduits\r\n mymodel.inp.pumps = pumps\r\n mymodel.inp.xsections = xsections\r\n mymodel.inp.vertices = vertices\r\n\r\n ## Subcatchment hinzufügen\r\n subcatchments = mymodel.inp.subcatchments\r\n subareas = mymodel.inp.subareas\r\n infiltration = mymodel.inp.infiltration\r\n polygons = mymodel.inp.polygons\r\n\r\n # Name dataframe index (swmmio)\r\n subcatchments.index.name = \"Name\"\r\n subareas.index.name = \"Subcatchment\"\r\n subareas.index.name = \"Subcatchment\"\r\n polygons.index.name = \"Subcatchment\"\r\n\r\n # GISSWMM-Felder definieren (erstes Feld -> Index, zweites Feld -> Typ)\r\n subcatchments_fields_gis = [\"Name\", \"Raingage\", \"Outlet\", \"Area\", \"PercImperv\", \"Width\", \"PercSlope\", \"N_Imperv\", \r\n \"N_Perv\", \"S_Imperv\", \"S_Perv\", \"PctZero\", \"RouteTo\", \"CurbLength\", \"SnowPack\",\r\n \"MaxRate\", \"MinRate\", \"Decay\", \"DryTime\", \"MaxInfil\", \"coords\"]\r\n # Mapping GISSWMM-Feld:swmmio-Feld für conduit (muss evtl. 
je nach SWMM-Version angepasst werden)\r\n subcatchments_fields = {\"Raingage\":\"Raingage\", \"Outlet\":\"Outlet\", \"Area\":\"Area\", \"PercImperv\":\"PercImperv\", \r\n \"Width\": \"Width\", \"PercSlope\":\"PercSlope\", \"CurbLength\":\"CurbLength\", \"SnowPack\":\"SnowPack\"}\r\n subareas_fields = {\"N_Imperv\":\"N-Imperv\", \"N_Perv\":\"N-Perv\", \"S_Imperv\":\"S-Imperv\", \"S_Perv\":\"S-Perv\", \r\n \"PctZero\":\"PctZero\", \"RouteTo\": \"RouteTo\"}\r\n infiltration_fields = {\"MaxRate\":\"MaxRate\", \"MinRate\":\"MinRate\", \"Decay\":\"Decay\", \"DryTime\":\"DryTime\", \"MaxInfil\":\"MaxInfil\"} \r\n\r\n # Daten aus GIS-Datensatz extrahieren\r\n with arcpy.da.SearchCursor(out_subcatchment, subcatchments_fields_gis) as cursor:\r\n for row in cursor:\r\n for ii, val in enumerate(row):\r\n in_field = subcatchments_fields_gis[ii]\r\n if in_field in list(subcatchments_fields.keys()):\r\n subcatchments.loc[row[0], subcatchments_fields[in_field]] = val\r\n elif in_field in list(subareas_fields.keys()):\r\n subareas.loc[row[0], subareas_fields[in_field]] = val\r\n elif in_field in list(infiltration_fields.keys()):\r\n infiltration.loc[row[0], infiltration_fields[in_field]] = val \r\n elif in_field == \"coords\":\r\n coords_list = coords_to_list(val)\r\n for coords in coords_list:\r\n # temp dataframe\r\n df = pd.DataFrame({\"X\":coords[0],\"Y\":coords[1]}, index = [row[0]] )\r\n df.index.name = \"Subcatchment\"\r\n polygons = polygons.append(df)\r\n\r\n # Modell aktualisieren\r\n mymodel.inp.subcatchments = subcatchments\r\n mymodel.inp.subareas = subareas\r\n mymodel.inp.infiltration = infiltration\r\n mymodel.inp.polygons = polygons\r\n\r\n ## save model to new file\r\n mymodel.inp.save(swmm_out_file)\r\n\r\n ## run model\r\n #swmm5_run(swmm_out_file)\r\n\r\n\r\n# Daten einlesen \r\n# Logginig initialisieren\r\nif __name__ == \"__main__\":\r\n # Globale Variabel für logging\r\n global logger\r\n # Input JSON-Datei\r\n # Falls das Skript mittels einer Batch-Datei ausgeführt wird, wird die JSON-Datei als Parameter übergeben:\r\n paramFile = arcpy.GetParameterAsText(0)\r\n # Falls das Skript direkt ausgeführt wird, wird die JSON-Datei hier angeben:\r\n if len(paramFile) == 0:\r\n paramFile = os.path.join(os.path.dirname(__file__), '..', 'settings_v1.json')\r\n\r\n\r\n if paramFile:\r\n #Einlesen der json-Datei\r\n with open(paramFile, encoding='utf-8') as f:\r\n data = json.load(f)\r\n # Der Pfad zum Ordner, in dem die log-Datei gespeichert werden soll. 
\r\n log_folder = data[\"log_folder\"]\r\n # Wird als Postfix für Log-Dateinamen und die SWMM Feature-Klassen (node, link, subcatchment) verwendet.\r\n sim_nr = data[\"sim_nr\"]\r\n # Pfad zu arcpy Workspace GISSWMM (.gdb) mit dem Knoten (out_node), Haltungen (out_link) und Teileinzugsgebieten (out_subcatchment).\r\n gisswmm_workspace = data[\"gisswmm_workspace\"]\r\n # Der Name der Feature-Klasse mit den Knoten (ohne Postfix \"_sim_nr\"!).\r\n out_node = data[\"out_node\"]\r\n # Der Name der Feature-Klasse mit den Haltungen (ohne Postfix \"_sim_nr\"!).\r\n out_link = data[\"out_link\"]\r\n # Der Name der Feature-Klasse mit den Teileinzugsgebieten (ohne Postfix \"_sim_nr\"!).\r\n out_subcatchment = data[\"out_subcatchment\"]\r\n # Der Pfad zur Template SWMM-Eingabedatei (.inp).\r\n template_swmm_file = data[\"template_swmm_file\"]\r\n else:\r\n raise ValueError('keine json-Datei mit den Parametern angegeben')\r\n\r\n # Prüfen ob Logfolder existiert\r\n if not os.path.isdir(log_folder):\r\n try:\r\n os.mkdir(log_folder)\r\n except:\r\n raise ValueError(f'Logfolder \"{log_folder}\" konnte nicht erstellt werden!')\r\n \r\n # Logging initialisieren\r\n filename = 'gisswmm2swmm_' + sim_nr + \"_\" + template_swmm_file.split(\"/\")[-1].split(\".\")[0] + '.log'\r\n log = os.path.join(log_folder, filename)\r\n logger= lf.init_logging(log)\r\n logger.info('****************************************************************')\r\n logger.info(f'Start logging: {time.ctime()}')\r\n start_time = time.time()\r\n\r\n # Aktuelle Workspace definieren\r\n arcpy.env.workspace = gisswmm_workspace\r\n\r\n # Prüfen ob Eingabedatensätze vorhanden sind\r\n postfix = \"_\" + sim_nr\r\n if not postfix in out_node:\r\n out_node = out_node + postfix\r\n if not postfix in out_link:\r\n out_link = out_link + postfix\r\n if not postfix in out_subcatchment:\r\n out_subcatchment = out_subcatchment + postfix\r\n if not arcpy.Exists(out_node):\r\n err_txt = f'Die angegebene Feature-Klasse {out_node} ist nicht vorhanden!'\r\n logger.error(err_txt)\r\n raise ValueError(err_txt) \r\n if not arcpy.Exists(out_link):\r\n err_txt = f'Die angegebene Feature-Klasse {out_link} ist nicht vorhanden!'\r\n logger.error(err_txt)\r\n raise ValueError(err_txt)\r\n if not arcpy.Exists(out_subcatchment):\r\n err_txt = f'Die angegebene Feature-Klasse {out_subcatchment} ist nicht vorhanden!'\r\n logger.error(err_txt)\r\n raise ValueError(err_txt)\r\n\r\n # Koordinatensystem\r\n spatial_ref = arcpy.Describe(out_node).spatialReference\r\n\r\n # Main module aufrufen\r\n with arcpy.EnvManager(workspace = gisswmm_workspace, outputCoordinateSystem = spatial_ref):\r\n main(out_node, out_link, out_subcatchment, template_swmm_file, sim_nr)\r\n\r\n # Logging abschliessen\r\n end_time = time.time()\r\n i = lf.search_in_file(log, \"error\")\r\n logger.info(\"Skript Laufzeit: \" + str(round(end_time - start_time)) + \" sec.\")\r\n logger.info(str(i) + \" Fehler gefunden. 
Check Log.\")\r\n endtime = time.ctime()\r\n logger.info(f'End time: {time.ctime()}')\r\n logger.info('****************************************************************\\n')\r\n\r\n\r\n\r\n\r\n","repo_name":"wickit7/pygisswmm","sub_path":"4_GISSWMM2SWMM/gisswmm2swmm.py","file_name":"gisswmm2swmm.py","file_ext":"py","file_size_in_byte":15402,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"37346420598","text":"from tkinter import *\nfrom tkinter import ttk\nimport pymysql\nfrom tkinter import messagebox\n\n# Class\nclass Student():\n def __init__(self,root):\n self.root = root\n self.root.title(\"Student Management System\")\n self.root.geometry(\"1350x700+0+0\")\n\n # Database Information\n self.hostName_db = \"localhost\"\n self.userName_db = \"admin\"\n self.password_db = \"admin@1234\"\n self.databaseName_db = \"SMS_db\"\n \n def Window(self):\n title = Label(self.root,text=\"Student Management System\",font=(\"times new roman\",40,\"bold\"),bg=\"yellow\",fg=\"red\",bd=10,relief=GROOVE).pack(side=TOP,fill=X)\n\n # ================Variable Define===============================\n self.Roll_No_var = StringVar()\n self.Name_var = StringVar()\n self. Email_var = StringVar()\n self.DOB_var = StringVar()\n self.Gender_var = StringVar()\n self.Contact_var = StringVar()\n self.search_by = StringVar()\n self.search_txt = StringVar()\n \n\n # ================== Manage Frame =============================\n Manage_Frame = Frame(self.root,bd=4,relief=RIDGE,bg=\"crimson\")\n Manage_Frame.place(x=20,y=100,width=450,height=600)\n\n m_title = Label(Manage_Frame,text=\"Manage Student\",font=(\"times new roman\",30,\"bold\"),bg=\"crimson\",fg=\"white\")\n m_title.grid(row=0,columnspan=2,pady=20)\n\n # =============================Label and Entry Field ===============================\n # Roll Number\n lbl_roll = Label(Manage_Frame,text=\"Roll No\",font=(\"times new roman\",20,\"bold\"),bg=\"crimson\",fg=\"white\").grid(row=1,column=0,pady=10,padx=20,sticky=\"w\")\n\n txt_roll = Entry(Manage_Frame,textvariable=self.Roll_No_var,font=(\"times new roman\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_roll.grid(row=1,column=1,pady=10,padx=20,sticky=\"w\")\n\n # Name\n lbl_name = Label(Manage_Frame,text=\"Name\",font=(\"times new roman\",20,\"bold\"),bg=\"crimson\",fg=\"white\").grid(row=2,column=0,pady=10,padx=20,sticky=\"w\")\n\n txt_name = Entry(Manage_Frame,textvariable=self.Name_var,font=(\"times new roman\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_name.grid(row=2,column=1,pady=10,padx=20,sticky=\"w\")\n\n # Email\n lbl_email = Label(Manage_Frame,text=\"Email\",font=(\"times new roman\",20,\"bold\"),bg=\"crimson\",fg=\"white\").grid(row=3,column=0,pady=10,padx=20,sticky=\"w\")\n\n txt_email = Entry(Manage_Frame,textvariable=self.Email_var,font=(\"times new roman\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_email.grid(row=3,column=1,pady=10,padx=20,sticky=\"w\")\n\n # Gender\n lbl_gender = Label(Manage_Frame,text=\"Gender\",font=(\"times new roman\",20,\"bold\"),bg=\"crimson\",fg=\"white\").grid(row=4,column=0,pady=10,padx=20,sticky=\"w\")\n\n combo_gender = ttk.Combobox(Manage_Frame,textvariable=self.Gender_var,font=(\"times new roman\",12,\"bold\"),state=\"readonly\")\n combo_gender[\"values\"] = (\"Male\",\"Female\",\"Other\")\n combo_gender.grid(row=4,column=1,pady=10,padx=20,sticky=\"w\")\n\n # Contact\n lbl_contact = Label(Manage_Frame,text=\"Contact\",font=(\"times new 
roman\",20,\"bold\"),bg=\"crimson\",fg=\"white\").grid(row=5,column=0,pady=10,padx=20,sticky=\"w\")\n\n txt_contact = Entry(Manage_Frame,textvariable=self.Contact_var,font=(\"times new roman\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_contact.grid(row=5,column=1,pady=10,padx=20,sticky=\"w\")\n\n # DOB\n lbl_dob = Label(Manage_Frame,text=\"DOB\",font=(\"times new roman\",20,\"bold\"),bg=\"crimson\",fg=\"white\").grid(row=6,column=0,pady=10,padx=20,sticky=\"w\")\n\n txt_dob = Entry(Manage_Frame,textvariable=self.DOB_var,font=(\"times new roman\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_dob.grid(row=6,column=1,pady=10,padx=20,sticky=\"w\")\n\n # Address\n lbl_address = Label(Manage_Frame,text=\"Address\",font=(\"times new roman\",20,\"bold\"),bg=\"crimson\",fg=\"white\").grid(row=7,column=0,pady=10,padx=20,sticky=\"w\")\n\n self.txt_address = Text(Manage_Frame,width=30,height=4,font=(\"times new roman\",10,\"bold\"))\n self.txt_address.grid(row=7,column=1,pady=10,padx=20,sticky=\"w\")\n\n # ===========================Button Frame ===================================\n # Buttons\n btn_Frame = Frame(Manage_Frame,bd=4,relief=RIDGE,bg=\"crimson\")\n btn_Frame.place(x=15,y=510,width=420)\n\n # Add Button\n addBtn = Button(btn_Frame,text=\"Add\",width=6,command=self.add_student).grid(row=0,column=0,padx=10,pady=10)\n # Update Button\n updateBtn = Button(btn_Frame,text=\"Update\",width=6,command=self.update_data).grid(row=0,column=1,padx=10,pady=10)\n # Delete\n deleteBtn = Button(btn_Frame,text=\"Delete\",width=6,command=self.delete_data).grid(row=0,column=2,padx=10,pady=10)\n # Clear BUtton\n clearBtn = Button(btn_Frame,text=\"Clear\",width=6,command=self.clear).grid(row=0,column=3,padx=10,pady=10)\n\n # ================== Detail Frame =============================\n Detail_Frame = Frame(self.root,bd=4,relief=RIDGE,bg=\"crimson\")\n Detail_Frame.place(x=500,y=100,width=850,height=560)\n\n # Search Bar\n lbl_search = Label(Detail_Frame,text=\"Search By\",font=(\"times new roman\",20,\"bold\"),bg=\"crimson\",fg=\"white\").grid(row=0,column=0,pady=10,padx=20,sticky=\"w\")\n\n combo_search = ttk.Combobox(Detail_Frame,textvariable=self.search_by,font=(\"times new roman\",12,\"bold\"),state=\"readonly\",width=8)\n combo_search[\"values\"] = (\"Roll_NO\",\"Name\",\"Contact\")\n combo_search.grid(row=0,column=1,pady=10,padx=20,sticky=\"\")\n\n #Search bar Entry\n txt_search = Entry(Detail_Frame,textvariable=self.search_txt,font=(\"times new roman\",15,\"bold\"),bd=5,relief=GROOVE)\n txt_search.grid(row=0,column=2,pady=10,padx=20,sticky=\"w\")\n\n # Btn\n searchBtn = Button(Detail_Frame,text=\"Search\",width=6,command=self.search_data).grid(row=0,column=3,padx=10,pady=10)\n\n # Show All\n showallBtn = Button(Detail_Frame,text=\"Show All\",width=6,command=self.fetch_data).grid(row=0,column=4,padx=10,pady=10)\n\n # ====================== Table Frame =================================\n Table_Frame = Frame(Detail_Frame,bd=4,relief=RIDGE,bg=\"crimson\")\n Table_Frame.place(x=10,y=70,width=780,height=480)\n\n # Scroll Bar\n scroll_x = Scrollbar(Table_Frame,orient=HORIZONTAL)\n scroll_y = Scrollbar(Table_Frame,orient=VERTICAL)\n\n # Columns Declare\n self.Student_table = ttk.Treeview(Table_Frame,columns=(\"Roll\",\"Name\",\"Email\",\"DOB\",\"Gender\",\"Contact\",\"Address\"),xscrollcommand=scroll_x.set,yscrollcommand=scroll_y.set)\n\n # \n scroll_x.pack(side=BOTTOM,fill=X)\n scroll_y.pack(side=RIGHT,fill=Y)\n\n scroll_x.config(command=self.Student_table.xview)\n scroll_y.config(command=self.Student_table.yview)\n\n # 
Heading (column name) In Table\n self.Student_table.heading(\"Roll\",text=\"Roll No\")\n self.Student_table.heading(\"Name\",text=\"Name\")\n self.Student_table.heading(\"Email\",text=\"Email\")\n self.Student_table.heading(\"DOB\",text=\"DOB\")\n self.Student_table.heading(\"Gender\",text=\"Gender\")\n self.Student_table.heading(\"Contact\",text=\"Contact\")\n self.Student_table.heading(\"Address\",text=\"Address\")\n\n # Show only defined Heading\n self.Student_table[\"show\"]=\"headings\"\n \n # Size width of columns\n self.Student_table.column(\"Roll\",width=100)\n self.Student_table.column(\"Name\",width=100)\n self.Student_table.column(\"Email\",width=150)\n self.Student_table.column(\"DOB\",width=100)\n self.Student_table.column(\"Gender\",width=60)\n self.Student_table.column(\"Contact\",width=100)\n self.Student_table.column(\"Address\",width=200)\n\n # \n self.Student_table.pack(fill=BOTH,expand=1)\n self.Student_table.bind(\"\",self.get_cursor)\n self.fetch_data()\n \n def add_student(self):\n if self.Roll_No_var.get() == \"\" or self.Name_var.get() == \"\" or self.Email_var.get() == \"\" or self.Gender_var.get() == \"\" or self.DOB_var.get() == \"\" or self.Contact_var.get() == \"\":\n messagebox.showerror(\"Error\",\"All Fields are required.\")\n else:\n # Creating a connection\n con = pymysql.connect(host=self.hostName_db,user=self.userName_db,password=self.password_db,database=self.databaseName_db)\n # Cursor\n cur = con.cursor()\n # Query\n cur.execute(\"INSERT INTO student_tb values(%s,%s,%s,%s,%s,%s,%s)\",(self.Roll_No_var.get(),self.Name_var.get(),self.Email_var.get(),self.DOB_var.get(),self.Gender_var.get(),self.Contact_var.get(),self.txt_address.get(\"1.0\",END)))\n\n # Commit Query\n con.commit()\n self.fetch_data()\n self.clear()\n con.close()\n messagebox.showinfo(\"Success\",\"Record Has been Inserted.\")\n\n # \n def fetch_data(self):\n # Creating a connection\n con = pymysql.connect(host=self.hostName_db,user=self.userName_db,password=self.password_db,database=self.databaseName_db)\n # Cursor\n cur = con.cursor()\n # Query\n cur.execute(\"SELECT * FROM student_tb\")\n rows = cur.fetchall()\n # \n if len(rows)!=0:\n self.Student_table.delete(*self.Student_table.get_children())\n \n for row in rows:\n self.Student_table.insert(\"\",END,values=row)\n con.commit()\n con.close()\n\n def clear(self):\n self.Roll_No_var.set(\"\")\n self.Name_var.set(\"\")\n self.Email_var.set(\"\")\n self.DOB_var.set(\"\")\n self.Gender_var.set(\"\")\n self.Contact_var.set(\"\")\n self.txt_address.delete(\"1.0\",END)\n\n def get_cursor(self,event):\n '''\n when we click on row, al the data from row where the cursor is pointed is copied in contents\n '''\n cursor_row = self.Student_table.focus()\n contents = self.Student_table.item(cursor_row)\n row = contents[\"values\"]\n \n # print(row[0])\n self.Roll_No_var.set(row[0])\n self.Name_var.set(row[1])\n self.Email_var.set(row[2])\n self.DOB_var.set(row[3])\n self.Gender_var.set(row[4])\n self.Contact_var.set(row[5])\n self.txt_address.delete(\"1.0\",END)\n self.txt_address.insert(END,row[6])\n\n def update_data(self):\n # Creating a connection\n con = pymysql.connect(host=self.hostName_db,user=self.userName_db,password=self.password_db,database=self.databaseName_db)\n # Cursor\n cur = con.cursor()\n # Query\n cur.execute(\"UPDATE student_tb SET Name = %s,Email = %s,DOB = %s,Gender = %s,Contact = %s,Address = %s WHERE Roll_NO = 
%s\",(self.Name_var.get(),self.Email_var.get(),self.DOB_var.get(),self.Gender_var.get(),self.Contact_var.get(),self.txt_address.get(\"1.0\",END),self.Roll_No_var.get()))\n\n # Commit Query\n con.commit()\n self.fetch_data()\n self.clear()\n con.close()\n \n def delete_data(self):\n # Creating a connection\n con = pymysql.connect(host=self.hostName_db,user=self.userName_db,password=self.password_db,database=self.databaseName_db)\n # Cursor\n cur = con.cursor()\n # Query\n cur.execute(\"DELETE FROM student_tb WHERE ROll_NO = %s\",self.Roll_No_var.get())\n con.commit()\n con.close()\n self.fetch_data()\n self.clear()\n\n def search_data(self):\n # Creating a connection\n con = pymysql.connect(host=self.hostName_db,user=self.userName_db,password=self.password_db,database=self.databaseName_db)\n # Cursor\n cur = con.cursor()\n # Query\n cur.execute(\"SELECT * FROM student_tb WHERE \"+str(self.search_by.get())+\" LIKE '%\"+str(self.search_txt.get())+\"%'\")\n rows = cur.fetchall()\n # \n if len(rows)!=0:\n self.Student_table.delete(*self.Student_table.get_children())\n \n for row in rows:\n self.Student_table.insert(\"\",END,values=row)\n con.commit()\n con.close()\n\n\n\n\n\nroot = Tk()\n# Object\napp = Student(root)\napp.Window()\nroot.mainloop()","repo_name":"kaushal-project/Projects","sub_path":"StudentManagementSystem-Tk/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24070299507","text":"from apps.accounts.factories import ProfileFactory\nfrom apps.candidates.factories import CandidateFactory\nfrom django.test import TestCase\nfrom rest_framework.reverse import reverse\nfrom rest_framework.test import APIClient\n\n\nclass TestCandidatesListView(TestCase):\n \"\"\"This class tests ListCreateCandidateAPIView.\"\"\"\n\n def setUp(self) -> None:\n self.profile = ProfileFactory()\n self.user = self.profile.user\n CandidateFactory.create_batch(5)\n self.client = APIClient()\n self.client.force_authenticate(user=self.user)\n\n def test_get_non_authorized(self):\n \"\"\"Unauthorized user should not be able to get Candidates.\"\"\"\n self.client.force_authenticate(user=None)\n response = self.client.get(reverse(\"candidates:candidates-list\"))\n\n self.assertEqual(response.status_code, 401)\n\n def test_get_authorized(self):\n \"\"\"Authorized user should be able to get Candidates.\"\"\"\n response = self.client.get(reverse(\"candidates:candidates-list\"))\n\n self.assertEqual(response.data.get(\"count\"), 5)\n self.assertEqual(response.status_code, 200)\n\n def test_create_non_authorized(self):\n \"\"\"Unauthorized user should not be able to create Events.\"\"\"\n self.client.force_authenticate(user=None)\n candidate_data = CandidateFactory.build()\n response = self.client.post(\n path=reverse(\"candidates:candidates-list\"),\n data={\n \"name\": candidate_data.name,\n \"surname\": candidate_data.surname,\n \"gender\": candidate_data.gender,\n \"phone_number\": str(candidate_data.phone_number),\n \"email\": candidate_data.email,\n \"level_of_english\": candidate_data.level_of_english,\n },\n )\n\n self.assertEqual(response.status_code, 401)\n\n def test_create_authorized(self):\n \"\"\"Authorized user should be able to create Events.\"\"\"\n candidate_data = CandidateFactory.build()\n response = self.client.post(\n path=reverse(\"candidates:candidates-list\"),\n data={\n \"name\": candidate_data.name,\n \"surname\": candidate_data.surname,\n \"gender\": 
candidate_data.gender,\n \"phone_number\": str(candidate_data.phone_number),\n \"email\": candidate_data.email,\n \"level_of_english\": candidate_data.level_of_english,\n },\n )\n\n self.assertEqual(response.status_code, 201)\n\n def test_create_authorized_wrong_data(self):\n \"\"\"Authorized user should not be able to create Events with missing data.\"\"\"\n candidate_data = CandidateFactory.build()\n response = self.client.post(\n path=reverse(\"candidates:candidates-list\"),\n data={\n \"name\": candidate_data.name,\n },\n )\n\n self.assertEqual(response.status_code, 400)\n","repo_name":"DevIhor/Recruiter","sub_path":"backend/apps/candidates/tests/test_candidate_listview.py","file_name":"test_candidate_listview.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7117198707","text":"'''\nmne environment is required, python 3.6\n'''\n\nimport pandas as pd\nimport os\nimport argparse\nimport mne_bids\n\nfrom ieeg_fmri_validation.iemu.routines import run_rest_speech_r_squared\nfrom ieeg_fmri_validation.iemu.classes import FilmDataset, RestDataset\n\ndef process_one(bids_dir, subject, acq):\n\n print(subject)\n\n film = FilmDataset(bids_dir, subject, acquisition=acq)\n film.preprocess()\n film.extract_events()\n film.extract_bands()\n ols_music = film.run_task_gamma_ols()\n r2_music = film.run_task_r_squared()\n\n if 'rest' in mne_bids.get_entity_vals(os.path.join(bids_dir, 'sub-' + subject, 'ses-iemu', 'ieeg'), 'task'):\n rest = RestDataset(bids_dir, subject, acquisition=acq)\n rest.preprocess()\n rest.extract_events()\n rest.extract_bands()\n r2_rest = run_rest_speech_r_squared(film, rest)\n else:\n rest, r2_rest = None, None\n\n return ols_music, r2_music, r2_rest\n\n##\ndef process_iemu(bids_dir):\n\n subjects = mne_bids.get_entity_vals(bids_dir, 'subject')\n ols_music, r2_music, r2_rest = [], [], []\n\n for subject in subjects:\n if 'iemu' in mne_bids.get_entity_vals(os.path.join(bids_dir, 'sub-' + subject), 'session'):\n for acq in mne_bids.get_entity_vals(os.path.join(bids_dir, 'sub-' + subject, 'ses-iemu', 'ieeg'),\n 'acquisition'):\n if acq != 'render':\n output = process_one(bids_dir, subject, acq)\n for x, lst in zip(output, [ols_music, r2_music, r2_rest]):\n lst.append(x)\n\n return pd.concat(ols_music, ignore_index=True), \\\n pd.concat(r2_music, ignore_index=True), \\\n pd.concat(r2_rest, ignore_index=True)\n\n\n##\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--bids_dir', '-i', type=str)\n args = parser.parse_args()\n\n process_iemu(args.bids_dir)\n","repo_name":"UMCU-RIBS/ieeg-fmri-dataset-validation","sub_path":"ieeg_fmri_validation/iemu/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"74837642167","text":"from app import app\nfrom flask import render_template, redirect, url_for, flash\nfrom forms import BACForm\nfrom methods import calculate\n\n# Simply returns the index.html template for the intro page.\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n# The Calcuation Page\n@app.route('/bac', methods = ['GET', 'POST'])\ndef bac():\n # Loads the BAC Form file (forms.py) which contains the fields for the calculator\n form = BACForm()\n # When the user submits, data is pulled\n if form.validate_on_submit():\n drinks = {\n 'beer': form.beer.data,\n 'wine': 
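An aside on the DRF test class above: the authorized/unauthorized round trips repeat in every test method. A sketch of one way to factor them out; assert_requires_auth is a hypothetical helper, not part of the project, and it needs the usual Django test settings to run.

from rest_framework.reverse import reverse
from rest_framework.test import APIClient

def assert_requires_auth(testcase, url_name, method="get", data=None):
    """Hit the endpoint without credentials and expect a 401."""
    client = APIClient()                       # no force_authenticate call
    response = getattr(client, method)(reverse(url_name), data=data or {})
    testcase.assertEqual(response.status_code, 401)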
form.wine.data,\n 'liqour': form.liqour.data\n }\n # Form data is inputted to the BAC method, which returns the BAC level\n bac = calculate(drinks, form.weight.data, form.hours.data, form.gender.data)\n # Redirects to the results page with the bac value inputted\n return redirect(url_for('display', bac = bac))\n else:\n flash(\"Please make sure you input your weight and drinking time--we promise we'll keep it a secret.\")\n # Renders the bac.html template when first loaded\n return render_template(\"bac.html\",\n form = form)\n\n# Results page\n@app.route('/display/')\ndef display(bac):\n # Displays a rounded BAC value\n bac = \"%.2f\" % round(float(bac),2)\n # Color & Text Warning Ranges\n if float(bac) < .08:\n color = '#a1facd'\n text = \"You're starting to feel relaxed and a little light headed. You'll notice yourself feeling a little more outgoing and talking louder. Be careful as it's still early and you're in a good place.\"\n elif float (bac) >= .08 and float(bac) < .1:\n color = '#f1f99b'\n text = \"You've reached the legal point of intoxication in New York. Don't drive, and be careful: you're beginning to get quite intoxicated.\"\n elif float(bac) >= .1 and float(bac) < .16:\n color = '#f1f99b'\n text = \"You're at a high point of intoxication: your motor skills and coordination are most likely now impaired. You should stop drinking now and figure out a way to get home safely.\"\n elif float(bac) >= .16 and float(bac) < .2:\n color = '#f1f99b'\n text = \"You're beginning to reach a critical, harmful point of intoxication. Your memory is impaired and you will most likely forget much of the evening. The alcohol in your body is now supressing your gag reflex as well. You should think about contacting medical assistance.\"\n elif float(bac) >= .2:\n color = '#fc96a5'\n text = \"Your BAC is too high! 
Seek immediate medical attention!\"\n\n    # Scrolling Carousel Content Ranges\n    if float(bac) <= .06:\n        feels = ['Relaxed', 'More confident', 'Slight euphoria', 'Feeling tipsy', 'Relaxed', 'More talkative', 'Happy']\n    elif float(bac) > .06 and float(bac) <= .20:\n        feels = ['In control', 'Unstoppable', '\"Buzzed\"', 'More emotional', 'The room is spinning', 'Groggy, nauseous', 'Uncoordinated', 'Drunk', 'Out of it', 'Over-confident', 'Angry, irrational, jumpy', 'Sick', 'Sleepy', 'Slurring your speech']\n    elif float(bac) > .2:\n        feels = ['Lost', 'Confused', 'Disoriented', 'Sick', 'Dizzy', 'Exhausted', 'Angry', 'Uncontrollable', 'Unintelligible', 'Unaware', 'Wasted', 'Cannot walk', 'Uncooperative', 'Loss of bladder control', 'Cold skin', 'Unresponsive', 'Puking', 'Slow breathing']\n\n    # Renders results template (display.html) w/ various dynamic features\n    return render_template(\"display.html\",\n                            bac = bac,\n                            color = color,\n                            text = text,\n                            feels = feels,\n                            length = len(feels))\n\n# Renders the static resources.html page\n@app.route('/resources')\ndef resources():\n    return render_template(\"resources.html\")\n\n# Renders the static cups.html page (aka #LastTheNight)\n@app.route('/cups')\ndef cups():\n    return render_template(\"cups.html\")\n","repo_name":"bceskavich/bewisecalc","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"36584303252","text":"# Email Validation (Check whether the entered email is correct or not.)\n\ndef email_validation(email):\n    d,j,k = 0,0,0 # For condition 5\n\n    # There are several conditions to check for email validation.\n\n    # Condition 1 : Minimum length of the email is 6.\n    if len(email) >= 6:\n\n        # Condition 2 : The first letter of the email must be a letter (lowercase is enforced in condition 5).\n        \n        if email[0].isalpha():\n            \n            # Condition 3 : Exactly one '@' must be present in the email.\n            if (\"@\" in email) and (email.count(\"@\")==1):\n\n                # Condition 4 : '.' must be present in the 3rd or 4th position from the end of the email.\n                if (email[-3] == '.') ^ (email[-4] == '.'): # We use the XOR operator because if both conditions are true we get two '.' near the end of the email, which is also invalid.\n\n                    # Condition 5 : No whitespace is allowed in the email and all letters must be lowercase.\n                    for i in email:\n                        if i.isspace():\n                            k += 1\n                        elif i.isalpha():\n                            if i == i.upper():\n                                j += 1\n                        elif i.isdigit():\n                            continue\n                        elif i == '_' or i == '.' or i == '@':\n                            continue\n                        else:\n                            d += 1\n                    \n                    if k >= 1 or j >= 1 or d >= 1:\n                        print(\"Wrong Email. Spaces are not allowed in the email name.\\nUppercase letters are also not allowed in the email name.\")\n                    else:\n                        print(\"Valid Email\")\n\n                else:\n                    print(\"Wrong email. The full stop is in the wrong position.\")\n                \n            else:\n                print(\"Wrong email. Exactly one '@' must be present in the email.\")\n            \n        else:\n            print(\"Wrong email. The first letter of the email must be a letter.\")\n\n    else:\n        print(\"Wrong email. Email must be at least 6 characters long.\")
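An aside on email_validation above: the nested conditions can be collapsed into one regular expression. The pattern below is a sketch of those same house rules (starts with a lowercase letter, exactly one '@', a 2- or 3-letter ending after the final dot, lowercase/digit/._ only), not a general RFC 5322 validator.

import re

EMAIL_RE = re.compile(r'^[a-z][a-z0-9._]*@[a-z0-9_]+\.[a-z]{2,3}$')

def is_valid_email(email):
    return len(email) >= 6 and email.count('@') == 1 and bool(EMAIL_RE.match(email))

for candidate in ('abc@de.fg', 'Abc@de.fg', 'a b@cd.ef'):
    print(candidate, is_valid_email(candidate))   # True, False, False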
\n\n\nemail = input(\"Enter your email : \")\nemail_validation(email)\n","repo_name":"Abhay-Kanwasi/Project","sub_path":"Email Validation/Email Validation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
{"seq_id":"12207076782","text":"from programmingalpha.MainPortal.Requester import RequesterServices\nimport logging\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nfmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p')\nconsole = logging.StreamHandler()\nconsole.setFormatter(fmt)\nlogger.addHandler(console)\n\nconfig_file=\"portalService.json\"\nprint(\"starting server\")\nserver=RequesterServices(config_file)\n\nserver.start()\nprint(\"server started\")\n","repo_name":"zhangzhenyu13/ProgrammingAlpha","sub_path":"test/servers_test/kafka/test_portal.py","file_name":"test_portal.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"14362224010","text":"# a program that controls a lunar rover \n# by Ayesha Bhutto \n\nimport random # imports random numbers\n\nx = 0 # a variable that sets x to equal 0\ny = 0 # a variable that sets y to equal 0 \n\nprint (\"Welcome to the Lunar Rover! Let's see what adventures you go on today...\") # print statement introducing the game \n\n\nrunning = True # the program will repeat itself as long as it is running \n\nwhile running: # the program loops here \n    print (\"\\nThe rover is at (%i,%i).\" %(x,y)) # a print statement showing where the lunar rover is \n    \n    position = input(\"Please enter the position: \") # user inputs the command with direction and coordinate \n    \n\n    space = position.find(\" \") # variable that finds the space inside the command \n    \n    if space != -1: # if a space was found, the direction is everything before it\n        direction = position[:space] # direction is anything before the space \n    \n    else: # if there is no space, the whole input is the direction \n        direction = position \n    \n    \n    \n    if direction == \"north\": # if statement about the direction being north \n        coordinate = int(position[space:]) # the number after the space in the command is turned into an integer \n        y += coordinate # add the amount to y to make the lunar rover \"move\" north \n    \n    elif direction == \"south\": # if statement about the direction being south\n        coordinate = int(position[space:]) # the number after the space in the command is turned into an integer\n        y -= coordinate # subtract the amount from y since the direction is south \n    \n    elif direction == \"east\": # if statement about the direction going east\n        coordinate = int(position[space:]) # the number after the space is defined as an integer\n        x += coordinate # add the amount to x since the direction is going east\n    \n    elif direction == \"west\": # if statement about the direction going west\n        coordinate = int(position[space:]) # the number after the space is defined as an integer\n        x -= coordinate # subtract the amount from x since the direction is going west\n
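An aside on the command parsing above: str.partition splits a command into direction and amount without the manual find/slice bookkeeping. A sketch; it handles single-argument commands only, so moveto would still need a further split.

def parse_command(position):
    """'north 4' -> ('north', 4); bare words like 'dig' -> ('dig', 0)."""
    direction, _, amount = position.partition(' ')
    return direction, int(amount) if amount.strip() else 0

print(parse_command('north 4'))   # ('north', 4)
print(parse_command('dig'))       # ('dig', 0)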
spider\",\"an apple\", \"my report card\", \"an orange\"]) # program uses a random object to be found each time \n print (\"Oh look! You have found %s. That's not relevant anyway.\" %object) # print statement issuing what has been found \n \n elif direction == \"reset\": # resets the program to go back to the beginning which is (0,0)\n x = 0 # x is set to be 0\n y = 0 # y is set to be 0\n print (\"The rover has been reset to (%i,%i).\" %(x,y)) # a print statement issuing the rover has been reset\n \n \n elif direction == \"rest\": # if user enters rest, the program breaks \n break \n \n \n \n elif direction == \"moveto\": # the direction is defined if the position is moveto, which is the lunar rover moving to a certain area \n space = space+1 # space is created by addding one more space to it\n command = position[space:] # anything after the first space is the first movement\n secondspace = command.find(\" \") # after the command creates the second space between the moveto and integers\n xpoint = int[:secondspace] # turns anything before the secondspace to an integer\n ypoint = int[secondspace:] # turns anything after the second space to an integer\n print (\"You have moved the rover to (%i,%i),\" %(xpoint,ypoint)) # print statement issuing what the movement is \n \n \n \n else: # if user enters anything else rather than a valid direction, the program will appear as invalid and continue to ask until the command is inputted properly \n print(\"Error, this move cannot be made. Enter the direction and then your point (ex.north 4): \") \n \n\n \n \n \n \n \n \n \n","repo_name":"ayeshabhutto/lunar-rover","sub_path":"lunarRoverCode.py","file_name":"lunarRoverCode.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38651156796","text":"class Solution:\n def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:\n # sort by start\n intervals.sort(key=lambda i:i[0])\n prev_interval_end = intervals[0][1]\n \n removals = 0\n \n for i in range(1, len(intervals)):\n curr_interval = intervals[i]\n curr_interval_start = curr_interval[0]\n curr_interval_end = curr_interval[1]\n \n # check for overlap and increment count\n # set end to min of prev/current. 
We want to get rid of the longer/larger end interval\n            if curr_interval_start < prev_interval_end:\n                removals += 1\n                prev_interval_end = min(curr_interval_end, prev_interval_end)\n            else:\n                prev_interval_end = curr_interval_end\n        \n        return removals","repo_name":"emilyws27/Leetcode","sub_path":"435-non-overlapping-intervals/435-non-overlapping-intervals.py","file_name":"435-non-overlapping-intervals.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"11694731310","text":"# encoding=utf8\nfrom gensim.models.word2vec import Word2Vec\nfrom gensim.models import word2vec\n\nfilename = 'text8\\\\text8'\n\nwords = word2vec.Text8Corpus(filename)\nmodel = Word2Vec()\nmodel.build_vocab(words)\nmodel.train(words, total_examples=model.corpus_count, epochs=model.iter)\nprint(model['class'])\nprint(model.most_similar(['class']))\n","repo_name":"AidenLong/ai","sub_path":"nlp/NERuselocal/w2v_gensim.py","file_name":"w2v_gensim.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
{"seq_id":"1227841777","text":"# ssh jacob@45.55.199.182 tail -f /path/to/file | logstalgia --sync\n\nimport random\n\nnums = {\n    '1':'1',\n    '2':'2ABCabc',\n    '3':'3DEFdef',\n    '4':'4GHIghi',\n    '5':'5JKLjkl',\n    '6':'6MNOmno',\n    '7':'7PQRSpqrs',\n    '8':'8TUVtuv',\n    '9':'9WXYZwxyz',\n    '0':'0'\n}\n\nmessage = (\"The cryptrollgraphy problems in this competition seem to \"\n           \"detract from it's image, as whoever keeps making them doesn't \"\n           \"seem to take himself seriously. A phone pad? And in this format? \"\n           \"It just doesn't seem right. This isn't even a one time pad. \"\n           \"flag{7he_Harder_th3y_are}\")\n\nnms = list(''.join(str(r) for r in [ord(f) for f in message]))\nopen('multi.pad', 'w').write(''.join(random.choice(nums[p]) for p in nms).encode('hex'))\n\n","repo_name":"blockingthesky/CryptoCTF-1-Public","sub_path":"problems/multipad/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"9248216657","text":"import csv\nimport datetime\nimport os\nimport re\nimport sqlite3\nimport time\n\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import WebDriverException\n\nfrom .settings import Settings\nfrom .time_util import sleep\nfrom .time_util import sleep_actual\n\n\ndef validate_username(browser,\n                      username_or_link,\n                      own_username,\n                      ignore_users,\n                      blacklist,\n                      potency_ratio,\n                      delimit_by_numbers,\n                      max_followers,\n                      max_following,\n                      min_followers,\n                      min_following,\n                      logger):\n    \"\"\"Check if we can interact with the user\"\"\"\n\n    # Some features may not provide `username` and in those cases we will get it from post's page.\n    if '/' in username_or_link:\n        link = username_or_link  # if there is a `/` in `username_or_link`, then it is a `link`\n\n        #Check URL of the webpage, if it already is user's profile page, then do not navigate to it again\n        web_adress_navigator(browser, link)\n\n        try:\n            username = browser.execute_script(\n                \"return window._sharedData.entry_data.\"\n                \"PostPage[0].graphql.shortcode_media.owner.username\")\n        except WebDriverException:\n            try:\n                browser.execute_script(\"location.reload()\")\n                username = browser.execute_script(\n                    \"return window._sharedData.entry_data.\"\n                    \"PostPage[0].graphql.shortcode_media.owner.username\")\n            except WebDriverException:\n                
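# the reload-and-retry above also failed, so give up on reading the post owner\n                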
logger.error(\"Username validation failed! ~cannot get the post owner's username\")\n return False, \\\n \"---> Sorry, this page isn't available! ~link is broken, or page is removed\\n\"\n else:\n username = username_or_link # if there is no `/` in `username_or_link`, then it is a `username`\n\n if username == own_username:\n return False, \\\n \"---> Username '{}' is yours! ~skipping user\\n\".format(own_username)\n \n if username in ignore_users:\n return False, \\\n \"---> {} is in ignore_users list ~skipping user\\n\".format(username)\n \n if username in blacklist:\n return False, \\\n \"---> {} is in blacklist ~skipping user\\n\".format(username)\n \n \"\"\"Checks the potential of target user by relationship status in order to delimit actions within the desired boundary\"\"\"\n if potency_ratio or delimit_by_numbers and (max_followers or max_following or min_followers or min_following):\n\n relationship_ratio = None\n reverse_relationship = False\n\n # Get followers & following counts\n followers_count, following_count = get_relationship_counts(browser, username, logger)\n\n if potency_ratio and potency_ratio < 0:\n potency_ratio *= -1\n reverse_relationship = True\n\n if followers_count and following_count:\n relationship_ratio = (float(followers_count)/float(following_count)\n if not reverse_relationship\n else float(following_count)/float(followers_count))\n\n logger.info('User: {} >> followers: {} | following: {} | relationship ratio: {}'.format(username,\n followers_count if followers_count else 'unknown',\n following_count if following_count else 'unknown',\n float(\"{0:.2f}\".format(relationship_ratio)) if relationship_ratio else 'unknown'))\n\n if followers_count or following_count:\n if potency_ratio and not delimit_by_numbers:\n if relationship_ratio and relationship_ratio < potency_ratio:\n return False, \\\n \"{} is not a {} with the relationship ratio of {} ~skipping user\\n\".format(\n username, \"potential user\" if not reverse_relationship else \"massive follower\",\n float(\"{0:.2f}\".format(relationship_ratio)))\n\n elif delimit_by_numbers:\n if followers_count:\n if max_followers:\n if followers_count > max_followers:\n return False, \\\n \"User {}'s followers count exceeds maximum limit ~skipping user\\n\".format(username)\n if min_followers:\n if followers_count < min_followers:\n return False, \\\n \"User {}'s followers count is less than minimum limit ~skipping user\\n\".format(username)\n if following_count:\n if max_following:\n if following_count > max_following:\n return False, \\\n \"User {}'s following count exceeds maximum limit ~skipping user\\n\".format(username)\n if min_following:\n if following_count < min_following:\n return False, \\\n \"User {}'s following count is less than minimum limit ~skipping user\\n\".format(username)\n if potency_ratio:\n if relationship_ratio and relationship_ratio < potency_ratio:\n return False, \\\n \"{} is not a {} with the relationship ratio of {} ~skipping user\\n\".format(\n username, \"potential user\" if not reverse_relationship else \"massive follower\",\n float(\"{0:.2f}\".format(relationship_ratio)))\n\n\n # if everything ok\n return True, \"Valid user\"\n\n\ndef update_activity(action=None):\n \"\"\"Record every Instagram server call (page load, content load, likes,\n comments, follows, unfollow).\"\"\"\n\n conn = sqlite3.connect(Settings.database_location)\n with conn:\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n # collect today data\n cur.execute(\"SELECT * FROM statistics WHERE created == 
date('now')\")\n data = cur.fetchone()\n\n if data is None:\n # create a new record for the new day\n cur.execute(\"INSERT INTO statistics VALUES \"\n \"(0, 0, 0, 0, 1, date('now'))\")\n else:\n # sqlite3.Row' object does not support item assignment -> so,\n # convert it into a new dict\n data = dict(data)\n # update\n data['server_calls'] += 1\n\n if action == 'likes':\n data['likes'] += 1\n elif action == 'comments':\n data['comments'] += 1\n elif action == 'follows':\n data['follows'] += 1\n elif action == 'unfollows':\n data['unfollows'] += 1\n\n sql = (\"UPDATE statistics set likes = ?, comments = ?, \"\n \"follows = ?, unfollows = ?, server_calls = ? \"\n \"WHERE created = date('now')\")\n cur.execute(sql, (data['likes'], data['comments'], data['follows'],\n data['unfollows'], data['server_calls']))\n # commit\n conn.commit()\n\n\ndef add_user_to_blacklist(browser, username, campaign, action, logger, logfolder):\n\n file_exists = os.path.isfile('{}blacklist.csv'.format(logfolder))\n fieldnames = ['date', 'username', 'campaign', 'action']\n today = datetime.date.today().strftime('%m/%d/%y')\n\n try:\n with open('{}blacklist.csv'.format(logfolder), 'a+') as blacklist:\n writer = csv.DictWriter(blacklist, fieldnames=fieldnames)\n if not file_exists:\n writer.writeheader()\n writer.writerow({\n 'date': today,\n 'username': username,\n 'campaign': campaign,\n 'action': action\n })\n except Exception as err:\n logger.error(err)\n\n logger.info('--> {} added to blacklist for {} campaign (action: {})'\n .format(username, campaign, action))\n\n\ndef get_active_users(browser, username, posts, boundary, logger):\n \"\"\"Returns a list with usernames who liked the latest n posts\"\"\"\n\n user_link = 'https://www.instagram.com/{}/'.format(username)\n \n #Check URL of the webpage, if it already is user's profile page, then do not navigate to it again\n web_adress_navigator(browser, user_link)\n\n total_posts = format_number(browser.find_element_by_xpath(\n \"//span[contains(@class,'_t98z6')]//span\").text)\n\n # if posts > total user posts, assume total posts\n if posts >= total_posts:\n # reaches all user posts\n posts = total_posts\n\n # click latest post\n browser.find_element_by_xpath(\n \"(//div[contains(@class, '_si7dy')])[1]\").click()\n\n active_users = []\n sc_rolled = 0\n start_time = time.time()\n too_many_requests = 0 # this will help to prevent misbehaviours when you request the list of active users repeatedly within less than 10 min of breaks\n\n message = ((\"~collecting the entire usernames from posts without a boundary!\\n\") if boundary is None else\n (\n \"~collecting only the visible usernames from posts without scrolling at the boundary of zero..\\n\") if boundary == 0 else\n (\"~collecting the usernames from posts with the boundary of {}\\n\".format(boundary)))\n # posts argument is the number of posts to collect usernames\n logger.info(\"Getting active users who liked the latest {} posts:\\n {}\".format(posts, message))\n\n for count in range(1, posts + 1):\n try:\n sleep_actual(2)\n try:\n likers_count = browser.execute_script(\n \"return window._sharedData.entry_data.\"\n \"PostPage[0].graphql.shortcode_media.edge_media_preview_like.count\")\n except WebDriverException:\n try:\n likers_count = (browser.find_element_by_xpath(\n \"//a[contains(@class, '_nzn1h')]/span\").text)\n if likers_count: ##prevent an empty string scenarios\n likers_count = format_number(likers_count)\n else:\n logger.info(\"Failed to get likers count on your post {} ~empty string\".format(count))\n 
likers_count = None\n except NoSuchElementException:\n logger.info(\"Failed to get likers count on your post {}\".format(count))\n likers_count = None\n\n browser.find_element_by_xpath(\n \"//a[contains(@class, '_nzn1h')]\").click()\n sleep_actual(5)\n\n\n dialog = browser.find_element_by_xpath(\n \"//div[text()='Likes']/following-sibling::div\")\n\n scroll_it = True\n try_again = 0\n\n while scroll_it != False and boundary != 0:\n scroll_it = browser.execute_script('''\n var div = arguments[0];\n if (div.offsetHeight + div.scrollTop < div.scrollHeight) {\n div.scrollTop = div.scrollHeight;\n return true;}\n else {\n return false;}\n ''', dialog)\n\n if sc_rolled > 91 or too_many_requests > 1: # old value 100\n logger.info(\"Too Many Requests sent! ~will sleep some :>\")\n sleep_actual(600)\n sc_rolled = 0\n too_many_requests = 0 if too_many_requests >= 1 else too_many_requests\n else:\n sleep_actual(1.2) # old value 5.6\n sc_rolled += 1\n\n tmp_list = browser.find_elements_by_xpath(\n \"//a[contains(@class, '_2g7d5')]\")\n if boundary is not None:\n if len(tmp_list) >= boundary:\n break\n\n if (scroll_it == False and\n likers_count and\n likers_count - 1 > len(tmp_list)):\n if ((boundary is not None and likers_count - 1 > boundary) or\n boundary is None):\n if try_again <= 1: # you can increase the amount of tries here\n logger.info(\n \"Cor! ~failed to get the desired amount of usernames, trying again! | post:{} | attempt: {}\".format(\n posts, try_again + 1))\n try_again += 1\n too_many_requests += 1\n scroll_it = True\n nap_it = 4 if try_again == 0 else 7\n sleep_actual(nap_it)\n\n tmp_list = browser.find_elements_by_xpath(\n \"//a[contains(@class, '_2g7d5')]\")\n logger.info(\"Post {} | Likers: found {}, catched {}\".format(count, likers_count, len(tmp_list)))\n\n except NoSuchElementException:\n try:\n tmp_list = browser.find_elements_by_xpath(\n \"//div[contains(@class, '_3gwk6')]/a\")\n if len(tmp_list) > 0:\n logger.info(\"Post {} | Likers: found {}, catched {}\".format(count, len(tmp_list), len(tmp_list)))\n except NoSuchElementException:\n logger.error('There is some error searching active users')\n\n if len(tmp_list) is not 0:\n for user in tmp_list:\n active_users.append(user.text)\n\n sleep_actual(1)\n # if not reached posts(parameter) value, continue\n if count +1 != posts +1 and count != 0:\n try:\n # click next button\n browser.find_element_by_xpath(\n \"//a[@class='_3a693 coreSpriteRightPaginationArrow']\"\n \"[text()='Next']\").click()\n except:\n logger.error('Unable to go to next profile post')\n\n real_time = time.time()\n diff_in_minutes = int((real_time - start_time) / 60)\n diff_in_seconds = int((real_time - start_time) % 60)\n # delete duplicated users\n active_users = list(set(active_users))\n logger.info(\n \"Gathered total of {} unique active followers from the latest {} posts in {} minutes and {} seconds\".format(len(active_users),\n posts,\n diff_in_minutes,\n diff_in_seconds))\n\n return active_users\n\n\ndef delete_line_from_file(filepath, lineToDelete, logger):\n try:\n file_path_old = filepath+\".old\"\n file_path_Temp = filepath+\".temp\"\n\n f = open(filepath, \"r\")\n lines = f.readlines()\n f.close()\n\n f = open(file_path_Temp, \"w\")\n for line in lines:\n if not line.endswith(lineToDelete):\n f.write(line)\n else:\n logger.info(\"--> \\\"{}\\\" was removed from csv\".format(line.split(',\\n')[0]))\n f.close()\n\n # File leftovers that should not exist, but if so remove it\n while os.path.isfile(file_path_old):\n try:\n os.remove(file_path_old)\n 
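# removal can fail while the OS still holds a handle on the file; the while loop retries below\n            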
except OSError as e:\n logger.error(\"Can't remove file_path_old {}\".format(str(e)))\n sleep(5)\n\n # rename original file to _old\n os.rename(filepath, file_path_old)\n # rename new temp file to filepath\n while os.path.isfile(file_path_Temp):\n try:\n os.rename(file_path_Temp, filepath)\n except OSError as e:\n logger.error(\"Can't rename file_path_Temp to filepath {}\".format(str(e)))\n sleep(5)\n\n # remove old and temp file\n os.remove(file_path_old)\n\n except BaseException as e:\n logger.error(\"delete_line_from_file error {}\".format(str(e)))\n\n\ndef scroll_bottom(browser, element, range_int):\n # put a limit to the scrolling\n if range_int > 50:\n range_int = 50\n\n for i in range(int(range_int / 2)):\n browser.execute_script(\n \"arguments[0].scrollTop = arguments[0].scrollHeight\", element)\n # update server calls\n update_activity()\n sleep(1)\n\n return\n\n# There are three (maybe more) different ways to \"click\" an element/button.\n# 1. element.click()\n# 2. element.send_keys(\"\\n\")\n# 3. browser.execute_script(\"document.getElementsByClassName('\" + element.get_attribute(\"class\") + \"')[0].click()\")\n\n# I'm guessing all three have their advantages/disadvantages\n# Before committing over this code, you MUST justify your change\n# and potentially adding an 'if' statement that applies to your\n# specific case. See the following issue for more details\n# https://github.com/timgrossmann/InstaPy/issues/1232\ndef click_element(browser, element, tryNum=0):\n # explaination of the following recursive function:\n # we will attempt to click the element given, if an error is thrown\n # we know something is wrong (element not in view, element doesn't\n # exist, ...). on each attempt try and move the screen around in\n # various ways. if all else fails, programmically click the button\n # using `execute_script` in the browser.\n\n try:\n # use Selenium's built in click function\n element.click()\n except:\n # click attempt failed\n # try something funky and try again\n\n if tryNum == 0:\n # try scrolling the element into view\n browser.execute_script(\"document.getElementsByClassName('\" + element.get_attribute(\"class\") + \"')[0].scrollIntoView({ inline: 'center' });\")\n elif tryNum == 1:\n # well, that didn't work, try scrolling to the top and then clicking again\n browser.execute_script(\"window.scrollTo(0,0);\")\n elif tryNum == 2:\n # that didn't work either, try scrolling to the bottom and then clicking again\n browser.execute_script(\"window.scrollTo(0,document.body.scrollHeight);\")\n else:\n # try `execute_script` as a last resort\n # print(\"attempting last ditch effort for click, `execute_script`\")\n browser.execute_script(\"document.getElementsByClassName('\" + element.get_attribute(\"class\") + \"')[0].click()\")\n return # end condition for the recursive function\n\n\n # sleep for 1 second to allow window to adjust (may or may not be needed)\n sleep_actual(1)\n\n tryNum += 1\n\n # try again!\n click_element(browser, element, tryNum)\n\n\ndef format_number(number):\n \"\"\"\n Format number. Remove the unused comma. Replace the concatenation with relevant zeros. Remove the dot.\n\n :param number: str\n\n :return: int\n \"\"\"\n formatted_num = number.replace(',', '')\n formatted_num = re.sub(r'(k)$', '00' if '.' in formatted_num else '000', formatted_num)\n formatted_num = re.sub(r'(m)$', '00000' if '.' 
in formatted_num else '000000', formatted_num)\n formatted_num = formatted_num.replace('.', '')\n return int(formatted_num)\n\ndef username_url_to_username(username_url):\n a = username_url.replace (\"https://www.instagram.com/\",\"\")\n username = a.split ('/')\n return username[0]\n \ndef get_number_of_posts(browser):\n \"\"\"Get the number of posts from the profile screen\"\"\"\n num_of_posts_txt = browser.find_element_by_xpath(\"//section/main/div/header/section/ul/li[1]/span/span\").text\n num_of_posts_txt = num_of_posts_txt.replace(\" \", \"\")\n num_of_posts_txt = num_of_posts_txt.replace(\",\", \"\")\n num_of_posts = int(num_of_posts_txt) \n return num_of_posts\n\n\ndef get_relationship_counts(browser, username, logger):\n \"\"\" Gets the followers & following counts of a given user \"\"\"\n\n user_link = \"https://www.instagram.com/{}/\".format(username)\n\n #Check URL of the webpage, if it already is user's profile page, then do not navigate to it again\n web_adress_navigator(browser, user_link)\n\n try:\n followers_count = format_number(browser.find_element_by_xpath(\"//a[contains\"\n \"(@href,'followers')]/span\").text)\n except NoSuchElementException:\n try:\n followers_count = browser.execute_script(\n \"return window._sharedData.entry_data.\"\n \"ProfilePage[0].graphql.user.edge_followed_by.count\")\n except WebDriverException:\n try:\n browser.execute_script(\"location.reload()\")\n followers_count = browser.execute_script(\n \"return window._sharedData.entry_data.\"\n \"ProfilePage[0].graphql.user.edge_followed_by.count\")\n except WebDriverException:\n try:\n followers_count = format_number(browser.find_element_by_xpath(\n \"//li[2]/a/span[contains(@class, 'g47SY')]\").text)\n except NoSuchElementException:\n logger.error(\"Error occured during getting the followers count of '{}'\\n\".format(username))\n followers_count = None\n\n try:\n following_count = format_number(browser.find_element_by_xpath(\"//a[contains\"\n \"(@href,'following')]/span\").text)\n except NoSuchElementException:\n try:\n following_count = browser.execute_script(\n \"return window._sharedData.entry_data.\"\n \"ProfilePage[0].graphql.user.edge_follow.count\")\n except WebDriverException:\n try:\n browser.execute_script(\"location.reload()\")\n following_count = browser.execute_script(\n \"return window._sharedData.entry_data.\"\n \"ProfilePage[0].graphql.user.edge_follow.count\")\n except WebDriverException:\n try:\n following_count = format_number(browser.find_element_by_xpath(\n \"//li[3]/a/span[contains(@class, 'g47SY')]\").text)\n except NoSuchElementException:\n logger.error(\"\\nError occured during getting the following count of '{}'\\n\".format(username))\n following_count = None\n \n return followers_count, following_count\n\n\ndef web_adress_navigator(browser, link):\n \"\"\"Checks and compares current URL of web page and the URL to be navigated and if it is different, it does navigate\"\"\"\n\n try:\n current_url = browser.current_url\n except WebDriverException:\n try:\n current_url = browser.execute_script(\"return window.location.href\")\n except WebDriverException:\n raise\n current_url = None\n \n if current_url is None or current_url != link:\n browser.get(link)\n # update server calls\n update_activity()\n sleep(2)\n\n","repo_name":"richzeng/instapy","sub_path":"instapy/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":23243,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"41652473140","text":"# -*- 
coding: utf-8 -*-\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport time, numpy as np\nimport matplotlib.pyplot as plt\n\nclass VoltMeter(object):\n ''' Bar graph and history displays of effective voltage of input samples '''\n\n def __init__(self, OscConfDict):\n '''Args: Wtime: waiting time between updates\n conf: Configuration of channels\n '''\n # collect relevant configuration parameters\n self.Npoints = 120 # number of points for history\n self.bwidth = 0.5 # width of bars\n self.NChan = OscConfDict['NChannels']\n\n self.ChanLim = []\n CRanges = OscConfDict['CRanges']\n COffsets = OscConfDict['ChanOffsets']\n for i in range(self.NChan):\n # Channel Limits for effective voltage\n self.ChanLim.append( (0., CRanges[i]-COffsets[i]) )\n # Channel Limits for average voltage\n # self.ChanLim.append( (-CRanges[i]-COffsets[i], \n # CRanges[i]-COffsets[i]) )\n\n self.ChanNams = OscConfDict['Channels']\n self.ChanColors = OscConfDict['ChanColors']\n\n # data structures needed throughout the class\n self.ix = np.linspace(-self.Npoints+1, 0, self.Npoints) # history plot\n self.ind = self.bwidth + np.arange(self.NChan) # bar position for voltages\n # \n self.V = np.empty(self.NChan)\n self.stdV = np.empty(self.NChan)\n self.Vhist = np.zeros( [self.NChan, self.Npoints] )\n self.stdVhist = np.zeros( [self.NChan, self.Npoints] )\n\n# set up a figure to plot actual voltage and samplings from Picoscope\n fig = plt.figure(\"Voltmeter\", figsize=(4., 5.3) )\n fig.subplots_adjust(left=0.2, bottom=0.08, right=0.8, top=0.95,\n wspace=None, hspace=.25)\n axes=[]\n # history plot\n axes.append(plt.subplot2grid((6,1),(4,0), rowspan=2) )\n if self.NChan > 1:\n axes.append(axes[0].twinx())\n# for effective Voltage\n for i, C in enumerate(self.ChanNams):\n if i > 1:\n break # works for a maximum of 2 Channels only\n axes[i].set_ylim(*self.ChanLim[i])\n axes[i].set_ylabel('Chan ' + C + ' (Veff)', color=self.ChanColors[i])\n axes[i].grid(True, color=self.ChanColors[i], linestyle = '--', alpha=0.3)\n axes[0].set_xlabel('History')\n # barchart\n axes.append(plt.subplot2grid((6,1),(1,0), rowspan=3) )\n axbar1 = axes[-1]\n axbar1.set_frame_on(False)\n if self.NChan > 1:\n axbar2=axbar1.twinx()\n axbar2.set_frame_on(False)\n axbar1.get_xaxis().set_visible(False)\n axbar1.set_xlim(0., self.NChan)\n axbar1.axvline(0, color = self.ChanColors[0])\n if self.NChan > 1:\n axbar1.axvline(self.NChan, color = self.ChanColors[1])\n axbar1.set_ylim(*self.ChanLim[0])\n axbar1.axhline(0., color='k', linestyle='-', lw=2, alpha=0.5)\n axbar1.set_ylabel('Chan A (Veff)', color = self.ChanColors[0])\n if self.NChan > 1:\n axbar2.set_ylim(*self.ChanLim[1])\n axbar2.set_ylabel('Chan B (Veff)', color = self.ChanColors[1])\n # Voltage in Text format\n axes.append(plt.subplot2grid((6,1),(0,0)) )\n axtxt=axes[-1]\n axtxt.set_frame_on(False)\n axtxt.get_xaxis().set_visible(False)\n axtxt.get_yaxis().set_visible(False)\n axtxt.set_title('Picoscope as Voltmeter', size='xx-large')\n\n self.fig = fig\n self.axes = axes\n self.axbar1 = axbar1\n if self.NChan > 1:\n self.axbar2 = axbar2\n# -- end def grVMeterIni\n\n def init(self):\n # initialize objects to be animated\n\n # a bar graph for the actual voltages\n# self.bgraph = self.axes[0].bar(ind, np.zeros(self.NChan), self.bwidth,\n# align='center', color='grey', alpha=0.5)\n self.bgraph1, = self.axbar1.bar(self.ind[0], 0. 
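# bar height starts at 0 and is updated on every frame in __call__\n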
, self.bwidth,\n                                    align='center', color = self.ChanColors[0], alpha=0.5) \n        if self.NChan > 1:\n            self.bgraph2, = self.axbar2.bar(self.ind[1], 0. , self.bwidth,\n                                        align='center', color = self.ChanColors[1], alpha=0.5) \n        # history graphs\n        self.graphs=()\n        for i, C in enumerate(self.ChanNams):\n            if i > 1:\n                break # max. of 2 channels\n            g,= self.axes[i].plot(self.ix, np.zeros(self.Npoints), \n                                  color=self.ChanColors[i])\n            self.graphs += (g,)\n        self.animtxt = self.axes[-1].text(0.01, 0.05 , ' ',\n                                          transform=self.axes[-1].transAxes,\n                                          size='large', color='darkblue')\n\n        self.t0=time.time() # remember start time\n\n        if self.NChan > 1 :\n            return (self.bgraph1,) + (self.bgraph2,) + self.graphs + (self.animtxt,) \n        else:\n            return (self.bgraph1,) + self.graphs + (self.animtxt,) \n# -- end VoltMeter.init()\n\n    def __call__( self, evt ):\n        n, evNr, evTime, evData = evt\n        if n == 0:\n            return self.init()\n\n        k=n%self.Npoints\n        txt_t='Time %.1fs' %(evTime) \n        txt=[]\n        for i, C in enumerate(self.ChanNams):\n            if i > 1: \n                break # works for 2 channels only\n            self.V[i] = np.sqrt (np.inner(evData[i], evData[i])/len(evData[i]) )\n            self.Vhist[i, k] = self.V[i]\n            self.stdV[i] = evData[i].std()\n            self.stdVhist[i, k] = self.stdV[i]\n            # update history graph\n            if n>1: # !!! fix to avoid permanent display of first object in blit mode\n                self.graphs[i].set_data(self.ix,\n                    np.concatenate((self.Vhist[i, k+1:], self.Vhist[i, :k+1]), axis=0) )\n            else:\n                self.graphs[i].set_data(self.ix, np.zeros(self.Npoints))\n            txt.append(' %s: %.3gV +/-%.2gV' % (C, self.Vhist[i,k], \n                                                self.stdVhist[i,k]) )\n        # update bar chart\n#        for r, v in zip(bgraph, V):\n#            r.set_height(v)\n        if n>1: # !!! fix to avoid permanent display of first object in blit mode\n            self.bgraph1.set_height(self.V[0])\n            if self.NChan > 1:\n                self.bgraph2.set_height(self.V[1])\n        else: \n            self.bgraph1.set_height(0.)\n            if self.NChan > 1:\n                self.bgraph2.set_height(0.)\n        if self.NChan > 1:\n            self.animtxt.set_text(txt_t + '\\n' + txt[0] + '\\n' + txt[1])\n        else:\n            self.animtxt.set_text(txt_t + '\\n' + txt[0])\n#\n        if self.NChan > 1 :\n            return (self.bgraph1,) + (self.bgraph2,) + self.graphs + (self.animtxt,)\n        else:\n            return (self.bgraph1,) + self.graphs + (self.animtxt,)\n# -- end def VoltMeter.__call__\n# -- end class VoltMeter\n","repo_name":"GuenterQuast/picoDAQ","sub_path":"picodaqa/VoltMeter.py","file_name":"VoltMeter.py","file_ext":"py","file_size_in_byte":6287,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"}
{"seq_id":"11336783818","text":"# 5. Given a number, 
compose a list of Fibonacci numbers, including those at negative indices.\n# Example:\n# for k = 8 the list looks like: [-21, 13, -8, 5, -3, 2, -1, 1, 0, 1, 1, 2, 3, 5, 8, 13, 21]\n# F(-n)=(-1)**(n+1)*F(n)\n\n\ndef number_fibonacci(k: int) -> list:\n    fibonacci_list = []\n    fib_1, fib_2 = 1, 1\n\n    for i in range(k):\n        fibonacci_list.append(fib_1)\n        fib_1, fib_2 = fib_2, fib_1 + fib_2  # fib_1 = fib_2, fib_2 = fib_1 + fib_2\n\n    fib_1, fib_2 = 0, 1\n\n    for i in range(k + 1):\n        fibonacci_list.insert(0, fib_1)  # fib_1 is added to the fibonacci_list at index [0].\n        # All elements after that index are shifted to the right\n        fib_1, fib_2 = fib_2, fib_1 - fib_2  # fib_1 = fib_2, fib_2 = fib_1 - fib_2\n\n    return fibonacci_list\n\n\nk = int(input('Enter a number for the Fibonacci list: '))\n\nprint(number_fibonacci(k))\n","repo_name":"Udodov/Python_Homework_3","sub_path":"home_task_3.5(for_insert).py","file_name":"home_task_3.5(for_insert).py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"15623621568","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymysql\n\n\nclass MysunproPipeline:\n    def __init__(self):\n        self.conn = pymysql.connect(host='localhost', user='root', passwd='rootpwd', db='sun', charset='utf8mb4')\n        self.cursor = self.conn.cursor()\n    def process_item(self, item, spider):\n        # Check the type of the item\n        # When writing into the database, keep the data consistent: id and num must match\n        if item.__class__.__name__ =='MyDetailItem':\n            print('-----> from the detail page')\n            print(item['id'], item['content'])\n            # Write the data into the database table\n            # sql = 'insert into sun2(num,content) values (\"%s\",\"%s\")' % (item['id'], item['content'])\n            sql = 'update sun2 set content=\"{0}\" where num={1}'.format(item['content'], item['id'])\n\n            print('-----> the SQL statement is:', sql)\n            self.cursor.execute(sql)\n            self.conn.commit()\n        else:\n            # Write the data into the database table\n            print('-----> from the list page')\n            print(item['num'], item['title'])\n            # sql = 'insert into sun2(title) values (\"%s\") where num = %s ' % (item['title'], item['num'])\n            sql = 'insert into sun2(num,title) values (\"%s\",\"%s\")' % (item['num'], item['title'])\n\n            print('-----> the SQL statement is:', sql)\n            self.cursor.execute(sql)\n            self.conn.commit()\n\n        return item\n","repo_name":"wzl1368611/spider_collection","sub_path":"mySunPro/mySunPro/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"72661485049","text":"import urllib.request\nimport cv2\n\nurl = [0,'http://100.72.226.182:8080/video','http://100.72.226.182:8080/video']\nds_factor=0.6\ncap = cv2.VideoCapture(url[1])\nclass VideoCamera(object):\n    def __init__(self):\n        #capturing video\n        self.video1 = cv2.VideoCapture(url[0])\n        self.video2 = cv2.VideoCapture(url[1])\n        self.video3 = cv2.VideoCapture(url[2])\n    \n    def __del__(self):\n        #releasing camera\n        self.video1.release()\n        self.video2.release()\n        self.video3.release()\n    \n    def get_frame(self):\n        #extracting frames\n        _, frame1 = self.video1.read()\n        _, frame2 = self.video2.read()\n        _, frame3 = self.video3.read()\n        frame1=cv2.resize(frame1,None,fx=ds_factor,fy=ds_factor,\n                         interpolation=cv2.INTER_AREA)\n        frame2=cv2.resize(frame2,None,fx=ds_factor,fy=ds_factor,\n                         interpolation=cv2.INTER_AREA)\n        frame3=cv2.resize(frame3,None,fx=ds_factor,fy=ds_factor,\n                         
interpolation=cv2.INTER_AREA)\n ret, jpeg1 = cv2.imencode('.jpg',frame1)\n ret, jpeg2 = cv2.imencode('.jpg',frame2)\n ret, jpeg3 = cv2.imencode('.jpg',frame3)\n return jpeg1.tobytes(), jpeg2.tobytes(), jpeg3.tobytes()\n ","repo_name":"jatinarora1/Theft-Detection","sub_path":"surveilliance/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"77"} +{"seq_id":"22985048719","text":"import argparse, pandas as pd, numpy\n\nif __name__ == '__main__':\n\n #arguments parsing\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file1\", help=\"File 1 to compare\")\n parser.add_argument(\"file2\", help=\"File 2 to compare\")\n parser.add_argument(\"outFile\", help=\"File to store the differences\")\n args = parser.parse_args()\n\n fileOne = pd.read_csv(args.file1, index_col=None, sep='\\t', dtype=str)\n fileTwo = pd.read_csv(args.file2, index_col=None, sep='\\t', dtype=str)\n\n with open(args.outFile, 'w') as outFile:\n outFile.write('\\t' + '\\t'.join(fileOne.columns) + '\\n')\n outFile.write(args.file1 + '\\t' + '\\t'.join(map(lambda col:str(fileOne[col].nunique()), fileOne.columns)) + '\\n')\n outFile.write(args.file2 + '\\t' + '\\t'.join(map(lambda col:str(fileTwo[col].nunique()), fileOne.columns)) + '\\n')\n outFile.write('\\t' + '\\t'.join(map(lambda col:str(fileOne[col].nunique() - fileTwo[col].nunique()), fileOne.columns)) + '\\n')\n","repo_name":"gbif/occurrence","sub_path":"occurrence-download/src/test/scripts/compare_files.py","file_name":"compare_files.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"77"} +{"seq_id":"22873513249","text":"import torch\n\nfrom lib.utils.misc import sample_descriptor\nfrom lib.utils.hard_mining.hard_example_mining_layer import hard_negative_mining\nfrom lib.utils.vis_logger import logger\n\n\nclass ConstrastiveEvaluator(object):\n def __call__(self, descs0, kps0, imgs0, descs1, kps1, imgs1, thresh=4, interval=4):\n \"\"\"\n Compute constrastive loss with hard negative mining\n \n :param descs0 descs1: (B, D, H', W'), downsampled\n :param kps0, kps1: (B, N, 2), original image scale\n :param imgs0, imgs1: (B, 3, H, W)\n :param thresh: mining threshold. NOTE, this is measure at descritpor map scale\n :param interval: mining interval. 
NOTE, this is measure at descritpor map scale\n :return:\n loss: total loss\n distance: distance between true correspondences\n similarity: similarity between false correspondences\n \"\"\"\n descs0 = sample_descriptor(descs0, kps0, imgs0) # [B, N, D]\n # descs2 = sample_descriptor(descr_maps1, kps2)\n descs2, kps2 = hard_negative_mining(descs0, descs1, kps1, imgs1, thresh, interval) # [B, N, D]\n logger.update(kps2=kps2[0])\n descs1 = sample_descriptor(descs1, kps1, imgs1) # [B, N, D]\n\n pos_dist = torch.norm(descs0 - descs1, 2, dim=2)\n neg_dist = torch.norm(descs0 - descs2, 2, dim=2)\n\n distance = torch.sum(pos_dist) / pos_dist.numel()\n # print(distance)\n\n similarity = 0.5 - neg_dist\n weight = similarity > 0\n similarity = torch.sum(similarity[weight]) / torch.clamp(torch.sum(weight).float(), min=1.)\n\n loss = distance + similarity\n\n return loss, distance, similarity\n","repo_name":"zhixuan-lin/descriptor-space","sub_path":"lib/modeling/evaluator/constrastive.py","file_name":"constrastive.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37903355583","text":"# -*- coding:utf-8 -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\nSTOP_RENDERING = runtime.STOP_RENDERING\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1601519179.6240678\n_enable_loop = True\n_template_filename = 'C:/Users/Trent/Documents/1. MISM (Semester 1)/LING 581/RestaurantReviews/restaurant/homepage/templates/base.htm'\n_template_uri = 'base.htm'\n_source_encoding = 'utf-8'\nimport django_mako_plus\nimport django.utils.html\n_exports = ['content']\n\n\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n self = context.get('self', UNDEFINED)\n def content():\n return render_content(context._locals(__M_locals))\n __M_writer = context.writer()\n __M_writer('\\r\\n\\r\\n \\r\\n \\r\\n\\r\\n Restaurant Reviews\\r\\n\\r\\n \\r\\n\\r\\n \\r\\n ')\n __M_writer(django_mako_plus.ExpressionPostProcessor(self)( django_mako_plus.links(self) ))\n __M_writer('\\r\\n\\r\\n \\r\\n \\r\\n\\r\\n
\\r\\n
Restaurant Reviews
\\r\\n
\\r\\n\\r\\n
\\r\\n ')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):\n context['self'].content(**pageargs)\n \n\n __M_writer('\\r\\n
\\r\\n\\r\\n \\r\\n\\r\\n \\r\\n\\r\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def content():\n return render_content(context)\n __M_writer = context.writer()\n __M_writer('\\r\\n Site content goes here in sub-templates.\\r\\n ')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"filename\": \"C:/Users/Trent/Documents/1. MISM (Semester 1)/LING 581/RestaurantReviews/restaurant/homepage/templates/base.htm\", \"uri\": \"base.htm\", \"source_encoding\": \"utf-8\", \"line_map\": {\"18\": 0, \"26\": 2, \"27\": 12, \"28\": 12, \"33\": 24, \"39\": 22, \"45\": 22, \"51\": 45}}\n__M_END_METADATA\n\"\"\"\n","repo_name":"mcmtrnt/RestaurantReviews","sub_path":"homepage/templates/__dmpcache__/base.htm.py","file_name":"base.htm.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"70120042168","text":"#!/usr/bin/env python3\n\nimport boto3\nimport sys\nimport time\nimport subprocess\nimport datetime\n\n\n#Varaibles\nstring1 = 'Name'\nstring2 = 'My web Server' \nfileTxt = open('textFile.txt', 'w+')\nreadFileTxt = open('textFile.txt', 'r')\nuserText = \"\"\"#!/bin/bash\n yum update -y\n yum install httpd -y\n systemctl enable httpd\n systemctl start httpd\"\"\"\nec2 = boto3.resource('ec2')\ns3 = boto3.resource(\"s3\")\nobject_name = 'image.jpg'\nkey = 'witacsresources'\n\n#Menu layout\ndef print_menu():\n\n print('+-----------------------------------------------------------+')\n print('| |')\n print('| AWS |')\n print('| |')\n print('+-----------------------------------------------------------+')\n print('| -1- | Run Program |')\n print('+-----------------------------------------------------------+')\n print('| -2- | List Instance |')\n print('| -3- | Terminate Instance |')\n print('+-----------------------------------------------------------+')\n print('| -4- | List Buckets |')\n print('| -5- | Terminate Bucket |')\n print('+-----------------------------------------------------------+')\n print('| -6- | View Log File |')\n print('| -7- | Cloud Watch Monitoring |')\n print('+-----------------------------------------------------------+')\n print('| -8- | Exit |')\n print('+-----------------------------------------------------------+') \n print(\"=====>> \"); \n\n#runs menu for user to selection option\ndef run_menu():\n\n #prints user menu\n print_menu()\n\n\n option_loop=True \n\n while option_loop: \n\n option = input('Enter your choice [1-8]: ')\n option = int(option)\n\n if option == 1:\n run_script()\n\n elif option == 2:\n list_instances()\n\n elif option == 3:\n terminate_instance()\n\n elif option == 4:\n list_buckets()\n\n elif option == 5:\n delete_bucket()\n\n elif option == 6:\n view_log_file()\n\n elif option == 7:\n cloud_watch_data()\n\n elif option == 8:\n exit()\n\n else:\n print(\"Incorrect option, Enter any key to try again..\")\n \n#runs assignment script\ndef run_script():\n\n bucket_name = input( 'Please enter a bucket name: ')\n\n #download image from bucket\n s3.Bucket(key).download_file(object_name,'image.jpg')\n \n #create bucket\n try:\n print('.............Creating Bucket..............')\n bucket = s3.create_bucket(Bucket = bucket_name, ACL ='public-read', CreateBucketConfiguration = {'LocationConstraint': 'eu-west-1'})\n print ('Bucket Name : ' + bucket_name)\n fileTxt.write('\\n Bucket created, Name : ' + 
bucket_name)\n except Exception as error:\n print (error)\n\n\n #put image into bucket\n try:\n print('.............Putting Image Into Bucket..............')\n response = s3.Object(bucket_name, object_name).put(Body = open(object_name, 'rb'))\n object = s3.Bucket(bucket_name).Object(object_name)\n object.Acl().put(ACL='public-read')\n print ('Bucket Image uploaded')\n except Exception as error:\n fileTxt.write('\\n Error when creating bucket')\n print (error)\n\n #sg-06ed518f0b82030e0\n #Creating Instance\n print('...........Creating Instance........')\n security_group = input('Please enter your security group id : ')\n #ec2 = boto3.resource('ec2')\n instance = ec2.create_instances(\n ImageId='ami-099a8245f5daa82bf', # specifies the AMI ID of the instance we want to create\n MinCount=1, # min number of instances to launch\n MaxCount=1, # max number of instaces to launch\n KeyName='mbarcoeweb_keypair', # created in aws portal, key pair for access to the instance\n UserData= userText,\n InstanceType='t2.micro', # specifies what type/size of hardware needed\n SecurityGroupIds=[security_group],\n TagSpecifications=[\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': string1,\n 'Value': string2\n },\n ]\n },\n ]\n )\n\n\n #id Of instance\n instance_id = instance[0].id\n\n print('----------------Instance Created----------------')\n fileTxt.write('Instance Created')\n\n\n print ('Instance Id : ' + instance_id)\n fileTxt.write('\\n Instance Id : ' + instance_id)\n\n time.sleep(30)\n\n print('----------------Fetching Ip------------------')\n instance[0].wait_until_running()\n instance[0].load()\n\n instance_ip = instance[0].public_ip_address\n print('Instance Ip: ' + instance_ip)\n fileTxt.write('\\n Instance Public Ip : ' + instance_ip)\n\n print('Loading Instance.........')\n time.sleep(20)\n\n #instance_ip = '54.171.235.200'\n\n ssh_cmd = 'ssh -o StrictHostKeyChecking=no -i mbarcoeweb_keypair.pem ec2-user@' + instance_ip\n\n print('----------------Creating Html file----------------')\n fileTxt.write('\\n Creating Html file ')\n\n #creates html tag and file\n x1 = 'echo \"\" > index.html'\n subprocess.run(x1, shell = True)\n print('Created text file and added html tag')\n fileTxt.write('\\n Created text file and added html tag')\n\n #Writes a header text page to html file\n x2 = 'echo \"

Test page

\" >> index.html'\n subprocess.run(x2, shell = True)\n print('Added header to html file')\n fileTxt.write('\\n Added header to html file')\n\n time.sleep(10)\n\n #Prints Instance Ip to html file\n x3 = 'echo \"
Instance ID: \" >> index.html'\n subprocess.run(x3, shell = True)\n cmd1 = ssh_cmd + \" curl --silent http://169.254.169.254/latest/meta-data/instance-id/ >> index.html\"\n subprocess.run(cmd1, shell = True)\n print('Instance id loaded to html page')\n fileTxt.write('\\n Instance id loaded to html page')\n\n\n #Prints availability zone to html file\n x4 = 'echo \"
Availability zone: \" >> index.html'\n subprocess.run(x4, shell= True)\n cmd2 = ssh_cmd + \" curl --silent http://169.254.169.254/latest/meta-data/placement/availability-zone/ >> index.html\"\n subprocess.run(cmd2, shell= True)\n print('Availability-zone loaded to html page')\n fileTxt.write('\\n Availability-zone loaded to html page')\n\n\n #Prints ip address to html file\n x5 = 'echo \"
IP address : \" >> index.html'\n subprocess.run(x5, shell= True)\n cmd3 = ssh_cmd + \" curl --silent http://169.254.169.254/latest/meta-data/public-ipv4 >> index.html\"\n subprocess.run(cmd3, shell =True)\n print('Ip address loaded to html page')\n fileTxt.write('\\n Ip address loaded to html page')\n\n #Prints ip address to html file\n x55 = 'echo \"
DNS : \" >> index.html'\n subprocess.run(x55, shell= True)\n cmd3 = ssh_cmd + \" curl --silent http://169.254.169.254/latest/meta-data/public-hostname >> index.html\"\n subprocess.run(cmd3, shell =True)\n print('DNS loaded to html page')\n fileTxt.write('\\n DNS loaded to html page')\n\n #Prints Instance Type to html file\n x555 = 'echo \"
Type : \" >> index.html'\n subprocess.run(x555, shell= True)\n cmd333 = ssh_cmd + \" curl --silent http://169.254.169.254/latest/meta-data/instance-type >> index.html\"\n subprocess.run(cmd333, shell =True)\n print('Instance Type loaded to html page')\n fileTxt.write('Instance Type loaded to html page')\n\n #Print image to \n x6 = 'echo \"
Here is the image:
\" >> index.html'\n subprocess.run(x6, shell= True)\n cmd4 = 'echo \"\" >> index.html'\n subprocess.run(cmd4, shell= True)\n print('Image Loaded')\n fileTxt.write('\\n Image Loaded')\n\n print('Pushing Index File to Instance')\n fileTxt.write('\\n Pushing Index File to Instance')\n time.sleep(10)\n\n #Copies local file index.html and push it to instance\n cmd5 = \"scp -i mbarcoeweb_keypair.pem index.html ec2-user@\" + instance_ip + \"':.'\"\n subprocess.run(cmd5, shell = True)\n print('Index.html pushed to instance')\n fileTxt.write('\\n Index.html pushed to instance')\n\n time.sleep(40)\n\n # copies Index file and put it in /var/www/html/ directory\n cmd6 = ssh_cmd + \" ' sudo cp index.html /var/www/html/index.html'\"\n subprocess.run(cmd6, shell = True)\n print('File moved')\n fileTxt.write('\\n File moved')\n fileTxt.close()\n print('Completed')\n\n#Lists all instnaces and there states\ndef list_instances():\n\n print('------------Current Instances------------')\n for instance in ec2.instances.all():\n print ('Instance Id: ' + instance.id ,instance.state)\n\n#terminates instance that is requested\ndef terminate_instance():\n\n term_inst = input('Are you sure you want to terminate instance? [y/n] ')\n\n if term_inst == 'y' :\n instance_id = input('Enter Instance id: ')\n instance = ec2.Instance(instance_id)\n response = instance.terminate()\n print ('Instance:' + instance_id + ' is terminated' )\n elif term_inst =='n':\n print('Termination Cancelled')\n run_menu()\n else: \n print('error')\n terminate_instance()\n\n#list all buckets\ndef list_buckets():\n\n for bucket in s3.buckets.all():\n print (bucket.name)\n\n#terminates bucket if empty\ndef delete_bucket():\n\n t_bucket = input('Please Enter Bucket name: ')\n bucket = s3.Bucket(t_bucket)\n \n try:\n response = bucket.delete()\n print (response)\n except Exception as error:\n print (error)\n\n#prints log file\ndef view_log_file():\n print(fileTxt.read())\n\ndef cloud_watch_data():\n\n cloudwatch = boto3.resource('cloudwatch')\n \n instid = input(\"Please enter instance ID: \") # Prompt the user to enter an Instance ID\n time.sleep(1000)\n instance = ec2.Instance(instid)\n #instance = ec2.Instance(instance_id)\n instance.monitor() # Enables detailed monitoring on instance (1-minute intervals)\n\n metric_iterator = cloudwatch.metrics.filter(Namespace='AWS/EC2',\n MetricName='CPUUtilization',\n Dimensions=[{'Name':'InstanceId', 'Value': instance_id}])\n\n metric = list(metric_iterator)[0] # extract first (only) element\n\n response = metric.get_statistics(StartTime = datetime.utcnow() - timedelta(minutes=5), # 5 minutes ago\n EndTime=datetime.utcnow(), # now\n Period=300, # 5 min intervals\n Statistics=['Average'])\n\n print (\"Average CPU utilisation:\", response['Datapoints'][0]['Average'], response['Datapoints'][0]['Unit'])\n # print (response) # for debugging only\n\n time.sleep()\n\nrun_menu()","repo_name":"Barcoe98/run_webserver","sub_path":"run_newwebserver.py","file_name":"run_newwebserver.py","file_ext":"py","file_size_in_byte":11327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"16528489079","text":"import ctypes.wintypes\n\n\nundname = ctypes.windll.dbghelp.UnDecorateSymbolName\nundname.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint]\n\n# If this does not work, it returns the input string.\ndef UndecorateSymbol(strSym):\n\tsizBuf = 200\n\twhile True:\n\t\tptrBuf = ctypes.create_string_buffer(\"\", sizBuf)\n\t\tsizActual = 
undname(strSym,ptrBuf,sizBuf,0)\n\t\tif sizActual < sizBuf - 2:\n\t\t\tstrRaw = ptrBuf.value\n\t\t\tbreak\n\t\tsizBuf *= 2\n\n\t# Now, some cleanup of useless strings. This tries to keep only the semantic information.\n\tfor subStr in [ \"__thiscall \", \"__cdecl\", \"class \",\"struct \",\" __ptr64\"]:\n\t\tstrRaw = strRaw.replace(subStr,\"\")\n\n\tfor subStr in [ \"private: \", \"public: \", \"protected: \"]:\n\t\tif strRaw.startswith(subStr):\n\t\t\tstrRaw = strRaw[ len(subStr): ]\n\t\t\tbreak\n\n\treturn strRaw\n\n","repo_name":"vchateauneu/survol","sub_path":"survol/lib_pefile.py","file_name":"lib_pefile.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"35406957501","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Word Embeddings\n\n# - The state-of-art method of vectorizing texts is to learn the numeric representations of words using deep learning methods.\n# - These deep-learning based numeric representations of linguistic units are commonly referred to as **embeddings**.\n# - Word embeddings can be learned either along with the target NLP task (e.g., the `Embedding` layer in RNN Language Model) or via an **unsupervised** method based on a large number of texts.\n# - In this tutorial, we will look at two main algorithms in `word2vec` that allow us to learn the word embeddings in an **unsupervised** manner from a large collection of texts.\n\n# - Strengths of word embeddings\n# - They can be learned using **unsupervised** methods.\n# - They include quite a proportion of the lexical **semantics**.\n# - They can be learned by **batch**. We don't have to process the entire corpus and create the word-by-document matrix for vectorization. \n# - Therefore, it is less likely to run into the **memory** capacity issue for huge corpora.\n\n# ## Overview\n\n# ### What is `word2vec`?\n# \n# - `Word2vec` is one of the most popular techniques to learn word embeddings using a two-layer neural network.\n# - The input is a **text corpus** and the output is a set of **word vectors**.\n# - Research has shown that these embeddings include rich semantic information of words, which allow us to perform interesting **semantic computation** (See Mikolov et al's works in References).\n\n# ### Basis of Word Embeddings: Distributional Semantics\n# \n# - \"*You shall know a word by the company it keeps*\" (Firth, 1975).\n# - Word distributions show a considerable amount of **lexical semantics**.\n# - Construction/Pattern distributions show a considerable amount of the **constructional semantics**.\n# - Semantics of linguistic units are implicitly or explicitly embedded in their distributions (i.e., *occurrences* and *co-occurrences*) in language use (**Distributional Semantics**).\n\n# ### Main training algorithms of `word2vec`\n# \n# - Continuous Bag-of-Words (**CBOW**): The general language modeling task for embeddings training is to learn a model that is capable of using the ***context*** words to predict a ***target*** word.\n# - **Skip-Gram**: The general language modeling task for embeddings training is to learn a model that is capable of using a ***target word*** to predict its ***context*** words.\n\n# ![](../images/word2vec.png)\n\n# - Other variants of embeddings training:\n# - `fasttext` from Facebook\n# - `GloVe` from Stanford NLP Group\n# - There are many ways to train work embeddings.\n# - `gensim`: Simplest and straightforward implementation of `word2vec`.\n# - Training based on deep learning 
packages (e.g., `keras`, `tensorflow`)\n# - `spacy` (It comes with the pre-trained embeddings models, using GloVe.)\n# - See Sarkar (2019), Chapter 4, for more comprehensive reviews.\n\n# ### An Intuitive Understanding of CBOW\n\n# ![](../images/word2vec-text-to-sequences.gif)\n\n# ![](../images/word2vec-cbow.gif)\n\n# ### An Intuitive Understanding of Skip-gram\n\n# ![](../images/word2vec-skipgram.gif)\n\n# ## Import necessary dependencies and settings\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport re\nimport nltk\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.rcParams['figure.dpi'] = 300\npd.options.display.max_colwidth = 200\n\n\n# In[2]:\n\n\n# # Google Colab Adhoc Setting\n# !nvidia-smi\n# nltk.download(['gutenberg','punkt','stopwords'])\n# !pip show spacy\n# !pip install --upgrade spacy\n# #!python -m spacy download en_core_web_trf\n# !python -m spacy download en_core_web_lg\n\n\n# ## Sample Corpus: A Naive Example\n\n# In[3]:\n\n\ncorpus = [\n 'The sky is blue and beautiful.', 'Love this blue and beautiful sky!',\n 'The quick brown fox jumps over the lazy dog.',\n \"A king's breakfast has sausages, ham, bacon, eggs, toast and beans\",\n 'I love green eggs, ham, sausages and bacon!',\n 'The brown fox is quick and the blue dog is lazy!',\n 'The sky is very blue and the sky is very beautiful today',\n 'The dog is lazy but the brown fox is quick!'\n]\nlabels = [\n 'weather', 'weather', 'animals', 'food', 'food', 'animals', 'weather',\n 'animals'\n]\n\ncorpus = np.array(corpus)\ncorpus_df = pd.DataFrame({'Document': corpus, 'Category': labels})\ncorpus_df = corpus_df[['Document', 'Category']]\ncorpus_df\n\n\n# ### Simple text pre-processing\n# \n# - Usually for unsupervised `word2vec` learning, we don't really need much text preprocessing.\n# - So we keep our preprocessing to the minimum.\n# - Remove only symbols/punctuations, as well as redundant whitespaces.\n# - Perform word tokenization, which would also determine the base units for embeddings learning.\n# \n\n# ### Suggestions\n# \n# - If you are using `keras` to build the network for embeddings training, please prepare your input corpus data for `Tokenizer()`in the format where each **token** is delimited by a **whitespace**.\n# - If you are using `gensim` to train word embeddings, please tokenize your corpus data first. That is, the `gensim` only requires a tokenized version of the corpus and it will learn the word embeddings for you. 
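\n# - For example, here is a minimal sketch (illustrative only; these two names are not used elsewhere in this notebook) of the two expected input formats:\n\nkeras_style_input = ['the sky is blue', 'the dog is lazy']  # whitespace-delimited strings for keras' Tokenizer()\ngensim_style_input = [['the', 'sky', 'is', 'blue'], ['the', 'dog', 'is', 'lazy']]  # pre-tokenized lists for gensim's word2vec\n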
\n\n# In[4]:\n\n\nwpt = nltk.WordPunctTokenizer()\n# stop_words = nltk.corpus.stopwords.words('english')\ndef preprocess_document(doc):\n # lower case and remove special characters\\whitespaces\n doc = re.sub(r'[^a-zA-Z\\s]', '', doc, re.I | re.A)\n doc = doc.lower()\n doc = doc.strip()\n # tokenize document\n tokens = wpt.tokenize(doc)\n doc = ' '.join(tokens)\n return doc\n\ncorpus_norm = [preprocess_document(text) for text in corpus]\ncorpus_tokens = [preprocess_document(text).split(' ') for text in corpus]\n\n\n# In[5]:\n\n\nprint(corpus_norm)\nprint(corpus_tokens)\n\n\n# ### Training Embeddings Using word2vec\n# \n# - The expected inputs of `gensim.model.word2vec` is token-based corpus object.\n\n# In[6]:\n\n\nget_ipython().run_cell_magic('time', '', '\\nfrom gensim.models import word2vec\\n\\n# Set values for various parameters\\nfeature_size = 10 \\nwindow_context = 5 \\nmin_word_count = 1 \\n\\nw2v_model = word2vec.Word2Vec(\\n corpus_tokens,\\n size=feature_size, # Word embeddings dimensionality\\n window=window_context, # Context window size\\n min_count=min_word_count, # Minimum word count\\n sg=1, # `1` for skip-gram; otherwise CBOW.\\n seed = 123, # random seed\\n workers=1, # number of cores to use\\n negative = 5, # how many negative samples should be drawn\\n cbow_mean = 1, # whether to use the average of context word embeddings or sum(concat)\\n iter=10000, # number of epochs for the entire corpus\\n batch_words=10000, # batch size\\n)\\n')\n\n\n# ### Visualizing Word Embeddings\n# \n# - Embeddings represent words in multidimensional space.\n# - We can inspect the quality of embeddings using dimensional reduction and visualize words in a 2D plot.\n\n# In[7]:\n\n\nfrom sklearn.manifold import TSNE\n\nwords = w2v_model.wv.index2word ## get the word forms of voculary\nwvs = w2v_model.wv[words] ## get embeddings of all word forms\n\ntsne = TSNE(n_components=2, random_state=0, n_iter=5000, perplexity=5)\nnp.set_printoptions(suppress=True)\nT = tsne.fit_transform(wvs)\nlabels = words\n\nplt.figure(figsize=(12, 6))\nplt.scatter(T[:, 0], T[:, 1], c='orange', edgecolors='r')\nfor label, x, y in zip(labels, T[:, 0], T[:, 1]):\n plt.annotate(label,\n xy=(x + 1, y + 1),\n xytext=(0, 0),\n textcoords='offset points')\n\n\n# - All trained word embeddings are included in `w2v_model.wv`.\n# - We can extract all word forms in the vocabulary from `w2v_model.wv.index2word`.\n# - We can easily extract embeddings for any specific words from `w2v_model.wv`.\n\n# In[8]:\n\n\nw2v_model.wv.index2word[:5]\n\n\n# In[9]:\n\n\n[w2v_model.wv[w] for w in w2v_model.wv.index2word[:5]]\n\n\n# ### From Word Embeddings to Document Embeddings\n# \n# - With word embeddings, we can compute the **average embeddings** for the entire document, i.e., the ***document embeddings***.\n# - These document embeddings are also assumed to have included considerable semantic information of the document.\n# - We can for example use them for document classification/clustering.\n\n# In[10]:\n\n\ndef average_word_vectors(words, model, vocabulary, num_features):\n\n feature_vector = np.zeros((num_features, ), dtype=\"float64\")\n nwords = 0.\n\n for word in words:\n if word in vocabulary:\n nwords = nwords + 1.\n feature_vector = np.add(feature_vector, model[word])\n\n if nwords:\n feature_vector = np.divide(feature_vector, nwords)\n\n return feature_vector\n\n\ndef averaged_word_vectorizer(corpus, model, num_features):\n vocabulary = set(model.wv.index2word)\n features = [\n average_word_vectors(tokenized_sentence, model, 
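# one averaged vector per tokenized document\n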
vocabulary,\n num_features) for tokenized_sentence in corpus\n ]\n return np.array(features)\n\n\n# In[11]:\n\n\nw2v_feature_array = averaged_word_vectorizer(corpus=corpus_tokens,\n model=w2v_model,\n num_features=feature_size)\npd.DataFrame(w2v_feature_array, index=corpus_norm)\n\n\n# - Let's cluster these documents based on their **document embeddings**.\n\n# In[12]:\n\n\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport pandas as pd\n\nsimilarity_doc_matrix = cosine_similarity(w2v_feature_array)\nsimilarity_doc_df = pd.DataFrame(similarity_doc_matrix)\nsimilarity_doc_df\n\n\n# In[13]:\n\n\nfrom scipy.cluster.hierarchy import dendrogram, linkage\n\nZ = linkage(similarity_doc_matrix, 'ward')\nplt.title('Hierarchical Clustering Dendrogram')\nplt.xlabel('Data point')\nplt.ylabel('Distance')\ndendrogram(Z,\n labels=corpus_norm,\n leaf_rotation=0,\n leaf_font_size=8,\n orientation='right',\n color_threshold=0.5)\nplt.axvline(x=0.5, c='k', ls='--', lw=0.5)\n\n\n# In[14]:\n\n\n## Other Clustering Methods\n\nfrom sklearn.cluster import AffinityPropagation\n\nap = AffinityPropagation()\nap.fit(w2v_feature_array)\ncluster_labels = ap.labels_\ncluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])\npd.concat([corpus_df, cluster_labels], axis=1)\n\n## PCA Plotting\nfrom sklearn.decomposition import PCA\n\npca = PCA(n_components=2, random_state=0)\npcs = pca.fit_transform(w2v_feature_array)\nlabels = ap.labels_\ncategories = list(corpus_df['Category'])\nplt.figure(figsize=(8, 6))\n\nfor i in range(len(labels)):\n label = labels[i]\n color = 'orange' if label == 0 else 'blue' if label == 1 else 'green'\n annotation_label = categories[i]\n x, y = pcs[i]\n plt.scatter(x, y, c=color, edgecolors='k')\n plt.annotate(annotation_label,\n xy=(x + 1e-4, y + 1e-3),\n xytext=(0, 0),\n textcoords='offset points')\n\n\n# ## Using Pre-trained Embeddings: GloVe in `spacy`\n\n# In[15]:\n\n\nimport spacy\n\n\nnlp = spacy.load('en_core_web_lg',disable=['parse','entity'])\n\ntotal_vectors = len(nlp.vocab.vectors)\nprint('Total word vectors:', total_vectors)\n\n\n# In[16]:\n\n\nprint(spacy.__version__)\n\n\n# ### Visualize GloVe word embeddings\n# \n# - Let's extract the GloVe pretrained embeddings for all the words in our simple corpus.\n# - And we visualize their embeddings in a 2D plot via dimensional reduction.\n\n# :::{warning}\n# When using pre-trained embeddings, there are two important things:\n# - Be very careful of the **tokenization** methods used in your text preprocessing. 
If you use a very different word tokenization method, you may find a lot of **unknown** words that are not included in the pretrained model.\n# - Always check the **proportion of the unknown words** when vectorizing your corpus texts with pre-trained embeddings.\n# :::\n\n# In[17]:\n\n\n# get vocab of the corpus\nunique_words = set(sum(corpus_tokens,[]))\n\n# extract pre-trained embeddings of all words\nword_glove_vectors = np.array([nlp(word).vector for word in unique_words])\npd.DataFrame(word_glove_vectors, index=unique_words)\n\n\n# In[18]:\n\n\nfrom sklearn.manifold import TSNE\n\ntsne = TSNE(n_components=2, random_state=0, n_iter=5000, perplexity=5)\nnp.set_printoptions(suppress=True)\nT = tsne.fit_transform(word_glove_vectors)\nlabels = unique_words\n\nplt.figure(figsize=(12, 6))\nplt.scatter(T[:, 0], T[:, 1], c='orange', edgecolors='r')\nfor label, x, y in zip(labels, T[:, 0], T[:, 1]):\n plt.annotate(label,\n xy=(x + 1, y + 1),\n xytext=(0, 0),\n textcoords='offset points')\n \n\n\n# - It is clear to see that when embeddings are trained based on a larger corpus, they reflect more lexical semantic contents.\n# - Semantically similar words are indeed closer to each other in the 2D plot.\n\n# - We can of course perform the document-level clustering again using the GloVe embeddings.\n# - The good thing about `spacy` is that it can compute the document average embeddings automatically.\n\n# In[19]:\n\n\ndoc_glove_vectors = np.array([nlp(str(doc)).vector for doc in corpus_norm])\n\nimport sklearn\nfrom sklearn.cluster import KMeans\nkm = KMeans(n_clusters=3, random_state=0)\nkm.fit_transform(doc_glove_vectors)\ncluster_labels = km.labels_\ncluster_labels = pd.DataFrame(cluster_labels, columns=['ClusterLabel'])\npd.concat([corpus_df, cluster_labels], axis=1)\n\n\n# ## `fasttext`\n# \n# - This section shows a quick example how to train word embeddings based on the `nltk.corpus.brown` using another algorithm, i.e., `fasttext`.\n# - The FastText model was introduced by Facebook in 2016 as an improved and extended version of the `word2vec` (See Bojanowski et al [2017] in References below).\n# - We will focus more on the implementation. 
Please see the Bojanowski et al (2017) as well as Sarkar (2019) Chapter 4 for more comprehensive descriptions of the method.\n# - Pretrained FastText Embeddings are available [here](https://fasttext.cc/docs/en/english-vectors.html).\n\n# In[20]:\n\n\nfrom gensim.models.fasttext import FastText\nfrom nltk.corpus import brown\n\nbrown_tokens = [brown.words(fileids=f) for f in brown.fileids()]\n\n\n# In[21]:\n\n\nget_ipython().run_cell_magic('time', '', '# Set values for various parameters\\nfeature_size = 100 # Word vector dimensionality\\nwindow_context = 5 # Context window size\\nmin_word_count = 5 # Minimum word count\\n\\nft_model = FastText(brown_tokens,\\n size=feature_size,\\n window=window_context,\\n min_count=min_word_count,\\n sg=1,\\n iter=50)\\n')\n\n\n# - We can use the trained embeddings model to identify words that are similar to a set of seed words.\n# - And then we plot all these words (i.e., the seed words and their semantic neighbors) in one 2D plot based on the dimensional reduction of their embeddings.\n\n# In[22]:\n\n\n# view similar words based on gensim's model\nsimilar_words = {\n search_term:\n [item[0] for item in ft_model.wv.most_similar([search_term], topn=5)]\n for search_term in\n ['think', 'say','news', 'report','nation', 'democracy']\n}\nsimilar_words\n\n\n# In[23]:\n\n\nfrom sklearn.decomposition import PCA\n\nwords = sum([[k] + v for k, v in similar_words.items()], [])\nwvs = ft_model.wv[words]\n\npca = PCA(n_components=2)\nnp.set_printoptions(suppress=True)\nP = pca.fit_transform(wvs)\nlabels = words\n\nplt.figure(figsize=(12, 10))\nplt.scatter(P[:, 0], P[:, 1], c='lightgreen', edgecolors='g')\nfor label, x, y in zip(labels, P[:, 0], P[:, 1]):\n plt.annotate(label,\n xy=(x + 0.03, y + 0.03),\n xytext=(0, 0),\n textcoords='offset points')\n\n\n# In[24]:\n\n\nft_model.wv['democracy']\n\n\n# In[25]:\n\n\nprint(ft_model.wv.similarity(w1='taiwan', w2='freedom'))\nprint(ft_model.wv.similarity(w1='china', w2='freedom'))\n\n\n# ## Wrap-up\n# \n# - Two fundamental deep-learning-based models of word representation learning: CBOW and Skip-Gram.\n# - From word embeddings to document embeddings\n# - More advanced representation learning models: GloVe and FastText.\n# - What is more challenging is how to assess the quality of the learned representations (embeddings). Usually embedding models can be evaluated based on their performance on semantics related tasks, such as word similarity and analogy. For those who are interested, you can start with the following two papers on Chinese embeddings:\n# - Chi-Yen Chen, Wei-Yun Ma. 2018. \"[Word Embedding Evaluation Datasets and Wikipedia Title Embedding for Chinese](http://www.lrec-conf.org/proceedings/lrec2018/pdf/159.pdf),\" Language Resources and Evaluation Conference. \n# - Chi-Yen Chen, Wei-Yun Ma. 2017. \"[Embedding Wikipedia Title Based on Its Wikipedia Text and Categories](https://ieeexplore.ieee.org/document/8300566),\" International Conference on Asian Language Processing.\n# \n\n# ## References\n# \n# - Sarkar (2020) Ch 4 Feature Engineering for Text Representation\n# - Major Readings:\n# - Harris,Zellig. 1956. [Distributional structure](http://www.tandfonline.com/doi/pdf/10.1080/00437956.1954.11659520).\n# - Bengio, Yoshuan, et. al. 2003. [A Neural Probabilistic Language Model](http://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf).\n# - Collobert, Ronana and Jason Weston. 2008. 
[A Unified Architecture for Natural Language Processing: Deep Neural Networks with Multitask Learning](https://ronan.collobert.com/pub/matos/2008_nlp_icml.pdf).\n# - Schwenk, Holger. 2007.[Continuous space language models](https://pdfs.semanticscholar.org/0fcc/184b3b90405ec3ceafd6a4007c749df7c363.pdf).\n# - Mikolov, Tomas, et al. 2013. [Efficient estimation of word representations in vector space](https://arxiv.org/abs/1301.3781). arXiv preprint arXiv:1301.3781. \n# - Mikolov, Tomas, et al. 2013. [Distributed representations of words and phrases and their compositionally](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). *Advances in neural information processing systems*. 2013.\n# - Baroni, Marco, et. al. 2014. [Don’t count, predict! A systematic comparison of context-counting vs. context-predicting semantic vectors](https://www.aclweb.org/anthology/P14-1023/). *ACL*(1).\n# - Pennington, Jeffrey, et al. 2014. [GloVe: Global Vectors for Word Representation](https://nlp.stanford.edu/pubs/glove.pdf). *EMNLP*. Vol. 14.\n# - Bojanowski, P., Grave, E., Joulin, A., & Mikolov, T. (2017). [Enriching word vectors with subword information](https://doi.org/10.1162/tacl_a_00051). *Transactions of the Association for Computational Linguistics*, 5, 135-146.\n# - [GloVe Project Official Website](https://nlp.stanford.edu/projects/glove/): You can download their pre-trained GloVe models.\n# - [FastText Project Website](https://fasttext.cc/docs/en/english-vectors.html): You can download the English pre-trained FastText models.\n# \n","repo_name":"alvinntnu/NTNU_ENC2045_LECTURES","sub_path":"_build/jupyter_execute/nlp/text-vec-embedding.py","file_name":"text-vec-embedding.py","file_ext":"py","file_size_in_byte":18820,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"23509964440","text":"import os\nimport json\n##############################\n# PROJECT ENGLISH DICTIONARY\n# BY PIYUSH, NANDINI, PRATIK\n# GOVERNMENT POLYTECHNIC, JINTUR\n###############################\nprint('*'*5,'English Dictionary','*'*5)\n\ndef opt1():\n word = input('Enter the word: ')\n meaning = input('Enter the meaning: ')\n if os.path.isfile('words1.txt'):\n f = open('words1.txt','r')\n temp = json.load(f) \n temp[word] = meaning \n f.close()\n\n f = open('words1.txt','w')\n json.dump(temp,f) \n f.close()\n else:\n f = open('words1.txt','w')\n dict1 = {word:meaning}\n json.dump(dict1,f)\n f.close()\n \ndef opt2():\n with open('words1.txt') as f:\n x = json.load(f)\n word = input('Enter a word to find its meaning : ')\n for i in x:\n if word in x:\n print ('The meaning of',word,'is',x[word])\n break\n else:\n print('This dictionary does not have an entry for',word)\n break\n\n \ndef opt3():\n y = input('Enter a word of which you want to update the meaning : ')\n z = input('Enter the meaning of the word : ')\n with open('words1.txt') as f:\n x = json.load(f)\n xcopy = {**x}\n for i in xcopy:\n if y not in x:\n print(\"Error: The entered word doesn't exist in the dictionary. 
Please try again.\")\n                break\n            else:\n                x[y] = z\n                print('The updated meaning of',y,'is',z)\n        print(x)\n        f = open('words1.txt','w')\n        json.dump(x,f)\n        f.close()\n\ndef opt4():\n    y = input('Enter a word of which you want to remove : ')\n    with open(\"words1.txt\", \"r\") as fp:\n        x = json.load(fp)\n    if y in x:\n        del x[y]\n        # write the updated dictionary back, keeping the JSON format used by the other options\n        with open(\"words1.txt\", \"w\") as fp:\n            json.dump(x, fp)\n    else:\n        print(\"Error: The entered word doesn't exist in the dictionary. Please try again.\")\n\ndef opt5():\n    print('Thanks for choosing us!\\n Exiting now...')\n    exit()\n    \ndef mainmenu():\n\n    print('\\nMain Menu\\n')\n\n    print('1. Add a new word\\n')\n\n    print('2. Find the meaning\\n')\n\n    print('3. Update a word\\n')\n\n    print('4. Remove a word\\n')\n\n    print('5. Exit\\n')\n\n    x = int(input('Enter a choice: '))\n\n    if x == 1:\n        opt1()\n        mainmenu()\n    elif x == 2:\n        opt2()\n        mainmenu()\n    elif x == 3:\n        opt3()\n        mainmenu()\n    elif x == 4:\n        opt4()\n        mainmenu()\n    else:\n        opt5()\n    \nmainmenu()\n","repo_name":"piyushL337/english-Dictionary","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39196266414","text":"import streamlit as st\nfrom utils import *\nfrom linear_optimization import *\n\n\nst.set_page_config(layout=\"wide\")\nst.title('Linear Programming Solver')\n\ncols = st.columns(2)\nwith cols[0]:\n    num_variables = st.number_input('Insert number of variables', min_value=1, step=1)\nwith cols[1]:\n    num_constraints = st.number_input('Insert number of constraints', min_value=1, step=1)\n\n# Objective function\nst.subheader('Objective Function')\n\nmin_max, _ = st.columns((0.2, 0.8))\nwith min_max:\n    min_max = st.selectbox('', ['max', 'min'])\n\nobj_cols = st.columns(num_variables+2)\n\nobj_coeff = [0 for i in range(num_variables)]\nfor i in range(num_variables):\n    with obj_cols[i]:\n        obj_coeff[i] = st.number_input(f'x{get_sub(str(i))}', key=f'obj_{i}', \n                                    # value=None,\n                                    step=0.1, \n                                    format='%.1f'\n                                    )\n        # try:\n        #     obj_coeff[i] = float(st.text_input(f'x{get_sub(str(i))}', key=f'obj_{i}', placeholder='0'))\n        # except Exception as e:\n        #     st.exception(e)\n\n\nst.subheader('Constraints')\nconstraints_coeff = [[0 for i in range(num_variables)] for j in range(num_constraints)]\nsigns = ['' for i in range(num_constraints)]\n# right hand side\nrhs = [0 for i in range(num_constraints)]\n\nfor i in range(num_constraints):\n    cstr_cols = st.columns(num_variables+2)\n    for j in range(num_variables):\n        with cstr_cols[j]:\n            constraints_coeff[i][j] = st.number_input(f'x{get_sub(str(j))}', key=f'cstr_{i}{j}', \n                                    # value=None,\n                                    step=0.1, \n                                    format='%.1f'\n                                    )\n    \n    with cstr_cols[-2]:\n        signs[i] = st.selectbox('', [u'\\u2264', '=', u'\\u2265'], key=f'sign_{i}')\n    \n    with cstr_cols[-1]:\n        rhs[i] = st.number_input('', key=f'rhs_{i}', \n                                    # value=None, \n                                    step=0.1, \n                                    format='%.1f'\n                                    )\n\nx_cstr_string = ', '.
join([f'x{get_sub(str(i))}' for i in range(num_variables)]) + ' ' + u'\\u2265' + ' ' + '0'\n\nst.subheader(x_cstr_string)\n\nN, b, c = get_standard_form(min_max, obj_coeff, constraints_coeff, signs, rhs)\n\nsolve = st.button('Solve')\n\nif solve:\n try:\n assert sum([coeff != 0 for coeff in obj_coeff]) != 0 # at least 1 coeff != 0\n except AssertionError as e:\n st.exception(\"Require at least 1 non-zero coefficient of objective function!\")\n solve = False\n\n try:\n assert sum([sum([coeff != 0 for coeff in constraint])!=0 for constraint in constraints_coeff]) == len(constraints_coeff) # all row require have at least 1 coeff\n except AssertionError as e:\n st.exception(\"All constraint row require have at least 1 coefficient!\")\n solve = False\n\nif solve:\n print(\"Solving ...\")\n\n status, solution, optimal_value, _, _, _, _, _, _, solution_str = optimize(N, b, c)\n \n tab1, tab2 = st.tabs(('Result', 'Steps'))\n\n with tab1:\n # st.subheader(status)\n if status == 'Optimal':\n st.subheader('Optimal solution')\n _, c = st.columns((0.2, 0.8))\n with c:\n for i in range(len(solution)):\n st.subheader(f'x{get_sub(str(i))} = {solution[i]}')\n st.subheader('Optimal value')\n _, c = st.columns((0.2, 0.8)) \n c.subheader(str(optimal_value))\n\n else:\n st.subheader(f\"The problem is {status}.\")\n\n with tab2:\n st.code(solution_str)\n","repo_name":"phamthaihoangtung/Linear-Programming-Solver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30990986362","text":"from flask import Flask, render_template, redirect, request\nfrom user import User\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/create_user\", methods=[\"POST\"])\ndef create_users():\n data = {\n \"first_name\": request.form['first_name'],\n \"last_name\": request.form['last_name'],\n \"email\": request.form['email']\n }\n User.create_user(data)\n return redirect(\"/all_users\")\n\n@app.route(\"/all_users\")\ndef all_users():\n users = User.get_all()\n return render_template(\"/all_users.html\", users=users)\n\n@app.route(\"/home\")\ndef home():\n return redirect(\"/\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"Trevor-D-Anderson/CRUD_MySQL","sub_path":"Users/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21524914027","text":"import click\nimport os\nimport torch\nimport random\nfrom datetime import datetime\nfrom PIL import Image\n\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom torchvision import transforms\n\nimport numpy as np\n\nfrom utils import init_device_seed\nfrom model_cartoongan import CartoonGANGenerator\nfrom model_animegan import AnimeGANGenerator\n\n\n@click.command()\n@click.option('--image_path', default='./data/cartoon_dataset/val')\n@click.option('--model_name', default='cartoongan')\n@click.option('--is_crop', type=bool, default=False)\n@click.option('--cuda_visible', default='0')\ndef test(image_path, model_name, is_crop, cuda_visible):\n device = init_device_seed(1234, cuda_visible)\n os.makedirs('./result', exist_ok=True)\n\n if model_name == 'cartoongan':\n checkpoint = torch.load('./model/cartoongan', map_location=device)\n 
generator = CartoonGANGenerator().to(device)\n    else:\n        checkpoint = torch.load('./model/animegan', map_location=device)\n        generator = AnimeGANGenerator().to(device)\n\n    generator.load_state_dict(checkpoint['generator_state_dict'])\n    generator.eval()\n\n    to_tensor = transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n    ])\n    to_pil = transforms.Compose([\n        transforms.Normalize(mean=(-1, -1, -1), std=(2, 2, 2)),\n        transforms.ToPILImage()\n    ])\n\n    if os.path.isdir(image_path):\n        files_list = []\n        file_names_list = os.listdir(image_path)\n        for file_name in file_names_list:\n            files_list.append(os.path.join(image_path, file_name))\n    else:\n        files_list = [image_path]\n\n    # create the output directory for both directory and single-file inputs\n    output_dir = './result/{}'.format(datetime.now().strftime('%Y-%m-%d %H_%M_%S'))\n    os.makedirs(output_dir, exist_ok=True)\n\n    for idx, file_path in enumerate(files_list):\n        file_name = '.'.join(os.path.basename(file_path).split('.')[:-1])\n        print('\\r{}/{} {}'.format(idx, len(files_list), file_name), end=' ')\n        \n        image = Image.open(file_path)\n        size_min = min(image.size)\n\n        transform = transforms.Compose([\n            transforms.CenterCrop((size_min, size_min)),\n            transforms.RandomHorizontalFlip(),\n            transforms.Resize((256, 256))\n        ])\n\n        if is_crop:\n            image = transform(image)\n            image.save('{}/{}_orig.jpg'.format(output_dir,file_name))\n        else:\n            image = image.crop((0, 0, image.size[0] - image.size[0] % 4, image.size[1] - image.size[1] % 4))\n\n        image = to_tensor(image)\n        image = torch.unsqueeze(image, 0).to(device)\n\n        output = generator(image).detach().cpu()[0]\n        output = to_pil(output)\n\n        output.save('{}/{}.jpg'.format(output_dir,file_name))\n\n\nif __name__ == '__main__':\n    test()","repo_name":"Snailpong/style_transfer_implementation","sub_path":"test_cartoongan.py","file_name":"test_cartoongan.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"72040947769","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.manifold import TSNE\nfrom stock_data import get_list\n\n\ndef tsne():\n    \"\"\"\n\n    It has the same functionality as k_means()\n\n    \"\"\"\n\n    tsne = TSNE(n_components=2, # Dimension of the embedded space = 2.\n                verbose=1, # It produces lots of logging output.\n                perplexity=75, # Perplexity: roughly the number of effective neighbors.\n                n_iter=1000) # Number of iterations run: default 1000.\n\n    tsne_results = tsne.fit_transform(get_list())\n    #print(tsne_results)\n\n    df_subset = pd.DataFrame.from_dict({})\n    df_subset['X'] = tsne_results[:, 0]\n    df_subset['Y'] = tsne_results[:, 1]\n    #print(df_subset)\n    grad = df_subset.eval(\"X / Y\").rename(\"grad\")\n\n    sns.scatterplot(\n        #palette=sns.color_palette(\"hls\", 10),\n        data=df_subset,\n        x='X',\n        y=\"Y\",\n        hue = grad\n    )\n\n    plt.savefig(\"/Users/mcarmentz/Desktop/stock_screener/figures/tsne.png\") # Figures are saved in figures directory.\n\ndef test_run():\n    \"\"\"Function called by Test Run.\"\"\"\n    tsne()\n\nif __name__ == \"__main__\":\n    test_run()\n\n\n\n","repo_name":"jiatangzhi/stock_screener","sub_path":"stock_screener/tsne.py","file_name":"tsne.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28036040078","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 10 11:38:37 2022\n\n@author: imargolin\n\"\"\"\n\nfrom torch.optim import Adam\n\nfrom typing import Any, 
List, Set, Dict, Tuple\n\nimport torch.nn.functional as F\nimport torch\nfrom tqdm import tqdm\nfrom torch.optim import lr_scheduler\nimport mlflow\nfrom mlflow.tracking import MlflowClient\nimport numpy as np\nimport pandas as pd\nfrom torch import nn\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data import DataLoader\nfrom ordinaloss.utils.metric_utils import RunningMetric, BinCounter, StatsCollector\nfrom ordinaloss.utils.metric_utils import accuracy_pytorch, mae_pytorch, calc_cost_metric\nfrom ordinaloss.utils.basic_utils import get_only_metrics\nfrom sklearn.metrics import accuracy_score, mean_absolute_error\nimport os\nfrom pathlib import Path\nfrom torch.optim.lr_scheduler import StepLR\nimport secrets\n\nprint(f\"loaded {__name__}\")\n\nclass EarlyStopper:\n def __init__(self, patience:int = 5, min_delta:float =1.0):\n \"\"\"My small implementation for early stopping.\n\n Args:\n patience (int, optional): How long to wait after last time validation loss improved. Defaults to 5.\n min_delta (float, optional): Minimum change in the monitored quantity to qualify as an improvement. Defaults to 1.0\n \"\"\"\n self.patience = patience\n self.min_delta = min_delta #We want the loss to be 0.99 from the previous epoch.\n self.counter = 0\n self.min_validation_loss = np.inf\n self.is_best_model = False\n self.early_stop = False\n\n def step(self, loss:float):\n \"\"\"Stepping the EarlyStopper with one more loss, checks whether should stop.\n\n Args:\n loss (float): The loss to be monitored\n \"\"\"\n\n if loss < self.min_validation_loss * self.min_delta:\n #New loss was found!\n self.counter = 0 #Reset the counter\n self.min_validation_loss = loss\n self.is_best_model = True\n \n else: #Not enough imporvement\n \n self.counter+=1\n print(f\"Strike {self.counter} / {self.patience}\")\n self.is_best_model = loss < self.min_validation_loss #yet might be new best.\n\n if self.counter>=self.patience:\n self.early_stop = True\n\nclass LRScheduler:\n def __init__(self, init_lr=1.0e-4, lr_decay_epoch=10, \n lr_decay_factor = 0.9):\n\n self.init_lr = init_lr\n self.lr_decay_epoch = lr_decay_epoch\n self.lr_decay_factor = lr_decay_factor\n\n def step(self):\n pass\n\n def __call__(self, optimizer, epoch):\n '''Decay learning rate by a factor every lr_decay_epoch epochs.'''\n lr = self.init_lr * (self.lr_decay_factor ** (epoch // self.lr_decay_epoch))\n lr = max(lr, 1e-8)\n if epoch % self.lr_decay_epoch == 0:\n print('LR is set to {}'.format(lr))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n print ('LR is set to {}'.format(lr))\n \n return optimizer\n\nclass SingleGPUTrainer:\n def __init__(\n self, \n model: nn.Module, \n loaders: Dict[str, DataLoader],\n optimizer: torch.optim.Optimizer, \n gpu_id: int,\n save_every: int,\n num_classes:int\n ):\n\n self.gpu_id = gpu_id\n self.model = model.to(self.gpu_id)\n self.loaders = loaders\n\n self.optimizer = optimizer\n self.save_every = save_every\n self.num_classes = num_classes\n self.epochs_trained = 0\n self.model_id = secrets.token_hex(nbytes=16)\n self.checkpoint_path = f\"{self.model_id}.pt\"\n\n def forward(self, X):\n return F.softmax(self.model(X), dim = 1) #Normalized \n\n def prepare_input(self, X, y):\n return X.to(self.gpu_id), y.to(self.gpu_id)\n\n def _train_epoch(self) -> dict[str, Any]:\n \n self.model.train()\n\n loss_metric = RunningMetric()\n collector = StatsCollector()\n\n loader = tqdm(self.loaders[\"train\"], total = len(self.loaders[\"train\"]), desc= f\"Training, 
epoch {self.epochs_trained}\")\n\n for X, y in loader:\n\n #Batch iteration\n X, y = self.prepare_input(X, y)\n batch_size = y.shape[0]\n\n self.optimizer.zero_grad()\n y_pred = self.forward(X)\n\n loss = self.loss_fn(y_pred, y)\n loss.backward()\n\n self.optimizer.step()\n\n loss_metric.update(loss.item(), batch_size)\n collector.update(y_pred, y)\n \n loader.set_postfix(\n loss = loss_metric.average)\n\n y_pred_all = collector.collect_y_pred() #(N, C)\n y_pred_argmax = y_pred_all.argmax(axis=1) #(N,)\n y_true_all = collector.collect_y_true()\n\n mae = mean_absolute_error(y_true_all, y_pred_argmax)\n accuracy = accuracy_score(y_true_all, y_pred_argmax)\n cost = calc_cost_metric(y_true=y_true_all, y_pred=y_pred_argmax, n_classes=self.num_classes)\n \n distribution = np.bincount(y_pred_argmax, minlength=self.num_classes)\n distribution = distribution/distribution.sum()\n\n self.epochs_trained +=1\n\n results = {\n \"train_distribution\": distribution, #numpy array\n \"train_loss\": loss_metric.average, #single value\n \"train_accuracy\":accuracy, #single value\n \"train_mae\": mae,\n \"train_cost\": cost\n }\n\n return results\n \n @torch.no_grad()\n def _eval_epoch(self, phase) -> dict[str, Any]:\n\n self.model.eval()\n\n loss_metric = RunningMetric() \n collector = StatsCollector()\n\n loader = self.loaders[phase]\n\n for X, y in loader:\n X, y = self.prepare_input(X, y)\n batch_size = y.shape[0]\n y_pred = self.forward(X)\n loss = self.loss_fn(y_pred, y)\n\n loss_metric.update(loss.item(), batch_size)\n collector.update(y_pred, y)\n \n y_pred_all = collector.collect_y_pred()\n y_pred_argmax = y_pred_all.argmax(axis=1)\n y_true_all = collector.collect_y_true()\n\n if phase ==\"test\":\n self.log_predictions(y_pred_all)\n \n #Some metrics\n mae = mean_absolute_error(y_true_all, y_pred_argmax)\n accuracy = accuracy_score(y_true_all, y_pred_argmax)\n cost = calc_cost_metric(y_true=y_true_all, y_pred=y_pred_argmax, n_classes=self.num_classes)\n \n distribution = np.bincount(y_pred_argmax, minlength=self.num_classes)\n distribution = distribution/distribution.sum()\n\n results = {\n f\"{phase}_distribution\": distribution, #numpy array\n f\"{phase}_loss\": loss_metric.average, #single value\n f\"{phase}_accuracy\":accuracy, #single value\n f\"{phase}_mae\": mae, #single value\n f\"{phase}_cost\": cost, #single value\n }\n \n return results\n \n def log_predictions(self, y_pred_all):\n my_df = pd.DataFrame(y_pred_all)\n path = f\"{self.model_id}_preds_{self.epochs_trained}.csv\"\n my_df.to_csv(path)\n mlflow.log_artifact(path)\n os.remove(path)\n\n def train_until_converge(self, n_epochs, patience, min_delta, sch_stepsize, sch_gamma) -> None:\n\n early_stopper = EarlyStopper(\n patience=patience, \n min_delta=min_delta)\n \n scheduler = StepLR(self.optimizer, step_size=sch_stepsize, gamma=sch_gamma, verbose=True)\n\n for _ in range(n_epochs):\n train_results = self._train_epoch()\n scheduler.step()\n val_results = self._eval_epoch(\"val\")\n\n mlflow.log_metrics(get_only_metrics(val_results), step = self.epochs_trained)\n mlflow.log_metrics(get_only_metrics(train_results), step = self.epochs_trained)\n\n early_stopper.step(val_results[\"val_loss\"]) #One more step for validation loss, check whether should stop.\n\n if early_stopper.is_best_model:\n #This is the best model so far, let's save it.\n self._save_checkpoint()\n\n if early_stopper.early_stop:\n break #Model converged.\n\n print(f\"Model Converged! 
the best validation loss is {early_stopper.min_validation_loss}\")\n        self._load_checkpoint()\n        os.remove(self.checkpoint_path)\n        \n    def _save_checkpoint(self):\n\n        ckp = {\n            \"epoch\": self.epochs_trained,\n            \"model_state_dict\":self.model.state_dict(),\n            \"optimizer_state_dict\": self.optimizer.state_dict(),\n        }\n\n        torch.save(ckp, self.checkpoint_path)\n        mlflow.log_artifact(local_path=self.checkpoint_path)\n        \n    def _load_checkpoint(self):\n        ckp = torch.load(self.checkpoint_path)\n        self.epochs_trained = ckp[\"epoch\"]\n        self.model.load_state_dict(ckp[\"model_state_dict\"])\n        self.optimizer.load_state_dict(ckp[\"optimizer_state_dict\"])\n\n    def set_loss_fn(self, loss_fn:nn.Module):\n        self.loss_fn = loss_fn\n        self.loss_fn.to(self.gpu_id)\n\nclass SingleGPUTrainerMatan:\n    def __init__(\n        self, \n        model: nn.Module, \n        loaders: Dict[str, DataLoader],\n        optimizer: torch.optim.Optimizer, \n        gpu_id: int,\n        save_every: int,\n        num_classes: int,\n        grad_norm:float = 15.0\n    ):\n\n        self.gpu_id = gpu_id\n        self.model = model.to(self.gpu_id)\n        self.loaders = loaders\n\n        self.optimizer = optimizer\n        self.save_every = save_every\n        self.num_classes = num_classes\n        self.epochs_trained = 0\n        \n        self.checkpoint_path = Path(\"models\", f\"{secrets.token_hex(nbytes=16)}.pt\") # reuse secrets (imported above); uuid was referenced without being imported\n        self.grad_norm = grad_norm\n\n    def forward(self, X):\n        return F.softmax(self.model(X), dim = 1) #Normalized \n\n    def prepare_input(self, X, y):\n        return X.to(self.gpu_id), y.to(self.gpu_id)\n\n    def _train_epoch(self) -> dict[str, Any]:\n        \n        self.model.train()\n\n        loss_metric = RunningMetric()\n        collector = StatsCollector()\n\n        loader = tqdm(self.loaders[\"train\"], total = len(self.loaders[\"train\"]), desc= f\"Training, epoch {self.epochs_trained}\")\n\n        for X, y in loader:\n\n            #Batch iteration\n            X, y = self.prepare_input(X, y)\n            batch_size = y.shape[0]\n\n            self.optimizer.zero_grad()\n            y_pred = self.forward(X)\n\n            loss = self.loss_fn(y_pred, y)\n            loss.backward()\n\n            self.optimizer.step()\n\n            loss_metric.update(loss.item(), batch_size)\n            collector.update(y_pred, y)\n            \n            loader.set_postfix(\n                loss = loss_metric.average)\n        \n        self.epochs_trained +=1\n\n        y_pred_all = collector.collect_y_pred().argmax(axis=1)\n        y_true_all = collector.collect_y_true()\n\n        ones_ratio = y_pred_all.mean()\n        accuracy = accuracy_score(y_true_all, y_pred_all)\n\n        results = {\n            \"train_loss\": loss_metric.average, #single value\n            \"train_accuracy\":accuracy, #single value\n            \"train_ones_ratio\":ones_ratio, #single value\n        }\n\n        return results\n    \n    @torch.no_grad()\n    def _eval_epoch(self, phase) -> dict[str, Any]:\n\n        self.model.eval()\n\n        loss_metric = RunningMetric() \n        collector = StatsCollector()\n\n        loader = self.loaders[phase]\n\n        for X, y in loader:\n            X, y = self.prepare_input(X, y)\n            batch_size = y.shape[0]\n            y_pred = self.forward(X)\n            loss = self.loss_fn(y_pred, y)\n\n            loss_metric.update(loss.item(), batch_size)\n            collector.update(y_pred, y)\n        \n        y_pred_all = collector.collect_y_pred().argmax(axis=1) #Binary vector of 0 and 1s\n        y_true_all = collector.collect_y_true()\n        \n        #Some metrics\n        \n        ones_ratio = y_pred_all.mean()\n        accuracy = accuracy_score(y_true_all, y_pred_all)\n\n        results = {\n            f\"{phase}_loss\": loss_metric.average, #single value\n            f\"{phase}_accuracy\":accuracy, #single value\n            f\"{phase}_ones_ratio\":ones_ratio, #single value\n        }\n\n        return results\n\n    def train_until_converge(\n        self, n_epochs:int, \n        patience:int=3, min_delta:float = 1.0, \n        sch_stepsize:int=5, sch_gamma:float=0.9) -> None:\n\n        early_stopper = EarlyStopper(\n            patience=patience, \n            
min_delta=min_delta)\n        \n        scheduler = StepLR(self.optimizer, step_size=sch_stepsize, gamma=sch_gamma, verbose=True)\n\n        for _ in range(n_epochs):\n            train_results = self._train_epoch()\n            scheduler.step()\n            val_results = self._eval_epoch(phase =\"val\")\n\n            mlflow.log_metrics(get_only_metrics(val_results), step = self.epochs_trained)\n            mlflow.log_metrics(get_only_metrics(train_results), step = self.epochs_trained)\n\n            early_stopper.step(val_results[\"val_loss\"]) #One more step for validation loss, check whether should stop.\n\n            if early_stopper.is_best_model:\n                #This is the best model so far, let's save it.\n                \n                best_epoch_idx = self.epochs_trained\n                self._save_checkpoint()\n\n            if early_stopper.early_stop:\n                break #Model converged.\n\n        print(f\"Model Converged! the best validation loss is {early_stopper.min_validation_loss}\")\n        self.model.load_state_dict(torch.load(self.checkpoint_path))\n        self.epochs_trained = best_epoch_idx\n        \n    def _save_checkpoint(self):\n        ckp = self.model.state_dict()\n        torch.save(ckp, self.checkpoint_path)\n\n    def set_loss_fn(self, loss_fn:nn.Module):\n        self.loss_fn = loss_fn\n        self.loss_fn.to(self.gpu_id)","repo_name":"imargolin/ordinaloss","sub_path":"ordinaloss/trainers/trainers.py","file_name":"trainers.py","file_ext":"py","file_size_in_byte":13912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21140421452","text":"from functools import wraps, partial\n\n\ndef xlogging(func, id = None):\n    # @wraps(func)\n    def wrapper(*args, **kwargs):\n        print(id, func.__name__, \"Logging input\", args, kwargs)\n        r = func(*args, **kwargs)\n        print(\"Logging output\", r)\n        return r\n    return wrapper\n\n\nnew = partial(xlogging, id=1)\n\n\n@xlogging\ndef ct(_in: str, _out: str) -> str:\n    return _in + _out\n\n\nif __name__ == \"__main__\":\n    a = ct(_in=\"Ana\", _out=\" are mere\")\n    print(a)","repo_name":"devraider/ArtRadio","sub_path":"spider/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36369802722","text":"# Find the next greater number behind each element (뒤에 있는 큰 수 찾기)\n# https://school.programmers.co.kr/learn/courses/30/lessons/154539\n\nfrom collections import deque\n\ndef solution(numbers):\n    answer = [-1 for i in range(len(numbers))]\n    stack = deque()\n    \n    for i, v in enumerate(numbers):\n        while len(stack) and stack[-1][1] < v:\n            answer[stack[-1][0]] = v\n            stack.pop()\n        stack.append([i, v])\n\n    return answer\n","repo_name":"oleveloper/problem-solving","sub_path":"programmers/154539.py","file_name":"154539.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11289283682","text":"#################### Function Return ###########################\n\ndef allowed_dating_age(boy_age):\n\tgirls_age = boy_age/2 + 7\n\treturn girls_age\n\n# Let's assign the return value to a variable\nboy_age = 32\nlimit = allowed_dating_age(boy_age)\n\nprint(\"The Boy of age \" ,boy_age, \"should date a girl of age \" ,limit, \"or older\")\n","repo_name":"bikranz4u/Python-practice","sub_path":"python-fundamental-thenewboston/7.function-return-2.py","file_name":"7.function-return-2.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27551826628","text":"\n\nimport os\nos.environ['PYTHONHASHSEED'] = str(2019)\n\nimport re\n\nimport numpy as np\nnp.random.seed(2019)\n\nimport 
random as r\nr.seed(2019)\n\nfrom tensorflow import set_random_seed\nset_random_seed(2019)\n\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras import initializers,callbacks\nfrom keras.engine.topology import Layer\nfrom keras.layers import Dense, Input\nfrom keras.layers import Embedding, GRU, Bidirectional, TimeDistributed\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.utils.np_utils import to_categorical\nfrom nltk import tokenize\nfrom sklearn.utils import shuffle\nfrom sklearn import metrics\nfrom keras import optimizers\nfrom nltk import tokenize\nfrom optparse import OptionParser\nfrom sklearn.metrics import confusion_matrix\nimport string\n\nargs = {\n 'batch_size': 16,\n 'maxlen' : 100,\n 'max_sentences' : 100,\n 'max_words' : 20000,\n 'embedding_dim' : 200,\n 'glove_dir' : \"./\",\n 'embeddings_index' : {},\n 'text_data_dir': 'raw_corpora/',\n 'output_dir': 'outputs/'\n\n}\n\n ## take a list of strings as optional arguments\ndef list_callback(option, opt, value, parser):\n setattr(parser.values, option.dest, value.split(','))\n\n\n# class defining the custom attention layer\nclass HierarchicalAttentionNetwork(Layer):\n def __init__(self, attention_dim):\n self.init = initializers.get('normal')\n self.supports_masking = True\n self.attention_dim = attention_dim\n super(HierarchicalAttentionNetwork, self).__init__()\n\n def build(self, input_shape):\n assert len(input_shape) == 3\n self.W = K.variable(self.init((input_shape[-1], self.attention_dim)))\n self.b = K.variable(self.init((self.attention_dim,)))\n self.u = K.variable(self.init((self.attention_dim, 1)))\n self.trainable_weights = [self.W, self.b, self.u]\n super(HierarchicalAttentionNetwork, self).build(input_shape)\n\n def compute_mask(self, inputs, mask=None):\n return mask\n\n def call(self, x, mask=None):\n # size of x :[batch_size, sel_len, attention_dim]\n # size of u :[batch_size, attention_dim]\n # uit = tanh(xW+b)\n uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b))\n\n ait = K.exp(K.squeeze(K.dot(uit, self.u), -1))\n\n if mask is not None:\n # Cast the mask to floatX to avoid float64 upcasting\n ait *= K.cast(mask, K.floatx())\n ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n weighted_input = x * K.expand_dims(ait)\n output = K.sum(weighted_input, axis=1)\n\n return output\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[-1]\n\n\ndef remove_html(str_a):\n p = re.compile(r'<.*?>')\n return p.sub('', str_a)\n\n\n# replace all non-ASCII (\\x00-\\x7F) characters with a space\ndef replace_non_ascii(str_a):\n return re.sub(r'[^\\x00-\\x7f]', r'', str_a)\n\n\n## lowercase, remove digits, non-alphabetic chars, punctuations\n## and extra spaces\n\ndef clean_corpus(corpus):\n \n cleaned_corpus = []\n \n for article in corpus:\n\n article = article.lower()\n temp_str = re.sub(r'\\d+', '', article)\n temp_str = re.sub(r'[^\\x00-\\x7f]',r'', temp_str)\n temp_str = temp_str.translate(str.maketrans('', '', string.punctuation))\n temp_str = re.sub(r'\\s+', ' ', temp_str)\n\n\n cleaned_corpus.append(temp_str)\n \n return cleaned_corpus\n\n\ndef import_data(dataset):\n\n\n df_neg_text = pd.read_csv(os.path.join(args['text_data_dir'],'%s/%s_neg_text.csv'%(dataset,dataset)))\n df_pos_text = pd.read_csv(os.path.join(args['text_data_dir'],'%s/%s_pos_text.csv'%(dataset,dataset)))\n\n\n # df_neg_text['text'] = clean_corpus(df_neg_text.text.tolist())\n # 
df_pos_text['text'] = clean_corpus(df_pos_text.text.tolist())\n\n ## raw text\n texts = df_neg_text.text.tolist()+df_pos_text.text.tolist()\n\n ## labels \n labels = np.array([0]*len(df_neg_text)+[1]*len(df_pos_text))\n\n print(len(texts),np.bincount(labels))\n\n labels = to_categorical(np.asarray(labels))\n ## segmented documents\n reviews = []\n\n # for idx,document in enumerate(texts):\n # temp_seg_doc = []\n # for sentence in document.split('\\n'):\n # if len(sentence.split())>2:\n # temp_seg_doc.append(sentence.strip())\n # reviews.append(temp_seg_doc)\n \n for idx,document in enumerate(texts):\n temp_seg_doc = []\n for sentence in tokenize.sent_tokenize(document):\n if len(sentence.split())>2:\n temp_seg_doc.append(sentence.strip().lower())\n reviews.append(temp_seg_doc)\n \n\n tokenizer = Tokenizer(num_words=args['max_words'])\n tokenizer.fit_on_texts(texts)\n\n data = np.zeros((len(texts), args['max_sentences'], args['maxlen']), dtype='int32')\n\n for i, sentences in enumerate(reviews):\n for j, sent in enumerate(sentences):\n if j < args['max_sentences']:\n wordTokens = text_to_word_sequence(sent)\n k = 0\n for _, word in enumerate(wordTokens):\n if k < args['maxlen'] and tokenizer.word_index[word] < args['max_words']:\n data[i, j, k] = tokenizer.word_index[word]\n k = k + 1\n \n word_index = tokenizer.word_index\n print('Total %s unique tokens.' % len(word_index))\n print('Shape of reviews (data) tensor:', data.shape)\n print('Shape of sentiment (label) tensor:', labels.shape)\n\n\n return data, labels, df_neg_text, df_pos_text, word_index\n\n\nif __name__ == \"__main__\":\n\n # max_len = max(len(max(pre_trained_pos,key = lambda x: len(x))),len(max(pre_trained_neg,key = lambda x: len(x))))\n\n parser = OptionParser(usage='usage: -r random_seeds -d dataset_name -l learning_rate -e no_epochs -s train_size')\n\n \n parser.add_option(\"-d\",\"--dataset_name\", action=\"store\", type=\"string\", dest=\"dataset_name\", help=\"directory of data encoded by token-level Roberta\", default = 'longer_moviereview')\n parser.add_option(\"-l\",\"--learning_rate\", action=\"store\", type=\"float\", dest=\"learning_rate\", help=\"learning rate\", default=1e-3)\n parser.add_option(\"-e\",\"--no_epochs\", action=\"store\", type=\"int\", dest=\"no_epochs\", help=\"the number of epochs\",default=50)\n parser.add_option('-r', '--random_seeds', type='string', action='callback',dest='random_seeds',callback=list_callback,default=['1988','1989'])\n parser.add_option('-s', '--training_size', type='string', action='callback',dest='training_size',callback=list_callback,default=['50','100'])\n\n (options, _) = parser.parse_args()\n\n\n for number in options.training_size:\n if int(number)>200:\n parser.error( \"The largest training size is 200, you can customize the maximum training size by modifying the corrsponding codes of initializing training set.\" )\n\n dataset = options.dataset_name\n lr = options.learning_rate\n no_epochs = options.no_epochs\n embeddings_index = args['embeddings_index']\n\n # one can customize the maximum number instances in the training set by modifyting the corresponding codes of initializing training set\n train_sizes = [int(number) for number in options.training_size]\n random_states = [int(number) for number in options.random_seeds]\n\n print('number of epochs: ', no_epochs)\n print('dataset name: ', dataset)\n print('initial random states: ', random_states)\n print('training set sizes: ', train_sizes)\n\n\n \n data, labels,df_neg_text, df_pos_text, word_index = 
import_data(dataset)\n\n\n for idx,train_size in enumerate(train_sizes):\n \n df_all = pd.DataFrame()\n df_all_auc = pd.DataFrame()\n\n accs = []\n aucs = []\n confusion_matrices = []\n \n for seed in random_states:\n\n index_shuffle = shuffle([i for i in range(data.shape[0])], random_state=seed)\n\n total_train_shuffle = index_shuffle[:200]\n train_shuffle = total_train_shuffle[:train_size]\n test_shuffle = index_shuffle[200:]\n\n y_categorical = np.array([0]*len(df_neg_text)+[1]*len(df_pos_text))\n\n x_train,y_train = data[train_shuffle],labels[train_shuffle]\n x_val, y_val = x_train,y_train\n x_test,y_test,y_test_cate = data[test_shuffle], labels[test_shuffle],y_categorical[test_shuffle]\n\n print('Number of positive and negative reviews in training and validation set')\n print(y_train.sum(axis=0))\n print(y_val.sum(axis=0))\n\n ## prtrained GloVe embeddings downloaded from https://www.kaggle.com/incorpes/glove6b200d\n f = open(os.path.join(args['glove_dir'], 'glove.6B.200d.txt'),encoding='utf8')\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n print('Total %s word vectors.' % len(embeddings_index))\n\n # building Hierachical Attention network\n embedding_matrix = np.random.random((len(word_index) + 1, args['embedding_dim']))\n for word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(len(word_index) + 1, args['embedding_dim'], weights=[embedding_matrix],\n input_length=args['maxlen'], trainable=True, mask_zero=True)\n\n sentence_input = Input(shape=(args['maxlen'],), dtype='int32')\n embedded_sequences = embedding_layer(sentence_input)\n lstm_word = Bidirectional(GRU(50, return_sequences=True))(embedded_sequences)\n attn_word = HierarchicalAttentionNetwork(100)(lstm_word)\n sentenceEncoder = Model(sentence_input, attn_word)\n\n review_input = Input(shape=(args['max_sentences'], args['maxlen']), dtype='int32')\n review_encoder = TimeDistributed(sentenceEncoder)(review_input)\n lstm_sentence = Bidirectional(GRU(100, return_sequences=True))(review_encoder)\n attn_sentence = HierarchicalAttentionNetwork(100)(lstm_sentence)\n preds = Dense(2, activation='softmax')(attn_sentence)\n model = Model(review_input, preds)\n \n callback = callbacks.EarlyStopping(monitor='loss', patience=3,min_delta=1e-4)\n opt = optimizers.Adam(lr=lr)\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['acc'])\n\n print(\"model fitting - Hierachical attention network\")\n\n his = model.fit(x_train, y_train, validation_data=(x_val, y_val), nb_epoch=no_epochs, batch_size=args['batch_size'],callbacks=[callback])\n\n y_predict = model.predict(x_test)\n y_eval_prob_pos = np.array(y_predict)[:,1]\n\n y_pred = np.argmax(y_predict,axis=1)\n acc = metrics.accuracy_score(y_test_cate, y_pred)\n accs.append(acc)\n\n fpr, tpr, thresholds = metrics.roc_curve(y_test_cate, y_eval_prob_pos,pos_label=1)\n auc = metrics.auc(fpr, tpr)\n aucs.append(auc)\n \n tn, fp, fn, tp = confusion_matrix(y_test_cate, y_pred, labels=[0,1]).ravel()\n confusion_matrices.append({'TP':tp, 'TN':tn, 'FP': fp, 'FN':fn})\n \n\n\n df_all['result'] = [row for row in confusion_matrices]\n df_all['seed'] = random_states\n df_all = df_all.set_index('seed')\n 
df_all.to_csv(os.path.join(args['output_dir'],'raw_%s_han_%s.csv'%(dataset,train_size)),index=True)\n\n df_all_auc['result'] = [row for row in aucs]\n df_all_auc['seed'] = random_states\n df_all_auc = df_all_auc.set_index('seed')\n df_all_auc.to_csv(os.path.join(args['output_dir'],'auc_%s_han_%s.csv'%(dataset,train_size)),index=True)\n\n\n","repo_name":"GeorgeLuImmortal/Hierarchical-BERT-Model-with-Limited-Labelled-Data","sub_path":"run_han.py","file_name":"run_han.py","file_ext":"py","file_size_in_byte":12203,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"77"} +{"seq_id":"13036530211","text":"import datetime\n\nfrom django.utils import timezone\n\n# from django.db.models import Q\nfrom django.db.models import F\n\nfrom rest_framework import serializers, status, permissions, mixins\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet, GenericViewSet\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.exceptions import NotFound, ValidationError\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\n\n# from drf_haystack.serializers import HaystackSerializer\n# from drf_haystack.viewsets import HaystackViewSet\n\nfrom .models import Curriculum, Unit, Module, Lesson, Question, Game, UnitConversion\nfrom .services import get_progress_service, LessonLocked, LessonProgress\n\nfrom .serializers import QuestionSerializer, UserResponseSerializer, AnswerSerializer,\\\n LessonSerializer, ScoreBoardSerializer, ModuleSerializer, UnitSerializer,\\\n CurriculumSerializer, LessonProgressSerializer\n\n# from .search_indexes import CurriculumIndex\n\nfrom .djeddit import create_thread\n\n# from profiles.serializers import PublicProfileSerializer\n# from pib_auth.models import User\n\n# TODO need to filter all elements with Curriculum setting_publically=True or request.user is author or in collaborators\n\n\ndef check_classroom_progress(service, user):\n if user.is_authenticated and service.current_lesson_progress.score >= service.COMPLETION_THRESHOLD:\n from classroom.models import AssignmentProgress\n\n AssignmentProgress.objects.recalculate_status_by_lesson(service.current_lesson, user)\n\n\nclass QuestionViewSet(ModelViewSet):\n\n serializer_class = QuestionSerializer\n queryset = Question.objects.all()\n permission_classes = []\n lookup_field = 'uuid'\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n # see also LessonViewSet.get_next_question\n new_thread = create_thread(instance)\n if new_thread:\n Question.objects.filter(pk=instance.pk).update(thread=new_thread)\n\n # increment view count TODO\n # if new_thread:\n # # save new thread in probllem\n # Question.objects.filter(pk=instance.pk).update(count_views=F('count_views') + 1, thread=new_thread)\n # else:\n # Question.objects.filter(pk=instance.pk).update(count_views=F('count_views') + 1)\n\n return super(QuestionViewSet, self).retrieve(request, *args, **kwargs)\n\n def user_response(self, request, uuid):\n question = self.get_object() # self is an instance of the question with the matching uuid\n data = {'question': question.pk, 'answered_on': timezone.now()}\n data.update(request.data)\n sr = UserResponseSerializer(data=data)\n sr.is_valid(raise_exception=True)\n kwargs = {}\n if request.user.is_authenticated:\n kwargs['profile'] = request.user.profile\n user_response = sr.get_response(**kwargs)\n service = get_progress_service(request, question.lesson)\n try:\n is_correct = 
service.check_user_response(user_response)\n except LessonLocked as e:\n raise serializers.ValidationError(e)\n data = LessonProgressSerializer(service.current_lesson_progress).data\n\n check_classroom_progress(service, self.request.user)\n\n data['required_score'] = service.COMPLETION_THRESHOLD\n data['was_correct'] = is_correct\n if not is_correct:\n if user_response.content:\n data['correct_answer'] = AnswerSerializer(user_response.get_correct_answer()).data\n elif user_response.answers_list:\n data['correct_answer'] = AnswerSerializer(user_response.get_correct_answer(), many=True).data\n return Response(data)\n\n # @renderer_classes((JSONRenderer,))\n def service_request(self, request, uuid):\n if 'type' in request.query_params and request.query_params['type'] == 'execute_mysql':\n question = self.get_object()\n if question.answer_type != Question.AnswerType.MYSQL or 'value' not in request.data:\n raise ValidationError({'error': 'Initial data validation error'})\n answer = question.answers.first()\n try:\n return Response({\n 'json_mysql_result': answer.content.get_json_from_sql(str(request.data['value']))\n })\n except Exception as e:\n raise ValidationError({'error': '{}'.format(e)})\n\n else:\n raise NotFound\n\n\nclass LessonViewSet(ModelViewSet):\n\n serializer_class = LessonSerializer\n queryset = Lesson.objects.all()\n lookup_field = 'uuid'\n\n def get_serializer_context(self):\n context = super(LessonViewSet, self).get_serializer_context()\n context['progress_service'] = get_progress_service(context['request'])\n return context\n\n def get_next_question(self, request, uuid):\n lesson = self.get_object()\n service = get_progress_service(request, lesson)\n previous_question = None\n previous_question_uuid = request.query_params.get('previous_question')\n if previous_question_uuid:\n previous_question = Question.objects.filter(uuid=previous_question_uuid).first()\n try:\n question = service.get_next_question(previous_question)\n except LessonLocked as e:\n raise serializers.ValidationError(e)\n if question:\n new_thread = create_thread(question)\n if new_thread:\n Question.objects.filter(pk=question.pk).update(thread=new_thread)\n question.thread = new_thread\n data = QuestionSerializer(question, context={'progress_service': service}).data\n # TODO: it might make more sense for these fields to be on the\n # lesson. 
Or a separate lesson_progress object.\n data.update(LessonProgressSerializer(service.current_lesson_progress).data)\n data['required_score'] = service.COMPLETION_THRESHOLD\n return Response(data)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET'])\n@permission_classes([AllowAny])\ndef get_unit_conversion_units(request):\n return Response(UnitConversion.UnitConversionUnits)\n\n\n@api_view(['POST'])\n@permission_classes([AllowAny])\ndef game_success(request, uuid):\n try:\n game = Game.objects.get(lesson__uuid=uuid)\n except Game.DoesNotExist:\n raise NotFound()\n\n service = get_progress_service(request, game.lesson)\n\n n = 10 # max number of results to show\n\n duration_ms = request.data.get('duration', None)\n score = request.data.get('score', None)\n if duration_ms:\n dur = datetime.timedelta(milliseconds=duration_ms)\n else:\n dur = None\n\n service.game_success(game, dur, score)\n\n check_classroom_progress(service, request.user)\n\n if game.slug == 'unit-conversion' or game.slug == 'vector-game': # temp fix\n # get score list for\n # try:\n scores = service.get_score_board_qs(game.lesson).exclude(duration__isnull=True)\n data_scores_list = []\n user_already_in_score_list = False\n\n for row_num, row in enumerate(scores[:10]):\n # add score if user in top 10\n # current registered user\n if request.user.is_authenticated:\n if request.user.profile.id == row.profile_id:\n current_user_score = service.get_score_board_qs(game.lesson).\\\n get(profile__user=request.user)\n setattr(current_user_score, 'row_num', row_num + 1)\n data_scores_list.append(current_user_score)\n user_already_in_score_list = True\n continue\n # current anon user\n else:\n if row.duration > dur:\n if not user_already_in_score_list:\n current_user_score = LessonProgress(score=score, duration=dur, lesson=game.lesson)\n setattr(current_user_score, 'row_num', row_num + 1)\n data_scores_list.append(current_user_score)\n user_already_in_score_list = True\n continue\n\n setattr(row, 'row_num', row_num + 1)\n\n if row.duration:\n data_scores_list.append(row)\n\n # add score if user not in top 10\n if not user_already_in_score_list:\n if request.user.is_authenticated:\n current_user_score = service.get_score_board_qs(game.lesson).get(profile__user=request.user)\n else:\n current_user_score = LessonProgress(score=score, duration=dur, lesson=game.lesson)\n\n position = service.get_score_board_qs(game.lesson).filter(duration__lt=current_user_score.duration).count()\n setattr(current_user_score, 'row_num', position + 1)\n data_scores_list.append(current_user_score)\n\n data = ScoreBoardSerializer(data_scores_list[:n], many=True).data\n return Response(data)\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass ModuleViewSet(ModelViewSet):\n\n serializer_class = ModuleSerializer\n queryset = Module.objects.all()\n lookup_field = 'uuid'\n\n def get_serializer_context(self):\n context = super(ModuleViewSet, self).get_serializer_context()\n context['progress_service'] = get_progress_service(context['request'])\n return context\n\n\nclass UnitViewSet(ModelViewSet):\n\n def get_serializer_context(self):\n context = super(UnitViewSet, self).get_serializer_context()\n context['progress_service'] = get_progress_service(context['request'])\n return context\n\n serializer_class = UnitSerializer\n queryset = Unit.objects.all()\n lookup_field = 'uuid'\n\n\nclass CurriculaViewSet(ModelViewSet):\n\n serializer_class = CurriculumSerializer\n queryset = Curriculum.objects.all()\n lookup_field = 'uuid'\n\n def 
get_queryset(self):\n queryset = self.queryset\n filter_by = self.request.query_params.get('filter', None)\n if filter_by and self.request.user.is_authenticated:\n if filter_by == 'my':\n # todo do we need to get curricula of user classrooms?\n queryset = queryset.filter(author=self.request.user)\n elif filter_by == 'other':\n queryset = queryset.exclude(author=self.request.user)\n elif filter_by == 'default':\n queryset = queryset.filter(author__pk=2) # Physics Is Beautiful\n\n return queryset\n\n def get_serializer_context(self):\n context = super(CurriculaViewSet, self).get_serializer_context()\n context['progress_service'] = get_progress_service(context['request'])\n return context\n\n def get_object(self):\n lookup_id = self.kwargs.get(self.lookup_url_kwarg or self.lookup_field)\n if lookup_id and lookup_id.lower() == 'default':\n user = None\n if self.request.user.is_authenticated:\n user = self.request.user\n return Curriculum.objects.get_default(user=user)\n return super(CurriculaViewSet, self).get_object()\n\n\n\n# # Postgresql FTS Search\n# from django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank\n#\n#\n# class CurriculaSearchViewSet(mixins.ListModelMixin,\n# GenericViewSet):\n# permission_classes = (permissions.IsAuthenticated,)\n# serializer_class = CurriculumSerializer\n# queryset = Curriculum.objects.all()\n# lookup_field = 'uuid'\n#\n# def get_queryset(self):\n# qs = self.queryset\n#\n# keywords = self.request.GET.get('query')\n# if not keywords:\n# raise NotAcceptable('Search query required')\n#\n# query = SearchQuery(keywords)\n# vector = SearchVector('name', 'description')\n# qs = qs.annotate(search=vector).filter(search=query)\n# qs = qs.annotate(rank=SearchRank(vector, query)).order_by('-rank')\n#\n# return qs\n\n\n# FTS Search\n\n# class CurriculumSearchSerializer(HaystackSerializer):\n#\n# def to_representation(self, instance):\n# representation = super().to_representation(instance)\n# # WO hitting DB\n# request = self.context.get('request', None)\n# if 'image' in representation and representation['image']:\n# if request is not None:\n# representation['image'] = request.build_absolute_uri(representation['image'])\n# # With hitting DB\n# # representation['image'] = None\n# # if instance.object.image:\n# # representation['image'] = instance.object.image.url\n#\n# representation['author'] = {}\n# representation['author']['pk'] = instance.author_pk\n# representation['author']['get_absolute_url'] = instance.author_get_absolute_url\n# representation['author']['display_name'] = instance.author_display_name\n#\n# return representation\n#\n# class Meta:\n# index_classes = [CurriculumIndex]\n#\n# # The `fields` contains all the fields we want to include.\n# # NOTE: Make sure you don't confuse these with model attributes. 
These\n# # fields belong to the search index!\n# fields = [\n# \"text\", \"name\", \"description\", \"uuid\", \"image\", \"author\"\n# ]\n#\n#\n# class CurriculaSearchViewSet(HaystackViewSet):\n# permission_classes = [IsAuthenticated]\n# index_models = [Curriculum]\n# serializer_class = CurriculumSearchSerializer\n\n","repo_name":"studyhub-co/physics-is-beautiful","sub_path":"curricula/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":13605,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"36437682114","text":"from pathlib import Path\nfrom dash.dependencies import Input, Output, State\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\n\nfrom app import app\nfrom tab_synthesize import tab_synth\nfrom tab_fit import tab_fit\n\n\n# callback for collapsing menu\n@app.callback(\n Output(\"navbar-collapse\", \"is_open\"),\n [Input(\"navbar-toggler\", \"n_clicks\")],\n [State(\"navbar-collapse\", \"is_open\")],\n)\ndef toggle_navbar_collapse(n, is_open):\n if n:\n return not is_open\n return is_open\n\n\ndef popup_modal(card_name, src=None, description=\"\",\n modal_id=\"modal\", close_id=\"close\"):\n modal = dbc.Modal(\n [\n dbc.ModalHeader(card_name),\n dbc.ModalBody(\n [\n html.Img(src=src, className=\"w-100\"),\n html.P(description)\n ]\n ),\n dbc.ModalFooter(\n dbc.Button(\"Close\", id=close_id,\n className=\"ml-auto\")\n ),\n ],\n id=modal_id,\n size=\"lg\",\n centered=True,\n # backdrop=\"static\"\n )\n return modal\n\n\ndef popup_menu(name, id, src, modal_id, close_id, description=\"\"):\n menu_item = dbc.DropdownMenuItem(\n [\n name,\n popup_modal(name, src=src, modal_id=modal_id, close_id=close_id,\n description=description)\n ],\n id=id,\n )\n return menu_item\n\n\ndef toggle_modal(n1, n2, is_open):\n if n1 or n2:\n return not is_open\n return is_open\n\n\nfor _modal, _button, _close in zip(\n [\"struc-modal\", \"models-modal\", \"vs1-modal\", \"vs2-modal\",\n \"info-modal\"],\n [\"struc-pop\", \"models-pop\", \"vs1-pop\", \"vs2-pop\", \"info-pop\"],\n [\"struc-close\", \"models-close\", \"vs1-close\", \"vs2-close\",\n \"info-close\"]):\n app.callback(\n Output(_modal, \"is_open\"),\n [Input(_button, \"n_clicks\"), Input(_close, \"n_clicks\")],\n [State(_modal, \"is_open\")],\n )(toggle_modal)\n\n\n# generate nav tabs\n@app.callback(\n Output(\"main-content\", \"children\"),\n Input(\"nav-tabs\", \"active_tab\"),\n)\ndef tab_content(active_tab):\n if active_tab == \"nav-tab-synthesize\":\n return tab_synth\n elif active_tab == \"nav-tab-fit\":\n return tab_fit\n\n\n# load the markdown file\nwith open(Path(__file__).parent / \"README.md\", \"r\") as f:\n intro_md = f.read()\n\n# features\nIMG_STRUCT = (\"https://raw.githubusercontent.com/chuckedfromspace/carspy/\"\n + \"main/assets/carspy_struct.png\")\nIMG_MODEL = (\"https://raw.githubusercontent.com/chuckedfromspace/carspy/\"\n + \"main/assets/cars_model.png\")\nIMG_COMPARE1 = (\"https://raw.githubusercontent.com/chuckedfromspace/carspy/\"\n + \"main/assets/vs_CARSFT_01.jpeg\")\nIMG_COMPARE2 = (\"https://raw.githubusercontent.com/chuckedfromspace/carspy/\"\n + \"main/assets/vs_CARSFT_02.jpeg\")\nCAP_1 = (\"Synthesized CARS spectra in N2 at 1 atm, 2400 K, \"\n + \"with a pump linewidth of 0.5 cm-1, \"\n + \"using Voigt lineshape and cross-coherence convolution.\")\nCAP_2 = (\"Synthesized CARS spectra in N2 at 10 atm, 2400 K, \"\n + \"with a pump linewidth of 0.5 cm-1, using modified 
\nfeature_menu = dbc.DropdownMenu(\n    [\n        popup_menu(\"CARSpy Structure\",\n                   id=\"struc-pop\",\n                   src=IMG_STRUCT,\n                   modal_id=\"struc-modal\",\n                   close_id=\"struc-close\"),\n        popup_menu(\"CARS Models\",\n                   id=\"models-pop\",\n                   src=IMG_MODEL,\n                   modal_id=\"models-modal\",\n                   close_id=\"models-close\"),\n        popup_menu(\"vs. CARSFT (low pressure)\",\n                   id=\"vs1-pop\",\n                   src=IMG_COMPARE1,\n                   modal_id=\"vs1-modal\",\n                   close_id=\"vs1-close\",\n                   description=CAP_1),\n        popup_menu(\"vs. CARSFT (high pressure)\",\n                   id=\"vs2-pop\",\n                   src=IMG_COMPARE2,\n                   modal_id=\"vs2-modal\",\n                   close_id=\"vs2-close\",\n                   description=CAP_2),\n    ],\n    nav=True,\n    in_navbar=True,\n    label=\"Features\",\n    style={\"font-size\": \"1.2em\"}\n)\n\nnavbar_title = dbc.Container(\n    [\n        html.A(\n            dbc.Row(\n                [\n                    dbc.Col(html.Img(src=app.get_asset_url(\"logo.svg\"),\n                                     height=\"32px\")),\n                    dbc.Col(dbc.NavbarBrand(\n                        \"CARSpy\",\n                        className=\"ml-2 font-weight-bold\",\n                        style={\"font-size\": \"1.7em\"}\n                        )\n                    ),\n                    dbc.Nav(feature_menu, navbar=True)\n                ],\n                align=\"center\",\n                no_gutters=True,\n            ),\n        ),\n        dbc.NavbarToggler(id=\"navbar-toggler\"),\n        dbc.Collapse(\n            dbc.Nav(\n                [\n                    dbc.NavLink(\n                        [\n                            html.I(\n                                title=\"Intro\",\n                                className=\"fas fa-info-circle mr-1\",\n                                style={\"font-size\": \"1.5em\"},\n                                id=\"info-icon\"\n                            ),\n                            popup_modal(\"Introduction\",\n                                        description=dcc.Markdown(intro_md),\n                                        modal_id=\"info-modal\",\n                                        close_id=\"info-close\"),\n                        ],\n                        id=\"info-pop\",\n                        href=\"#\"\n                    ),\n                    dbc.NavLink(\n                        [\n                            html.I(\n                                title=\"Github\",\n                                className=\"fab fa-github mr-1\",\n                                style={\"font-size\": \"1.5em\"}\n                            ),\n                            \"\",\n                        ],\n                        target=\"_blank\",\n                        href=\"https://github.com/chuckedfromspace/\"\n                             + \"carspy-dash\",\n                    ),\n                    dbc.NavLink(\n                        [\n                            html.I(\n                                title=\"Docs\",\n                                className=\"fas fa-book mr-1\",\n                                style={\"font-size\": \"1.5em\"}\n                            ),\n                            \"\",\n                        ],\n                        target=\"_blank\",\n                        href=\"https://carspy.readthedocs.io/\"\n                    ),\n                    dbc.NavLink(\n                        [\n                            html.I(\n                                title=\"PyPI\",\n                                className=\"fas fa-cubes mr-1\",\n                                style={\"font-size\": \"1.5em\"}\n                            ),\n                            \"\",\n                        ],\n                        target=\"_blank\",\n                        href=\"https://pypi.org/project/carspy/\"\n                    ),\n                ],\n                className=\"ml-auto\",\n                navbar=True\n            ),\n            id=\"navbar-collapse\",\n            navbar=True\n        ),\n    ],\n    fluid=True,\n)\n\nnavbar_tabs = dbc.Container(\n    dbc.Tabs(\n        [\n            dbc.Tab(\n                tab_id=\"nav-tab-synthesize\",\n                label=\"Synthesize\",\n                tab_style={\n                    \"margin-left\": 10,\n                },\n                activeLabelClassName=\"border-primary font-weight-bold\",\n                active_label_style={\n                    \"background-color\": \"rgb(240,240,240)\",\n                    \"border-width\": \"0px 0px 2px 0px\",\n                },\n            ),\n            dbc.Tab(\n                tab_id=\"nav-tab-fit\",\n                label=\"Least-Square Fit\",\n                activeLabelClassName=\"border-primary font-weight-bold\",\n                active_label_style={\n                    \"background-color\": \"rgb(240,240,240)\",\n                    \"border-width\": \"0px 0px 2px 0px\",\n                },\n            ),\n        ],\n        id=\"nav-tabs\",\n        active_tab=\"nav-tab-synthesize\",\n        className=\"pt-2\"\n    ),\n    fluid=False,\n    className=\"mb-3\"\n)\n\nnavbar = dbc.Container(\n    [\n        dbc.Navbar(\n            navbar_title,\n            color=\"primary\",\n            dark=True,\n        ),\n    ],\n    fluid=True,\n    className=\"bg-primary\"\n)\n","repo_name":"chuckedfromspace/carspy-dash","sub_path":"navbar.py","file_name":"navbar.py","file_ext":"py","file_size_in_byte":8692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"1003657229","text":"#\n#Double Q-learning with function approximation\n#  - Demoed using OpenAI CartPole\n#\n\nfrom dbl_qlearn_fa import dbl_qlearn_fa\nimport gym\nimport torch
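\n\n# Double Q-learning keeps two estimators q1, q2 and evaluates the greedy action of\n# one with the other to curb maximization bias; roughly (assumed form - the update\n# itself lives in the imported dbl_qlearn_fa, which is not shown in this file):\n#     a_star = argmax_a q1(s', a);   target = r + gamma * q2(s', a_star)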
\ntorch.manual_seed(456) #let's make things repeatable! (only affects PyTorch neural-network param initialization in this demo)\nshow_plots=True\n\n\n#Small feedforward neural network model for q (PyTorch)\n#  This NN architecture style is state in, q-per-action out\n#\nclass MLP(torch.nn.Module):\n    def __init__(self, numFeatures,numActions):\n        '''\n        Parameters:\n            numFeatures: Number of input features\n            numActions: Number of output actions\n        '''\n        super().__init__()\n\n        self.dense1=torch.nn.Linear(numFeatures,32)\n        self.relu1=torch.nn.ReLU()\n        self.dense2=torch.nn.Linear(32,numActions)\n\n    def forward(self,s):\n        '''\n        Compute value function q(s,a,w) by forward computation through MLP\n        '''\n        feature_input=torch.tensor(s,dtype=torch.float32)\n\n        #forward propagate input through network layers\n        output=self.dense1(feature_input)\n        output=self.relu1(output)\n        output=self.dense2(output)\n\n        return output\n\n    @property\n    def weights(self):\n        '''\n        Return model parameters\n        '''\n        return self.parameters()\n\n\n#################################################################\n#\n# 1. Compute & demo optimal policy for OpenAI CartPole environment\n#     (generates animated .mp4 video to demo computed policy)\n#\n#\n\nsimenv = gym.make('CartPole-v1')\nsimenv.numActions=simenv.action_space.n\nsimenv.numFeatures=simenv.observation_space.shape[0]\n\nq1=MLP(simenv.numFeatures,simenv.numActions)\nq2=MLP(simenv.numFeatures,simenv.numActions)\n\n#Compute q(s,a) using Double Q-learning with function approximation\ndbl_qlearn_fa(simenv,q1,q2,0.99,1.0,1e-3,5000,500,decayEpsilon=True,showPlots=show_plots)\n\n\n#run an episode using computed q(s,a)\nfrom gym.wrappers import RecordVideo\nsimenv = RecordVideo(gym.make('CartPole-v1'), './cartpole_video')\nstate=simenv.reset(seed=789)\n\nterm_status=False\nepisode_len=0\nwhile not term_status:\n    action=int(torch.argmax((q1.forward(state)+q2.forward(state))/2))\n    (next_state,reward,term_status,_)=simenv.step(action)\n\n    if term_status: break #reached end of episode\n    state=next_state\n    episode_len+=1\nprint('Episode Length: {}'.format(episode_len))\n\nsimenv.close()\n\n#\n# End\n#\n#################################################################\n\n","repo_name":"putoze/RL_lecture","sub_path":"HW/hw9/double_q_learning/dbl_qlearn_fa_demo.py","file_name":"dbl_qlearn_fa_demo.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"921800476","text":"from collections import defaultdict\n\nN = int(input())\narchive = defaultdict(int)\nanswer = 0\n\nfor _ in range(N):\n    word = input()\n    wordLen = len(word)\n    for i in range(wordLen):\n        archive[word[i]] += 10**(wordLen-i-1)\n\nvalueList = list(archive.values())\nvalueList.sort(reverse=True)\nnumbers = list(range(10))\n\nfor v in valueList:\n    answer += v*(numbers.pop())\n\nprint(answer)","repo_name":"snowedev/Algorithm","sub_path":"By-Python/baekjoon/[greedy]/단어수학.py","file_name":"단어수학.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"1879349020","text":"\"\"\"Invoice total calculation using a dictionary\"\"\"\r\n\"\"\"dictionaries are keyed by strings rather than a numeric index\"\"\"\r\n\"\"\"unit price and product quantity are entered; both values keep being read\r\nuntil a 0 is entered. That final entry is discarded.\"\"\"\r\n\"\"\"constraints:\r\n    -decimal numbers for price and positive integers for quantity.\r\n    -the program warns about non-numeric input and asks for it again.\r\n    -the price result is rounded to 2 decimal places.\r\npoints to assess:\r\n    -separate data input, processing and output either with\r\n    comments or by grouping\r\nchallenges:\r\n    -write an integer-validation function.\r\n    -write a reusable module with the decimal-validation\r\n    function and use it in the program\r\n    -use python format to format the printed output.\r\n    -use an if to distinguish unit/units depending on\r\n    the product quantity\r\n\"\"\"\r\n_UNIDADES=0\r\n_PRECIO=1\r\ncadenaUnidades=input(\"Cantidad: \")\r\nunidades=float(cadenaUnidades)\r\n\r\ncadenaPrecio=input(\"Precio unitario (€): \")\r\nprecio=float(cadenaPrecio)\r\n\r\ntotalItems=0\r\nprecioTotal=0\r\n\r\nlistaLineasFact=[]\r\n\r\nwhile unidades>0 and precio>0:\r\n    totalUnitario=unidades*precio\r\n    item=dict()\r\n    item['unidades']= unidades\r\n    item['precio']= precio\r\n    listaLineasFact.append(item)\r\n\r\n    #lineasImpresion+= \"{}€ * {} unidades = {}€\\n\".format(precio,unidades,totalUnitario)\r\n    totalItems+=unidades#totalItems=totalItems+unidades\r\n    precioTotal+=totalUnitario#precioTotal=precioTotal+totalUnitario\r\n\r\n    cadenaUnidades=input(\"Cantidad: \")\r\n    unidades=float(cadenaUnidades)\r\n    cadenaPrecio=input(\"Precio unitario (€): \")\r\n    precio=float(cadenaPrecio)\r\n\r\nfor item in listaLineasFact:#switching to compound data structures\r\n    print(item['precio'],\"€ *\",item['unidades'],\"unidades =\",item['unidades']*item['precio'],\"€\")\r\n\r\n#three lines of code to print the results\r\nprint(\"---------------------------------------\")\r\nprint(\"Total: \",precioTotal)\r\nprint(\"Unidades: \",totalItems)\r\n#the line below is an alternative that uses format to print everything with a single line of code\r\nprint(\"---------------------------------------\\nTotal:\\t{:.2f}\\nUnidades:\\t{:.2f}\".format(precioTotal,totalItems))\r\n#/ \\n=newline/ / \\t=tab/ /{}=placeholder for a value/ /:.2f=round to 2 decimals/\r\nprint(\"\\033[3;33;41m\")#ANSI control characters to format the output
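\r\n\r\n# A minimal sketch of the integer-validation helper the challenges above ask\r\n# for (hypothetical name and messages, not part of the original exercise):\r\ndef pedirEnteroPositivo(mensaje):\r\n    while True:\r\n        texto = input(mensaje)\r\n        try:\r\n            valor = int(texto)\r\n            if valor >= 0:  # 0 is allowed: it is the loop's stop sentinel\r\n                return valor\r\n        except ValueError:\r\n            pass\r\n        print(\"Dato no numérico o negativo. Inténtelo de nuevo.\")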
","repo_name":"moisescantero/M01_python","sub_path":"M01/main_facturaDiccionarios.py","file_name":"main_facturaDiccionarios.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"21239870864","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models, _\nimport base64, zipfile\nfrom io import StringIO, BytesIO\nfrom datetime import datetime, date, timedelta\nimport logging\n\n_logging = logging.getLogger(__name__)\n\nclass SolseDevHerramientas(models.Model):\n\t_name = \"sdev.herramientas\"\n\t_description = \"Herramientas (solse)\"\n\n\tname = fields.Char('Nombre')\n\n\tdatas_zip_fname = fields.Char(\"Nombre de archivo zip\", readonly=True)\n\tdatas_zip = fields.Binary(\"Datos Zip\", readonly=True)\n\tutl_fecha_ejecucion = fields.Datetime(\"Ultima Fecha ejecución\")\n\n\tnombre_modelo_procesar = fields.Char(\"Nombre modelo\")\n\n\tdef borrar_datos_modelo(self):\n\t\tlista = self.env[self.nombre_modelo_procesar].sudo().search([('active', 'in', [False, True])]).ids\n\t\tfor reg in lista:\n\t\t\tself.borrar_registro_modelo(reg)\n\n\tdef borrar_registro_modelo(self, registro):\n\t\t#self.env[self.nombre_modelo_procesar].sudo().search([('id', '=', registro)]).with_context(force_delete=True).unlink()
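\n\t\t# The ORM unlink above is kept for reference; the raw SQL below bypasses\n\t\t# access rights, ondelete rules and ORM hooks, so it is faster but only\n\t\t# safe for controlled cleanups.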
\n\t\tobj_name = self.nombre_modelo_procesar\n\t\tobj = self.pool.get(self.nombre_modelo_procesar)  # legacy registry access, used here only to resolve the SQL table name\n\t\tif not obj:\n\t\t\tt_name = obj_name.replace('.', '_')\n\t\telse:\n\t\t\tt_name = obj._table\n\n\t\tsql = \"delete from %s where id = %s\" % (t_name, registro)\n\t\ttry:\n\t\t\tself._cr.execute(sql)\n\t\t\tself._cr.commit()\n\t\texcept Exception as e:\n\t\t\t_logging.warning('remove data error: %s,%s', obj_name, e)\n\n\tdef llenar_direccion_usando_ubigeo(self):\n\t\tpacientes = self.env['res.partner'].search([])\n\t\tfor reg in pacientes:\n\t\t\tif reg.zip:\n\t\t\t\tdistrict = self.env['l10n_pe.res.city.district'].search([('code', '=', reg.zip)], limit=1)\n\t\t\t\treg.l10n_pe_district = district.id\n\t\t\t\treg.city_id = district.city_id.id\n\t\t\t\treg.state_id = district.city_id.state_id.id\n\t\t\t\treg.country_id = district.city_id.state_id.country_id.id\n\n\tdef llenar_vat_con_doc_number(self):\n\t\tpacientes = self.env['res.partner'].search([])\n\t\tfor reg in pacientes:\n\t\t\tif not reg.doc_number:\n\t\t\t\tcontinue\n\t\t\treg.vat = reg.doc_number\n\n\tdef llenar_doc_number_con_doc_vat(self):\n\t\tpacientes = self.env['res.partner'].search([])\n\t\tfor reg in pacientes:\n\t\t\tif not reg.vat:\n\t\t\t\tcontinue\n\t\t\treg.doc_number = reg.vat\n\n\tdef buscar_con_doc_number(self):\n\t\tpacientes = self.env['res.partner'].search([])\n\t\tfor reg in pacientes:\n\t\t\tif not reg.l10n_pe_district and reg.doc_number:\n\t\t\t\treg.update_document()\n\n\tdef borrar_pagos(self):\n\t\tself.env['account.payment'].search([]).action_draft()\n\t\tself.env['account.payment'].search([]).unlink()\n\n\tdef borrar_supplierinfo(self):\n\t\tself.env['product.supplierinfo'].search([]).unlink()\n\n\tdef borrar_pagos_pos(self):\n\t\tself.env['pos.payment'].search([]).unlink()
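\n\n\t# The deletion helpers below follow the order Odoo expects for posted\n\t# documents: reset to draft, drop reconciliations/analytic lines, then\n\t# unlink (with force_delete where deleting posted records is forbidden).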
\n\tdef borrar_notas_credito(self):\n\t\tnotas_credito = self.env['account.move'].search([('move_type', '=', 'out_refund')])\n\t\tnotas_credito.write({'state': 'draft', 'name': '/'})\n\t\tlineas = notas_credito.line_ids\n\n\t\tself.env['account.partial.reconcile'].search([('credit_move_id', 'in', lineas.ids)]).unlink()\n\t\tself.env['account.analytic.line'].search([('move_id', 'in', lineas.ids)]).unlink()\n\n\t\tself.env['account.move'].search([('move_type', '=', 'out_refund')]).with_context(force_delete=True).unlink()\n\n\tdef borrar_facturas(self):\n\t\tself.env['account.move'].search([('move_type', '!=', 'entry')]).write({'state': 'draft', 'name': '/'})\n\t\tself.env['account.partial.reconcile'].search([]).unlink()\n\t\tself.env['account.analytic.line'].search([]).unlink()\n\t\tself.env['account.move'].search([]).with_context(force_delete=True).unlink()\n\t\t#self.env['account.journal'].search([]).write({'sequence_number_next': 1})\n\n\tdef borrar_cpe(self):\n\t\tself.env['solse.cpe'].search([]).write({'state': 'draft'})\n\t\tself.env['solse.cpe'].search([]).unlink()\n\n\tdef borrar_inventarios(self):\n\t\tself.env['stock.move'].search([]).write({'state': 'draft'})\n\t\tself.env['stock.move'].search([]).unlink()\n\t\tself.env['stock.picking'].search([]).write({'state': 'draft'})\n\t\tself.env['stock.picking'].search([]).unlink()\n\t\tself.env['stock.quant'].search([]).unlink()\n\t\tself.env['stock.valuation.layer'].search([]).unlink()\n\t\tself.env['stock.inventory'].search([]).action_cancel_draft()\n\t\tself.env['stock.inventory'].search([]).unlink()\n\n\tdef borrar_ventas(self):\n\t\tself.env['sale.order'].search([]).write({'state': 'draft'})\n\t\tself.env['sale.order'].search([]).unlink()\n\n\tdef borrar_ventas_pos(self):\n\t\tself.env['pos.order'].search([]).write({'state': 'draft'})\n\t\tself.env['pos.order'].search([]).unlink()\n\n\tdef borrar_compras(self):\n\t\tself.env['purchase.order'].search([]).write({'state': 'cancel'})\n\t\tself.env['purchase.order'].search([]).unlink()\n\n\tdef borrar_datos_crm(self):\n\t\tself.env['crm.lead'].search([]).unlink()\n\n\tdef borrar_producciones(self):\n\t\tself.env['mrp.workorder'].search([]).unlink()\n\t\tlista = self.env['mrp.production'].search([('state', '=', 'draft')]).unlink()\n\t\tlista = self.env['mrp.production'].search([('state', '=', 'cancel')]).unlink()\n\t\t#for reg in lista:\n\t\t#\treg.unlink()\n\t\t#self.env['mrp.production'].search([('state', '!=', 'done')]).action_cancel()\n\t\t#self.env['mrp.production'].search([('state', '!=', 'done')]).unlink()\n\n\tdef aplicar_estados_importacion(self):\n\t\tfacturas = self.env[\"account.move\"].search([(\"estado_temp\", \"!=\", False)])\n\t\tfor reg in facturas:\n\t\t\treg.state = reg.estado_temp\n\t\t\treg.estado_temp = False\n\n\tdef aplicar_estados_guias_importacion(self):\n\t\tfacturas = self.env[\"stock.picking\"].search([(\"estado_temp\", \"!=\", False)], limit=250)\n\t\tfor reg in facturas:\n\t\t\testado_temporal = reg.estado_temp\n\t\t\tif reg.state not in ['draft']:\n\t\t\t\treg.estado_temp = False\n\t\t\telif reg.estado_temp == 'done':\n\t\t\t\ttry:\n\t\t\t\t\treg.button_validate()\n\t\t\t\t\treg.estado_temp = False\n\t\t\t\texcept Exception as msg_error:\n\t\t\t\t\treg.estado_temp = estado_temporal\n\t\t\t\t\t_logging.info(msg_error)\n\t\t\telse:\n\t\t\t\treg.state = reg.estado_temp\n\t\t\t\treg.estado_temp = False\n\n\tdef aplicar_pagos_factura(self):\n\t\tpagos = self.env['sdev.facturas.pago'].search([('factura_ids', '!=', False)], limit=10)\n\t\tfor reg in pagos:\n\t\t\ttry:\n\t\t\t\tpmt_wizard = self.env['account.payment.register'].with_context(active_model='account.move', active_ids=reg.factura_ids.ids).create({\n\t\t\t\t\t'payment_date': reg.payment_date,\n\t\t\t\t\t'journal_id': reg.journal_id.id,\n\t\t\t\t\t'payment_method_id': reg.payment_method_id.id,\n\t\t\t\t\t'amount': reg.amount,\n\t\t\t\t\t'currency_id': reg.currency_id.id,\n\t\t\t\t\t'partner_id': reg.partner_id.id,\n\t\t\t\t\t'communication': reg.communication\n\t\t\t\t})\n\t\t\t\tpmt_wizard._create_payments()\n\t\t\t\treg.factura_ids.write({\n\t\t\t\t\t'pago_id': False\n\t\t\t\t})\n\t\t\texcept Exception as msg_error:\n\t\t\t\t_logging.info(msg_error)
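\n\n\t# The aplicar_notas_credito variants reconcile posted credit notes with their\n\t# invoices: take the note's receivable/payable lines, search the counterpart\n\t# unreconciled move lines with the opposite balance sign, then reconcile().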
\n\tdef aplicar_notas_credito(self):\n\t\tnotas_credito = self.env['account.move'].search([('move_type', '=', 'out_refund'), ('state', '=', 'posted')])\n\t\tfor nota in notas_credito:\n\t\t\tpay_term_lines = nota.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))\n\t\t\tdomain = [\n\t\t\t\t('move_id', '=', nota.reversed_entry_id.id),\n\t\t\t\t('account_id', 'in', pay_term_lines.account_id.ids),\n\t\t\t\t('move_id.state', '=', 'posted'),\n\t\t\t\t('reconciled', '=', False),\n\t\t\t\t'|', ('amount_residual', '!=', 0.0), ('amount_residual_currency', '!=', 0.0),\n\t\t\t]\n\t\t\tif nota.is_inbound():\n\t\t\t\tdomain.append(('balance', '<', 0.0))\n\t\t\telse:\n\t\t\t\tdomain.append(('balance', '>', 0.0))\n\t\t\tlinea_factura = self.env['account.move.line'].search(domain)\n\t\t\tif pay_term_lines and linea_factura:\n\t\t\t\tlines = pay_term_lines + linea_factura\n\t\t\t\tlines.reconcile()\n\n\tdef aplicar_tipo_operacion_facturas(self):\n\t\tfacturas = self.env['account.move'].search([('invoice_picking_id', '!=', False), ('picking_type_id', '=', False)])\n\t\tfor reg in facturas:\n\t\t\treg.picking_type_id = reg.invoice_picking_id.picking_type_id.id\n\n\tdef aplicar_notas_credito_2(self):\n\t\tnotas_credito = self.env['account.move'].search([('move_type', '=', 'out_refund'), ('state', '=', 'posted'), ('payment_state', '!=', 'paid')])\n\t\tfor nota in notas_credito:\n\t\t\tpay_term_lines = nota.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))\n\t\t\tdomain = [\n\t\t\t\t('account_id', 'in', pay_term_lines.account_id.ids),\n\t\t\t\t('move_id.state', '=', 'posted'),\n\t\t\t\t('partner_id', '=', nota.commercial_partner_id.id),\n\t\t\t\t('reconciled', '=', False),\n\t\t\t\t'|', ('amount_residual', '!=', 0.0), ('amount_residual_currency', '!=', 0.0),\n\t\t\t]\n\t\t\tif nota.is_inbound():\n\t\t\t\tdomain.append(('balance', '<', 0.0))\n\t\t\telse:\n\t\t\t\tdomain.append(('balance', '>', 0.0))\n\t\t\tlinea_factura = self.env['account.move.line'].search(domain)\n\t\t\tif pay_term_lines and linea_factura:\n\t\t\t\tlines = pay_term_lines + linea_factura\n\t\t\t\tlines.reconcile()
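\n\n\t# obtener_datos_cpe collects each CPE's signed XML, its CDR response and, for\n\t# sync documents, the rendered PDF into one in-memory zip (BytesIO + zipfile)\n\t# exposed through the datas_zip binary field; completar_pdf_faltantes renders\n\t# and attaches any invoice PDFs that are still missing.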
\n\t# builds the data needed for the \"descargar_datos_cpe\" download action (fills datas_zip / datas_zip_fname)\n\tdef obtener_datos_cpe(self):\n\t\tin_memory_data = BytesIO()\n\t\tin_memory_zip = zipfile.ZipFile(in_memory_data, 'w', zipfile.ZIP_DEFLATED, False)\n\n\t\tcpes = self.env['solse.cpe'].search([])\n\t\tAttachment = self.env['ir.attachment']\n\t\tfor reg in cpes:\n\t\t\tif reg.datas_sign_fname:\n\t\t\t\t_document_name = reg.datas_sign_fname\n\t\t\t\tfilecontent = base64.b64decode(reg.datas_sign)\n\t\t\t\tin_memory_zip.writestr(_document_name, filecontent)\n\t\t\tif reg.datas_response_fname:\n\t\t\t\t_document_name = reg.datas_response_fname\n\t\t\t\tfilecontent = base64.b64decode(reg.datas_response)\n\t\t\t\tin_memory_zip.writestr(_document_name, filecontent)\n\n\t\t\tif reg.type == 'sync':\n\t\t\t\tnombre = '%s.pdf' % reg.get_document_name()\n\t\t\t\tfactura = self.env['account.move'].search([('pe_cpe_id', '=', reg.id)], limit=1)\n\t\t\t\tpdf = Attachment.search([('res_id', '=', factura.id), ('name', 'like', nombre + '%')], limit=1)\n\t\t\t\tif pdf:\n\t\t\t\t\tfilecontent = base64.b64decode(pdf.datas)\n\t\t\t\t\tin_memory_zip.writestr(nombre, filecontent)\n\t\t\t\t\"\"\"else:\n\t\t\t\t\tresult_pdf, type = self.env['ir.actions.report']._get_report_from_name('account.report_invoice')._render_qweb_pdf(factura.ids)\n\t\t\t\t\tresult_pdf = base64.encodebytes(result_pdf)\n\t\t\t\t\tfilecontent = base64.b64decode(result_pdf)\n\t\t\t\t\tin_memory_zip.writestr(nombre, filecontent)\"\"\"\n\n\t\tfor zfile in in_memory_zip.filelist:\n\t\t\tzfile.create_system = 0\n\t\tin_memory_zip.close()\n\n\t\tself.datas_zip = base64.b64encode(in_memory_data.getvalue())\n\t\tself.datas_zip_fname = \"pdf_xml_cdr.zip\"\n\n\tdef completar_pdf_faltantes(self):\n\t\tAttachment = self.env['ir.attachment']\n\t\tfacturas = self.env['account.move'].search([('is_cpe', '=', True)])\n\t\tfor reg in facturas:\n\t\t\tif not reg.pe_cpe_id:\n\t\t\t\tcontinue\n\t\t\tnombre = '%s.pdf' % reg.pe_cpe_id.get_document_name()\n\t\t\tpdf = Attachment.search([('res_id', '=', reg.id), ('name', 'like', nombre + '%')], limit=1)\n\t\t\tif not pdf:\n\t\t\t\tattach = {}\n\t\t\t\tresult_pdf, type = self.env['ir.actions.report']._get_report_from_name('account.report_invoice')._render_qweb_pdf(reg.ids)\n\t\t\t\tattach['name'] = nombre\n\t\t\t\tattach['type'] = 'binary'\n\t\t\t\tattach['datas'] = base64.encodebytes(result_pdf)  # base64.encodestring was removed in Python 3.9; encodebytes is the replacement\n\t\t\t\tattach['res_model'] = 'mail.compose.message'\n\t\t\t\tattachment_id = self.env['ir.attachment'].create(attach)\n\t\tself.utl_fecha_ejecucion = fields.Datetime.to_string(datetime.now())","repo_name":"Lobonick/cens-test","sub_path":"solse_dev/models/herramientas.py","file_name":"herramientas.py","file_ext":"py","file_size_in_byte":10860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}