diff --git "a/1181.jsonl" "b/1181.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1181.jsonl"
@@ -0,0 +1,1397 @@
+{"seq_id":"11966980442","text":"import boto3\nimport time\nfrom datetime import date\nimport decimal\nfrom boto3.dynamodb.conditions import Key, Attr\n\nCONN = None\nTRANSACTIONS_TABLE_NAME = None\nUSERS_TABLE_NAME = None\nTRANSACTIONS_TABLE = None\nUSERS_TABLE = None\n\n\ndef open_connections_and_tables():\n global CONN\n global TRANSACTIONS_TABLE\n global USERS_TABLE\n CONN = boto3.resource('dynamodb')\n TRANSACTIONS_TABLE = CONN.Table(TRANSACTIONS_TABLE_NAME)\n USERS_TABLE = CONN.Table(USERS_TABLE_NAME)\n\n\ndef resolve_names_to_ids(names):\n ids = []\n for name in names:\n field_to_use = 'username' if name[0] == \"@\" else 'name'\n value_to_use = name[1:] if name[0] == \"@\" else name\n items = USERS_TABLE.scan(\n FilterExpression=Attr(field_to_use).eq(value_to_use)\n )[\"Items\"]\n if len(items) != 0:\n ids.append(items.pop()[\"id\"])\n else:\n ids.append(None)\n return dict(zip(names, ids))\n\n\ndef resolve_ids_to_names(ids):\n names = []\n for id in ids:\n items = USERS_TABLE.query(\n KeyConditionExpression=Key('id').eq(id),\n )[\"Items\"]\n if len(items) > 0:\n names.append(\"@{}\".format(items[0][\"username\"]) if \"username\" in items[0] else items[0][\"name\"])\n else:\n names.append(None)\n return names\n\n\ndef debit_transaction(gid, sender, receiver, amt, *, msg_id=None, description=\"\", on_hold=False):\n with decimal.localcontext(boto3.dynamodb.types.DYNAMODB_CONTEXT) as ctx:\n ctx.traps[decimal.Inexact] = False\n ctx.traps[decimal.Rounded] = False\n timestamp = ctx.create_decimal_from_float(time.time())\n item = {\n 'group_id': gid,\n 'id': msg_id,\n 'from': sender,\n 'to': receiver,\n 'amt': -abs(amt),\n 'timestamp': round(timestamp, 2),\n 'on_hold': on_hold\n }\n if description is not None and description.strip() != \"\":\n item[\"description\"] = description.strip()\n TRANSACTIONS_TABLE.put_item(Item=item)\n\n\ndef credit_transaction(gid, sender, receiver, amt, *, msg_id=None, description=\"\", on_hold=False):\n with decimal.localcontext(boto3.dynamodb.types.DYNAMODB_CONTEXT) as ctx:\n ctx.traps[decimal.Inexact] = False\n ctx.traps[decimal.Rounded] = False\n timestamp = ctx.create_decimal_from_float(time.time())\n item = {\n 'group_id': gid,\n 'id': msg_id,\n 'from': sender,\n 'to': receiver,\n 'amt': abs(amt),\n 'timestamp': round(timestamp, 2),\n 'on_hold': on_hold\n }\n if description is not None and description.strip() != \"\":\n item[\"description\"] = description.strip()\n TRANSACTIONS_TABLE.put_item(Item=item)\n\n\ndef view_account(gid, user):\n gid = int(gid)\n user = int(user)\n response = TRANSACTIONS_TABLE.query(\n Select='ALL_ATTRIBUTES',\n KeyConditionExpression=Key('group_id').eq(int(gid)) & Key('timestamp').gt(0),\n FilterExpression=(Attr('from').eq(int(user)) | Attr('to').eq(int(user))) & Attr('on_hold').ne(True)\n )\n entries = response[\"Items\"]\n account = {}\n for entry in entries:\n is_payment = entry[\"amt\"] > 0\n is_receiver = entry[\"to\"] == user\n print(entry, is_payment, is_receiver)\n\n dict_key = entry[\"from\"] if is_receiver else entry[\"to\"]\n if dict_key not in account:\n account[dict_key] = 0\n account[dict_key] = account[dict_key] + (entry[\"amt\"] * (-1 if is_receiver else 1))\n return account\n\n\ndef view_logs(gid, user_id, filter_id, esk=None):\n gid = int(gid)\n uid = int(user_id)\n fid = int(filter_id) if filter_id is not None else None\n if fid is not None:\n filter_expression = (Attr('from').eq(uid) & Attr('to').eq(fid)) | (Attr('from').eq(fid) & Attr('to').eq(uid))\n else:\n filter_expression = Attr('from').eq(uid) | 
Attr('to').eq(uid)\n entries = []\n while len(entries) < 0:\n response = TRANSACTIONS_TABLE.query(\n Select='ALL_ATTRIBUTES',\n KeyConditionExpression=Key('group_id').eq(int(gid)) & Key('timestamp').gt(0),\n FilterExpression=filter_expression,\n Limit=10,\n ExclusiveStartKey=esk\n )\n entries.extend(response[\"Items\"])\n lek = response.get(\"LastEvaluatedKey\", None)\n # break into timeframes\n\n def destructive_filter(list, callback):\n pass\n # this week\n this_week = [e for e in entries if date.fromtimestamp(e[\"timestamp\"]).isocalendar()[1] == date.today().isocalendar()[1]]\n # last week\n last_week = [e for e in entries if date.fromtimestamp(e[\"timestamp\"]).isocalendar()[1] == date.today().isocalendar()[1] - 1]\n # this month\n # last month\n\n\ndef register_user(uid, *, username=\"\", name=\"\"):\n data = {\n \"id\": int(uid)\n }\n username = username.strip()\n name = name.strip()\n if username != \"\":\n data[\"username\"] = username\n if name != \"\":\n data[\"name\"] = name\n USERS_TABLE.put_item(Item=data)\n\n\ndef find_transaction(gid, mid):\n response = TRANSACTIONS_TABLE.query(\n KeyConditionExpression=Key('group_id').eq(gid),\n FilterExpression=Attr('id').eq(mid)\n )\n try:\n return response[\"Items\"].pop()\n except IndexError:\n return None\n\n\ndef update_transaction(transaction):\n TRANSACTIONS_TABLE.put_item(Item=transaction)\n","repo_name":"chesnutcase/ledger_bot","sub_path":"utils/tableutils.py","file_name":"tableutils.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42167978913","text":"import argparse\n\nfrom game import Runner\n\nTRAPS = [(2, 2), (3, 3), (4, 4), (5, 5)]\nWIND = [0, 0, 0, 1, 1, 1, 2, 2, 1, 0]\nSTART = (3, 0)\nGOAL = (3, 7)\n\nparser = argparse.ArgumentParser(description='My script')\nparser.add_argument('-a', '--actions', help='Number of actions that agent can take (4, 8, 9')\nparser.add_argument('-e', '--episodes', help='Number of training episodes')\nparser.add_argument('-v', '--verbose', help='verbose')\nparser.add_argument('-t', '--traps', help='Number of traps 0,1,2,3,4')\n\nargs = parser.parse_args()\nactions_list = [4, 8, 9]\nif args.actions:\n actions_list = [int(args.actions)]\nepisodes = int(args.episodes or 200)\nverbose = int(args.verbose or 10)\ntraps = min(4, int(args.traps or 0))\nfor actions in actions_list:\n if actions not in [4, 8, 9]:\n raise Exception('Invalid \"-a/--actions\"')\n\nfor actions in actions_list:\n game = Runner(num_actions=actions,\n start=START,\n goal=GOAL,\n verbose=verbose,\n episodes=episodes,\n rows=7,\n cols=10,\n gamma=1,\n wind=WIND,\n traps=TRAPS[:traps],\n render_interval=[51, 50])\n game.train()\n game.test(pause=3)\n","repo_name":"shadi-danhash/q-learning-simulation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
+{"seq_id":"22224692881","text":"import sqlite3, os\nSQLITE_NAME = \"fatpanda.tmp.db\"\n# if os.path.isfile(SQLITE_NAME): os.remove(SQLITE_NAME)\n\n\ndef fpd_raw_connection(db_path=SQLITE_NAME):\n conn = sqlite3.connect(db_path)\n '''Optional processing'''\n return conn\n\nfrom .readers import (\n read_csv,\n concat_csv,\n read_sql_query\n)","repo_name":"shashfrankenstien/FatPanda","sub_path":"fatpanda/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32933771056","text":"from django.contrib import admin\nfrom . import models\nfrom django.utils.safestring import mark_safe\n\n\n# Register your models here.\n\nclass TagAdmin(admin.ModelAdmin):\n list_display = (\n 'titre',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'statut',\n 'date_add',\n 'date_update',\n )\n search_fields = (\n 'titre',\n )\n list_per_pages = 50\n date_hierarchy = 'date_add'\n\n fieldsets = [\n ('Info ', {'fields': ['titre', ]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n\nclass CategorieAdmin(admin.ModelAdmin):\n list_display = (\n 'titre',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'statut',\n 'date_add',\n 'date_update',\n )\n search_fields = (\n 'titre',\n )\n list_per_pages = 50\n date_hierarchy = 'date_add'\n\n fieldsets = [\n ('Info ', {'fields': ['titre', ]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n\nclass CommentaireAdmin(admin.ModelAdmin):\n\n def affiche_image(self, obj):\n if obj.cover:\n return mark_safe(''.format(url=obj.cover.url))\n\n list_display = (\n 'article',\n 'nom',\n 'email',\n 'message',\n 'affiche_image',\n 'statut',\n 'date_add',\n 'date_update'\n )\n\n list_filter = (\n 'article',\n 'statut',\n 'date_add',\n 'date_update'\n )\n search_fields = (\n 'message',\n 'date_add'\n )\n readonly_fields = ['affiche_image']\n fieldsets = [\n ('Info ', {'fields': ['article', 'nom', 'email', 'message', ]\n }),\n ('Image', {'fields': [\n 'cover',\n 'affiche_image'\n ]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n\nclass ArticleAdmin(admin.ModelAdmin):\n list_display = (\n 'auteur',\n 'titre',\n 'affiche_image',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'categorie',\n 'statut',\n 'tags'\n )\n search_fields = (\n 'titre',\n )\n list_per_pages = 50\n date_hierarchy = 'date_add'\n readonly_fields = ['affiche_image']\n\n fieldsets = [\n ('Info ', {'fields': [\n 'auteur',\n 'titre',\n 'categorie',\n 'tags',\n 'contenu',\n 'resume'\n ]\n }),\n ('Image', {'fields': ['cover', 'affiche_image']}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n def affiche_image(self, obj):\n return mark_safe(''.format(url=obj.cover.url))\n\n\n\n\n\ndef _register(model, admin_class):\n admin.site.register(model, admin_class)\n\n\n_register(models.Article, ArticleAdmin)\n_register(models.Commentaire, CommentaireAdmin)\n_register(models.Categorie, CategorieAdmin)\n_register(models.Tag, TagAdmin)\n\n\n","repo_name":"paulemxx/Orgo","sub_path":"blog/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70423693956","text":"\"\"\"\nNaiveBayes is a generative classifier based on the Naive assumption that features are independent from each other\nP(w1, w2, ..., wn|y) = P(w1|y) P(w2|y) ... P(wn|y)\nThus argmax_{y} (P(y|w1,w2, ... wn)) can be modeled as argmax_{y} P(w1|y) P(w2|y) ... P(wn|y) P(y) using Bayes Rule\nand P(w1, w2, ... ,wn) is constant with respect to argmax_{y} \nPlease refer to lecture notes Chapter 4 for more details\n\"\"\"\n\nfrom collections import Counter, defaultdict\nfrom math import log\nimport operator\n\nimport numpy as np\nfrom Features import Features, tokenize\nfrom Model import *\n\n\nclass NBFeatures(Features):\n @classmethod \n def get_features(cls, tokenized, model):\n features = []\n token_to_embed = model['token_to_embed']\n for token in tokenized:\n embed = token_to_embed.get(token)\n if embed is not None:\n features.append(embed)\n else:\n features.append(token_to_embed['__OOV__'])\n return features\n\nclass NaiveBayes(Model):\n \n def __init__(self, model_file, vocab_size=None):\n super().__init__(model_file)\n self.vocab_size = vocab_size\n \n \n def train(self, input_file):\n \"\"\"\n This method is used to train your models and generated for a given input_file a trained model\n :param input_file: path to training file with a text and a label per each line\n :return: model: trained model \n \"\"\"\n \n wprobdenom = '__ALL__'\n \n nbFeatures = NBFeatures(input_file, vocab_size=self.vocab_size)\n \n model = {\n 'type': NaiveBayes.__class__,\n 'categories_probs': {},\n 'words_probs': {},\n 'options': nbFeatures.labelset,\n 'token_to_embed': nbFeatures.token_to_embed,\n 'embed_to_token': nbFeatures.embed_to_token,\n 'vocab_size': self.vocab_size,\n\n # 'label_to_embed': nbFeatures.label_to_embed,\n # 'embed_to_label': nbFeatures.embed_to_label,\n }\n \n wscores = defaultdict(lambda: Counter())\n cscores = Counter()\n \n features_list = list(map(lambda x: NBFeatures.get_features(x, model), nbFeatures.tokenized_text))\n # Y_true = list(map(lambda x: model['label_to_embed'][x], nbFeatures.labels))\n \n cutoff = int(len(features_list)*0.9)\n X_train, X_valid = features_list[:cutoff], features_list[cutoff:]\n Y_train, Y_valid = nbFeatures.labels[:cutoff], nbFeatures.labels[cutoff:]\n \n for features, label in zip(X_train, Y_train):\n cscores[label] += 1\n for f in features:\n wscores[label][f] += 1\n wscores[label][wprobdenom] += 1\n \n # Laplace Smoothing (+1)\n for label in model['options']:\n wprob = {}\n for token in nbFeatures.token_to_embed:\n embed = model['token_to_embed'][token]\n wprob[embed] = 1 / (wscores[label][wprobdenom] + 1)\n model['words_probs'][label] = wprob\n \n for label in model['options']:\n model['categories_probs'][label] =\\\n cscores[label] / len(features)\n for feature, score in wscores[label].items():\n # Laplace Smoothing (+1)\n # Overriding vocab values if applicable\n model['words_probs'][label][feature] = (score + 1) / (wscores[label][wprobdenom] + 1)\n \n \n # Validate\n train_err =\\\n np.sum(np.array(self._classify(X_train, model)) != np.array(Y_train))/len(Y_train)\n\n valid_err =\\\n np.sum(np.array(self._classify(X_valid, model)) != np.array(Y_valid))/len(Y_valid)\n \n print(f'TrainErr = {train_err}, ValidErr = {valid_err}', end='\\n')\n \n ## Save the model\n self.save_model(model)\n print('Saved model.')\n return model\n\n\n def _classify(self, features_list, model):\n def evaluate(features, option, model):\n score = log(model['categories_probs'][option])\n for f in features:\n score += 
log(model['words_probs'][option][f])\n return score \n \n preds = []\n for features in features_list:\n scores = {}\n for option in model['options']:\n scores[option] = evaluate(features, option, model)\n preds.append(\n max(scores.items(), key=operator.itemgetter(1))[0]\n )\n return preds\n \n def classify(self, input_file, model):\n \"\"\"\n This method will be called by us for the validation stage and or you can call it for evaluating your code \n on your own splits on top of the training sets seen to you\n :param input_file: path to input file with a text per line without labels\n :param model: the pretrained model\n :return: predictions list\n \"\"\" \n with open(input_file) as file:\n tokenized_sentences =\\\n map(tokenize, file.read().splitlines())\n\n features_list = list(map(lambda x: NBFeatures.get_features(x, model), tokenized_sentences))\n preds = self._classify(features_list, model) \n return preds\n\n\n","repo_name":"BasRizk/NaiveBayesVsPerceptronNLP","sub_path":"naivebayes.py","file_name":"naivebayes.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"41185222380","text":"import json\nfrom flask import Flask\n\napp = Flask(__name__)\n\nwith open('../data/keywords_json.json', 'r') as f:\n jsondata = json.load(f)\n\n\n@app.route('/')\ndef index():\n return jsondata\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"secantsquared/flaskreactapp","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73708577794","text":"#!/usr/bin/env python3\n\nimport argparse, re, os\nimport kmertools as kt\t\t#Available at https://github.com/jtladner/Modules\nimport fastatools as ft\t\t#Available at https://github.com/jtladner/Modules\nimport inout as io\t\t#Available at https://github.com/jtladner/Modules\nimport pandas as pd\nimport seaborn as sns\n\nfrom matplotlib import pyplot as plt\ntypeface='Arial'\n\n\n#Example command: coverage_per_seq_violinplot.py -d /Users/colleenung/Documents/197911_InfluenzavirusA/HA/SW_SC_noC/t0.200/197911_id_70_9_SWSC-x9-y30-t0.200.fasta -c /Users/colleenung/Documents/197911_InfluenzavirusA/HA/197911_id_70_9 -k 9 -t 0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95 --swCtoS -o 197911_id_70_9_coverage_per_seq_violinplot.png -s 197911_id_70_9_coverage_per_seq_stats.txt\n\nparser = argparse.ArgumentParser(description='''A script that will generate violin plot(s) to visualize the distribution of kmer coverage \n\t\t\t\t\t\tin the design on a per sequence basis. Can generate multiple violin plots, with each representing a different Xmer \n\t\t\t\t\t\tthreshold.''')\n\nparser.add_argument(\"-d\", \"--design\", metavar='\\b', help=\"Input design file. If looking at multiple Xmer thresholds, only provide path to one of the design files. Assuming designs share the same naming structure and are located in a directory containing subdirectories for each Xmer target threshold.\")\nparser.add_argument(\"-c\", \"--cluster\", metavar='\\b', help=\"Input cluster file to look at kmer coverage on a per sequence basis. Note, cluster names must end with cluster number.\")\n\nparser.add_argument(\"-k\", \"--ksize\", default=9, type=int, metavar='\\b', help=\"Size of kmer to use for looking at kmer coverage in the design [default: 9].\")\nparser.add_argument(\"-t\", \"--targets\", default=\"0.5,0.75,1\", metavar='\\b', help=\"Target thresholds to generate violin plots for. [default: 0.5,0.75,1]\")\nparser.add_argument(\"-o\", \"--output\", default=\"coverage_per_seq_violinplot.png\", metavar='\\b', help=\"Name of output PNG file with violin plot(s). [default: coverage_per_seq_violinplot.png]\")\nparser.add_argument(\"-s\", \"--statsoutput\", default=\"coverage_per_seq_violinplot.txt\", metavar='\\b', help=\"Name of output txt file with descriptive statistics. [default: coverage_per_seq_violinplot.txt]\")\nparser.add_argument(\"--swCtoS\", default=False, action=\"store_true\", help=\"Use this flag if Cysteine residues were converted to Serine residues in the SW portion of the design.\")\nparser.add_argument(\"-b\", \"--batchMode\", default=None, metavar='\\b', help=\"You can use this flag to run the script in batch mode. If used, it should be followed by the path to a tsv file with two columns and one row per design. The first column should correspond to --design and the second to --cluster. In this mode, the output filenames will be generated based on the input file names. 
[default: None]\")\n\n#New argument group to underscore that these arguments are required despite being provided with flags\n#reqArgs = parser.add_argument_group(\"required arguments\")\n\nargs = parser.parse_args()\n\n\n#Parsing target thresholds\ntargetThresh = sorted(list(set([float(x) for x in args.targets.split(\",\")])))\n\n#Prep for batch mode\nif args.batchMode:\n\tinputD = io.fileDict(args.batchMode, header=False)\nelse:\n\tinputD = {args.design:args.cluster}\n\n# Step through each design/cluster pair\nfor design, cluster in inputD.items():\n\t\n\t# Specify output names if running in batch mode\n\tif args.batchMode:\n\t\targs.output = \"%s_%s_vp.png\" % (os.path.basename(cluster), args.targets)\n\t\targs.statsoutput = \"%s_%s_vpStats.tsv\" % (os.path.basename(cluster), args.targets)\n\t\n\t#Reading in fasta file (in this case, cluster file). Returns two lists, the first containing seq names and the second containing its sequences.\n\tnames, seqs = ft.read_fasta_lists(cluster)\n\n\txthrList=[]\n\tcoverageperseqList=[]\n\tfor thr in targetThresh:\n\t\t#Using path of input design file to find design files for other desired target threshold(s), if applicable\n\t\tsearchstr= \".*/t([\\d.]*)/.*\"\n\t\tregexresult= re.search(searchstr, design)\n\t\tdesignPath= re.sub(str(regexresult.group(1)), (\"%.3f\" % (thr)), design)\n\n\t\t#Creating set of all unique kmers within design\n\t\tdesignkSet= kt.kmerSetFasta(designPath, args.ksize, filter=[])\n\n\t\tfor s in seqs:\n\t\t\tif args.swCtoS:\n\t\t\t\ts = s.replace(\"C\", \"S\")\n\t\t\t#Creating set of all unique kmers within sequence\n\t\t\tsSet = kt.kmerSet(s, args.ksize, filter=[\"X\"])\n\t\t\tif len(sSet)>0:\n\t\t\t\txmersCovered= sSet.intersection(designkSet)\n\t\t\t\tpercentCovered= (len(xmersCovered) / len(sSet))*100\n\t\t\t\txthrList.append((\"%.3f\" % (thr)))\n\t\t\t\tcoverageperseqList.append(percentCovered)\n\n\tlabelY= \"%% %dmers covered per sequence\" % args.ksize\n\tdataDict= {\"Xmer Threshold\":xthrList, labelY:coverageperseqList}\n\t#Creating pandas dataframe from dictionary\n\tdf = pd.DataFrame(dataDict)\n\n\n\t#Generating violin plot from pandas dataframe using Seaborn\n\tfig, ax = plt.subplots(1,1,figsize=(10,10),facecolor='w')\n\tsns.violinplot(x=df[\"Xmer Threshold\"], y=df[labelY], palette=\"Set3\", ax=ax)\n\tax.set_ylabel(labelY)\n\tax.set_ylim(0,100)\n\tax.set_xlabel(\"Xmer Threshold\")\n\tfig.savefig(args.output, bbox_inches='tight', dpi=200)\n\tplt.close(fig=fig)\n\n\n\t#Writing out file with descriptive statistics\n\twith open(args.statsoutput, \"w\") as fout:\n\t\tline1= \"\\tMaximum\\tQ3\\tMedian\\tQ1\\tMinimum\\tIQR\"\n\t\tfout.write(line1)\n\t\n\t\tfor thr in targetThresh:\n\t\t\tthrDF= df.loc[df[\"Xmer Threshold\"] == (\"%.3f\" % (thr))]\n\t\t\n\t\t\tmaximum= thrDF[labelY].max()\n\t\t\tq3= thrDF[labelY].quantile(q=0.75, interpolation='midpoint')\n\t\t\tmedian= thrDF[labelY].quantile(q=0.5, interpolation='midpoint')\n\t\t\tq1= thrDF[labelY].quantile(q=0.25, interpolation='midpoint')\n\t\t\tminimum= thrDF[labelY].min()\n\t\t\tIQR= q3-q1\n\t\t\n\t\tline2= \"\\n%.3f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\" % (thr,maximum,q3,median,q1,minimum,IQR)\n\t\tfout.write(line2)","repo_name":"LadnerLab/Library-Design","sub_path":"extensions/coverage_per_seq_violinplot.py","file_name":"coverage_per_seq_violinplot.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
+{"seq_id":"32404734265","text":"#pylint:disable=E1101\n\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.distributions import Categorical\nimport numpy as np\nimport pandas as pd\nimport numpy as numpy\nimport matplotlib.pyplot as plt\nimport argparse\nfrom models import MyModel\nfrom math_dataset import MyDataset\n\n\ndef main():\n _i, _j, _k = 2,3,3\n dataset = MyDataset(_i,_j,_k)\n\n dtype = torch.float\n device = torch.device(\"cpu\")\n # device = torch.device(\"cuda:0\")\n\n #batch, input, hidden, output\n N, D_in, H, D_out = 10, _i+_j+_k, 16, _i*_j*_k\n msg_len = 10\n\n x, y = dataset.get_frame()\n x = torch.tensor(x, dtype=dtype, device=device)\n #x = torch.cat((x,x,x,x,x),0)\n y = torch.tensor(y, dtype=torch.long, device=device).squeeze()\n #y = torch.cat((y,y,y,y,y),0)\n print(x.size(), y.size())\n #x = torch.zeros(N, D_in, device=device, dtype=dtype)\n #y = torch.zeros(N, device=device, dtype=dtype)\n\n model = MyModel(D_in, H, D_out)\n #model = torch.nn.Linear(D_in, D_out)\n\n loss_fn = torch.nn.CrossEntropyLoss(reduce=None)\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\n\n for t in range(10001):\n if True: #reinforce\n y_pred = model(x)\n probs = F.softmax(y_pred, dim=1)\n m = Categorical(probs)\n action = m.sample()\n reward = torch.eq(action, y).to(torch.float)\n reward = (reward - reward.mean())\n loss = -m.log_prob(action) * reward\n model.zero_grad()\n loss.sum().backward()\n #loss.backward(loss)\n optimizer.step()\n \n elif True:\n y_pred = model(x)\n \n else: # supervised\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n model.zero_grad()\n loss.backward()\n optimizer.step()\n\n if t % 100 == 0:\n with torch.no_grad():\n y_pred = model(x)\n eq = torch.eq(torch.argmax(y_pred, dim=1), y)\n print(\"t: {}, acc: {}/{} = {}\".format(t, torch.sum(eq).item(), eq.numel(), torch.sum(eq).item() / eq.numel()))\n\n\n torch.save({'epoch': t,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': loss\n }, \"checkpoints.tar\")\n\nif __name__ == \"__main__\":\n main()\n \n\n\n\n # model3 = MyModel(D_in, H, D_out)\n # optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n # checkpoint = torch.load(\"checkpoints.tar\")\n # model.load_state_dict(checkpoint['model_state_dict'])\n # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n # epoch = checkpoint['epoch']\n # loss = checkpoint['loss']\n\n # print(model.state_dict())\n # print(optimizer.state_dict())\n\n # PATH = \"model.pt\"\n # torch.save(model.state_dict(), PATH)\n\n # model2 = MyModel(D_in, H, D_out)\n # model.load_state_dict(torch.load(PATH))\n # model.eval() # for dropout and BN","repo_name":"parkjunsoo91/number-communication","sub_path":"supervised.py","file_name":"supervised.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10697609667","text":"import json\nd = {\n \"vezetekNev\": \"Kovacs\",\n \"keresztNev\": \"Janos\",\n \"kor\": 25,\n \"cim\":\n {\n \"utcaHazszam\": \"2. utca 21.\",\n \"varos\": \"New York\",\n \"allam\": \"NY\",\n \"iranyitoSzam\": \"10021\"\n }\n}\nprint(json.dumps(d))\nwith open(\"dump.txt\", \"w\") as f:\n\tjson.dump(d,f)\n\n","repo_name":"Gero4884/Gero4884","sub_path":"json1.py","file_name":"json1.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31721912672","text":"class SyntacticWorkingMemory:\n def __init__(self, brain_model):\n self.brain_model = brain_model\n self.working_memory = []\n self.in_active_memory = []\n self.not_in_active_memory = []\n\n def initialize(self):\n self.working_memory = []\n\n def maintain(self, site):\n if 'working_memory' in self.brain_model.local_file_system.settings and self.brain_model.local_file_system.settings['working_memory']:\n if not site.active_in_syntactic_working_memory:\n site.active_in_syntactic_working_memory = True\n\n def remove_item(self, ps):\n ps.active_in_syntactic_working_memory = False\n if ps.mother and (ps.contains_features({'T/fin', 'OP:REL'})):\n node = ps\n while node.mother:\n node = node.mother\n node.active_in_syntactic_working_memory = False\n\n def remove_items(self, merge_sites):\n for site, transfer, address_label in merge_sites:\n site.active_in_syntactic_working_memory = False\n\n def active_working_memory_catalog(self, ps):\n all_nodes_available = [N for N in ps.geometrical_minimal_search()]\n nodes_not_in_active_working_memory = []\n new_nodes_available = all_nodes_available.copy()\n for N in all_nodes_available:\n if not N.active_in_syntactic_working_memory:\n new_nodes_available.remove(N)\n nodes_not_in_active_working_memory.insert(0, N) # Outside list is stack\n return [node for node in new_nodes_available], [node for node in nodes_not_in_active_working_memory]\n","repo_name":"pajubrat/parser-grammar","sub_path":"lpparse/working_memory.py","file_name":"working_memory.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"38191438054","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 25 13:56:35 2018\n\n@author: galaz\n\"\"\"\n\nimport pyedflib\nimport numpy as np\nimport matplotlib as plt\nf = pyedflib.EdfReader(\"3-1-Schlucktest_Leitfaehigkeit_edited_triggerMarker_edited.bdf\")\nn = f.signals_in_file\nsignal_labels = f.getSignalLabels()\nsigbufs = np.zeros((n, f.getNSamples()[0]))\nfor i in np.arange(n):\n sigbufs[i, :] = f.readSignal(i)\n \nBI = sigbufs[0] \nEMG = sigbufs[1] \nannotations = f.readAnnotations() \nsample_frequency= 4000 \n\ndef segment(t_after,t_befor,sample_frequency,annotations,BI,EMG):\n BI_segment_list = []\n EMG_segment_list = []\n for i in range (annotations[0].size):\n BI_segment = []\n EMG_segment = []\n swallow_index= int(sample_frequency*annotations[0][i])\n segment_start =swallow_index-int(sample_frequency*t_befor)\n segment_end = swallow_index+int(sample_frequency*t_after)\n segment_length = segment_end-segment_start\n for j in range (segment_length):\n BI_segment.append(BI[segment_start+j])\n EMG_segment.append(EMG[segment_start+j])\n BI_segment_list.append(BI_segment) \n EMG_segment_list.append(EMG_segment) \n return [BI_segment_list, EMG_segment_list] \n \nresult=segment(2,0.5,4000,annotations,BI,EMG) \n\nfig = plt.pyplot.figure()\nax = fig.add_subplot(111)\nnumberofsegment='123456'\n\nfor i in range(annotations[0].size):\n ax = fig.add_subplot(2,annotations[0].size,i+1)\n ax.plot(result[0][i])\n ax = fig.add_subplot(2,annotations[0].size,(annotations[0].size+i+1))\n ax.plot(result[1][i])\n ax.set_title('The: %s st'%numberofsegment[i])\n\n\n","repo_name":"Gamil-Farea/Schluckerkennung","sub_path":"Code/Gamil_test_V1.py","file_name":"Gamil_test_V1.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32691022891","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*\n\nfrom pwn import *\nfrom sys import argv\nfrom time import sleep\n\n\ncontext.terminal = ['tmux', 'sp', '-h']\ncontext.log_level = \"debug\"\n\nchall = \"./chal_patched_stdout\"\nlibc = ELF(\"./libc.so.6\")\nelf = ELF(chall)\ncontext.binary = chall\ncontext.binary.checksec()\n\nif len(argv) >= 2 and argv[1] == \"r\":\n p = remote(\"wfw1.2023.ctfcompetition.com\", 1337)\nelif len(argv) >= 2 and argv[1] == \"d\":\n\tcmd = \"\"\"\n b *main+504\n b *main+595\n\t\tc\n\t\"\"\"\n\tp = gdb.debug(chall,cmd)\nelse:\n p = process(chall)\n\np.recvuntil(\"shot.\\n\")\nbin_base = eval(b'0x' + p.recv(12))\nelf.address = bin_base\nlog.info(\"bin base: \" + hex(bin_base))\n\nfor i in range(7):\n p.recvline()\n\n\"\"\"\nlibc_base = eval(b'0x' + p.recv(12))\nlibc.address = libc_base\nlog.info(\"libc base: \" + hex(libc_base))\n\"\"\"\n\nmsg_addr = bin_base + 0x21e0\npayload = \"{} {}\".format(hex(msg_addr), 0x7e).encode()\np.sendlineafter(\"expire\", payload)\n\n\np.interactive()\n","repo_name":"t3mp-0xCC/write-up","sub_path":"google_ctf_2023/write-flag-where/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25179251635","text":"\"\"\"\nTest the ability to regress/remove read-only paths with api version >= 201\n\"\"\"\nimport os\nimport unittest\n\nimport commontest as comtst\nimport fileset\n\nfrom rdiff_backup import Globals, rpath\n\n\nclass ActionReadOnlyTest(unittest.TestCase):\n \"\"\"\n Test that rdiff-backup can properly handle read-only paths\n \"\"\"\n\n def setUp(self):\n self.base_dir = os.path.join(comtst.abs_test_dir,\n b\"readonly_actions\")\n self.from1_struct = {\n \"from1\": {\"contents\": {\n \"dirA\": {\"contents\": {\"fileA\": {\"content\": \"initial\"}}},\n \"fileB\": {\"content\": \"something\"}\n }}}\n self.from1_path = os.path.join(self.base_dir, b\"from1\")\n self.from2_struct = {\n \"from2\": {\"contents\": {\n \"dirA\": {\"contents\": {\"fileA\": {\"content\": \"afterwards\"}}},\n \"fileB\": {\"content\": \"now else\"}\n }}}\n rec = {\"fmode\": 0o400, \"dmode\": 0o500}\n self.from2_path = os.path.join(self.base_dir, b\"from2\")\n fileset.create_fileset(self.base_dir, self.from1_struct, recurse=rec)\n fileset.create_fileset(self.base_dir, self.from2_struct, recurse=rec)\n fileset.remove_fileset(self.base_dir, {\"bak\": {\"type\": \"dir\"}})\n self.bak_path = os.path.join(self.base_dir, b\"bak\")\n\n # we backup twice to the same backup repository at different times\n self.assertEqual(comtst.rdiff_backup_action(\n False, False, self.from1_path, self.bak_path,\n (\"--api-version\", \"201\", \"--current-time\", \"10000\"),\n b\"backup\", ()), 0)\n self.assertEqual(comtst.rdiff_backup_action(\n False, True, self.from2_path, self.bak_path,\n (\"--api-version\", \"201\", \"--current-time\", \"20000\"),\n b\"backup\", ()), 0)\n\n self.success = False\n\n def test_readonly_regress(self):\n \"\"\"test the \"regress\" action on a read-only repository\"\"\"\n\n # we regress forcefully\n self.assertEqual(comtst.rdiff_backup_action(\n False, None, self.bak_path, None,\n (\"--api-version\", \"201\", \"--force\"),\n b\"regress\", ()), 0)\n\n # all tests were successful\n self.success = True\n\n def test_readonly_remove(self):\n \"\"\"test the \"remove\" action on a read-only repository\"\"\"\n\n # we remove forcefully\n self.assertEqual(comtst.rdiff_backup_action(\n True, None, self.bak_path, None,\n (\"--api-version\", \"201\", \"--force\"),\n b\"remove\", (\"increments\", \"--older-than\", \"0B\")), 0)\n\n # all tests were successful\n self.success = True\n\n @unittest.skip(\"Skipped until issue #790 can be fixed as part of 2.4\")\n def test_readonly_delete(self):\n \"\"\"test the \"delete\" method of rpath on a read-only repository\"\"\"\n\n from1_rp = rpath.RPath(Globals.local_connection, self.from1_path)\n from1_rp.delete()\n self.assertIsNone(from1_rp.lstat())\n\n # all tests were successful\n self.success = True\n\n def tearDown(self):\n # we clean-up only if the test was successful\n if self.success:\n fileset.remove_fileset(self.base_dir, self.from1_struct)\n fileset.remove_fileset(self.base_dir, self.from2_struct)\n fileset.remove_fileset(self.base_dir, {\"bak\": {\"type\": \"dir\"}})\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"rdiff-backup/rdiff-backup","sub_path":"testing/readonly_actions_test.py","file_name":"readonly_actions_test.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":969,"dataset":"github-code","pt":"61"}
+{"seq_id":"37253410841","text":"from django.core.management.base import BaseCommand\nfrom optparse import make_option\n\nfrom synnefo import quotas\n\n\nclass Command(BaseCommand):\n help = \"Detect and resolve pending commissions to Quotaholder\"\n output_transaction = True\n option_list = BaseCommand.option_list + (\n make_option(\"--fix\", dest=\"fix\",\n action='store_true',\n default=False,\n help=\"Fix pending commissions\"\n ),\n )\n\n def handle(self, *args, **options):\n fix = options['fix']\n\n accepted, rejected = quotas.resolve_pending_commissions()\n\n if accepted:\n self.stdout.write(\"Pending accepted commissions:\\n %s\\n\"\n % list_to_string(accepted))\n\n if rejected:\n self.stdout.write(\"Pending rejected commissions:\\n %s\\n\"\n % list_to_string(rejected))\n\n if fix and (accepted or rejected):\n self.stdout.write(\"Fixing pending commissions..\\n\")\n quotas.resolve_commissions(accept=accepted, reject=rejected,\n strict=False)\n\n\ndef list_to_string(l):\n return \",\".join([str(x) for x in l])\n","repo_name":"mpastyl/websocket-console","sub_path":"synnefo/snf-cyclades-app/synnefo/quotas/management/commands/reconcile-commissions-cyclades.py","file_name":"reconcile-commissions-cyclades.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36448126999","text":"import asyncio\nimport logging\nimport time\nimport six\nimport json\nimport os\nimport mpyq\nimport async_timeout\nfrom s2clientprotocol import sc2api_pb2 as sc_pb\n\nfrom .client import Client\nfrom .data import CreateGameError, Result\nfrom .game_state import GameState\nfrom .player import Bot, Human\nfrom .portconfig import Portconfig\nfrom .protocol import ConnectionAlreadyClosed, ProtocolError\nfrom .sc2process import SC2Process\n\nlogger = logging.getLogger(__name__)\n\n\nclass SlidingTimeWindow:\n def __init__(self, size: int):\n assert size > 0\n\n self.window_size = size\n self.window = []\n\n def push(self, value: float):\n self.window = (self.window + [value])[-self.window_size :]\n\n def clear(self):\n self.window = []\n\n @property\n def sum(self) -> float:\n return sum(self.window)\n\n @property\n def available(self) -> float:\n return sum(self.window[1:])\n\n @property\n def available_fmt(self) -> float:\n return \",\".join(f\"{w:.2f}\" for w in self.window[1:])\n\n\nasync def _play_game_human(client, player_id, realtime, game_time_limit):\n while True:\n state = await client.observation()\n if client._game_result:\n return client._game_result[player_id]\n\n if game_time_limit and (state.observation.observation.game_loop * 0.725 * (1 / 16)) > game_time_limit:\n print(state.observation.game_loop, state.observation.game_loop * 0.14)\n return Result.Tie\n\n if not realtime:\n await client.step()\n\n\nasync def _play_game_ai(client, player_id, ai, realtime, step_time_limit, game_time_limit):\n if realtime:\n assert step_time_limit is None\n\n # step_time_limit works like this:\n # * If None, then step time is not limited\n # * If given integer or float, the bot will simpy resign if any step takes longer than that\n # * Otherwise step_time_limit must be an object, with following settings:\n #\n # Key | Value | Description\n # ------------|------------|-------------\n # penalty | None | No penalty, the bot can continue on next step\n # penalty | N: int | Cooldown penalty, BotAI.on_step will not be called for N steps\n # penalty | \"resign\" | Bot resigns when going over time limit\n # time_limit | int/float | Time limit for a single step\n # window_size | N: int | The time limit will be used for last N steps, instad of 1\n #\n # Cooldown is a harsh penalty. The both loses the ability to act, but even worse,\n # the observation data from skipped steps is also lost. 
It's like falling asleep in\n # a middle of the game.\n time_penalty_cooldown = 0\n if step_time_limit is None:\n time_limit = None\n time_window = None\n time_penalty = None\n elif isinstance(step_time_limit, (int, float)):\n time_limit = float(step_time_limit)\n time_window = SlidingTimeWindow(1)\n time_penalty = \"resign\"\n else:\n assert isinstance(step_time_limit, dict)\n time_penalty = step_time_limit.get(\"penalty\", None)\n time_window = SlidingTimeWindow(int(step_time_limit.get(\"window_size\", 1)))\n time_limit = float(step_time_limit.get(\"time_limit\", None))\n\n ai._initialize_variables()\n\n game_data = await client.get_game_data()\n game_info = await client.get_game_info()\n\n # This game_data will become self._game_data in botAI\n ai._prepare_start(client, player_id, game_info, game_data, realtime=realtime)\n state = await client.observation()\n # check game result every time we get the observation\n if client._game_result:\n await ai.on_end(client._game_result[player_id])\n return client._game_result[player_id]\n gs = GameState(state.observation)\n proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n ai._prepare_step(gs, proto_game_info)\n await ai.on_before_start()\n ai._prepare_first_step()\n try:\n await ai.on_start()\n except Exception as e:\n logger.exception(f\"AI on_start threw an error\")\n logger.error(f\"resigning due to previous error\")\n await ai.on_end(Result.Defeat)\n return Result.Defeat\n\n iteration = 0\n while True:\n if iteration != 0:\n if realtime:\n # On realtime=True, might get an error here: sc2.protocol.ProtocolError: ['Not in a game']\n try:\n requested_step = gs.game_loop + client.game_step\n state = await client.observation(requested_step)\n # If the bot took too long in the previous observation, request another observation one frame after\n if state.observation.observation.game_loop > requested_step:\n # TODO Remove these 2 comments\n # t = state.observation.observation.game_loop\n state = await client.observation(state.observation.observation.game_loop + 1)\n # print(f\"Requested step: {requested_step}, received: {t}, new: {state.observation.observation.game_loop}\")\n except ProtocolError:\n pass\n else:\n state = await client.observation()\n # check game result every time we get the observation\n if client._game_result:\n try:\n await ai.on_end(client._game_result[player_id])\n except TypeError as error:\n # print(f\"caught type error {error}\")\n # print(f\"return {client._game_result[player_id]}\")\n return client._game_result[player_id]\n return client._game_result[player_id]\n gs = GameState(state.observation)\n logger.debug(f\"Score: {gs.score.score}\")\n\n if game_time_limit and (gs.game_loop * 0.725 * (1 / 16)) > game_time_limit:\n await ai.on_end(Result.Tie)\n return Result.Tie\n proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n ai._prepare_step(gs, proto_game_info)\n\n logger.debug(f\"Running AI step, it={iteration} {gs.game_loop * 0.725 * (1 / 16):.2f}s\")\n\n try:\n if realtime:\n # Issue event like unit created or unit destroyed\n await ai.issue_events()\n await ai.on_step(iteration)\n await ai._after_step()\n else:\n if time_penalty_cooldown > 0:\n time_penalty_cooldown -= 1\n logger.warning(f\"Running AI step: penalty cooldown: {time_penalty_cooldown}\")\n iteration -= 1 # Do not increment the iteration on this round\n elif time_limit is None:\n # Issue event like unit created or unit destroyed\n await ai.issue_events()\n await ai.on_step(iteration)\n await ai._after_step()\n 
else:\n out_of_budget = False\n budget = time_limit - time_window.available\n\n # Tell the bot how much time it has left attribute\n ai.time_budget_available = budget\n\n if budget < 0:\n logger.warning(f\"Running AI step: out of budget before step\")\n step_time = 0.0\n out_of_budget = True\n else:\n step_start = time.monotonic()\n try:\n async with async_timeout.timeout(budget):\n await ai.issue_events()\n await ai.on_step(iteration)\n except asyncio.TimeoutError:\n step_time = time.monotonic() - step_start\n logger.warning(\n f\"Running AI step: out of budget; \"\n + f\"budget={budget:.2f}, steptime={step_time:.2f}, \"\n + f\"window={time_window.available_fmt}\"\n )\n out_of_budget = True\n step_time = time.monotonic() - step_start\n\n time_window.push(step_time)\n\n if out_of_budget and time_penalty is not None:\n if time_penalty == \"resign\":\n raise RuntimeError(\"Out of time\")\n else:\n time_penalty_cooldown = int(time_penalty)\n time_window.clear()\n\n await ai._after_step()\n except Exception as e:\n if isinstance(e, ProtocolError) and e.is_game_over_error:\n if realtime:\n return None\n result = client._game_result[player_id]\n if result is None:\n logger.error(\"Game over, but no results gathered\")\n raise\n await ai.on_end(result)\n return result\n # NOTE: this message is caught by pytest suite\n logger.exception(f\"AI step threw an error\") # DO NOT EDIT!\n logger.error(f\"Error: {e}\")\n logger.error(f\"Resigning due to previous error\")\n try:\n await ai.on_end(Result.Defeat)\n except TypeError as error:\n # print(f\"caught type error {error}\")\n # print(f\"return {Result.Defeat}\")\n return Result.Defeat\n return Result.Defeat\n\n logger.debug(f\"Running AI step: done\")\n\n if not realtime:\n if not client.in_game: # Client left (resigned) the game\n await ai.on_end(client._game_result[player_id])\n return client._game_result[player_id]\n\n await client.step()\n\n iteration += 1\n\n\nasync def _play_game(\n player, client, realtime, portconfig, step_time_limit=None, game_time_limit=None, rgb_render_config=None\n):\n assert isinstance(realtime, bool), repr(realtime)\n\n player_id = await client.join_game(\n player.name, player.race, portconfig=portconfig, rgb_render_config=rgb_render_config\n )\n logging.info(f\"Player {player_id} - {player.name if player.name else str(player)}\")\n\n if isinstance(player, Human):\n result = await _play_game_human(client, player_id, realtime, game_time_limit)\n else:\n result = await _play_game_ai(client, player_id, player.ai, realtime, step_time_limit, game_time_limit)\n\n logging.info(f\"Result for player {player_id} - {player.name if player.name else str(player)}: {result._name_}\")\n\n return result\n\n\nasync def _play_replay(client, ai, realtime=False, player_id=0):\n ai._initialize_variables()\n\n game_data = await client.get_game_data()\n game_info = await client.get_game_info()\n client.game_step = 1\n # This game_data will become self._game_data in botAI\n ai._prepare_start(client, player_id, game_info, game_data, realtime=realtime)\n state = await client.observation()\n # Check game result every time we get the observation\n if client._game_result:\n await ai.on_end(client._game_result[player_id])\n return client._game_result[player_id]\n gs = GameState(state.observation)\n proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n ai._prepare_step(gs, proto_game_info)\n ai._prepare_first_step()\n try:\n await ai.on_start()\n except Exception as e:\n logger.exception(f\"AI on_start threw an error\")\n 
logger.error(f\"resigning due to previous error\")\n await ai.on_end(Result.Defeat)\n return Result.Defeat\n\n iteration = 0\n while True:\n if iteration != 0:\n if realtime:\n # TODO: check what happens if a bot takes too long to respond, so that the requested\n # game_loop might already be in the past\n state = await client.observation(gs.game_loop + client.game_step)\n else:\n state = await client.observation()\n # check game result every time we get the observation\n if client._game_result:\n try:\n await ai.on_end(client._game_result[player_id])\n except TypeError as error:\n # print(f\"caught type error {error}\")\n # print(f\"return {client._game_result[player_id]}\")\n return client._game_result[player_id]\n return client._game_result[player_id]\n gs = GameState(state.observation)\n logger.debug(f\"Score: {gs.score.score}\")\n\n proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n ai._prepare_step(gs, proto_game_info)\n\n logger.debug(f\"Running AI step, it={iteration} {gs.game_loop * 0.725 * (1 / 16):.2f}s\")\n\n try:\n if realtime:\n # Issue event like unit created or unit destroyed\n await ai.issue_events()\n await ai.on_step(iteration)\n await ai._after_step()\n else:\n\n # Issue event like unit created or unit destroyed\n await ai.issue_events()\n await ai.on_step(iteration)\n await ai._after_step()\n\n except Exception as e:\n if isinstance(e, ProtocolError) and e.is_game_over_error:\n if realtime:\n return None\n # result = client._game_result[player_id]\n # if result is None:\n # logger.error(\"Game over, but no results gathered\")\n # raise\n await ai.on_end(Result.Victory)\n return None\n # NOTE: this message is caught by pytest suite\n logger.exception(f\"AI step threw an error\") # DO NOT EDIT!\n logger.error(f\"Error: {e}\")\n logger.error(f\"Resigning due to previous error\")\n try:\n await ai.on_end(Result.Defeat)\n except TypeError as error:\n # print(f\"caught type error {error}\")\n # print(f\"return {Result.Defeat}\")\n return Result.Defeat\n return Result.Defeat\n\n logger.debug(f\"Running AI step: done\")\n\n if not realtime:\n if not client.in_game: # Client left (resigned) the game\n await ai.on_end(Result.Victory)\n return Result.Victory\n\n await client.step() # unindent one line to work in realtime\n\n iteration += 1\n\n\nasync def _setup_host_game(server, map_settings, players, realtime, random_seed=None, disable_fog=None):\n r = await server.create_game(map_settings, players, realtime, random_seed, disable_fog)\n if r.create_game.HasField(\"error\"):\n err = f\"Could not create game: {CreateGameError(r.create_game.error)}\"\n if r.create_game.HasField(\"error_details\"):\n err += f\": {r.create_game.error_details}\"\n logger.critical(err)\n raise RuntimeError(err)\n\n return Client(server._ws)\n\n\nasync def _host_game(\n map_settings,\n players,\n realtime,\n portconfig=None,\n save_replay_as=None,\n step_time_limit=None,\n game_time_limit=None,\n rgb_render_config=None,\n random_seed=None,\n sc2_version=None,\n disable_fog=None,\n):\n\n assert players, \"Can't create a game without players\"\n\n assert any(isinstance(p, (Human, Bot)) for p in players)\n\n async with SC2Process(\n fullscreen=players[0].fullscreen, render=rgb_render_config is not None, sc2_version=sc2_version\n ) as server:\n await server.ping()\n\n client = await _setup_host_game(server, map_settings, players, realtime, random_seed, disable_fog)\n # Bot can decide if it wants to launch with 'raw_affects_selection=True'\n if not isinstance(players[0], Human) and 
getattr(players[0].ai, \"raw_affects_selection\", None) is not None:\n client.raw_affects_selection = players[0].ai.raw_affects_selection\n\n try:\n result = await _play_game(\n players[0], client, realtime, portconfig, step_time_limit, game_time_limit, rgb_render_config\n )\n if save_replay_as is not None:\n await client.save_replay(save_replay_as)\n await client.leave()\n await client.quit()\n except ConnectionAlreadyClosed:\n logging.error(f\"Connection was closed before the game ended\")\n return None\n\n return result\n\n\nasync def _host_game_aiter(\n map_settings, players, realtime, portconfig=None, save_replay_as=None, step_time_limit=None, game_time_limit=None,\n):\n assert players, \"Can't create a game without players\"\n\n assert any(isinstance(p, (Human, Bot)) for p in players)\n\n async with SC2Process() as server:\n while True:\n await server.ping()\n\n client = await _setup_host_game(server, map_settings, players, realtime)\n if not isinstance(players[0], Human) and getattr(players[0].ai, \"raw_affects_selection\", None) is not None:\n client.raw_affects_selection = players[0].ai.raw_affects_selection\n\n try:\n result = await _play_game(players[0], client, realtime, portconfig, step_time_limit, game_time_limit)\n\n if save_replay_as is not None:\n await client.save_replay(save_replay_as)\n await client.leave()\n except ConnectionAlreadyClosed:\n logging.error(f\"Connection was closed before the game ended\")\n return\n\n new_players = yield result\n if new_players is not None:\n players = new_players\n\n\ndef _host_game_iter(*args, **kwargs):\n game = _host_game_aiter(*args, **kwargs)\n new_playerconfig = None\n while True:\n new_playerconfig = yield asyncio.get_event_loop().run_until_complete(game.asend(new_playerconfig))\n\n\nasync def _join_game(\n players, realtime, portconfig, save_replay_as=None, step_time_limit=None, game_time_limit=None,\n):\n async with SC2Process(fullscreen=players[1].fullscreen) as server:\n await server.ping()\n\n client = Client(server._ws)\n # Bot can decide if it wants to launch with 'raw_affects_selection=True'\n if not isinstance(players[1], Human) and getattr(players[1].ai, \"raw_affects_selection\", None) is not None:\n client.raw_affects_selection = players[1].ai.raw_affects_selection\n\n try:\n result = await _play_game(players[1], client, realtime, portconfig, step_time_limit, game_time_limit)\n if save_replay_as is not None:\n await client.save_replay(save_replay_as)\n await client.leave()\n await client.quit()\n except ConnectionAlreadyClosed:\n logging.error(f\"Connection was closed before the game ended\")\n return None\n\n return result\n\n\nasync def _setup_replay(server, replay_path, realtime, observed_id):\n await server.start_replay(replay_path, realtime, observed_id)\n return Client(server._ws)\n\n\nasync def _host_replay(replay_path, ai, realtime, portconfig, base_build, data_version, observed_id):\n async with SC2Process(fullscreen=False, base_build=base_build, data_hash=data_version) as server:\n response = await server.ping()\n\n client = await _setup_replay(server, replay_path, realtime, observed_id)\n result = await _play_replay(client, ai, realtime)\n return result\n\n\ndef get_replay_version(replay_path):\n with open(replay_path, \"rb\") as f:\n replay_data = f.read()\n replay_io = six.BytesIO()\n replay_io.write(replay_data)\n replay_io.seek(0)\n archive = mpyq.MPQArchive(replay_io).extract()\n metadata = json.loads(archive[b\"replay.gamemetadata.json\"].decode(\"utf-8\"))\n return metadata[\"BaseBuild\"], 
metadata[\"DataVersion\"]\n\n\ndef run_game(map_settings, players, **kwargs):\n if sum(isinstance(p, (Human, Bot)) for p in players) > 1:\n host_only_args = [\"save_replay_as\", \"rgb_render_config\", \"random_seed\", \"sc2_version\", \"disable_fog\"]\n join_kwargs = {k: v for k, v in kwargs.items() if k not in host_only_args}\n\n portconfig = Portconfig()\n result = asyncio.get_event_loop().run_until_complete(\n asyncio.gather(\n _host_game(map_settings, players, **kwargs, portconfig=portconfig),\n _join_game(players, **join_kwargs, portconfig=portconfig),\n )\n )\n else:\n result = asyncio.get_event_loop().run_until_complete(_host_game(map_settings, players, **kwargs))\n return result\n\n\ndef run_replay(ai, replay_path, realtime=False, observed_id=0):\n portconfig = Portconfig()\n assert os.path.isfile(replay_path), f\"Replay does not exist at the given path: {replay_path}\"\n assert os.path.isabs(\n replay_path\n ), f'Replay path has to be an absolute path, e.g. \"C:/replays/my_replay.SC2Replay\" but given path was \"{replay_path}\"'\n base_build, data_version = get_replay_version(replay_path)\n result = asyncio.get_event_loop().run_until_complete(\n _host_replay(replay_path, ai, realtime, portconfig, base_build, data_version, observed_id)\n )\n return result\n","repo_name":"mitchkoko/firstbot","sub_path":"python-sc2/sc2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21385,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
+{"seq_id":"1640022022","text":"from functools import lru_cache\n\n\nclass Solution:\n def minimumDistance(self, word: str) -> int:\n word_length = len(word)\n\n def distance(char_a: str, char_b: str) -> int:\n if not char_a or not char_b:\n # return 0 for the first letter\n return 0\n\n index_a = ord(char_a) - ord('A')\n index_b = ord(char_b) - ord('A')\n\n return abs(index_a // 6 - index_b // 6) + abs(index_a % 6 - index_b % 6)\n\n @lru_cache(maxsize=None)\n def find(ind: int, key_a: str, key_b: str) -> int:\n # boundary condition\n if ind == word_length:\n return 0\n\n char = word[ind]\n\n return min(\n find(ind + 1, key_a, char) + distance(key_b, char),\n find(ind + 1, char, key_b) + distance(key_a, char)\n )\n\n return find(0, None, None)\n\n\nif __name__ == '__main__':\n word = \"A\" * 300\n print(Solution().minimumDistance(word))","repo_name":"amogchandrashekar/Leetcode","sub_path":"Hard/Minimum Distance to Type a Word Using Two Fingers.py","file_name":"Minimum Distance to Type a Word Using Two Fingers.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"40233180338","text":"from __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport logging\nimport os\n\n# NB(zundel): these definitions are a part of the source from https://github.com/pantsbuild/pants\nfrom pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary\nfrom pants.base.exceptions import TargetDefinitionException\nfrom pants.base.payload import Payload\nfrom pants.base.payload_field import PrimitiveField\n\n\nlogger = logging.getLogger(__name__)\n\nclass JaxWsLibrary(ExportableJvmLibrary):\n \"\"\"Generates a Java library from JAX-WS wsdl files.\"\"\"\n\n def __init__(self,\n payload=None,\n vm_args=None,\n xjc_args=None,\n extra_args=None,\n **kwargs):\n \"\"\"Generates a Java library from WSDL files using JAX-WS.\n\n :param list vm_args: Additional arguments for the JVM.\n :param list xjc_args: Additional arguments to xjc.\n :param list extra_args: Additional arguments for the CLI.\n \"\"\"\n payload = payload or Payload()\n payload.add_fields({\n 'vm_args': PrimitiveField(vm_args or ()),\n 'xjc_args': PrimitiveField(xjc_args or ()),\n 'extra_args': PrimitiveField(extra_args or ()),\n })\n super(JaxWsLibrary, self).__init__(payload=payload, **kwargs)\n self.add_labels('codegen')\n","repo_name":"ericzundel/mvn2pants","sub_path":"src/python/squarepants/plugins/jax_ws/targets/jax_ws_library.py","file_name":"jax_ws_library.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
+{"seq_id":"1187077067","text":"\n########################################################################################################\nimport FWCore.ParameterSet.Config as cms\n########################################################################################################\n\n#process = cms.Process(\"DemoTauAna\")\n\n###################################################\n# Import skeleton\n###################################################\nfrom PhysicsTools.PatAlgos.patTemplate_cfg import *\n\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\n\nKeepAll = True\n\n\n\n\nrunOnMC = True\nif runOnMC:\n process.GlobalTag.globaltag = 'START53_V23::All'\nelse:\n process.GlobalTag.globaltag = 'SOMETHING_FOR_DATA::All'\n\n########################################################################################################\n# Setup PF2PAT (for now we will not run both PAT and PF2PAT, everything will be PF2PAT)\n########################################################################################################\n\n###################################################\n# tau discriminators must be re-run\n###################################################\nprocess.load(\"RecoTauTag.Configuration.RecoPFTauTag_cff\")\nprocess.load(\"PhysicsTools.PatAlgos.patSequences_cff\")\n\n\n\n\n###################################################\n# setup PF2PAT, empty postfix means\n# only PF2PAT and not both PAT + PF2PAT\n###################################################\nfrom PhysicsTools.PatAlgos.tools.pfTools import *\n\n###################################################\n# use PF isolation\n###################################################\n\nusePFIso(process)\n\n# if the sample does not contain value map of PF candidate \"particleFlow:electrons\", use following line.\n# this appears to be the case for our test sample\nprocess.patElectrons.pfElectronSource = 'particleFlow'\n\n\npostfix = \"\"\njetAlgo = \"AK5\"\nusePF2PAT(process,runPF2PAT=True, jetAlgo=jetAlgo, runOnMC=runOnMC, postfix=postfix)\nswitchToPFJets(process)\n\n# needed for MVA met, but need to be here\nfrom JetMETCorrections.METPUSubtraction.mvaPFMET_leptons_PAT_cfi import *\nprocess.load('JetMETCorrections.Configuration.JetCorrectionProducers_cff')\nprocess.load('JetMETCorrections.METPUSubtraction.mvaPFMET_leptons_cff')\n\n\n##################################################\n# specify settings for met mva\n# https://twiki.cern.ch/twiki/bin/viewauth/CMS/MVAMet\n# recoil corrections should be applied at Ntuple stage\n# in later stages isomuons, isoelectrons, and isotaus\n# should be replaced by our final selected leptons\n###################################################\nprocess.pfMEtMVA = process.pfMEtMVA.clone(srcLeptons = cms.VInputTag(\"isomuons\",\"isoelectrons\",\"isotaus\"),\n useType1 = cms.bool(True)\n )\n\n\n\n\n###################################################\n# rm MC matching if DATA\n###################################################\n\nif not runOnMC:\n removeMCMatchingPF2PAT( process, 'All' )\n\n\n###################################################\n# load the PU JetID sequence\n###################################################\nprocess.load(\"CMGTools.External.pujetidsequence_cff\")\n\nprocess.out.outputCommands +=['keep *_selectedPatJets*_*_*']\nprocess.out.outputCommands +=['keep *_puJetId*_*_*']\nprocess.out.outputCommands +=['keep 
*_puJetMva*_*_*']\n\n###################################################\n# needed for JEC\n###################################################\nprocess.out.outputCommands +=['keep double_kt6PFJets_rho_RECO']\n###################################################\n# Store the Vertex Collection\n# filtering is possible at this\n# stage (currently requiring at least one)\n###################################################\n\nfrom PhysicsTools.PatAlgos.tools.coreTools import *\nfrom PhysicsTools.PatAlgos.tools.trackTools import *\n\nprocess.VertexPresent = cms.EDFilter(\"VertexSelector\",\n src = cms.InputTag(\"offlinePrimaryVertices\"),\n cut = cms.string(\"!isFake && ndof > 4 && abs(z) < 24 && position.Rho < 2\"),\n filter = cms.bool(True)\n )\n\nprocess.out.outputCommands +=['keep *_offlinePrimaryVertices*_*_*']\nprocess.out.outputCommands +=['drop *_offlinePrimaryVerticesWithBS*_*_*']\nprocess.out.outputCommands +=['keep *_generalTracks_*_*']\n\n\n###################################################\n# add info needed for pile-up reweight\n####################################################\nprocess.out.outputCommands +=['keep *_addPileupInfo*_*_*']\n###################################################\n\n###################################################\n# keep beamspot (may be needed for electron ID)\n###################################################\n\nprocess.out.outputCommands +=['keep *_offlineBeamSpot*_*_*']\n\n\n\n###################################################\n# Store the Muons\n###################################################\n\n# https://twiki.cern.ch/twiki/bin/view/Main/SingleTopHiggsBBEventSel\n# setting to 9999 eliminates the default filtering w.r.t the 1st vertex in\n# the vertex src list; there is no guarantee that the 1st vertex is the\n# one that we will end up selecting\n\nprocess.pfMuonsFromVertex.d0Cut = 9999.\nprocess.pfMuonsFromVertex.d0SigCut = 9999.\nprocess.pfMuonsFromVertex.dzCut = 9999.\nprocess.pfMuonsFromVertex.dzSigCut = 9999.\n\nprocess.out.outputCommands +=['keep *_selectedPatMuons*_*_*']\n\n\n\n\n###################################################\n# add in hadronic taus\n# based on\n# https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuidePFTauID#5_3_12_and_higher\n###################################################\n\nprocess.load(\"Configuration.StandardSequences.GeometryPilot2_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\nprocess.load(\"RecoTauTag.Configuration.RecoPFTauTag_cff\")\n\nswitchToPFTauHPS(process)\n\nprocess.out.outputCommands += ['keep *_selectedPatTaus*_*_*']\n\n###################################################\n# store electrons and MVA ID\n###################################################\n\n\nprocess.load('EgammaAnalysis.ElectronTools.electronIdMVAProducer_cfi')\nprocess.mvaIDelec = cms.Sequence( process.mvaTrigV0 + process.mvaNonTrigV0 + process.mvaTrigNoIPV0 )\nprocess.patElectrons.electronIDSources.mvaTrigV0 = cms.InputTag(\"mvaTrigV0\")\nprocess.patElectrons.electronIDSources.mvaNonTrigV0 = cms.InputTag(\"mvaNonTrigV0\")\nprocess.patElectrons.electronIDSources.mvaTrigNoIPV0 = cms.InputTag(\"mvaTrigNoIPV0\")\n\nprocess.patPF2PATSequence.replace( process.patElectrons, process.mvaIDelec * process.patElectrons )\n\nprocess.out.outputCommands +=['keep *_selectedPatElectrons*_*_*']\n\n\n###################################################\n# keep conversion info and gsf electrons just in\n# case they are needed\n###################################################\n\nprocess.patConversions = 
cms.EDProducer(\"PATConversionProducer\",\n electronSource = cms.InputTag(\"gsfElectrons\")\n )\n#process.out.outputCommands +=['keep *_patConversions*_*_*']\n#process.out.outputCommands +=['keep *_conversions*_*_*']\n#process.out.outputCommands +=['keep *_gsfElectrons*_*_*']\n\n\n###################################################\n# MVA MET (this must be before muon and electron sequences, don't\n# understand why at this point)\n###################################################\n\n\n\n\n\n#process.mvamet = cms.Sequence(process.pfMEtMVAsequence*process.patDefaultSequence*process.patPFMetByMVA)\n#process.patPF2PATSequence.replace( process.pfMET, process.mvamet)\n#process.mvametseq = cms.Sequence(process.pfMEtMVAsequence)\n#process.mvametpath = cms.Path(process.mvametseq)\n\n\n\n\n#process.mvametseq = cms.Sequence(process.pfMEtMVAsequence)\n#process.mvametpath = cms.Path(process.mvametseq)\nprocess.patPFMetByMVA = process.patMETs.clone(\n metSource = cms.InputTag('pfMEtMVA'),\n addMuonCorrections = cms.bool(False),\n genMETSource = cms.InputTag('genMetTrue')\n)\n#process.mvamet = cms.Sequence(process.pfMEtMVAsequence*getattr(process,\"patPF2PATSequence\"+postfix)*process.patPFMetByMVA)\nprocess.out.outputCommands +=['keep *_pfMEtMVA*_*_*']\nprocess.out.outputCommands +=['keep *_patPFMetByMVA*_*_*']\n\n\n\n\n\n\n\n# keep CSV info\nprocess.out.outputCommands +=['keep *_combinedSecondaryVertexBJetTagsAOD_*_*']\n\n###################################################\n# apply selection cuts on physics objects\n# to keep that PATtuple to a reasonable kB/event\n###################################################\n\n\nfrom PhysicsTools.PatAlgos.selectionLayer1.jetSelector_cfi import *\nprocess.selectedPatJets = selectedPatJets.clone(src = 'patJets', cut = 'correctedP4(0).pt > 10. && abs(eta)<4.7')\n\nfrom PhysicsTools.PatAlgos.selectionLayer1.tauSelector_cfi import *\nprocess.selectedPatTaus = selectedPatTaus.clone(src = 'patTaus', cut = 'pt >18. 
&& decayMode>-1')\n\n\nfrom PhysicsTools.PatAlgos.selectionLayer1.muonSelector_cfi import *\nprocess.selectedPatMuons = selectedPatMuons.clone(src = 'patMuons', cut = 'pt >3.')\n\n\nfrom PhysicsTools.PatAlgos.selectionLayer1.electronSelector_cfi import *\nprocess.selectedPatElectrons = selectedPatElectrons.clone(src = 'patElectrons', cut = 'et >8.')\n\n###################################################\n# drop some large unused collections\n###################################################\n\nprocess.out.outputCommands +=['drop patPFParticles_selectedPatPFParticles__PAT']\nprocess.out.outputCommands +=['drop recoPFCandidates_selectedPatJets_pfCandidates_PAT']\n\n###################################################\n# require at least two lepton candidates\n# in the event\n###################################################\n\n\n# module to filter on the number of Electrons\nprocess.countSelectedLeptons = cms.EDFilter(\"PATLeptonCountFilter\",\n electronSource = cms.InputTag(\"selectedPatElectrons\"),\n muonSource = cms.InputTag(\"selectedPatMuons\"),\n tauSource = cms.InputTag(\"selectedPatTaus\"),\n countElectrons = cms.bool(True),\n countMuons = cms.bool(True),\n countTaus = cms.bool(True),\n minNumber = cms.uint32(2),\n maxNumber = cms.uint32(999999),\n filter = cms.bool(True)\n)\n\n\n\n\n#from PhysicsTools.PatAlgos.selectionLayer1.electronSelector_cfi import *\n#process.selectedPatElectrons = selectedPatElectrons.clone(src = 'patElectrons', cut = 'et >8.')\n\n##################################################\n# run the MET systematic tool\n##################################################\n\n\n#############################\n# need to create PAT versions\n# of the iso-leptons for use with\n# the met uncertainty tool\n#############################\n\n\n\n#from PhysicsTools.PatAlgos.producersLayer1.electronProducer_cfi import *\n#process.patIsoElec = process.patElectrons.clone(electronSource = cms.InputTag(\"isoelectrons\"))\n#from PhysicsTools.PatAlgos.producersLayer1.muonProducer_cfi import *\n#process.patIsoMuon = process.patMuons.clone(muonSource = cms.InputTag(\"isomuons\"))\n#from PhysicsTools.PatAlgos.producersLayer1.tauProducer_cfi import *\n#process.patIsoTau = process.patTaus.clone(tauSource = cms.InputTag(\"isotaus\")\n# )\n\n\n# apply type I/type I + II PFMEt corrections to pat::MET object\n# and estimate systematic uncertainties on MET\nfrom PhysicsTools.PatUtils.tools.metUncertaintyTools import runMEtUncertainties\nprocess.load(\"PhysicsTools.PatUtils.patPFMETCorrections_cff\")\nimport RecoMET.METProducers.METSigParams_cfi as jetResolutions\n\n\nprocess.smearedUncorrectedJetsForPFMEtByMVA = cms.EDProducer(\"SmearedPFJetProducer\",\n src = cms.InputTag('ak5PFJets'),\n jetCorrLabel = cms.string(\"ak5PFL1FastL2L3\"),\n dRmaxGenJetMatch = cms.string('TMath::Min(0.5, 0.1 + 0.3*TMath::Exp(-0.05*(genJetPt - 10.)))'),\n sigmaMaxGenJetMatch = cms.double(5.),\n inputFileName =cms.FileInPath('PhysicsTools/PatUtils/data/pfJetResolutionMCtoDataCorrLUT.root'),\n lutName = cms.string('pfJetResolutionMCtoDataCorrLUT'),\n jetResolutions = jetResolutions.METSignificance_params,\n skipRawJetPtThreshold = cms.double(10.), # GeV\n skipCorrJetPtThreshold = cms.double(1.e-2),\n srcGenJets = cms.InputTag('ak5GenJetsNoNu')\n )\n\n\n\n\nrunMEtUncertainties(process,\n electronCollection = cms.InputTag('selectedPatElectrons'),\n photonCollection = '',\n muonCollection = cms.InputTag('selectedPatMuons'),\n tauCollection = cms.InputTag('selectedPatTaus'),\n jetCollection = cms.InputTag('patJets'),\n 
jetCorrLabel = \"L3Absolute\",\n doSmearJets = True,\n makeType1corrPFMEt = True,\n makeType1p2corrPFMEt = True,\n makePFMEtByMVA = True,\n makeNoPileUpPFMEt = False,\n doApplyType0corr = False\n\n\n )\n\n\n\n\n##################################################\n# Let it run\n###################################################\nprocess.p = cms.Path( process.VertexPresent+\n process.pfMEtMVAsequence*\n getattr(process,\"patPF2PATSequence\"+postfix)*\n# process.smearedUncorrectedJetsForPFMEtByMVA+\n process.patPFMetByMVA+\n #getattr(process,\"patPF2PATSequence\"+postfix)+\n process.recoTauClassicHPSSequence+\n process.puJetIdSqeuence+\n process.countSelectedLeptons\n# +process.patIsoElec\n# +process.patIsoMuon\n# +process.patIsoTau\n# +process.metUncertaintySequence\n #process.PFTau\n #process.SelectMuonEvents\n )\n\n\n\n##################################################\n\n\n\nif not postfix == \"\":\n process.p += process.recoTauClassicHPSSequence # re-run tau discriminators (new version)\n process.p += process.patDefaultSequence\n\n###################################################\n# require all paths to pass\n###################################################\n\nprocess.out.SelectEvents.SelectEvents = ['p']\n\n########################################################################################################\n\n\n\n\n\n\nif KeepAll:\n process.out.outputCommands +=['keep *_*_*_*']\n\n#process.out.fileName = 'patTuple_testing.root'\nprocess.out.fileName = '/uscms/home/shalhout/no_backup/patTuple_testing.root'\nprocess.source.fileNames=['root://cmsxrootd-site.fnal.gov//store/mc/Summer12_DR53X/'+\n 'GluGluToHToTauTau_M-125_8TeV-powheg-pythia6/AODSIM/'+\n 'PU_S10_START53_V7A-v1/0000/00E903E2-9FE9-E111-8B1E-003048FF86CA.root']\n\nprocess.maxEvents.input = 4\n########################################################################################################\n","repo_name":"sshalhou/DemoTauCode","sub_path":"UserCode/PAT_resolveConflicts.py","file_name":"PAT_resolveConflicts.py","file_ext":"py","file_size_in_byte":14935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4710100071","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n\nyears = list(range(1985, 2011))\n\nst.title('Gender Pay Gap')\nst.subheader('Data Analytics Project')\n\nst.cache_data()\ndef load_dataset():\n df=pd.read_csv(\"cleaned_gpg_v2.csv\", usecols=['year','region','relate','sex','race','marst','occ','ind','classwkr',\n 'hrswork','incwage','annhrs','hrwage','inflate','expendbase10','perconexp',\n 'potexp','potexp2','o_occ1990','o_occ1950','o_ind1950','o_ind1990'] )\n df.set_index('year',inplace=True)\n return df\n\nwith st.spinner('Loading data...'):\n df = load_dataset()\n\n\nyears = df.index.unique().tolist()\nselectyear = st.sidebar.selectbox('Select a year', years)\nst.info(f'You selected {selectyear}')\n\nst.write(df.shape)\nr = df.race.unique().tolist()\ns = df.sex.unique().tolist()\noccupation = df.occ.unique().tolist()\nindustry = df.ind.unique().tolist()\n\nrace = st.sidebar.selectbox('select a race', r )\nsex = st.sidebar.selectbox('select a sex', s )\noccup = st.sidebar.selectbox('select a occupation', occupation )\nindus = st.sidebar.selectbox('select a industry', industry )\n\nif st.sidebar.checkbox('Show raw data'):\n st.dataframe(df[(df['race']== race) & (df['sex'] == sex)][:1000])\n st.dataframe(df[(df['occ']== occup) & (df['ind'] == indus)][:1000])\n\ndf_year = df[df.index == selectyear]\n\nfig1 = px.area(x=df.index, y=df['incwage'], title=f'INCOME WAGE')\nfig2 = px.scatter( x=df.index, y=df['occ'], title=f'OCCUPATION')\nfig3 = px.bar(x=df.index, y=df['ind'], title=f'INDUSTRY')\nfig4 = px.box(x=df.index, y=df['hrswork'], title=f'HOURS WORKED')\nif st.checkbox('Show income wage'):\n st.plotly_chart(fig1, use_container_width=True)\nif st.checkbox('Show occupation'):\n st.plotly_chart(fig2, use_container_width=True)\nif st.checkbox('Show industry'):\n st.plotly_chart(fig3, use_container_width=True)\nif st.checkbox('Show hours worked'):\n st.plotly_chart(fig4, use_container_width=True)\n\nfig5 = px.scatter(df, x=\"incwage\", y=\"hrswork\", color=\"sex\", marginal_y=\"violin\", title=f'INCOME WAGE VS HOURS WORKED')\nif st.checkbox('Show income wage vs hours worked'):\n st.plotly_chart(fig5, use_container_width=True)\n\nif st.checkbox('Show group analysis'):\n fig7 = px.sunburst(df, path=['classwkr','sex'], values='incwage', title=f'CLASS OF WORKERS AND THEIR INCOME WAGE')\n st.plotly_chart(fig7, use_container_width=True)\n fig11 = px.sunburst(df,path=['marst','sex'],values='annhrs',title=f'MARITAL STATUS AND NO.OF HOURS WORKED')\n st.plotly_chart(fig11, use_container_width=True)\n fig14 = px.treemap(df,names=[''])\nfig8 = px.bar(df, x=\"incwage\", y=\"expendbase10\", color=\"sex\", title=f'INCOME WAGE VS EXPENDITURE')\nif st.checkbox('Show income wage vs expenditure'):\n st.plotly_chart(fig8, use_container_width=True)\nfig9 = px.histogram(df,x=\"o_occ1990\",y=\"sex\",title=f'OCCUPATION IN 1990')\nfig10 = px.histogram(df,x='o_occ1950',y='sex',title=f'OCCUPATION IN 1950')\nif st.checkbox('Show difference in occupation'):\n st.plotly_chart(fig9, use_container_width=True)\n st.plotly_chart(fig10, use_container_width=True)\nfig12 = px.violin(df,x='hrswork',y='incwage',title=f'INCOME WAGE VS HOURS WORKED')\nfig13 = px.violin(df,x='annhrs',y='hrwage',title=f'HOURLY WAGE VS NO.OF HOURS WORKED')\nif st.checkbox('Show comparison between income wage and hours wage'):\n st.plotly_chart(fig12, use_container_width=True)\n st.plotly_chart(fig13, 
use_container_width=True)\n","repo_name":"Pranshirastogi/Data-analytics-python-course-","sub_path":"major project/work/dap.py","file_name":"dap.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
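The app above filters and plots raw columns but never aggregates the gap itself; a small sketch of how that could be added with the same pandas/plotly/streamlit stack. The 'Male'/'Female' labels are an assumption about the values stored in df['sex'] and would need to match the CSV:

# Hypothetical extension: mean wage per year and sex, then the gap in percent.
wage = df.groupby([df.index, 'sex'])['incwage'].mean().unstack('sex')
wage['gap_pct'] = 100 * (wage['Male'] - wage['Female']) / wage['Male']  # assumed labels
if st.checkbox('Show pay gap trend'):
    st.plotly_chart(px.line(wage, y='gap_pct', title='PAY GAP (%)'), use_container_width=True)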
+{"seq_id":"5406651356","text":"from django.shortcuts import render\nfrom pymongo import MongoClient\nfrom models import *\n\n# Create your views here.\n\ndef saveRoute():\n client = MongoClient(\"localhost\", 27017)\n db = client.scrapping\n cursor = db.route_data.find()\n for each in cursor:\n r = RouteData(\n depTimeString=str(each.get('depTimeString')),\n maxUpperColumns=str(each.get('maxUpperColumns')),\n fromCity=str(each.get('FromCity')),\n maxLowerColumns=str(each.get('maxLowerColumns')),\n maxLowerRows=str(each.get('maxLowerRows')),\n DPInformationList=each.get('DPInformationList'),\n toCity=str(each.get('ToCity')),\n maxUpperRows=str(each.get('maxUpperRows')),\n vehicleType=str(each.get('vehicleType')),\n BPInformationList=each.get('BPInformationList'),\n travelDate=str(each.get('travelDate')),\n busType=str(each.get('busType')),\n MPax=str(each.get('MPax')),\n serviceName=str(each.get('serviceName')),\n seatList=str(each.get('seatlist')),\n toCityId=str(each.get('ToCityId')),\n operatorId=str(each.get('operatorId')),\n amenities=str(each.get('amenties')),\n notes=str(each.get('Notes')),\n dateOFJourney=str(each.get('DateOfJourney')),\n routeId=str(each.get('RouteId')),\n travels=str(each.get('Travels')),\n arrTime=str(each.get('arrTime')),\n arrTimeString=str(each.get('arrTimeString')),\n serviceNumber=str(each.get('serviceNo')),\n aes=str(each.get('aes')),\n mxSPrTxn=str(each.get('mxSPrTxn')),\n depTime=str(each.get('depTime')),\n isBPMapLinkShown=str(each.get('isBPMapLinkShown')),\n fromCityId=str(each.get('FromCityId')),\n param42=each.get('param42')\n )\n r.save()\n # break\n\n\ndef saveTrip():\n client = MongoClient(\"localhost\", 27017)\n db = client.scrapping\n cursor = db.trip.find()\n\n for each in cursor:\n data = each.get('data')\n t = TripData(\n status=str(each.get('status')),\n defaultSorting=each.get('DefaultSorting'),\n amenitiesData=str(each.get('amenitiesData')),\n message=str(each.get('message'))\n )\n t.save()\n try:\n for singleData in data:\n try:\n tsd = TripSingleData(\n tripData=t,\n DPList=singleData.get('DPLst'),\n vt=str(singleData.get('vt')),\n busType=str(singleData.get('BsTp')),\n Tips=str(singleData.get('Tips')),\n BsSvid=str(singleData.get('BsSvId')),\n Sort=str(singleData.get('Sort')),\n IsDPA=str(singleData.get('IsDPA')),\n NSA=str(singleData.get('NSA')),\n params42=singleData.get('param42'),\n serviceName=str(singleData.get('serviceName')),\n giry=str(singleData.get('Glry')),\n RbPrefCode=str(singleData.get('RbPrefCode')),\n WnSt=str(singleData.get('WnSt')),\n DpTm=str(singleData.get('DpTm')),\n IsAC=str(singleData.get('IsAc')),\n IsNAc=str(singleData.get('IsNAc')),\n RtId=str(singleData.get('RtId')),\n IsSpF=str(singleData.get('IsSpF')),\n IsSlpr=str(singleData.get('IsSlpr')),\n serviceId=str(singleData.get('serviceId')),\n FareList=singleData.get('FrLst'),\n Ament=singleData.get('Ament'),\n OpId=str(singleData.get('OpId')),\n BPList=singleData.get('BPLst'),\n IsMTE=str(singleData.get('IsMTE')),\n Rtg=singleData.get('Rtg'),\n IsBpDpSearch=str(singleData.get('IsBpDpSearch')),\n jDur=str(singleData.get('jDur')),\n isStr=str(singleData.get('IsStr')),\n Tvs=str(singleData.get('Tvs')),\n Cmpg=singleData.get('Cmpg'),\n BsSt=str(singleData.get('BsSt')),\n ArTm=str(singleData.get('ArTm'))\n )\n tsd.save()\n except:\n pass\n except:\n pass\n # break\n # 
break\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ankittube/dbtransfer","sub_path":"transferdb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
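Since saveRoute and saveTrip are plain functions with no URL wiring shown, a plausible way to run them is from a Django shell; this assumes MongoDB is listening on localhost:27017 and the Django models have been migrated:

# Hypothetical invocation from `python manage.py shell`:
#   >>> from transferdb.views import saveRoute, saveTrip
#   >>> saveRoute()   # copies each route_data document into a RouteData row
#   >>> saveTrip()    # copies trip documents into TripData/TripSingleData rows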
+{"seq_id":"23432311891","text":"def toldNaomi(chosenNaomi, ken):\r\n \r\n toldNaomi = [x for x in ken if x > chosenNaomi]\r\n if(chosenNaomi > min(ken)):\r\n toldNaomi = max(ken) + 0.000001\r\n else:\r\n toldNaomi = max(ken) - 0.000001\r\n\r\n return toldNaomi\r\n\r\nwith open(\"D-large.in\",\"r\") as fp:\r\n with open(\"ouput.out\",\"w\") as out:\r\n cases = fp.readline()\r\n \r\n for t in range(0,int(cases)):\r\n \r\n N = fp.readline()\r\n \r\n naomi = [float(i) for i in fp.readline().split(\" \")]\r\n ken = [float(i) for i in fp.readline().split(\" \")]\r\n kenDeceitful = list(ken) \r\n \r\n pointsNaomi_DeceitfulWar = 0\r\n pointsNaomi_War = 0\r\n\r\n for n in range(0, int (N)):\r\n chosenNaomi = min(naomi)\r\n fakeChosenNaomi = toldNaomi(chosenNaomi, kenDeceitful)\r\n index = naomi.index(chosenNaomi)\r\n del naomi[index]\r\n \r\n possible = [x for x in ken if x > chosenNaomi]\r\n possibleDeceitful = [x for x in kenDeceitful if x > fakeChosenNaomi]\r\n \r\n if(len(possible) > 0):\r\n chosenKen = min(possible)\r\n else:\r\n chosenKen = min(ken)\r\n \r\n index = ken.index(chosenKen)\r\n del ken[index]\r\n \r\n if(len(possibleDeceitful) > 0):\r\n chosenKenDeceitful = min(possibleDeceitful)\r\n else:\r\n chosenKenDeceitful = min(kenDeceitful)\r\n \r\n index = kenDeceitful.index(chosenKenDeceitful)\r\n del kenDeceitful[index]\r\n \r\n if(chosenNaomi > chosenKenDeceitful):\r\n pointsNaomi_DeceitfulWar = pointsNaomi_DeceitfulWar + 1;\r\n \r\n if(chosenNaomi > chosenKen):\r\n pointsNaomi_War = pointsNaomi_War + 1;\r\n \r\n #print(pointsNaomi_DeceitfulWar, pointsNaomi_War)\r\n out.write(\"Case #\" + str(t+1) + \": \" + str(pointsNaomi_DeceitfulWar) + \" \" + str(pointsNaomi_War) + \"\\n\")\r\n \r\n ","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_138/1202.py","file_name":"1202.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8637626498","text":"from glob import glob\nfrom shutil import unpack_archive\n\ntar_gz_files = glob('UKSSDC_spec/*/*.tar.gz')\n\nprint(f'Found {len(tar_gz_files)} tar.gz files')\n\n# Unzip all tar_gz files\nfor f in tar_gz_files:\n if 'ERROR' in f:\n print(f'{f} contains error, not extracting')\n continue\n extract_path = '/'.join(f.split('/')[:-1])\n print(f'Unzipping {f:<80} extract path:{extract_path}')\n unpack_archive(f, extract_path)\n","repo_name":"nx1/anticorr_data","sub_path":"6_extract_xrt_spec.py","file_name":"6_extract_xrt_spec.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29006650813","text":"# NEU CS5001 Project 6 graphicsPlus drawing image\n# Yong Shi/shi.yong@northeastern.edu/NUID 001578845\n\nimport sys\nimport graphicsPlus as gr\n\n# read in an image and display in a window\ndef main( argv ):\n\n if len(argv) < 2:\n print(\"usage: python3 image.py \")\n return\n\n # read in the image from the filename specified on the command\n filename = argv[1]\n image = gr.Image( gr.Point(0, 0), filename )\n\n # create a window that is the same size as the image\n rows = image.getHeight()\n cols = image.getWidth()\n win = gr.GraphWin( filename, cols, rows )\n\n # move the image so it is centered in the window\n image.move( cols/2, rows/2 )\n\n image.draw(win)\n\n win.getMouse()\n win.close()\n\nif __name__ == \"__main__\":\n main(sys.argv)\n\n\n","repo_name":"shiyong5008/Python-project","sub_path":"project 6/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25218891405","text":"# 给定一个整数数组 nums 和一个目标值 target,请你在该数组中找出和为目标值的那 两个 整数,并返回他们的数组下标。\r\n#\r\n# 你可以假设每种输入只会对应一个答案。但是,你不能重复利用这个数组中同样的元素。\r\n#\r\n# 示例:\r\n#\r\n# 给定 nums = [2, 7, 11, 15], target = 9\r\n#\r\n# 因为 nums[0] + nums[1] = 2 + 7 = 9\r\n# 所以返回 [0, 1]\r\n#\r\n# 来源:力扣(LeetCode)\r\n# 链接:https://leetcode-cn.com/problems/two-sum\r\n# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\r\n\r\nclass Solution:\r\n def twoSum(self, nums: List[int], target: int) -> List[int]:\r\n hashmap={}\r\n for ind,val in enumerate(nums):\r\n hashmap[val]=ind\r\n for ind,val in enumerate(nums):\r\n item=hashmap.get(target-val)\r\n if item is not None and item!=ind:\r\n return [ind,item]\r\n\r\n def twoSum2(self,nums:List[int],target:int)->List[int]:\r\n hashmap={}\r\n for ind,val in enumerate(nums):\r\n if hashmap.get(target-val) is not None:\r\n return [ind, hashmap.get(target-val)]\r\n hashmap[val,ind]","repo_name":"liauraljl/liauraPyProject","sub_path":"leeCode/1.两数之和.py","file_name":"1.两数之和.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"4160611138","text":"import csv\nfrom fileinput import filename\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nfilename = 'Chapter16/csv/Chicago.csv' #assign the csv file to filename\nwith open(filename) as file: #we assign the object of filename to file\n reader = csv.reader(file) #we call csv.reader and pass it the file object as an argument to create a reader oobject\n header_row = next(reader) #Store data from the first row with next function\n print(header_row)\n\n for index, column_header in enumerate(header_row): #Use this for loop/enumerate to find the indeces of date and temp min. (4,6)\n print(index, column_header)\n \n dates, highs, lows = [], [], [] # create a list\n for row in reader: # find the rows in reader\n if row[5] == '': # if the values in it are blank\n blank = (row[5]) #assign them to a variable that doesn't matter\n elif row[6] == '':\n blank = (row[6])\n else:\n low = int(row[6])\n lows.append(low)\n high = int(row[5]) #change the string to an int if its not blank and assign to variable high\n highs.append(high) #add the aformentioned variable to the list\n \n for row in reader:\n current_date = datetime.strptime(row[4], '%Y-%m-%d')\n dates.append(current_date)\n for i in dates:\n print(i) # I cannot figure out why this doesnt work sadly.\n print(highs) #print them to make sure they work (they do, just numbers now)\n print(lows)\n print(len(highs)) # find the number of values in the list (599, or roughly )\n\n\n\n\n #plotting the high temperatures\n plt.style.use('seaborn')\n fig, ax = plt.subplots()\n ax.plot(highs, c='orange')\n ax.plot(lows, c='blue')\n\n #Format plot\n ax.set_title(\"Daily high and low Temperatures, Chicago, 2022\", fontsize = 24)\n ax.set_xlabel('', fontsize =16)\n ax.set_ylabel(\"Temperature (F)\", fontsize = 16)\n ax.tick_params(axis = \"both\", which= \"major\", labelsize=16)\n\n plt.show()\n\n\n","repo_name":"RiggityRussell/CIT228","sub_path":"Chapter16/chicago_csv.py","file_name":"chicago_csv.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72939259073","text":"import os\n\n# ============= SETTINGS FOR BASE APPLICATION TO RUN ============\nBASE_DIR = os.path.dirname(os.path.abspath(__name__))\nLOG_BASE_PATH = os.path.join(BASE_DIR, 'logs')\nSTATIC_DIR_PATH = os.path.join(BASE_DIR, 'static')\nSQLLITE_DB_PATH = os.path.join(STATIC_DIR_PATH, 'users_sqllite_database.sqlite')\n# ===============================================================\n\n# ============= SETTINGS FOR HASH KEYS AND ACTIVITY ============\nSECRET_KEY = 'eyJhbGciOiJIUzI1NiJ9.ew0KImVtYWlsIiA6ICJtZWduYS5hbGFtcGFsbGlAdGFsZW50aW5jLmNvbSINCn0'\nALGORITHM = \"HS256\"\nACCESS_TOKEN_EXPIRE_MINUTES = 60\n# ===============================================================\n\n# ============= SETTINGS FOR DB NEVER CHANGING RUN ============\nRIO_DRIVER = \"postgresql+psycopg2\"\nRIO_PORT = 5432\n# ===============================================================\n","repo_name":"LaxminarayanaV7416/fastAPIApplication","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28928388087","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport uuid\nimport random\nimport subprocess\n\nfrom argparse import ArgumentParser\n\n\ndef list_files(path_to_annotations, file_extension):\n \"\"\"Get list of files in a given directory\"\"\"\n file_list = []\n for file in os.listdir(path_to_annotations):\n if file.endswith('.' + file_extension):\n file_list.append(file)\n return file_list\n\n\ndef pick_random_images(background_dir):\n \"\"\"Return paths to randomly chosen fore/background images\"\"\"\n background_list = list_files(background_dir, 'jpg')\n background_image = os.path.join(background_dir, random.choice(background_list))\n\n return background_image\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--input_dir', '-i', type=str, help='directory of foreground images')\n parser.add_argument('--background_dir', '-b', type=str, help='directory of background images')\n parser.add_argument('--out_dir', '-o', type=str, help='output directory')\n args = parser.parse_args()\n\n sequence_names = []\n for item in os.listdir(args.input_dir):\n if os.path.isdir(item):\n sequence_names.append(item)\n\n for sequence in sequence_names:\n input = os.path.join(args.input_dir, sequence)\n background = pick_random_images(args.background_dir)\n output = os.path.join(args.out_dir, sequence)\n\n cmd = ['python', 'composite_video.py',\n '--input', input,\n '--background', background,\n '--output', output]\n\n subprocess.check_call(cmd)\n","repo_name":"atomicguy/simulants","sub_path":"simulants/legacy/batch_comp_videos.py","file_name":"batch_comp_videos.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"40037380553","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport logging\nimport telegram\nfrom mensa import fetch_all_menus, overwrite_current_menus\nfrom time import sleep\nimport sys\nfrom datetime import datetime\nfrom config import Config\nfrom utils import format_menus\nimport asyncio\n\n\nasync def send_menus(bot, config):\n \"\"\"Run the bot.\"\"\"\n date = datetime.today()\n overwrite_current_menus(config)\n mensa_menus = fetch_all_menus(config, date)\n users_mensas = config.get_database().get_all_user_and_mensas()\n print(\"Sending menus in %d messages\" % (len(users_mensas)))\n for cid, mensa in users_mensas:\n menus = mensa_menus[mensa]\n if not menus:\n continue\n await send_message(bot, cid, format_menus(mensa, menus, date))\n\n\nasync def send_message_to_all(bot, users, msg):\n print(\"Sending message to all %d users\" % len(users))\n for cid in users:\n await send_message(bot, cid, msg)\n\n\nasync def send_message(bot, chat_id, message):\n try:\n await bot.send_message(chat_id=chat_id, text=message,\n parse_mode='HTML')\n except Exception as ex:\n print(\"Could not send message to\", chat_id, str(ex))\n sleep(0.05) # avoiding flood limits\n\n\nasync def main():\n if len(sys.argv) == 1:\n print(f\"Usage: python3 {__file__} [message to all]\")\n sys.exit()\n config = Config(sys.argv[1])\n bot = telegram.Bot(config.get_token())\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - '\n '%(message)s')\n async with bot:\n if len(sys.argv) > 2:\n await send_message_to_all(bot, config.get_database().get_users(),\n \" \".join(sys.argv[2:]))\n else:\n await send_menus(bot, config)\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"dnrhead/mensa_bot","sub_path":"Bot/send_messages.py","file_name":"send_messages.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29223279208","text":"import random\nfrom time import sleep, time\nimport starkbank\nfrom datetime import datetime, timedelta\nfrom src.authentication import user\n\nstarkbank.user = user\n\n\ninvoices = starkbank.invoice.create([\n starkbank.Invoice(\n amount=248,\n descriptions=[{'key': 'Arya', 'value': 'Not today'}],\n discounts=[{'percentage': 10, 'due': datetime.now()+timedelta(days=10)}],\n due=datetime.now()+timedelta(days=10),\n expiration=123456789,\n fine=2.5,\n interest=1.3,\n name=\"Arya Stark\",\n tags=['New sword', 'Invoice #1234'],\n tax_id=\"29.176.331/0001-69\"\n )\n])\n\nbreakpoint()","repo_name":"RodrigoNavarroNogueira/apisdk","sub_path":"src/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72627901314","text":"import Utils\n\n\ndef run():\n option = input(\"Enter a or b \").strip()\n if option == \"a\":\n a()\n elif option == \"b\":\n b()\n else:\n print(\"Invalid option\")\n\n\ndef a():\n number = int(input(\"Enter the amount of columns for Pascals triangle \"))\n column1 = [1]\n column2 = [1, 1]\n\n if number < 1:\n print(\"Invalid option\")\n return\n\n print(column1)\n if number == 1:\n return\n\n print(column2)\n if number == 2:\n return\n\n number -= 2\n\n while number > 0:\n column1 = column2\n column2 = [1]\n\n for index in range(len(column1) - 1):\n column2.append(column1[index] + column1[index + 1])\n\n column2.append(1)\n print(column2)\n\n number -= 1\n\n\ndef b():\n lst = [1, 3, 4, 5, 6, 1, 23, 6, 57, 5, 43, -4, 2, 6, 7, 8, 9, 10, 11, 12, 13, 17, 19, 23]\n temp = []\n result = []\n\n for i in lst:\n if Utils.isPrime(i):\n temp.append(i)\n else:\n if len(temp) > len(result):\n result = temp\n temp = []\n\n if len(temp) > len(result):\n result = temp\n\n print(result)\n","repo_name":"EmeraldGames3/Python","sub_path":"L1/exercises/Three.py","file_name":"Three.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15858724367","text":"import pygame, sys, random\r\nimport numpy as np\r\n\r\nclass Main:\r\n def __init__(self, fps=60, screen_resolution=()):\r\n self.fps = fps\r\n pygame.init()\r\n self.screen = pygame.display.set_mode((1080,720))\r\n self.clock = pygame.time.Clock()\r\n self.display_width, self.display_height = pygame.display.Info().current_w, pygame.display.Info().current_h\r\n self.bool_pos = 0\r\n self.x = 5\r\n self.conter = 0\r\n #0 - Левое Крыло; 2 - Правое крыло; 1 - Основа; 3 - Заднее Левое Крыло; 4 - Заднее Правое Крыло\r\n self.list_0_coordinates = [[215, 100], [255, 100],\r\n [365, 307], [265, 310]]\r\n self.list_2_coordinates = [[215, 560], [255, 560],\r\n [365, 353], [265, 350]]\r\n self.list_3_coordinates = [[0, 250], [20, 250],\r\n [90, 330], [30, 330]]\r\n self.list_4_coordinates = [[0, 410], [20, 410],\r\n [90, 330], [30, 330]]\r\n self.list_1_coordinates = [[505, 330], [495, 345], [480, 360],\r\n [50, 345], [30, 330],\r\n [50, 315], [480, 300],[495, 315]]\r\n self.p_list = [self.list_0_coordinates, self.list_1_coordinates, self.list_2_coordinates, self.list_3_coordinates, self.list_4_coordinates]\r\n #Тут Отрисовка перед циклом\r\n\r\n def run_while(self):\r\n while True:\r\n self.conter += 1\r\n if not self.bool_pos:\r\n self.drawing_in_a_loop()\r\n self.event_handler()\r\n pygame.display.flip()\r\n self.clock.tick(self.fps)\r\n\r\n def event_handler(self):\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN :\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.key == pygame.K_SPACE:\r\n self.bool_pos = 0\r\n if event.type == pygame.MOUSEBUTTONDOWN :\r\n self.pos = event.pos\r\n for p in self.p_list:\r\n if self.point_in_polygon(p, self.pos):\r\n self.fire()\r\n self.bool_pos = not self.bool_pos\r\n break\r\n #print(self.pos)\r\n #if self.pos\r\n\r\n def drawing_in_a_loop(self):\r\n if self.list_4_coordinates[0][0] >= pygame.display.Info().current_w:\r\n self.list_0_coordinates = [[-290, 100], [-250, 100], [-140, 307], [-240, 310]]\r\n self.list_2_coordinates = [[-290, 560], [-250, 560], [-140, 353], [-240, 350]]\r\n self.list_3_coordinates = [[-505, 250], [-485, 250], [-415, 330], [-475, 330]]\r\n self.list_4_coordinates = [[-505, 410], [-485, 410], [-415, 330], [-475, 330]]\r\n self.list_1_coordinates = [[0, 330], [-10, 345], [-25, 360], [-455, 345],\r\n [-475, 330], [-455, 315], [-25, 300], [-10, 315]]\r\n self.p_list = [self.list_0_coordinates, self.list_1_coordinates, self.list_2_coordinates, self.list_3_coordinates, self.list_4_coordinates]\r\n\r\n self.screen.fill((125,249,255))\r\n for i in range(len(self.list_0_coordinates)):\r\n self.list_0_coordinates[i][0] += self.x\r\n for i in range(len(self.list_1_coordinates)):\r\n self.list_1_coordinates[i][0] += self.x\r\n for i in range(len(self.list_2_coordinates)):\r\n self.list_2_coordinates[i][0] += self.x\r\n for i in range(len(self.list_3_coordinates)):\r\n self.list_3_coordinates[i][0] += self.x\r\n for i in range(len(self.list_4_coordinates)):\r\n self.list_4_coordinates[i][0] += self.x\r\n\r\n pygame.draw.lines(self.screen, \"black\", True, self.list_0_coordinates, 5)\r\n pygame.draw.lines(self.screen, \"black\", True, self.list_2_coordinates, 5)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_0_coordinates)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_2_coordinates)\r\n pygame.draw.polygon(self.screen, (181,184,187), 
self.list_3_coordinates)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_4_coordinates)\r\n pygame.draw.aalines(self.screen, \"black\", True, self.list_3_coordinates, 5)\r\n pygame.draw.aalines(self.screen, \"black\", True, self.list_4_coordinates, 5)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_1_coordinates)\r\n pygame.draw.aalines(self.screen, \"black\", True, self.list_1_coordinates, 5)\r\n\r\n def point_in_polygon(self, p, point):\r\n result = False\r\n size = len(p)\r\n j = size - 1\r\n for i in range(size):\r\n if (p[i][1] < point[1] and p[j][1] >= point[1] or p[j][1] < point[1]\r\n and p[i][1] >= point[1]) and (p[i][0] + (point[1] - p[i][1]) / (p[j][1] - p[i][1]) * (p[j][0] - p[i][0]) < point[0]):\r\n result = not result\r\n j = i\r\n return result\r\n\r\n def fire(self):\r\n pygame.draw.circle(self.screen, \"red\", self.pos, 3)\r\n pygame.draw.circle(self.screen, \"red\", self.pos, 7, 1)\r\n pygame.draw.circle(self.screen, \"red\", self.pos, 9, 1)\r\n\r\nMain(24).run_while()\r\n","repo_name":"fevzifevziev/Computer_Graphics","sub_path":"lab_5/5.02.py","file_name":"5.02.py","file_ext":"py","file_size_in_byte":5480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74348435395","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport librosa\n\nfrom postprocess_utils import seg_metrics\nfrom utils import extract_features_melspec\n\naudio_filename = \"./samples/seg-test16.wav\"\nfeatures_filename = \"./samples/seg-test_features.npy\"\n# predictions_filename = \"samples/predictions_2018-05-24_17-48.npy\"\n\naudio, sr = librosa.load(audio_filename, sr=16000)\n# predictions = np.load(predictions_filename)\n# features = np.load(features_filename)\nfeatures = extract_features_melspec(audio, sr)\n\nprint(\"AUDIO\", audio.shape)\n# print(\"PREDICTIONS\", predictions.shape)\nprint(\"FEATURES\", features.shape)\n\ntimeseries_length = 100\nhop_length = 25\n\n# preds = deoverlap_predictions(predictions, features, hop_length)\n# norm_preds = defragment_vad(preds)\n\n# reference = [(6.42, 6.85), (13.49, 13.78)]\nreference = [(0, 6.42), (6.42, 13.49), (13.49, 20.43)]\n\n# lium = [(13.55, 13.67)]\nlium = [(0, 13.55), (13.55, 20.43)]\n\nref_plot = [0.1 for _ in range(len(audio))]\nfor r in reference:\n sr = 16000\n (start, end) = librosa.core.time_to_samples(r, sr=sr)\n start = max((0, start))\n end = min((len(audio), end))\n print(\"REF\", start, end)\n ref_plot[start:end] = [0.9 for _ in range(end - start)]\nprint(len(ref_plot))\n\n\nlium_seg = [0 for _ in range(len(audio))]\nfor l in lium:\n sr = 16000\n (start, end) = librosa.core.time_to_samples(l, sr=sr)\n start = max((0, start))\n end = min((len(audio), end))\n print(\"LIUM\", start, end)\n lium_seg[start:end] = [1 for _ in range(end - start)]\nprint(len(lium_seg))\n\nseg_metrics(lium, reference)\n\nfig, (\n (ax1),\n (ax2),\n # (ax3)\n) = plt.subplots(2, 1)\n\nax1.plot(audio)\nax1.set_title('skaņas līkne', fontsize='large')\n\nax2.plot(lium_seg)\nax2.plot(ref_plot)\nax2.set_title('LIUM rezultāti', fontsize='large')\n\n# ax3.plot(norm_preds)\n# ax3.plot(ref_plot)\n# ax3.set_title('normalizēti rezultāti', fontsize='large')\n\nplt.show()\n\n\n\n","repo_name":"dmednis/speaker-segmenter","sub_path":"test_seg.py","file_name":"test_seg.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"41825590262","text":"class Vocab:\n \"\"\"Vocabulary class for mapping between words and ids (integers)\"\"\"\n PAD_TOKEN = '[PAD]'\n UNKNOWN_TOKEN = '[UNK]'\n START_DECODING = '[START]'\n STOP_DECODING = '[STOP]'\n\n def __init__(self, vocab_file, max_size):\n \"\"\"Creates a vocab of up to max_size words, reading from the vocab_file. If max_size is 0, reads the entire vocab file.\n\n Args:\n vocab_file: path to the vocab file, which is assumed to contain \"\" on each line, sorted with most frequent word first. This code doesn't actually use the frequencies, though.\n max_size: integer. The maximum size of the resulting Vocabulary.\"\"\"\n\n self.word2id = {Vocab.UNKNOWN_TOKEN: 0, Vocab.PAD_TOKEN: 1,\n Vocab.START_DECODING: 2, Vocab.STOP_DECODING: 3}\n self.id2word = {0: Vocab.UNKNOWN_TOKEN, 1: Vocab.PAD_TOKEN, 2: Vocab.START_DECODING, 3: Vocab.STOP_DECODING}\n self.count = 4\n\n\n with open(vocab_file, 'r', encoding='utf-8') as f:\n for line in f:\n pieces = line.split()\n if len(pieces) != 2:\n print('Warning : incorrectly formatted line in vocabulary file : %s\\n' % line)\n continue\n\n w = pieces[0]\n if w in [Vocab.UNKNOWN_TOKEN, Vocab.PAD_TOKEN, Vocab.START_DECODING, Vocab.STOP_DECODING]:\n raise Exception(\n ', , [UNK], [PAD], [START] and [STOP] shouldn\\'t be in the vocab file, but %s is' % w)\n\n if w in self.word2id:\n raise Exception('Duplicated word in vocabulary file: %s' % w)\n\n self.word2id[w] = self.count\n self.id2word[self.count] = w\n self.count += 1\n if max_size != 0 and self.count >= max_size:\n print(\"max_size of vocab was specified as %i; we now have %i words. Stopping reading.\" % (\n max_size, self.count))\n break\n\n print(\"Finished constructing vocabulary of %i total words. Last word added: %s\" % (\n self.count, self.id2word[self.count - 1]))\n\n def word_to_id(self, word):\n if word not in self.word2id:\n return self.word2id[Vocab.UNKNOWN_TOKEN]\n return self.word2id[word]\n\n def id_to_word(self, word_id):\n if word_id not in self.id2word:\n raise ValueError('Id not found in vocab: %d' % word_id)\n return self.id2word[word_id]\n\n def size(self):\n return self.count\n\n\n\ndef input_to_ids(article_words, vocab):\n ids = []\n oovs = []\n unk_id = vocab.word_to_id(vocab.UNKNOWN_TOKEN)\n for w in article_words:\n i = vocab.word_to_id(w)\n if i == unk_id: # If w is OOV\n if w not in oovs: # Add to list of OOVs\n oovs.append(w)\n oov_num = oovs.index(w) # This is 0 for the first article OOV, 1 for the second article OOV...\n ids.append(\n vocab.size() + oov_num) # This is e.g. 50000 for the first article OOV, 50001 for the second...\n else:\n ids.append(i)\n return ids, oovs\n\ndef report_to_ids(abstract_words, vocab, article_oovs):\n ids = []\n unk_id = vocab.word_to_id(vocab.UNKNOWN_TOKEN)\n for w in abstract_words:\n i = vocab.word_to_id(w)\n if i == unk_id: # If w is an OOV word\n if w in article_oovs: # If w is an in-article OOV\n vocab_idx = vocab.size() + article_oovs.index(w) # Map to its temporary article OOV number\n ids.append(vocab_idx)\n else: # If w is an out-of-article OOV\n ids.append(unk_id) # Map to the UNK token id\n else:\n ids.append(i)\n return ids\n\ndef output_to_words(id_list, vocab, article_oovs):\n words = []\n for i in id_list:\n try:\n w = vocab.id_to_word(i) # might be [UNK]\n except ValueError as e: # w is OOV\n assert article_oovs is not None, \"Error: model produced a word ID that isn't in the vocabulary. 
This should not happen in baseline (no pointer-generator) mode\"\n article_oov_idx = i - vocab.size()\n try:\n w = article_oovs[article_oov_idx]\n except ValueError as e: # i doesn't correspond to an article oov\n raise ValueError(\n 'Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (\n i, article_oov_idx, len(article_oovs)))\n words.append(w)\n return words\n\n","repo_name":"RyanPeking/Automobile-Q-A","sub_path":"data_process/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
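A worked example of the temporary-id scheme the three functions above implement (the vocab size of 50000 is illustrative, echoing the inline comments):

# Suppose vocab.size() == 50000 and 'zyzzyva' is not in the vocab:
#   input_to_ids(['the', 'zyzzyva', 'ate', 'zyzzyva'], vocab)
#     -> ids  = [id_of('the'), 50000, id_of('ate'), 50000]   # one temp id per distinct OOV
#     -> oovs = ['zyzzyva']
#   output_to_words([50000], vocab, article_oovs=['zyzzyva']) -> ['zyzzyva']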
+{"seq_id":"23559852861","text":"# input() reads a string with a line of input, stripping the '\\n' (newline) at the end.\n# This is all you need for most Google Code Jam problems.\nt = int(input()) # read a line with a single integer\nfor j in range(1, t + 1):\n [n] = [int(s) for s in input().split(\" \")] # read a list of integers, 2 in this case\n listN = list(str(n))\n checkAgain = True\n makeNines = False\n startNineIndex = 0\n while checkAgain:\n checkAgain = False\n for i in range(len(listN) - 1):\n if listN[i] > listN[i+1]:\n #print(\"changing {} with {}\".format(i, i+1))\n if i == 0 and listN[i] == 1:\n listN[i] = ''\n else:\n listN[i] = chr(ord(listN[i]) - 1)\n makeNines = True\n checkAgain = True\n startNineIndex = i+1\n break\n if makeNines:\n for i in range(startNineIndex, len(listN)):\n listN[i] = '9'\n print(\"Case #{}: {}\".format(j, int(\"\".join(listN))))\n # check out .format's specification for more formatting options\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3860.py","file_name":"3860.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35825932627","text":"from collections import OrderedDict\n\n\nfrom bitcoin.core import str_money_value, b2lx, b2x, x\nfrom bitcoin.wallet import CBitcoinAddress, CBitcoinAddressError\nfrom bitcoin.rpc import unhexlify, hexlify\nfrom bitcoin.core import COutPoint\n\nfrom .exceptions import ChainError, BacktrackError\n\nCOINBASE_TX = b'\\x00'*32\n\ndef bitcoin_to_string(value):\n \"\"\"Convert bitcoin value to a string\"\"\"\n #TODO: Append zeroes up to standard length\n bitcoin_str = str_money_value(abs(value))\n if value < 0:\n return '- '+bitcoin_str\n else:\n return bitcoin_str\n\n\n\nclass TxOut(object):\n \"\"\"Transaction ouput\"\"\"\n __slots__ = ('tx', 'nout', 'addr', 'value')\n\n def __init__(self, tx, nout, addr=None, value=0):\n \"\"\"\n Arguments:\n tx (string): Transaction hash\n nout (int): Transaction output number\n addr (string):\n value (int): Output value\n \"\"\"\n self.tx = tx\n self.nout = nout\n self.addr = addr\n self.value = value\n\n @staticmethod\n def addr_from_script(script):\n \"\"\"Generate output addres from scriptPubKey\"\"\"\n try:\n addr = str(CBitcoinAddress.from_scriptPubKey(script))\n except CBitcoinAddressError:\n addr = None\n \n return addr\n\n @classmethod\n def from_tx(cls, tx, nout):\n \"\"\"\n WARNING: This is not efficient to process all the transaction outputs\n because of GetTxid() does not cache the result.\n\n Arguments:\n tx (bitcoin.CTransaction): Transaction\n nout (int): Output number\n\n Returns:\n Inialized TxOut\n\n Exceptions:\n CBitcoinAddressError: Couldn't convert transaction output scriptPubKey \n to address\n IndexError: The requested output doesn't exist\n \"\"\"\n # GetTxid instead of GetHash for segwit support (bip-0141)\n txhash = tx.GetTxid()\n cout = tx.vout[nout]\n addr = TxOut.addr_from_script(cout.scriptPubKey)\n return cls(txhash, nout, addr, value=cout.nValue)\n\n def __hash__(self):\n return hash((self.tx, self.nout))\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return False\n return self.tx == other.tx and self.nout == other.nout\n \n def __repr__(self):\n return \"TxOut({}, {}, {}, {})\".format(\n self.tx, \n self.nout, \n self.addr, \n self.value)\n\n def __str__(self): \n return \"TxOut({}, {}, {}, {})\".format(\n b2x(self.tx), \n self.nout, \n self.addr, \n str_money_value(self.value))\n\n\n\nclass Block(object):\n\n __slots__=('block_hash', 'height', 'vin', 'vout')\n\n def __init__(self, block_hash, height, vin=None, vout=None):\n \n self.block_hash = block_hash\n self.height = height\n if not vin:\n vin = []\n if not vout:\n vout = []\n\n self.vin = list(vin)\n self.vout = list(vout)\n\n def __hash__(self):\n return hash(self.block_hash)\n\n def __eq__(self, other):\n if isintance(other, self.__class__):\n return self.block_hash==other.block_hash\n else:\n return False\n\n def __repr__(self):\n return \"{}({},{},{},{})\".format(self.__class__.__name__,\n self.block_hash,\n self.height,\n self.vin,\n self.vout)\n\n def __str__(self):\n return \"{}: {} ({})\".format(self.__class__.__name,\n self.block_hash,\n self.height)\n\n def check_balance(self):\n \"\"\"Check block input value sum is equeal to output value sum\"\"\"\n input_value=0\n output_value=0\n\n for vin in self.vin:\n input_value += vin.value\n\n for vout in self.vout:\n output_value += vout.value\n\n return input_value == output_value\n\n\nclass TxOutCache(object):\n \n def __init__(self, proxy, size=500000):\n \"\"\"\n Arguments:\n size (int): max cache size\n proxy (proxy.BitcoindProxy)\n \"\"\"\n self._proxy 
= proxy\n self._max_size = size\n\n self._txout_cache = OrderedDict()\n\n self._cache_miss = 0\n self._cache_hit = 0\n\n def del_txout(self, txout):\n \"\"\"Remove txout from cache\"\"\"\n self._txout_cache.pop(txout, None)\n \n def add_txout(self, txout):\n \"\"\"Add TxOut to cache\"\"\"\n if len(self._txout_cache)>=self._max_size:\n self._txout_cache.popitem(last=False)\n \n self._txout_cache[txout] = txout\n\n def purge_cache(self):\n \"\"\"Purge complete cache\"\"\"\n self._txout_cache = OrderedDict()\n\n def get_txout(self, txhash, nout):\n \"\"\"\n Get TxOut from cache or if not available query bitcoind_proxy\n \n Arguments:\n txhash (str): Transactions hash\n nout (int): Output number\n \"\"\"\n try:\n txout = self._txout_cache[TxOut(txhash, nout)]\n self._cache_hit += 1\n return txout\n except KeyError:\n pass\n\n self._cache_miss += 1\n\n with self._proxy as proxy: \n try:\n tx = proxy.get_transaction(txhash)\n except ConnectionError:\n raise\n except Exception:\n raise ChainError(\"Unknown Txout {} {}\".format(txhash, nout))\n \n # Manually initialize TxOut so there is no need to generate the transaction\n # hash a second time. (faster than:txout = TxOut.from_tx(rawtx, nout))\n for out, cout in enumerate(tx.vout):\n addr = TxOut.addr_from_script(cout.scriptPubKey)\n self.add_txout(TxOut(txhash, out, addr, value=cout.nValue))\n\n # Now txout must be in cache\n self._cache_hit -= 1 # Fix hit/miss counter\n return self.get_txout(txhash, nout)\n \n\nclass BlockFactory(object):\n\n def __init__(self, proxy, size=1000000):\n \"\"\"\n Arguments:\n size (int): max cache size\n proxy (proxy.BitcoindProxy)\n \"\"\"\n self._proxy = proxy\n self._max_size = size\n \n self._cache = TxOutCache(proxy, size)\n\n def purge_cache(self):\n \"\"\"Completely purge cache\"\"\"\n self._cache.purge_cache()  # TxOutCache exposes purge_cache(), not purge()\n\n def _transaction_inputs(self, tx):\n \"\"\"Generate transaction inputs from source transaction outputs\"\"\" \n inputs = []\n txhash = tx.GetTxid()\n \n for vin in tx.vin:\n txin = vin.prevout\n \n if txin.hash == COINBASE_TX:\n continue\n\n txout = self._cache.get_txout(txin.hash, txin.n)\n if txout is None:\n logger.error(\"Unable to find TxOut {} {}\".format(\n txin.hash, txin.n))\n else:\n inputs.append(txout)\n\n return inputs\n\n def _transaction_outputs(self, tx):\n \"\"\"Generate transaction TxOut\"\"\" \n outputs = []\n\n # GetTxid instead of GetHash for segwit support (bip-0141)\n txhash = tx.GetTxid()\n\n for n, utxo in enumerate(tx.vout): \n \n addr = TxOut.addr_from_script(utxo.scriptPubKey)\n out = TxOut(txhash, n, addr, value=utxo.nValue)\n outputs.append(out)\n\n return outputs\n\n def _block_outputs(self, block):\n \"\"\"Generate the TxOut for all the block outputs\"\"\"\n block_txouts = []\n\n for tx in block.vtx:\n block_txouts.extend(self._transaction_outputs(tx))\n \n return block_txouts\n\n def _block_inputs(self, block):\n \"\"\"Generate the TxOut for all the block inputs\"\"\"\n block_inputs = []\n\n for tx in block.vtx:\n block_inputs.extend(self._transaction_inputs(tx))\n\n return block_inputs\n\n def build_block(self, block, height=None):\n \"\"\"Build Block from bitcoin.CBlock\"\"\"\n blockhash = block.GetHash()\n \n \n outputs = self._block_outputs(block)\n \n # Add outputs to cache, because the outputs from a transaction\n # can be used as inputs for other transactions in the same block\n for txout in outputs:\n if txout.value > 0:\n self._cache.add_txout(txout)\n\n # Generate inputs \n inputs = self._block_inputs(block)\n #TODO: Remove outputs added to cache if input 
generation fails???\n\n\n # With the complete block remove used inputs from cache to save space\n #for txout in inputs:\n # self._cache.del_txout(txout)\n\n block = Block(blockhash, height, inputs, outputs)\n return block\n","repo_name":"secnot/bitcoin-balance","sub_path":"bitbalance/primitives.py","file_name":"primitives.py","file_ext":"py","file_size_in_byte":8913,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
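A small illustration of the design choice in TxOut above: hashing and equality use only (tx, nout), so the cache can be probed with a bare key object. A sketch with a made-up hash:

h = b'\x11' * 32                                         # made-up transaction hash
cache = {TxOut(h, 0, addr='1Example', value=5000): 'hit'}
print(cache[TxOut(h, 0)])                                # -> 'hit'; addr/value are ignored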
+{"seq_id":"8545020554","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.9.1\n# kernelspec:\n# display_name: hcp7t_fv_sleep_env\n# language: python\n# name: hcp7t_fv_sleep_env\n# ---\n\n# # Data Quality Assurance - Part 1\n#\n# This notebook will perform the following steps:\n#\n# 1. Load a list of subjects of interest (i.e., those with at least one resting-state scan at 7T)\n# 2. Load motion estimates and compute Framewise Displacement (saves FD to disk on each run folder)\n# 3. Attempt loading of ET files for each run (and mark those that are defective)\n# 4. Construct a dataframe with the following information per run: correct number of TRs, correct spatial resolution, correct number of volumes, ET available, ET can be loaded\n#\n# A summary of this QA is saved to disk in ${RESOURCES_DIR}/QA_Part1_Results.pkl\n# ***\n\n# +\n# %%time\nfrom utils.basics import get_7t_subjects, load_motion_info\nfrom utils.variables import RUNS, DATA_DIR, ProjectFiles_DF_Path, QA1_Results_DF_Path\nfrom utils.ParseEyeLinkAsc import ParseEyeLinkAsc\n\nimport numpy as np\nimport pandas as pd\nimport os.path as osp\nimport nibabel as nib\n\nVERBOSE=False\n# -\n\n# *** \n# ## 1. Check the Dataframe with information about available files\n\nProjectFiles_DF = pd.read_pickle(ProjectFiles_DF_Path)\nprint('++ INFO: Shape of Project Files_DF is %s' % str(ProjectFiles_DF.shape))\n\nprint('++ INFO: Number of Runs with ET(asc) file available: %d Runs' % (ProjectFiles_DF.shape[0] - ProjectFiles_DF['ET_ASC'].isna().sum()))\nprint('++ INFO: Number of Runs with ET(csv) file available: %d Runs' % (ProjectFiles_DF.shape[0] - ProjectFiles_DF['ET_CSV'].isna().sum()))\n\n# ***\n# ## 2. Load List of Subjects of interest\n\n# Load List of Subjects with at least one resting-state scan\nsbjs = get_7t_subjects()\nprint('++ Number of available subjects: %d' % len(sbjs))\n\n# ***\n# ## 4. Load Motion Information and Compute FrameWise Displacement\n# This will generate a file per run with the traces of framewise displacepment for that particular run\n\n# %%time\n# Load Motion Information for all subjects available and create FD data frame for each run\nmot_df = load_motion_info(sbjs, write_FD=True, fillnan=False, verbose=VERBOSE)\n\nprint('++ INFO: Shape of mot_df is %s' % str(mot_df.shape))\nmot_df.head()\n\n# ***\n# ## 5. Check the Integrity of Eye Tracker Data Files & See if FD is low\n#\n# Unfortunately, not all eye tracking data files can be loaded properly. 
\n#\n# During this initial QA, we will test whether or not a given ET file (e.g., that of one run) can be properly loaded or not\n#\n# In addition we will also store the previously computed Mean and Max Framewise Displacement\n\n# +\n# %%time\n# Create empty DataFrame with the following columns:\n# * Sbj = Subject ID\n# * Run = Run ID\n# * Dir Avail = Does the directory for this run exist on our system?\n# * Mot Avail = Is the motion file for this run available on our system?\n# * ET Avail = Are both ET files for this run available on our system?\n# * ET_OK = Are we able to load (e.g., file is uncorrupted) the main ET File\ndf = pd.DataFrame(columns=['Sbj','Run','Dir Avail','Mot Avail','ET Avail', 'ET_OK'])\n\n# For all subjects\nfor s,sbj in enumerate(sbjs):\n # For all possible runs\n for run in RUNS:\n # Create the path to this run directory (should it exist)\n drun_path = osp.join(DATA_DIR,str(sbj),run)\n if osp.exists(drun_path):\n # Create the path to the motion file (should it exist)\n mot_path = osp.join(drun_path,'{run}_Movement_Regressors.txt'.format(run=run))\n # Create the paths to the ET files\n et_asc_path = osp.join(drun_path,'{run}_eyetrack.asc'.format(run=run))\n et_csv_path = osp.join(drun_path,'{run}_eyetrack_summary.csv'.format(run=run))\n # Try loading the ET file without causing any type of exception\n if osp.exists(et_asc_path):\n try:\n dfTrial,dfMsg,dfFix,dfSacc,dfBlink,dfSamples = ParseEyeLinkAsc(et_asc_path)\n et_ok = True\n except: # If there was any issue (e.g., an exception), then set et_ok to False\n et_ok = False\n # Update the dataframe with the information about this run\n df = df.append({'Sbj':sbj,\n 'Run':run,\n 'Dir Avail':osp.exists(drun_path),\n 'Mot Avail':osp.exists(mot_path),\n 'ET Avail':osp.exists(et_asc_path ) & osp.exists(et_csv_path),\n 'ET_OK': et_ok}, \n ignore_index=True)\n if VERBOSE:\n print('INFO: Just finished with subject {sbj} run {run}'.format(sbj=sbj, run=run))\n else: \n print('WARNING: Subject {sbj} run {run} does not exist'.format(sbj=sbj, run=run))\ndf = df.infer_objects()\n# -\n\n# ***\n# ## 6. Check the spatial resolution and length of the scans\n\nrun_list = [str(row['Sbj'])+'_'+row['Run'] for r,row in df.iterrows() ]\n\n# %%time\ndf['Spatial Resolution OK'] = None\ndf['Nacq OK'] = None\ndf['TR OK'] = None\nprint('++ INFO: Number of items to iter [%d]' % len(run_list))\nprint(' + ',end='')\nfor i,item in enumerate(run_list):\n sbj,run = item.split('_',1)\n file_path = osp.join(DATA_DIR,sbj,run,run+'_mPP.nii.gz')\n if np.mod(i,50)==0:\n print('%i..' 
% i, end='')\n    if not osp.exists(file_path):\n        df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Spatial Resolution OK')] = False\n        df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Nacq OK')] = False\n        df.loc[((df['Sbj']==sbj) & (df['Run']==run),'TR OK')] = False\n    else:\n        file_img = nib.load(file_path)\n        [dx, dy, dz, tr] = file_img.header.get_zooms()\n\n        # fix: check all three voxel dimensions (the original tested dx twice and never dy)\n        if np.isclose(dx,1.60) & np.isclose(dy,1.60) & np.isclose(dz,1.60):\n            df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Spatial Resolution OK')] = True\n        else:\n            df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Spatial Resolution OK')] = False\n\n        if np.isclose(tr,1.0):\n            df.loc[((df['Sbj']==sbj) & (df['Run']==run),'TR OK')] = True\n        else:\n            df.loc[((df['Sbj']==sbj) & (df['Run']==run),'TR OK')] = False\n\n        if file_img.shape[3] == 900:\n            df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Nacq OK')] = True\n        else:\n            df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Nacq OK')] = False\nprint('')\ndf.head()\n\nprint(\"++ INFO: Number of Runs with directory available: %d\" % df[df['Dir Avail']==True].shape[0])\nprint(\"++ INFO: Number of Runs with ET available: %d\" % df[df['ET Avail']==True].shape[0])\nprint(\"++ INFO: Number of Runs with ET OK: %d\" % df[df['ET_OK']==True].shape[0])\nprint(\"++ INFO: Number of Runs with correct spatial resolution: %d\" % df[df['Spatial Resolution OK']==True].shape[0])\nprint(\"++ INFO: Number of Runs with correct number of acquisitions: %d\" % df[df['Nacq OK']==True].shape[0])\nprint(\"++ INFO: Number of Runs with expected TR: %d\" % df[df['TR OK']==True].shape[0])\nprint(\"++ ===============================================================\")\nprint(\"++ INFO: Number of Runs with all controls OK: %d\" % df[(df['Dir Avail']==True) &\n                                                              (df['ET Avail']==True) &\n                                                              (df['ET_OK']==True) &\n                                                              (df['Spatial Resolution OK']==True) &\n                                                              (df['Nacq OK']==True) &\n                                                              (df['TR OK']==True)].shape[0])\n\n# ***\n# ## Save the summary of this first QA part to disk\n\ndf.to_pickle(QA1_Results_DF_Path)\n\nprint('++ INFO: Number of runs missing ET files = %d RUNS' % (df[df['ET Avail']==False].shape[0]))\nprint('++ INFO: Number of runs with ET files available but unreadable = %d RUNS' % (df[df['ET_OK']==False].shape[0]))\n\n# ***\n#\n# ### Clean up space\n#\n# Scans that will not be used because the ET is not available will be removed from disk\n\ndf = pd.read_pickle(QA1_Results_DF_Path)\n\ndf = df[df['ET Avail']==False]\n\ncommand_file = open('./N01_QA_RemoveScansWithBadET.sh','w+')\nfor r,row in df.iterrows():\n    command_file.write('rm -rf /data/SFIMJGC_HCP7T/HCP7T/{sbj}/{run} \\n'.format(sbj=row['Sbj'],run=row['Run']))\ncommand_file.close()\n","repo_name":"nimh-sfim/hcp7t_fv_sleep","sub_path":"Notebooks/N01_QA.py","file_name":"N01_QA.py","file_ext":"py","file_size_in_byte":8746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41595175631","text":"def bisection(f, a, b, eps=1E-6):\n fa = f(a)\n if fa*f(b) > 0:\n return None, 0\n i = 0\n while b-a > eps:\n i += 1\n m = (b+a)/2\n fm = f(m)\n if fa*fm <= 0:\n b = m\n else:\n a = m\n fa = fm\n return m, i\n\ndef test_bisection():\n f = lambda x: 2*x -3\n x_expected = 1.5\n x, iter = bisection(f, 0, 10, eps=1E-6)\n success = abs(x - x_expected) < eps\n assert success, 'found x=%g != %g'%(x, x_expected)\n\nif __name__ == \"__main__\":\n test_bisection() ","repo_name":"Roasters/Practices","sub_path":"PythonProgramming/Springer/bisection.py","file_name":"bisection.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33563734214","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 20 12:54:50 2019\n\n@author: mpanaggio\n\"\"\"\n\n\nimport learn_kuramoto_files as lk\nimport numpy as np\nimport importlib as imp\nimport pandas as pd\nimport time\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimp.reload(lk)\n\n##############################################################################\n## define model parameters\nnum_osc=10\nmu_freq=0.0 # mean natural frequency\nsigma_freq=0.01 # std natural frequency\np_erdos_renyi=0.9 # probability of connection for erdos renyi\nrandom_seed=-1 # -1 to ignore\ncoupling_function=lambda x: np.sin(x)#+0.1*np.sin(2*(x+0.2)) # Gamma from kuramoto model\n#coupling_function=lambda x: np.sin(x-0.2)+0.1*np.cos(2*x) # Gamma from kuramoto model\n\n##############################################################################\n## define numerical solution parameters\ndt=0.1 # time step for numerical solution\ntmax=1000*dt # maximum time for numerical solution\nnoise_level=0.0 # post solution noise added\ndynamic_noise_level=0.00 # post solution noise added\nnum_repeats=1#10 # number of restarts for numerical solution\nnum_attempts=1#5 # number of times to attempt to learn from data for each network\nnum_networks=1#10 # number of different networks for each parameter value\nmethod='euler' #'rk2','rk4','euler',\nwith_vel=False\n## Note: the loop parameter value will overwrite the value above\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfor network in range(1,num_networks+1):\n## create parameter dictionaries\n system_params={'w': lk.random_natural_frequencies(num_osc,mu=mu_freq,sigma=sigma_freq,seed=random_seed),\n 'A': lk.random_erdos_renyi_network(num_osc,p_value=p_erdos_renyi,seed=random_seed),\n 'K': 1.0,\n 'Gamma': coupling_function,\n 'other': str(parameter),\n #'IC': np.random.rand(num_osc)*np.pi*2, # fixed initial condition for each repeat\n 'IC': {'type': 'reset', # reset (set phase to 0) or random\n 'selection': 'fixed', #fixed or random\n 'num2perturb': 1, # integer used only when selection is random\n 'indices': [0], # list of integers, used only when selection='fixed' \n 'size': 2, # float, used only when type='random'\n 'IC': 0*np.random.rand(num_osc)*np.pi*2} # initical condition for first repeat\n }\n \n solution_params={'dt':dt,\n 'tmax':tmax,\n 'noise': noise_level,\n 'dynamic noise': dynamic_noise_level,\n 'ts_skip': 1, # don't skip timesteps\n 'num_repeats': num_repeats\n }\n \n learning_params={'learning_rate': 0.005,\n 'n_epochs': 300, #400\n 'batch_size':500,#500,\n 'n_oscillators':num_osc,\n 'dt': dt,\n 'n_coefficients': 20,\n 'reg':0.0001,\n 'prediction_method': method,\n 'velocity_fit': with_vel\n }\n t=np.arange(0,tmax,dt)[:-1].reshape(-1,1)\n phases,vel=lk.generate_data_vel(system_params,solution_params)\n n_ts=t.shape[0]\n \n \n figsize=(12,4)\n fontsize=16\n plt.figure(figsize=figsize) \n for rep in range(num_repeats):\n \n cur_t=t+rep*tmax\n cur_phases=phases[rep*n_ts:(rep+1)*n_ts]\n #lk.plot_ode_results(t,phases[rep*n_ts:(rep+1)*n_ts],figsize=(20,5),fontsize=16)\n R,Psi=lk.get_op(cur_phases)\n plt.subplot(1,3,1)\n plt.plot(cur_t,cur_phases)\n plt.title('Phases',fontsize=fontsize)\n plt.xlabel('time',fontsize=fontsize)\n plt.ylabel('phases',fontsize=fontsize)\n plt.subplot(1,3,2)\n plt.plot(cur_t,R,'b')\n plt.title('Order parameter',fontsize=fontsize)\n plt.xlabel('time',fontsize=fontsize)\n plt.ylabel('R(t)=|Z(t)|',fontsize=fontsize)\n plt.ylim(0,1.1)\n plt.subplot(1,3,3)\n plt.plot(cur_t,Psi,'b')\n 
plt.title('Order parameter',fontsize=fontsize)\n        plt.xlabel('time',fontsize=fontsize)\n        plt.ylabel(r'$\\Psi(t)=arg(Z(t))$',fontsize=fontsize)\n        plt.ylim(-np.pi,np.pi)\n        if rep>=1:\n            for subplot in range(1,4):\n                ax=plt.subplot(1,3,subplot)\n                # fix: axvline's ymin/ymax are axes fractions in [0,1], not data coordinates,\n                # so let them default to the full axis height\n                ax.axvline(x=rep*tmax,color='k',linestyle='--')\n    plt.show()\n","repo_name":"mpanaggio/coupled_oscillator_network_model_reconstruction","sub_path":"test_data_generation.py","file_name":"test_data_generation.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
+{"seq_id":"33754780701","text":"import random\n\nimport numpy as np\n\nCOLOR_BLACK = -1\nCOLOR_WHITE = 1\nCOLOR_NONE = 0\n\nrandom.seed(0)\n\n\nclass Idiot(object):\n\n def __init__(self, chessboard_size, color, time_out):\n self.chessboard_size = chessboard_size\n self.color = color\n self.time_out = time_out\n\n self.chessboard = None\n self.candidate_list = []\n self.next_state = {}\n\n def go(self, chessboard):\n self.candidate_list.clear()\n self.next_state.clear()\n self.chessboard = chessboard\n self.next_state = self.get_state(chessboard, self.color)\n self.candidate_list = list(self.next_state.keys())\n if len(self.candidate_list) == 0:\n return self.candidate_list\n self.candidate_list.append(random.choice(self.candidate_list))\n return self.candidate_list\n\n def judge(self, x: int, y: int) -> bool:\n return 0 <= x < self.chessboard_size and 0 <= y < self.chessboard_size\n\n def get_state(self, chessboard, color):\n\n def get_valid_pos(x, y, dx, dy):\n pos_set = []\n including = False\n append = pos_set.append\n while 1:\n x += dx\n y += dy\n if not self.judge(x, y) or chessboard[x][y] == COLOR_NONE:\n break\n elif chessboard[x][y] == color and not including:\n break\n elif chessboard[x][y] == -color:\n append((x, y))\n including = True\n elif chessboard[x][y] == color and including:\n return pos_set\n return None\n\n def test_all_directions(x, y):\n pos_set = []\n directions = [(dx, dy) for dx in range(-1, 2) for dy in range(-1, 2) if dx != 0 or dy != 0]\n for dx, dy in directions:\n subset = get_valid_pos(x, y, dx, dy)\n if subset is not None:\n pos_set += subset\n return (x, y), pos_set\n\n next_state = {}\n indexes = np.where(chessboard == COLOR_NONE)\n indexes = tuple(zip(indexes[0], indexes[1]))\n for idx in indexes:\n idx, reversed_color_set = test_all_directions(idx[0], idx[1])\n if len(reversed_color_set) > 0:\n new_chessboard = chessboard.copy()\n for pos in reversed_color_set:\n new_chessboard[pos[0]][pos[1]] = color\n new_chessboard[idx[0]][idx[1]] = color\n next_state[idx] = new_chessboard\n return next_state","repo_name":"SnowCharmQ/2022-Fall-AI-Project","sub_path":"Reversed-Reversi/idiot.py","file_name":"idiot.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27156283273","text":"import unittest\nfrom src.utilities import utilities\n\nclass utility_tests(unittest.TestCase) :\n utils: utilities = utilities()\n\n # is file included\n def test_is_file_included (self) -> None :\n filename_request: str = \"packet-capture.csv\"\n\n self.assertTrue(\n expr = self.utils.is_file_included(filename_request),\n msg = \"is file included\"\n )\n\n # is file not included\n def test_is_file_not_included (self) -> None :\n\n filename_request: str = \"packet-capture.txt\"\n\n self.assertFalse(\n expr = self.utils.is_file_included(filename_request),\n msg = \"is file not included\"\n )\n","repo_name":"belajarqywok/wyrequin","sub_path":"tests/test_utilities.py","file_name":"test_utilities.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"10164064366","text":"from survey import AnonymousSurvey\n\nquestion = \"What language did you first learn to speak?\"\nlanguage_survey = AnonymousSurvey(question)\n\nlanguage_survey.show_question()\nprint(\"Enter 'q' at any time to quit.\\n\")\n\nwhile True:\n response = input(\"Language: \")\n\n if response == 'q':\n break\n\n language_survey.store_response(response)\n\nprint(\"\\nThank you to our participants!\")\nlanguage.survey.show_results()\n\n","repo_name":"skibidibidop/testing-area","sub_path":"python-practice/python-crash-course-matthes/chapter_11/test_examples/survey/language_survey.py","file_name":"language_survey.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"19365984025","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport os\nimport socket\nimport psana\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom pickle import dump\n\nfrom benchmarking import Event,\\\n set_defaults,\\\n event_here, start, stop, log, event_log\n\n\n\n\n#\n# PSANA2 BENCHMARK, based on CCTBX's XTC_PROCESS pipeline.\n# COMMENT: I've started with cctbx_project/xfel/xtc_process.py and stripped\n# out all the things that I don't think are relevant to this benchmark\n#\n\n\n\n\n@log\ndef get_calib_file_path(env, address, run):\n \"\"\" Findes the path to the SLAC metrology file stored in a psana\n environment object's calibration store\n @param env psana environment object\n @param address address string for a detector\n @param run psana run object or run number\n \"\"\"\n\n from psana import Detector\n\n\n #\n # try to get it from the detector interface\n #\n\n try:\n start(\"load geometry from detector\")\n psana_det = Detector(address, run.env())\n ret = psana_det.pyda.geoaccess(run.run()).path\n stop(\"load geometry from detector\")\n\n return ret\n except Exception as e:\n pass\n\n\n #\n # try to get it from the calib store directly\n #\n\n from psana import ndarray_uint8_1, Source\n\n start(\"load geometry from calib store\")\n cls = env.calibStore()\n src = Source('DetInfo(%s)'%address)\n path_nda = cls.get(ndarray_uint8_1, src, 'geometry-calib')\n stop(\"load geometry from calib store\")\n\n if path_nda is None:\n return None\n return ''.join(map(chr, path_nda))\n\n\n\n@log\ndef env_dxtbx_from_slac_metrology(run, address):\n \"\"\" Loads a dxtbx cspad cbf header only object from the metrology path\n stored in a psana run object's calibration store\n @param env psana run object\n @param address address string for a detector\n \"\"\"\n\n start(\"load geometry data from detector\")\n det = run.Detector(address)\n geometry = det.raw.geometry()\n stop(\"load geometry data from detector\")\n\n if geometry is None:\n metro_path = get_calib_file_path(run.env(), address, run)\n elif geometry.valid:\n metro_path = None\n else:\n raise RuntimeError(f\"Could not read geometry, hostname: {socket.gethostname()}\")\n\n if metro_path is None and geometry is None:\n return None\n\n\n return None\n\n\n\n@log\ndef get_psana_corrected_data(psana_det, evt, use_default=False, dark=True,\n common_mode=None, apply_gain_mask=True,\n gain_mask_value=None, per_pixel_gain=False,\n gain_mask=None, additional_gain_factor=None):\n \"\"\"\n Given a psana Detector object, apply corrections as appropriate and return\n the data from the event\n @param psana_det psana Detector object\n @param evt psana event\n @param use_default If true, apply the default calibration only, using the\n psana algorithms. Otherise, use the corrections specified by the rest of\n the flags and values passed in.\n @param dark Whether to apply the detector dark, bool or numpy array\n @param common_mode Which common mode algorithm to apply. None: apply no\n algorithm. 
Default: use the algorithm specified in the calib folder.\n    Otherwise should be a list as specified by the psana documentation for\n    common mode customization\n    @param apply_gain_mask Whether to apply the common mode gain mask correction\n    @param gain_mask_value Multiplier to apply to the pixels, according to the\n    gain mask\n    @param per_pixel_gain If available, use the per pixel gain deployed to the\n    calibration folder\n    @param gain_mask gain mask showing which pixels to apply gain mask value\n    @param additional_gain_factor Additional gain factor. Pixel counts are\n    divided by this number after all other corrections.\n    @return Numpy array corrected as specified.\n    \"\"\"\n\n    # order is pedestals, then common mode, then gain mask, then per pixel gain\n\n    # HACK: Force psana v2 behaviour\n    PSANA2_VERSION = True\n\n\n    start(\"psana_det.raw\")\n    if PSANA2_VERSION:\n        # in psana2, data are stored as raw, fex, etc so the selection\n        # has to be given here when the detector interface is used.\n        # for now, assumes cctbx uses \"raw\".\n        psana_det = psana_det.raw\n    stop(\"psana_det.raw\")\n\n\n    if use_default:\n        start(\"psana_det.calib\")\n        ret = psana_det.calib(evt)  # applies psana's complex run-dependent calibrations\n        stop(\"psana_det.calib\")\n        return ret\n\n\n    start(\"psana_det.raw_data(evt)\")\n    data = psana_det.raw_data(evt)\n    stop(\"psana_det.raw_data(evt)\")\n    if data is None:\n        return\n\n\n    start(\"subtract psana_det.pedestals()\")\n    data = data.astype(np.float64)\n    if isinstance(dark, bool):\n        if dark:\n            if PSANA2_VERSION:\n                data -= psana_det.pedestals()\n            else:\n                data -= psana_det.pedestals(evt)\n    elif isinstance(dark, np.ndarray):\n        data -= dark\n    stop(\"subtract psana_det.pedestals()\")\n\n\n    if common_mode is not None and common_mode != \"default\":\n        print(\"Applying common mode\")\n\n        start(\"psana_det.common_mode_apply(data, common_mode)\")\n        if common_mode == 'cspad_default':\n            common_mode = (1,25,25,100,1)  # default parameters for CSPAD images\n            psana_det.common_mode_apply(data, common_mode)\n        elif common_mode == 'unbonded':\n            common_mode = (5,0,0,0,0)  # unbonded pixels used for correction\n            psana_det.common_mode_apply(data, common_mode)\n        else: # this is how it was before.. 
Though I think common_mode would need to be a tuple..\n            psana_det.common_mode_apply(data, common_mode)\n        stop(\"psana_det.common_mode_apply(data, common_mode)\")\n    else:\n        print(\"Not applying common mode\")\n\n\n    if apply_gain_mask:\n        print(\"Applying gain mask\")\n\n        start(\"apply gain mask\")\n        if gain_mask is None:  # TODO: consider try/except here\n            gain_mask = psana_det.gain_mask(evt) == 1\n        if gain_mask_value is None:\n            try:\n                gain_mask_value = psana_det._gain_mask_factor\n            except AttributeError:\n                print(\"No gain set for psana detector, using gain value of 1, consider disabling gain in your phil file\")\n                gain_mask_value = 1\n        data[gain_mask] = data[gain_mask]*gain_mask_value\n        stop(\"apply gain mask\")\n    else:\n        print(\"Not applying gain mask\")\n\n\n    if per_pixel_gain:  # TODO: test this\n        start(\"applying psana_det.gain()\")\n        data *= psana_det.gain()\n        stop(\"applying psana_det.gain()\")\n\n\n    if additional_gain_factor is not None:\n        data /= additional_gain_factor\n\n\n    return data\n\n\n\n@log\ndef process_event(run, evt, psana_det):\n    \"\"\"\n    Process a single event from a run\n    @param run psana run object\n    @param evt psana event object\n    @param psana_det psana Detector object\n    \"\"\"\n\n\n    # HACK: Force psana v2 behaviour\n    PSANA2_VERSION = True\n\n    start(\"construct event timestamp\")\n    if PSANA2_VERSION:\n        sec = evt._seconds\n        nsec = evt._nanoseconds\n    else:\n        time = evt.get(psana.EventId).time()\n        fid = evt.get(psana.EventId).fiducials()\n        sec = time[0]\n        nsec = time[1]\n\n    ts = Event.as_timestamp(sec, nsec/1e6)\n    stop(\"construct event timestamp\")\n\n    print(\"Accepted\", ts)\n\n    # HACK: these parameters have been extracted from a xtc_process run\n    data = get_psana_corrected_data(psana_det, evt, use_default=False,\n                                    dark=True, common_mode=None,\n                                    apply_gain_mask=True, gain_mask_value=6.85,\n                                    per_pixel_gain=False,\n                                    additional_gain_factor=None)\n\n\n    if data is None:\n        print(\"ERROR! 
No data\")\n return\n\n\n timestamp = t = ts\n s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]\n print(\"Loaded shot\", s)\n\n \n\n@log\ndef test_xtc_read(ds, comm, det_name):\n\n for run in ds.runs():\n\n start(f\"run.Detector({ds.det_name})\")\n det = run.Detector(ds.det_name)\n stop(f\"run.Detector({ds.det_name})\")\n\n # TODO: fix flex dependency\n # if comm.Get_rank() == 0:\n # PS_CALIB_DIR = os.environ.get('PS_CALIB_DIR')\n # assert PS_CALIB_DIR\n # dials_mask = easy_pickle.load(params.format.cbf.invalid_pixel_mask)\n # else:\n # dials_mask = None\n # dials_mask = comm.bcast(dials_mask, root=0)\n\n start(\"for evt in run.events()\")\n for evt in run.events():\n env_dxtbx_from_slac_metrology(run, det_name)\n\n process_event(run, evt, det)\n stop(\"for evt in run.events()\")\n\n\n\n\nif __name__ == \"__main__\":\n\n # Defaul data\n default_parameters = {\n \"exp\" : \"cxid9114\",\n \"run\" : 1,\n \"dir\" : \"/img/data/xtc_test\",\n \"max_events\" : 0,\n \"det_name\" : \"cspad\"\n }\n\n\n # Input args allowed by psana.DataSource\n psana_args = [\"exp\", \"run\", \"dir\", \"max_events\", \"det_name\", \"batch_size\"]\n\n\n #\n # Parse input arguments\n #\n\n parser = ArgumentParser()\n\n for arg in psana_args:\n parser.add_argument(f\"--{arg}\", help=\"psana.DataSource kwarg\")\n\n parser.add_argument(\"--of\",\n help=\"Log dir -- every rank will write its own log file\")\n\n # Get args dict, and sanitize None types\n args = vars(parser.parse_args())\n\n output_name = args[\"of\"]\n del args[\"of\"] # don't pass this to psana\n\n psana_kwargs = set_defaults(args, default_parameters)\n\n\n\n #\n # Initialize MPI\n #\n\n start(\"INIT MPI\")\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n stop(\"INIT MPI\")\n\n rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed\n\n\n #\n # Run Benchmark\n #\n\n if rank == 0:\n print(\"MPI Initialized, Running xtc_read Benchmark\")\n\n start(f\"psana.DataSource({psana_kwargs})\")\n ds = psana.DataSource(**psana_kwargs)\n stop(f\"psana.DataSource({psana_kwargs})\")\n\n test_xtc_read(ds, comm, psana_kwargs[\"det_name\"])\n\n\n #\n # Save log files\n #\n\n if rank == 0:\n print(\"Writing logs\")\n\n log_path = os.path.join(output_name, f\"debug_{rank}.txt\")\n with open(log_path, \"w\") as f:\n for entry in event_log(cctbx_fmt=True):\n print(entry, file=f)\n","repo_name":"JBlaschke/psana2_benchmarks","sub_path":"opt/benchmark_xtc_read.py","file_name":"benchmark_xtc_read.py","file_ext":"py","file_size_in_byte":10502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18044067045","text":"from Posicao import Posicao\n\nclass Tabuleiro:\n def __init__(self, marcadorJogador1='X', marcadorJogador2='O', espacamentoVertical=0, espacamentoHorizontal=1, imprimirCabeçalhos=True):\n self.__posicoes = [[Posicao(), Posicao(), Posicao()],\n [Posicao(), Posicao(), Posicao()],\n [Posicao(), Posicao(), Posicao()]]\n self.__marcadorJogador1 = marcadorJogador1\n self.__marcadorJogador2 = marcadorJogador2\n self.__jogadorAtual = 1\n self.__espacamentoVertical = espacamentoVertical\n self.__espacamentoHorizontal = espacamentoHorizontal\n self.__imprimirCabeçalhos = imprimirCabeçalhos\n \n def __imprimirLinhaVertical(self):\n for _ in range(self.__espacamentoVertical):\n if self.__imprimirCabeçalhos:\n print(' ', end='')\n print(f'{\" \" * (self.__espacamentoHorizontal * 2 + 1)}|{\" \" * (self.__espacamentoHorizontal * 2 + 1)}|')\n \n def imprimirTabuleiro(self):\n if self.__imprimirCabeçalhos:\n print(f'{\" \" * (self.__espacamentoHorizontal + 2)}1{\" \" * (self.__espacamentoHorizontal * 2 + 1)}2{\" \" * (self.__espacamentoHorizontal * 2 + 1)}3')\n print()\n \n for numeroLinha in range(1, 4):\n self.__imprimirLinhaVertical()\n \n if self.__imprimirCabeçalhos:\n print(f'{numeroLinha} ', end='')\n print(f'{\" \" * self.__espacamentoHorizontal}{self.__posicoes[numeroLinha-1][0]}{\" \" * self.__espacamentoHorizontal}|', end='')\n print(f'{\" \" * self.__espacamentoHorizontal}{self.__posicoes[numeroLinha-1][1]}{\" \" * self.__espacamentoHorizontal}|', end='')\n print(f'{\" \" * self.__espacamentoHorizontal}{self.__posicoes[numeroLinha-1][2]}')\n\n self.__imprimirLinhaVertical()\n\n if numeroLinha != 3:\n if self.__imprimirCabeçalhos:\n print(' ', end='')\n print(f'{\"-\" * (self.__espacamentoHorizontal * 2 + 1)} {\"-\" * (self.__espacamentoHorizontal * 2 + 1)} {\"-\" * (self.__espacamentoHorizontal * 2 + 1)}')\n \n def marcarPosicao(self, indiceLinha, indiceColuna):\n if not type(indiceLinha) is int or not type(indiceColuna) is int:\n raise TypeError('Os indices precisam ser inteiros !')\n \n if indiceLinha > 3 or indiceLinha < 0 or indiceColuna > 3 or indiceColuna < 0:\n raise ValueError('Os indices precisam estar entre 0 e 2 !')\n \n if self.__posicoes[indiceLinha][indiceColuna].marcada:\n raise Exception('A posição selecionada já está marcada !')\n \n if self.__jogadorAtual == 1:\n self.__posicoes[indiceLinha][indiceColuna].marcarPosicao(self.__marcadorJogador1)\n self.__jogadorAtual = 2\n else:\n self.__posicoes[indiceLinha][indiceColuna].marcarPosicao(self.__marcadorJogador2)\n self.__jogadorAtual = 1\n \n @staticmethod\n def __verificarMarcacoesIguais(listaPosicoes):\n marcacao = listaPosicoes[0].marcacao\n return all([posicao.marcada for posicao in listaPosicoes]) and all([posicao.marcacao == marcacao for posicao in listaPosicoes])\n\n @property\n def vitorioso(self):\n for i in range(3):\n # Verificar se ocorreu alguma vitoria na horizontal\n if self.__verificarMarcacoesIguais(self.__posicoes[i]):\n return self.__posicoes[i][0].marcacao\n \n # Verificar se ocorreu alguma vitorina na vertical\n if self.__verificarMarcacoesIguais([self.__posicoes[0][i], self.__posicoes[1][i], self.__posicoes[2][i]]):\n return self.__posicoes[0][i].marcacao\n \n # Verificando vitoria na diagonal principal\n if self.__verificarMarcacoesIguais([self.__posicoes[0][0], self.__posicoes[1][1], self.__posicoes[2][2]]):\n return self.__posicoes[0][0].marcacao\n \n # Verificando vitoria na diagonal secundaria\n if self.__verificarMarcacoesIguais([self.__posicoes[0][2], 
self.__posicoes[1][1], self.__posicoes[2][0]]):\n return self.__posicoes[0][2].marcacao\n \n return False\n\n\n @property\n def empatado(self):\n for linha in self.__posicoes:\n for posicao in linha:\n if not posicao.marcada:\n return False\n return True\n","repo_name":"GregorioFornetti/maritacas-gamestation","sub_path":"jogos/JogoDaVelha/Tabuleiro.py","file_name":"Tabuleiro.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20752848565","text":"from functools import lru_cache\nfrom fastapi import FastAPI, UploadFile, File, Depends, HTTPException\nfrom fastapi import responses\nfrom fastapi.responses import FileResponse, JSONResponse\nfrom typing import List, Optional\nimport pytesseract\nimport pathlib\nfrom os import getcwd\nimport os\nimport io\nimport uuid\nimport shutil\nfrom PIL import Image\nimport sys\nimport logging\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom pydantic import BaseSettings, BaseModel\nfrom random import randint\n\napp = FastAPI()\n\norigins = [\n \"http://localhost:3000\",\n]\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\npytesseract.pytesseract.tesseract_cmd ='C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\n\n\nBASE_DIR = pathlib.Path(__file__).parent\nUPLOAD_DIR = BASE_DIR / \"uploads\"\n\nclass Settings(BaseSettings):\n debug: bool = False\n echo_active: bool = False\n\n class Config:\n env_file = \".env\"\n\nclass PredictionResponse(BaseModel):\n filename: str\n contentype: Optional[str] = None \n likely_class: Optional[str] = None\n\n@lru_cache\ndef get_settings():\n return Settings()\n\nsettings = get_settings()\nDEBUG = settings.debug\n\nprint(DEBUG)\n\n@app.post(\"/upload\", response_class=FileResponse, responses={200: {\"Description\": \"Uploading Images\"}})\nasync def upload_file(file: UploadFile = File(...), settings: Settings=Depends(get_settings)):\n if not settings.echo_active:\n raise HTTPException(detail=\"Invalid endpoint\", status_code=400)\n UPLOAD_DIR.mkdir(exist_ok=True)\n bytes_str = io.BytesIO(await file.read())\n #img = Image.open(bytes_str) #opencv can be used here, also called cv2\n try:\n img = Image.open(bytes_str)\n except:\n raise HTTPException(detail=\"Invalid image\", status_code=400)\n fname = pathlib.Path(file.filename)\n fext = fname.suffix # .jpg, .txt\n dest = UPLOAD_DIR / f\"{file.filename}\"\n # {uuid.uuid1()}{fext}\n with open(str(dest), 'wb') as out:\n out.write(bytes_str.read())\n img.save(dest)\n print(settings.debug)\n return dest\n\n@app.post(\"/predictions\") # http POST\nasync def prediction_view(file:UploadFile = File(...), settings:Settings = Depends(get_settings)):\n \n try:\n contents = await file.read()\n image = Image.open(io.BytesIO(contents)).convert('RGB')\n\n predicted_class = pytesseract.image_to_string(image)\n predictions = [x for x in predicted_class.split(\"\\n\")]\n \n logging.info(f\"Predicted Class: {predictions}\")\n\n # --\n\n bytes_str = io.BytesIO(contents)\n try:\n img = Image.open(bytes_str)\n except:\n raise HTTPException(detail=\"Invalid image\", status_code=400)\n \n try:\n img.save(getcwd() + f\"/images/{file.filename}\")\n except FileExistsError:\n pass\n\n # --\n\n # Save to file\n \n try:\n my_file_location = getcwd() + f\"/images/{file.filename}.txt\"\n my_file = open(my_file_location, \"w\")\n\n print(\"begin write\")\n\n for text in predictions:\n my_file.write(f\"{str(text)}\\n\")\n\n print(\"END WRITE\")\n my_file.close()\n print(\"close success\")\n\n except Exception as e:\n print(\"error\", e)\n \n # ---\n\n\n return {\n \"filename\": file.filename, \n \"contentype\": file.content_type, \n \"likely_class\": predictions,\n \"text_link\": f\"http://127.0.0.1:8000/file/{file.filename}.txt\",\n \"link\": f\"http://127.0.0.1:8000/file/{file.filename}\"\n }\n except Exception as error:\n logging.exception(error)\n e = sys.exc_info()[1]\n raise HTTPException(status_code=500, 
detail=str(e))\n\n\n@app.post(\"/predict/\", response_model=PredictionResponse)\nasync def predict(file: UploadFile = File(...)):\n    # if file.content_type.startswith('/images/') is False:\n    #     raise HTTPException(status_code=400, detail=f'File \\'{file.filename}\\' is not an image.')\n\n    try:\n        contents = await file.read()\n        image = Image.open(io.BytesIO(contents)).convert('RGB')\n\n        predicted_class = pytesseract.image_to_string(image)\n        predictions = [x for x in predicted_class.split(\"\\n\")]\n\n        logging.info(f\"Predicted Class: {predictions}\")\n        return {\n            \"filename\": file.filename,\n            \"contentype\": file.content_type,\n            \"likely_class\": predicted_class,\n        }\n    except Exception as error:\n        logging.exception(error)\n        e = sys.exc_info()[1]\n        raise HTTPException(status_code=500, detail=str(e))\n\n@app.post(\"/uploads\")\nasync def upload_files(file: UploadFile = File(...)):\n    with open(file.filename, 'wb') as image:\n        content = await file.read()\n        image.write(content)\n        image.close()\n    return JSONResponse(content={\"filename\": file.filename},\n                        status_code=200)\n\n@app.post(\"/img\")\nasync def upload_img(files: List[UploadFile] = File(...)):\n    # UPLOAD_DIR.mkdir(exist_ok=True)\n    for img in files:\n        with open(f'{img.filename}', \"wb\") as buffer:\n            shutil.copyfileobj(img.file, buffer)\n\n    return {\"file_name\": \"Images Uploaded\"}\n\n@app.post(\"/upload-file/\")\nasync def create_upload_file(uploaded_file: UploadFile = File(...)):\n    print(\"execute\")\n\n    file_location = f\"images/{uploaded_file.filename}\"\n    with open(file_location, \"wb+\") as file_object:\n        shutil.copyfileobj(uploaded_file.file, file_object)\n    return {\"info\": f\"file '{uploaded_file.filename}' saved at '{file_location}'\",\n            \"link\": f\"http://127.0.0.1:8000/file/{uploaded_file.filename}\"}\n\n@app.get(\"/images/\")\nasync def read_random_file():\n\n    # get a random file from the image directory\n    files = os.listdir(UPLOAD_DIR)\n    random_index = randint(0, len(files) - 1)\n\n    path = UPLOAD_DIR / files[random_index]  # fix: f\"{UPLOAD_DIR}{files[random_index]}\" dropped the path separator\n\n    # notice you can use FileResponse now because it expects a path\n    return FileResponse(path)\n\n@app.get(\"/file/{name_file}\")\ndef get_file(name_file: str):\n    return FileResponse(path=getcwd() + \"/images/\" + name_file)\n\n\n\"\"\"\n@app.post(\"/test/\")\nasync def get_file(uploaded_file: UploadFile = File(...)):\n    print(\"receive\", uploaded_file.filename)\n    return JSONResponse({\"state\": \"success\"})\n\"\"\"\n","repo_name":"Angelvicks/vision-ai","sub_path":"Backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37541203182","text":"print('='*6, 'ANO BISSEXTO', '='*6)\nprint('')\nfrom datetime import date # Biblioteca para capturar o ano atual do sistema.\na = int(input(\"Qual ano quer analisar? Ou digite 0 para o ano atual: \"))\n# Se o ano for divisível por 4 e tiver resto igual 0,\n# ou divisível por 100 tiver resto diferente de 0,\n# ou divisível por 400 tiver resto igual a 0.\nif a == 0:\n a = date.today().year # Para capturar o ano atual com o usuário digitando 0.\nif a % 4 == 0 and a % 100 != 0 or a % 400 == 0:\n print('O Ano {} é BISSEXTO.'.format(a))\nelse:\n print('O Ano {} NÃO é BISSEXTO.'.format(a))\n\n\n\n","repo_name":"Edcarlos-Oliveira/PythonMundo1","sub_path":"des032AnoBi.py","file_name":"des032AnoBi.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8730644009","text":"import numpy as np\n\n\ngrid = np.zeros((15,15))\npattern = [[0,0,0,0,0],\n [0,0,0,0,0],\n [0,0,1,1,1],\n [0,1,1,1,0],\n [0,0,0,0,0]]\npattern = np.array(pattern)\n\ngrid[1:pattern.shape[0]+1,1:pattern.shape[1]+1] = pattern\n\ndef life_step(x,y,grid):\n n = np.copy(grid[x-1:x+2, y-1:y+2])\n c = n[1,1]\n n[1,1] = 0\n s = np.sum(n)\n\n if (s > 3):\n # Overpopulation\n v = 0\n elif (s < 2):\n # Underpopulation\n v = 0\n elif (c == 0 and s == 3):\n # Reproduction\n v = 1\n else:\n v = grid[x,y]\n return v\n\nw,h = grid.shape\n\nwhile (True):\n print(grid)\n new_grid = np.copy(grid)\n for i in range(1,w):\n for j in range(1,h):\n new_grid[i,j] = life_step(i,j, grid)\n print(new_grid)\n grid = np.copy(new_grid)","repo_name":"PerlinWarp/YearOfAI","sub_path":"Misc/Conway.py","file_name":"Conway.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"19239398004","text":"f1=open(\"text.txt\")\nf2=open(\"pattern.txt\")\n\n\ntext=f1.readline()\npattern=f2.readline()\n\nn=len(text)\nm=len(pattern)\n\narray=[]\n\nfor i in range(n):\n\t\n\tt=i\n\tfor j in range(m):\n\t\t\n\t\t#Check for wild card\n\t\tif(pattern[j]!=\"_\"):\n\t\t\t#Break out of inner loop if mismatch\n\t\t\tif(pattern[j]!=text[t]):\n\t\t\t\n\t\t\t\tbreak\n\t\t\t\n\t\t\t\t\t\t\t\n\t\tarray.append(text[t])\n\t\tt=t+1\n\t\tif(j==m-1):\n\t\t\t\t\n\t\t\t\tfor x in array:\n\t\t\t\t\tprint(x,end='')\n\n\t\t\t\tprint()\t\n\tarray.clear()\n\n\t\n\t\t\n\n","repo_name":"Dulangi-anuththara/String-matching","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43038497878","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\ndef final_depth_integrate(finname,foutname):\n \n #jupyter nbconvert --to script final_depth_integrate.ipynb \n # Use the above script in a Terminal Window to convert to a .py file\n\n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n import statistics as st\n import time as time\n\n from IPython.core.interactiveshell import InteractiveShell\n InteractiveShell.ast_node_interactivity = \"last\"\n #other options include 'none', 'last', 'last_expr'\n\n df1=pd.read_csv(finname) \n\n df2=df1.copy(deep=True)\n df2.drop(df2[df2[' Layer']!=2].index, inplace=True)\n df2=df2.reset_index(drop=True)\n \n df1=df1.replace([-9999.0, 9999.0, -999.0, 999.0], np.nan)\n df2=df2.replace([-9999.0, 9999.0, -999.0, 999.0], np.nan)\n #df1=df1.replace(9999.0, np.nan)\n #df2=df2.replace(-9999.0, np.nan)\n #df2=df2.replace(9999.0, np.nan)\n\n df1_interval=np.nanmax(df1[' Interval'])\n df2_interval=np.nanmax(df2[' Interval'])\n mx_interval_int=int(df1_interval)\n\n #Check that the number or instances with data (# of intervals) equals the number of rows in df2\n if len(np.unique(df1[' Interval'])) != len(df2.index):\n print('Mismatch in Length of Files!!! ' +finname+ ' NOT processed') \n else: \n bad_value=-9998\n tic=time.time()\n cnt=0 #Counter is needed in case interval is not sequential in the original csv file\n for i in range (mx_interval_int+1):\n if any(df1[' Interval']==i):\n #print(i)\n loar=df1[' Interval']==i\n #idx=loar[loar==True].index[-1] #Maybe Not Needed\n #df2[' NASC'][i]=sum((df1[' NASC'])[loar]) #THis Created Warnings! Better to use iloc like below\n\n df2.iloc[cnt,df2.columns.get_loc(' Sv_mean')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' NASC')]=sum((df1[' NASC'])[loar])\n\n df2.iloc[cnt,df2.columns.get_loc(' Sv_max')]=np.nanmax((df1[' Sv_max'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Sv_min')]=np.nanmin((df1[' Sv_min'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Sv_noise')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' NASC_noise')]=bad_value\n\n df2.iloc[cnt,df2.columns.get_loc(' Height_mean')]=sum((df1[' Height_mean'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Depth_mean')]=bad_value\n\n df2.iloc[cnt,df2.columns.get_loc(' Samples')]=sum((df1[' Samples'])[loar])\n\n df2.iloc[cnt,df2.columns.get_loc(' Layer_depth_max')]=np.nanmax((df1[' Layer_depth_max'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Layer_depth_min')]=np.nanmin((df1[' Layer_depth_min'])[loar])\n\n df2.iloc[cnt,df2.columns.get_loc(' Standard_deviation')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' Skewness')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' Kurtosis')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' ABC')]=sum((df1[' ABC'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Area_Backscatter_Strength')]=bad_value\n\n df2.iloc[cnt,df2.columns.get_loc(' Thickness_mean')]=sum((df1[' Thickness_mean'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Range_mean')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' Beam_volume_sum')]=sum((df1[' Beam_volume_sum'])[loar])\n cnt=cnt+1\n #tmp_date=df[' Date_M'][loar]\n #f_time.append((df[' Time_M'])[loar])\n toc=time.time()\n elapsed=toc-tic\n #print(elapsed)\n \n df2=df2.fillna(value=-9999.0)\n df2\n df2.to_csv (foutname, index = False, header=True)\n print('Writing ' +foutname+ ' with ' +str(len(df2.index))+ ' rows.') \n print('Processing took ' +str(elapsed)+ ' seconds.')\n print('')\n \n #import csv\n #csvData=[f_lon, f_lat, f_nasc]\n\n 
#zipped=zip(f_date,f_time,f_lon, f_lat, f_nasc)\n #zipped=zip(f_time,f_lon, f_lat, f_nasc)\n\n #with open('test.csv', 'w') as csvFile:\n # writer=csv.DictWriter(csvFile, fieldnames=[\"Time\",\"Lon_M\",\"Lat_M\",\"NASC\"])\n # writer.writeheader()\n # writer = csv.writer(csvFile)\n #writer.writeheader\n # writer.writerows(zipped)\n\n #csvFile.close()\n\n #type(f_lon)\n \n\n","repo_name":"jeffdorman/krill_biomass","sub_path":"programs/krill_biomass_processing/final_depth_integrate.py","file_name":"final_depth_integrate.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42912342322","text":"\"\"\"\nAuthor: Sangtak Lee\nContact: chst27@gmail.com\n\"\"\"\n\n\nimport pymel.core as pm\n\n\ndef getHairSystems(nucleus):\n hairSystems = []\n hairSystems = nucleus.startFrame.connections(source=False, type='hairSystem', shapes=True)\n return hairSystems\n\n\ndef getNucleusControllerAttributes(nucleus):\n enableCtrlAttr = None\n startFrameCtrlAttr = None\n enableAttrDriver = nucleus.enable.connections(destination=False, plugs=True)\n startFrameAttrDriver = nucleus.startFrame.connections(destination=False, plugs=True)\n enableCtrlAttr = enableAttrDriver[0] if enableAttrDriver else nucleus.enable\n startFrameCtrlAttr = startFrameAttrDriver[0] if startFrameAttrDriver else nucleus.startFrame\n return enableCtrlAttr, startFrameCtrlAttr\n\n\ndef getSplineIkCurve(hairSystem):\n splineIkCurve = None\n follicle = hairSystem.connections(source=False, type='follicle', shapes=True)[0]\n dynCurve = follicle.connections(source=False, type='nurbsCurve', shapes=True)[0]\n blendShape = dynCurve.connections(source=False, type='blendShape')\n if blendShape:\n splineIkCurve = blendShape[0].connections(source=False, type='nurbsCurve', shapes=True)[0]\n else:\n splineIkCurve = dynCurve\n return splineIkCurve\n\n\ndef getDynCurve(hairSystem):\n dynCrv = None\n follicle = hairSystem.outputHair[0].connections(type='follicle', shapes=True)[0]\n dynCrv = follicle.outCurve.connections(type='nurbsCurve', shapes=True)[0]\n return dynCrv\n\n\ndef getBakeLocators(dynCrv):\n bakeLocs = []\n bakeLocs = [pointOnCurveInfo.result.position.connections(type='transform')[0].getChildren(type='transform')[0] for pointOnCurveInfo in dynCrv.worldSpace[0].connections(type='pointOnCurveInfo')]\n return bakeLocs\n\n\ndef getIkHandle(splineIkCurve):\n ikHandle = None\n ikHandle = splineIkCurve.worldSpace[0].connections(type='ikHandle')[0]\n return ikHandle\n\n\ndef getControls(dynCrv):\n controls = []\n pntOnCrvInfos = dynCrv.outputs(type='pointOnCurveInfo')\n for pntOnCrvInfo in pntOnCrvInfos:\n bakeLocParent = pntOnCrvInfo.outputs(type='transform')[0]\n ctrlName = bakeLocParent.getChildren()[0].replace('_bake_loc', '')\n controls.append(pm.PyNode(ctrlName))\n return controls\n\n\ndef getJoints(splineIkCurve):\n joints = []\n\n ikHandle = splineIkCurve.outputs(type='ikHandle')[0]\n startJoint = ikHandle.inputs(type='joint')[0]\n joints = startJoint.getChildren(ad=True, type='joint') + [startJoint]\n\n return joints\n\n\ndef bakeDynToControllers(bakeLocators):\n minFrame = pm.playbackOptions(q=True, min=True)\n maxFrame = pm.playbackOptions(q=True, max=True)\n\n ctrls = []\n ctrlSpaceLocs = []\n for bakeLoc in bakeLocators:\n ctrl = pm.PyNode(bakeLoc.split('_bake_loc')[0])\n ctrls.append(ctrl)\n pm.matchTransform(bakeLoc, ctrl)\n pm.cutKey(ctrl, attribute=['tx', 'ty', 'tz', 'rx', 'ry', 'rz'], clear=True)\n\n ctrlSpaceLoc = pm.spaceLocator(n='{}_space_loc'.format(ctrl))\n ctrlSpaceLocs.append(ctrlSpaceLoc)\n # pm.parent(ctrlSpaceLoc, ctrl.getParent())\n pm.parentConstraint(bakeLoc, ctrlSpaceLoc, mo=False)\n\n pm.bakeResults(\n ctrlSpaceLocs,\n simulation=True,\n time=(minFrame, maxFrame),\n sampleBy=1,\n attribute=['tx', 'ty', 'tz', 'rx', 'ry', 'rz'],\n disableImplicitControl=True,\n preserveOutsideKeys=True,\n sparseAnimCurveBake=False,\n removeBakedAttributeFromLayer=False,\n bakeOnOverrideLayer=False,\n controlPoints=False,\n shape=False\n )\n\n # for ctrlSpaceLoc, ctrl in zip(ctrlSpaceLocs, ctrls):\n # pm.cutKey(ctrlSpaceLoc, time=':', attribute=['tx', 'ty', 'tz', 'rx', 'ry', 'rz'], 
hierarchy='none')\n    #     pm.pasteKey(ctrl, option='insert', copies=1, connect=True, timeOffset=0, floatOffset=0, valueOffset=0)\n\n    # pm.delete(ctrlSpaceLocs)\n\n    print(ctrlSpaceLocs)\n    print(ctrls)\n    for ctrlSpaceLoc, ctrl in zip(ctrlSpaceLocs, ctrls):\n        pm.parentConstraint(ctrlSpaceLoc, ctrl, mo=False)\n    pm.bakeResults(\n        ctrls,\n        simulation=True,\n        time=(minFrame, maxFrame),\n        sampleBy=1,\n        attribute=['tx', 'ty', 'tz', 'rx', 'ry', 'rz'],\n        disableImplicitControl=True,\n        preserveOutsideKeys=True,\n        sparseAnimCurveBake=False,\n        removeBakedAttributeFromLayer=False,\n        bakeOnOverrideLayer=False,\n        controlPoints=False,\n        shape=False\n    )\n    pm.delete(ctrlSpaceLocs)\n\ndef bakeDynToJoints(joints, endCtrs=[]):\n    minFrame = pm.playbackOptions(q=True, min=True)\n    maxFrame = pm.playbackOptions(q=True, max=True)\n\n    pm.bakeResults(\n        joints,\n        simulation=True,\n        time=(minFrame, maxFrame),\n        sampleBy=1,\n        attribute=['tx', 'ty', 'tz', 'rx', 'ry', 'rz'],\n        disableImplicitControl=True,\n        preserveOutsideKeys=True,\n        sparseAnimCurveBake=False,\n        removeBakedAttributeFromLayer=False,\n        bakeOnOverrideLayer=False,\n        controlPoints=False,\n        shape=False\n    )\n\n    if endCtrs:\n        for endCtr in endCtrs:\n            endCtr.bakeType.set(0)\n            endCtr.IkBlend.set(0)\n\n\ndef deleteKeys(objects):\n    if not objects:\n        pm.error('There is no transform to delete keys')\n    pm.select(objects, r=True)\n    pm.mel.eval('DeleteKeys;')\n    pm.select(cl=True)\n\n\ndef getIhHairchainData(endCtrs):\n    ihHairchainData = {'ctrList': [], 'bakeCtrList': [], 'bakeOutList': [], 'jointList': []}\n    for ctr in endCtrs:\n        prefix = ctr.replace('_ctrEnd_crv', '')\n        ihHairchainData['ctrList'].extend(pm.ls(prefix + '_ctr*_crv'))\n        ihHairchainData['bakeCtrList'].extend(pm.ls(prefix + '_bake*_crv'))\n        ihHairchainData['bakeOutList'].extend(pm.ls(prefix + '_bakeOut*_jnt', type='joint'))\n        ihHairchainData['jointList'].extend(pm.ls(prefix + '_*_jnt', type='joint'))\n    return ihHairchainData\n\n\ndef bakeIhHairchainControl(ihHairchainData):\n    pm.cutKey(ihHairchainData['ctrList'], attribute=['tx', 'ty', 'tz'], clear=True)\n\n    for bakeCtr in ihHairchainData['bakeCtrList']:\n        bakeCtrZero = bakeCtr.replace('_crv', '_zero')\n        ctrZero = bakeCtrZero.replace('_bake', '_ctr')\n        pm.delete(pm.parentConstraint(ctrZero, bakeCtrZero, mo=False))\n        pm.delete(pm.pointConstraint(bakeCtr.replace('_bake', '_ctr'), bakeCtr, mo=False))\n\n    pntConstraints = []\n    for bakeOut in ihHairchainData['bakeOutList']:\n        bakeCtr = bakeOut.replace('_bakeOut', '_bake').replace('_jnt', '_crv')\n        pntConstraints.append(pm.pointConstraint(bakeOut, bakeCtr, mo=True))\n\n    minFrame = pm.playbackOptions(q=True, min=True)\n    maxFrame = pm.playbackOptions(q=True, max=True)\n    pm.bakeResults(\n        ihHairchainData['bakeCtrList'],\n        simulation=True,\n        time=(minFrame, maxFrame),\n        sampleBy=1,\n        attribute=['tx', 'ty', 'tz'],\n        disableImplicitControl=True,\n        preserveOutsideKeys=True,\n        sparseAnimCurveBake=False,\n        removeBakedAttributeFromLayer=False,\n        bakeOnOverrideLayer=False,\n        controlPoints=False,\n        shape=False\n    )\n    pm.delete(pntConstraints)\n\n    for bakeCtr in ihHairchainData['bakeCtrList']:\n        ctr = bakeCtr.replace('_bake', '_ctr')\n        pm.cutKey(bakeCtr, time=':', attribute=['tx', 'ty', 'tz'], hierarchy='none')\n        pm.pasteKey(ctr, option='insert', copies=1, connect=True, timeOffset=0, floatOffset=0, valueOffset=0)\n\n\ndef bakeIhHairchainJoint(joints, endCtrs):\n    # fix: this was an empty stub calling pm.bakeResults() with no arguments;\n    # delegate to bakeDynToJoints, which performs the actual joint bake\n    bakeDynToJoints(joints, endCtrs)\n\n\nclass DynamicSplineBaker(object):\n    name = 'dynSplineBaker'\n\n    # Attributes\n    bakeType = ['Controller', 'Joint']\n    namespaceMenu = None\n    
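# The widget handles here are placeholders assigned real values when the window is built in __init__; 'objects' caches the nucleus list found per namespace\n    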
solverTxtScrlList = None\n dynCtrlsTxtScrlList = None\n bakeTypeMenu = None\n dynOnOffBtn = None\n objects = []\n\n def __init__(self):\n super(DynamicSplineBaker, self).__init__()\n\n if pm.window(DynamicSplineBaker.name, q=True, exists=True):\n pm.deleteUI(DynamicSplineBaker.name)\n\n win = pm.window(DynamicSplineBaker.name, title='Dynamic Spline Baker', mnb=False, mxb=False)\n\n pm.tabLayout(tabsVisible=False)\n pm.columnLayout(adj=True, rowSpacing=5)\n self.namespaceMenu = pm.optionMenu(label='Namespace: ', cc=lambda item: self.populateDynPartsTxtScrlList(item))\n\n pm.separator(style='in', h=3)\n\n pm.rowColumnLayout(numberOfColumns=3, columnSpacing=[(2, 5), (3, 5)], columnWidth=[(1,150), (2,150)])\n\n pm.columnLayout(adj=True, rowSpacing=5, columnAlign='left')\n pm.text(label='Solvers')\n self.solverTxtScrlList = pm.textScrollList(sc=self.populateDynCtrlsTxtScrlList)\n\n pm.setParent('..')\n pm.columnLayout(adj=True, rowSpacing=5, columnAlign='left')\n pm.text(label='Dynamic Controllers')\n self.dynCtrlsTxtScrlList = pm.textScrollList(allowMultiSelection=True, sc=self.selectDynCtrls)\n pm.popupMenu()\n pm.menuItem(label='Select All Controllers', c=self.selectAllDynCtrls)\n\n pm.setParent('..')\n pm.columnLayout(adj=True, rowSpacing=5)\n pm.separator(h=10, style='none')\n self.dynOnOffBtn = pm.button(label='Dynamic On/Off', c=self.dynOnOff)\n pm.rowColumnLayout(numberOfColumns=2)\n\n pm.setParent('..')\n self.bakeTypeMenu = pm.optionMenu(label='Bake Type: ')\n pm.menuItem(label='Controller')\n pm.menuItem(label='Joint')\n pm.separator(h=10, style='in')\n pm.button(label='Bake Dynamic', h=70, c=lambda x: self.bakeDynamic(pm.optionMenu(self.bakeTypeMenu, q=True, value=True)))\n pm.button(label='Delete Keys', c=self.delKeys)\n pm.button(label='Reset Controls', c=self.resetControls)\n\n pm.window(DynamicSplineBaker.name, e=True, w=100, h=100)\n pm.showWindow(win)\n\n self.buildObjects()\n self.populateNamespaceMenu()\n self.populateDynPartsTxtScrlList(pm.optionMenu(self.namespaceMenu, q=True, value=True))\n\n def buildObjects(self):\n namespaces = []\n defaultNamespaces = set(['UI', 'shared'])\n namespaces = list(set(pm.namespaceInfo(listOnlyNamespaces=True, r=True)) - defaultNamespaces)\n if not namespaces:\n hairChainNucleuses = [nucleus for nucleus in pm.ls(type='nucleus') if nucleus.startFrame.connections(source=False, type='hairSystem', shapes=True)]\n self.objects.append({'namespace': ':', 'nucleuses': hairChainNucleuses})\n for namespace in namespaces:\n nucleuses = [pm.PyNode(item) for item in pm.namespaceInfo(namespace, listOnlyDependencyNodes=True) if isinstance(pm.PyNode(item), pm.nodetypes.Nucleus)]\n hairChainNucleuses = [nucleus for nucleus in nucleuses if nucleus.startFrame.connections(source=False, type='hairSystem', shapes=True)]\n if hairChainNucleuses:\n self.objects.append({'namespace': namespace, 'nucleuses': hairChainNucleuses})\n\n def populateNamespaceMenu(self):\n for obj in self.objects:\n\n pm.menuItem(label=obj['namespace'], parent=self.namespaceMenu)\n\n def populateDynPartsTxtScrlList(self, namespace):\n pm.textScrollList(self.solverTxtScrlList, e=True, removeAll=True)\n for obj in self.objects:\n if namespace == obj['namespace']:\n for nucleus in obj['nucleuses']:\n pm.textScrollList(self.solverTxtScrlList, e=True, append=nucleus.replace(namespace+':', ''))\n\n def populateDynCtrlsTxtScrlList(self, *args):\n namespace = pm.optionMenu(self.namespaceMenu, q=True, value=True)\n pm.textScrollList(self.dynCtrlsTxtScrlList, e=True, removeAll=True)\n nucleus = 
[pm.PyNode(namespace+':'+item) for item in pm.textScrollList(self.solverTxtScrlList, q=True, selectItem=True)][0]\n\n dyn_ctrl = list(set(nucleus.inputs(type='transform')))\n pm.select(dyn_ctrl, r=True)\n # pm.select(nucleus, r=True)\n\n enableAttr, startFrameAttr = getNucleusControllerAttributes(nucleus)\n pm.cutKey(enableAttr, startFrameAttr, clear=True)\n\n self.updateDynOfOffBtn()\n\n dynCtrls = []\n hairSystems = getHairSystems(pm.PyNode(nucleus))\n for hairSystem in hairSystems:\n try: # In case JH Hairchain Rig or TAK's spline rig\n splineIkCurve = getSplineIkCurve(hairSystem)\n joints = sorted(getJoints(splineIkCurve))\n dynCtrls.append(joints[0].visibility.connections(type='transform')[0])\n except: # In case IH Hairchain Rig\n dynCtrls.append(hairSystem.startCurveAttract.connections(destination=False, type='transform')[0])\n\n for dynCtrl in dynCtrls:\n pm.textScrollList(self.dynCtrlsTxtScrlList, e=True, append=dynCtrl.replace(namespace+':', ''))\n\n def dynOnOff(self, *args):\n namespace = pm.optionMenu(self.namespaceMenu, q=True, value=True)\n nucleus = [pm.PyNode(namespace+':'+item) for item in pm.textScrollList(self.solverTxtScrlList, q=True, selectItem=True)][0]\n enableAttr, startFrameAttr = getNucleusControllerAttributes(nucleus)\n enabled = enableAttr.get()\n hairSystem = getHairSystems(nucleus)[0]\n endCtrs = [pm.PyNode(namespace+':'+ctr) for ctr in pm.textScrollList(self.dynCtrlsTxtScrlList, q=True, allItems=True)]\n if enabled and hairSystem.simulationMethod.get() == 3:\n enableAttr.set(False)\n startFrameAttr.set(100000)\n pm.button(self.dynOnOffBtn, e=True, bgc=(0.75, 0.25, 0.0), label='Dynamic Off')\n if endCtrs[0].hasAttr('dynamicType'):\n endCtrs[0].dynamicType.connections(destination=False, plugs=True)[0].set(0)\n for endCtr in endCtrs:\n if endCtr.hasAttr('Constraint'):\n pm.cutKey(endCtr.Constraint, clear=True)\n endCtr.Constraint.set(1)\n else:\n enableAttr.set(True)\n startFrameAttr.set(pm.playbackOptions(q=True, min=True))\n pm.button(self.dynOnOffBtn, e=True, bgc=(0.0, 0.75, 0.25), label='Dynamic On')\n if endCtrs[0].hasAttr('dynamicType'):\n endCtrs[0].dynamicType.connections(destination=False, plugs=True)[0].set(2)\n for endCtr in endCtrs:\n if endCtr.hasAttr('Constraint'):\n pm.cutKey(endCtr.Constraint, clear=True)\n endCtr.Constraint.set(0)\n\n def updateDynOfOffBtn(self):\n namespace = pm.optionMenu(self.namespaceMenu, q=True, value=True)\n nucleus = [pm.PyNode(namespace+':'+item) for item in pm.textScrollList(self.solverTxtScrlList, q=True, selectItem=True)][0]\n enableAttr, startFrameAttr = getNucleusControllerAttributes(nucleus)\n enabled = enableAttr.get()\n hairSystem = getHairSystems(nucleus)[0]\n if enabled and hairSystem.simulationMethod.get() == 3:\n pm.button(self.dynOnOffBtn, e=True, bgc=(0.0, 0.75, 0.25), label='Dynamic On')\n else:\n pm.button(self.dynOnOffBtn, e=True, bgc=(0.75, 0.25, 0.0), label='Dynamic Off')\n\n def selectDynCtrls(self):\n namespace = pm.optionMenu(self.namespaceMenu, q=True, value=True)\n dynCtrls = [namespace+':'+ctrl for ctrl in pm.textScrollList(self.dynCtrlsTxtScrlList, q=True, selectItem=True)]\n pm.select(dynCtrls, r=True)\n\n def selectAllDynCtrls(self, *args):\n namespace = pm.optionMenu(self.namespaceMenu, q=True, value=True)\n allDynCtrls = [namespace+':'+ctrl for ctrl in pm.textScrollList(self.dynCtrlsTxtScrlList, q=True, allItems=True)]\n pm.textScrollList(self.dynCtrlsTxtScrlList, e=True, selectItem=pm.textScrollList(self.dynCtrlsTxtScrlList, q=True, allItems=True))\n pm.select(allDynCtrls, r=True)\n\n 
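# Bake entry point: turns the solver on, bakes the simulation onto controllers or joints (per the chosen bakeType), then turns the solver back off\n    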
def bakeDynamic(self, bakeType):\n namespace = pm.optionMenu(self.namespaceMenu, q=True, value=True)\n nucleus = [pm.PyNode(namespace+':'+item) for item in pm.textScrollList(self.solverTxtScrlList, q=True, selectItem=True)][0]\n\n enableAttr, startFrameAttr = getNucleusControllerAttributes(nucleus)\n enableAttr.set(True)\n pm.button(self.dynOnOffBtn, e=True, bgc=(0.0, 0.75, 0.25), label='Dynamic On')\n startFrameAttr.set(pm.playbackOptions(q=True, min=True))\n endCtrs = [pm.PyNode(namespace+':'+ctr) for ctr in pm.textScrollList(self.dynCtrlsTxtScrlList, q=True, allItems=True)]\n if endCtrs[0].hasAttr('dynamicType'):\n endCtrs[0].dynamicType.connections(destination=False, plugs=True)[0].set(2)\n for endCtr in endCtrs:\n if endCtr.hasAttr('Constraint'):\n pm.cutKey(endCtr.Constraint, clear=True)\n endCtr.Constraint.set(0)\n\n hairSystems = getHairSystems(nucleus)\n allJoints = []\n allBakeLocs = []\n for hairSystem in hairSystems:\n try: # In case JH Hairchain Rig or TAK's spline rig\n splineIkCurve = getSplineIkCurve(hairSystem)\n dynCrv = getDynCurve(hairSystem)\n allJoints.extend(getJoints(splineIkCurve))\n if bakeType == 'Controller':\n allBakeLocs.extend(getBakeLocators(dynCrv))\n except: # In case IH Hairchain Rig\n break\n\n try: # In case JH Hairchain Rig or TAK's spline rig\n pm.refresh(su=True)\n bakeDynToControllers(allBakeLocs) if bakeType == 'Controller' else bakeDynToJoints(allJoints)\n pm.refresh(su=False)\n except: # In case IH Hairchain Rig\n for endCtr in endCtrs:\n if endCtr.hasAttr('Constraint'):\n endCtr.Constraint.set(0)\n ihHairchainData = getIhHairchainData(endCtrs)\n bakeIhHairchainControl(ihHairchainData) if bakeType == 'Controller' else bakeDynToJoints(ihHairchainData['jointList'], endCtrs)\n endCtrs[0].dynamicType.connections(destination=False, plugs=True)[0].set(0)\n\n enableAttr, startFrameAttr = getNucleusControllerAttributes(nucleus)\n enableAttr.set(False)\n startFrameAttr.set(100000)\n self.updateDynOfOffBtn()\n\n def delKeys(self, *args):\n namespace = pm.optionMenu(self.namespaceMenu, q=True, value=True)\n nucleus = [pm.PyNode(namespace+':'+item) for item in pm.textScrollList(self.solverTxtScrlList, q=True, selectItem=True)][0]\n allJoints = []\n allControls = []\n allIkHandles = []\n hairSystems = getHairSystems(nucleus)\n for hairSystem in hairSystems:\n try: # In case JH Hairchain Rig or TAK's spline rig\n splineIkCurve = getSplineIkCurve(hairSystem)\n allJoints.extend(getJoints(splineIkCurve))\n allIkHandles.append(getIkHandle(splineIkCurve))\n allControls.extend(getControls(getDynCurve(hairSystem)))\n except: # In case IH Hairchain Rig\n break\n\n try: # In case JH Hairchain Rig or TAK's spline rig\n deleteKeys(allControls)\n deleteKeys(allJoints)\n for ikHandle in allIkHandles:\n ikHandle.ikBlend.set(1)\n except: # In case IH Hairchain Rig\n endCtrs = [pm.PyNode(namespace+':'+ctr) for ctr in pm.textScrollList(self.dynCtrlsTxtScrlList, q=True, allItems=True)]\n ihHairchainData = getIhHairchainData(endCtrs)\n deleteKeys(ihHairchainData['ctrList'])\n deleteKeys(ihHairchainData['jointList'])\n for endCtr in endCtrs:\n endCtr.bakeType.set(0)\n endCtr.IkBlend.set(1)\n\n def resetControls(self, *args):\n namespace = pm.optionMenu(self.namespaceMenu, q=True, value=True)\n nucleus = [pm.PyNode(namespace+':'+item) for item in pm.textScrollList(self.solverTxtScrlList, q=True, selectItem=True)][0]\n allControls = []\n hairSystems = getHairSystems(nucleus)\n for hairSystem in hairSystems:\n try: # In case JH Hairchain Rig or TAK's spline rig\n 
allControls.extend(getControls(getDynCurve(hairSystem)))\n except: # In case IH Hairchain Rig\n break\n\n attrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']\n try: # In case JH Hairchain Rig or TAK's spline rig\n for ctrl in allControls:\n for attr in attrs:\n ctrl.attr(attr).set(0)\n except: # In case IH Hairchain Rig\n endCtrs = [pm.PyNode(namespace+':'+ctr) for ctr in pm.textScrollList(self.dynCtrlsTxtScrlList, q=True, allItems=True)]\n ihHairchainData = getIhHairchainData(endCtrs)\n for ctr in ihHairchainData['ctrList']:\n for attr in attrs:\n ctr.attr(attr).set(0)\n","repo_name":"LEESANGTAK/ironRig","sub_path":"scripts/ironRig/dynamicSplineBaker.py","file_name":"dynamicSplineBaker.py","file_ext":"py","file_size_in_byte":20624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"9582120465","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 23 12:38:50 2019\n\n@author: michaelboles\n\"\"\"\n\n# set up working directory\nimport os\nos.chdir('/Users/michaelboles/Michael/Coding/2019/Realestate') # Mac\n#os.chdir('C:\\\\Users\\\\bolesmi\\\\Lam\\\\Coding\\\\Python\\\\2019\\\\Realestate') # PC\n\n# import data\nimport pandas as pd\ndata = pd.read_csv('./Data/listings/data_all_price_predictions.csv')\n\n# remind myself what the column names are\ndata.columns\n\n# create in-memory sqlite database, add dataframe\nfrom sqlalchemy import create_engine\nengine = create_engine('sqlite://', echo = False)\ndata.to_sql('Realestate', con=engine)\n\n# query database\nengine.execute(\"SELECT * FROM Realestate\").fetchall() # gets everything\nengine.execute('SELECT * FROM Realestate WHERE Zip = 94618').fetchall() # matches a zipcode\n\n# create a list from sql query \n# returns list of rowproxy objects, omits column names - why is this so hard\nrockridge = engine.execute('SELECT * FROM Realestate WHERE Zip = 94618').fetchall() # matches a zipcode\n","repo_name":"mboles01/Realestate","sub_path":"Old/SQL/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"}
+{"seq_id":"14677063857","text":"import argparse\nimport numpy as np\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nimport matplotlib.pyplot as plt\nimport cv2\nimport PIL.Image\n\n# from models import *\nimport models\n\n# Prune settings\nparser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR prune')\nparser.add_argument('--dataset', type=str, default='cifar10',\n help='training dataset (default: cifar10)')\nparser.add_argument('--val-batch-size', type=int, default=256, metavar='N',\n help='input batch size for validatin (default: 256)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--depth', type=int, default=16,\n help='depth of the vgg')\nparser.add_argument('--arch', default='vgg_16', type=str,\n help='architecture to use')\n# parser.add_argument('--model', default='', type=str, metavar='PATH',\n# help='path to the model (default: none)')\nparser.add_argument('--save', default='./cleanresult/1/EB-30-29.pth.tar', type=str, metavar='PATH',\n help='path to save pruned model (default: none)')\nparser.add_argument('--save_1', default='./poisonresult_2/2/EB-30-28.pth.tar', type=str, metavar='PATH',\n help='path to save pruned model (default: none)')\n\n# parser.add_argument('--save_2', default='./poisonresult_2/3/EB-30-27.pth.tar', type=str, metavar='PATH',\n# help='path to save pruned model (default: none)')\n# parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='manual start epoch number')\n# parser.add_argument('--end_epoch', default=160, type=int, metavar='N', help='manual end epoch number')\n\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nprint('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')\nprint('Experiment Starting... Check critical information below carefully!')\nprint('Training Phase: Calculate Difference of Two Masks;')\nprint('Dataset:{};'.format(args.dataset))\n# print('Dataset:{};\\tStart Epoch:{};\\tEnd Epoch:{};'.format(args.dataset, args.start_epoch, args.end_epoch)) #\nprint('Network Architecture:{};\\tDepth:{};'.format(args.arch, args.depth)) #\nprint('First Mask Path:{};'.format(args.save))\nprint('Second Mask Path:{};'.format(args.save_1))\nprint('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')\n\nsetting_perc = 0.3\n\nif not os.path.exists(args.save):\n os.makedirs(args.save)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)\nmodel_bd = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)\n\nif args.cuda:\n model.cuda()\n model_bd.cuda()\n\n\ndef pruning(model, percent):\n total = 0\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n total += m.weight.data.shape[0]\n\n bn = torch.zeros(total)\n index = 0\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n size = m.weight.data.shape[0]\n bn[index:(index + size)] = m.weight.data.abs().clone()\n index += size\n\n y, i = torch.sort(bn)\n thre_index = int(total * percent)\n thre = y[thre_index]\n mask2 = bn.gt(thre).float().view(-1)\n return mask2\n\n\ndef get_mask(path: str, default_percent=0.3):\n print(f'==> Mask from {path} ... 
')\n checkpoint = torch.load(path)\n best_epoch = checkpoint['epoch']\n print('EarlyBird Emerging Epoch: ', best_epoch)\n model.load_state_dict(checkpoint['state_dict'])\n percent = 0.3 if 'EB-30' in path else 0.5 if 'EB-50' in path else 0.7 if 'EB-70' in path else default_percent\n mask = pruning(model, percent)\n print('Remanent Percent: {}%.\\n'.format(int(torch.sum(mask == 1) * 100. / mask.size(0))))\n return mask\n\n# get clean EB\nprint('==> resumeing from {} ... '.format(args.save))\ncheckpoint = torch.load(args.save)\nbest_epoch = checkpoint['epoch']\nprint('EarlyBird Emerging Epoch: ', best_epoch)\nmodel.load_state_dict(checkpoint['state_dict'])\n\n# get backdoor EB and mask\nprint('==> resumeing from {} ... '.format(args.save_1))\ncheckpoint_bd = torch.load(args.save_1)\nbest_epoch_bd = checkpoint_bd['epoch']\nprint('EarlyBird Emerging Epoch: ', best_epoch_bd)\nmodel_bd.load_state_dict(checkpoint_bd['state_dict'])\npercent_2 = 0.3 if 'EB-30' in args.save_1 else 0.5 if 'EB-50' in args.save_1 else 0.7 if 'EB-70' in args.save_1 else setting_perc\nbest_mask_bd = pruning(model_bd, percent_2)\n\nX = []\nY = []\n\nfor percent_set in np.arange(0.3, 1, 0.05): # [0.3, 0.35, 0.4, ... , 1]:\n X.append(percent_set)\n print(\"\\nclean prune precent:\", percent_set)\n best_mask = pruning(model, percent_set) # get clean mask /key neurons\n\n in_num = 0\n for i in range(best_mask.size(0)):\n if best_mask[i] == 1 and best_mask_bd[i] == 1:# key neuron exists in both cl & bd\n in_num += 1 \n Y.append(in_num / int(torch.sum(best_mask)))\n print(\"both exist percent:\", in_num / int(torch.sum(best_mask)))\nprint(X)\nprint(Y)","repo_name":"zeyuanyin/LTH-Backdoor","sub_path":"plot/key_neuron_rate.py","file_name":"key_neuron_rate.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
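The script above imports matplotlib but only prints X and Y; a hedged follow-on sketch that plots the overlap curve instead (the output file name is an arbitrary choice, not from the original):

plt.figure()
plt.plot(X, Y, marker='o')
plt.xlabel('clean prune percent')
plt.ylabel('fraction of clean key neurons also in backdoor mask')
plt.title('Clean/backdoor mask overlap vs. prune ratio')
plt.savefig('mask_overlap.png', bbox_inches='tight')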
+{"seq_id":"59773328","text":"#PROJECT: MODELING TORONTO BIKESHARE NETWORK\r\n\r\n#Notes: \r\n\r\n#station info JSON: https://tor.publicbikesystem.net/ube/gbfs/v1/en/station_information\r\n\r\n#--------------------------------------------------#\r\n\r\n#1) IMPORT LIBRARIES\r\n\r\n#Computation and Structuring:\r\n\r\nimport pandas as pd\r\nimport json\r\nfrom pandas.io.json import json_normalize\r\n\r\n#Modeling:\r\n\r\nimport networkx as nx\r\n\r\n#Visualization:\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n#--------------------------------------------------#\r\n\r\n#1) DATA IMPORT AND PREP\r\n\r\n#First we load the node data from a JSON file containing all of the station's in the Toronto bike network:\r\n\r\n#The JSON was in a deep embedded format and not working with Pandas read_json, so needed to take a more manual approach (i.e. can't use pd.read_json):\r\n\r\ndef unpack_json(filename):\r\n \"\"\"function to unpack the JSON file format provided by the Toronto bikeshare network \"\"\"\r\n \r\n with open(filename) as json_file: \r\n inter_data = json.load(json_file)\r\n \r\n inter_data = json_normalize(inter_data['data'])\r\n inter_data = list(inter_data.values.flatten()) #creates a list of a list of dictionaries\r\n inter_data = inter_data[0] #unpacks so it is a list of dictionaries since all data data was in a list object at index[0]\r\n inter_data_df = pd.DataFrame(inter_data) #convert the list of dictionaires into a df, which is now properly formatted\r\n \r\n return inter_data_df\r\n\r\nnode_data_function = unpack_json('station_info.json') #gets information on station's and locations\r\nnode_data_final = node_data_function[['address','capacity','lat','lon','name','station_id']] #only keep relevant columns, this is our final cleaned node data set we can use to build the graph\r\n\r\n#Now we load the edge data, which consists of an excel file with ride level data:\r\n\r\nedge_data = pd.read_excel('2016_Bike_Share_Toronto_Ridership_Q4.xlsx')\r\n\r\n#clean edge data and join to station id information from the node_data file:\r\n\r\ndef clean_edge_data(df1, df2):\r\n \"\"\"cleans and reformats the edge data set so that node information is included\"\"\"\r\n \r\n edge_data_final = pd.merge(df1,df2[['name','station_id']].rename(columns={'name':'from_station_name'}),how='left',on='from_station_name') #add station_id from the node data to the trip level data \r\n edge_data_final = edge_data_final.rename(columns={'station_id':'station_id_from'}) #rename station_id column\r\n edge_data_final = pd.merge(edge_data_final,df2[['name','station_id']].rename(columns={'name':'to_station_name'}),how='left',on='to_station_name') #add station_id from the node data to the trip level data \r\n edge_data_final = edge_data_final.rename(columns={'station_id':'station_id_to'}) #rename station_id column\r\n edge_data_final = edge_data_final.dropna(subset=['station_id_to', 'station_id_from']) #drops edges where station id info is missing\r\n edge_data_final['station_id_from'] = pd.to_numeric(edge_data_final['station_id_from'], downcast='integer') #match to format of station_id in node data set\r\n edge_data_final['station_id_to'] = pd.to_numeric(edge_data_final['station_id_to'], downcast='integer') #match to format of station_id in node data set\r\n \r\n return edge_data_final\r\n\r\nedge_data_final2 = clean_edge_data(edge_data, node_data_final) #creates final cleaned edge data set ready for creating the network\r\n\r\n#--------------------------------------------------#\r\n\r\n#2) Structure the Bikeshare 
network as a NetworkX Graph:\r\n\r\nNG = nx.MultiDiGraph() #creates empty directed graph\r\n\r\n#create nodes in the graph from station_id and give them a position that is equal to their lat-lon coordinates\r\n\r\nfor i, j, k in zip(node_data_final['station_id'], node_data_final['lon'], node_data_final['lat']):\r\n NG.add_node(i,pos=(j,k)) #iterates through the node data file to and \r\n\r\npos= nx.get_node_attributes(NG, 'pos') #set position attribute for drawing\r\nprint(pos) #check the dictionary format is correct\r\n\r\n#loop through the edge pairs and add to graph:\r\nfor i, j in zip(edge_data_final2['station_id_from'], edge_data_final2['station_id_to']):\r\n NG.add_edge(i,j) #iterates through edge_data and adds edges to the graph\r\n \r\n#--------------------------------------------------#\r\n\r\n#3) Analysis and Visualization: \r\n \r\n#Some high level stats for the network:\r\n \r\nprint('# of edges: {}'.format(NG.number_of_edges())) #~147k\r\nprint('# of nodes: {}'.format(NG.number_of_nodes())) #336 nodes, matches number of stations\r\nprint(NG.degree(node_data_final['station_id'])) #look at most important nodes in network\r\nprint(nx.in_degree_centrality(NG)) #computes the in-degree centrality for nodes in the directed network\r\nprint(nx.out_degree_centrality(NG)) #coputes the out-degree centrality for nodes in the directed network\r\n\r\n#visualization of the network in physical space (using the lat-lon coordinate attributes):\r\n\r\nplt.axis('off')\r\nnx.draw(NG,pos,node_size=20,node_color='blue',alpha=0.5,width=0.5)\r\n\r\n\r\n","repo_name":"7cb15/Modeling-Toronto-Bikeshare-Network","sub_path":"BikeShareModeling.py","file_name":"BikeShareModeling.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
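The centrality printouts above dump whole dicts; a short sketch that ranks them so the busiest stations are readable (plain dict sorting over the same networkx calls):

in_cent = nx.in_degree_centrality(NG)
out_cent = nx.out_degree_centrality(NG)
top_in = sorted(in_cent.items(), key=lambda kv: kv[1], reverse=True)[:10]
top_out = sorted(out_cent.items(), key=lambda kv: kv[1], reverse=True)[:10]
print('Top 10 destination stations by in-degree centrality: {}'.format(top_in))
print('Top 10 origin stations by out-degree centrality: {}'.format(top_out))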
+{"seq_id":"1832679764","text":"import timm\nimport torch\nimport numpy as np\nfrom torchsummary import summary\n\nfrom nni.compression.pytorch.pruning import L1NormPruner\nfrom nni.compression.pytorch.speedup import ModelSpeedup\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef test_exclude():\n batch_size = 16\n inference_input = torch.randn(batch_size, 3, 360, 640).to(device)\n\n sparsity = 0.8\n model = timm.create_model('efficientnet_lite0', pretrained=True)\n model.to(device)\n print(\"Model Structure...\")\n print(model)\n\n print(\"\\nStarting Pruning Process...\")\n config_list = None\n # create pruned model\n config_list = [{\n 'sparsity_per_layer': sparsity,\n 'op_types': ['Linear', 'Conv2d']\n }, {\n 'exclude': True,\n 'op_names': ['conv_stem']\n }]\n\n print(\"\\nConfig List:\", config_list)\n\n dummy_input = torch.rand(1, 3, 360, 640).to(device)\n pruner = L1NormPruner(model, config_list)\n\n # compress the model and generate the masks\n _, masks = pruner.compress()\n\n # need to unwrap the model, if the model is wrapped before speedup\n pruner._unwrap_model()\n\n # speedup the model, for more information about speedup, please refer :doc:`pruning_speedup`.\n ModelSpeedup(model, dummy_input, masks).speedup_model()\n\n print(\"\\n\\n----------- Model Summary: Pruned at {}% with NNI -----------\\n\".format(sparsity * 100))\n if torch.cuda.is_available():\n model.cuda()\n summary(model, (3, 360, 640))\n\n\ntest_exclude()\n","repo_name":"pmmitche/Masters-Thesis","sub_path":"minimal_pruning_error_example.py","file_name":"minimal_pruning_error_example.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2647265561","text":"import os\nimport threading\n\nimport GladosIA\nimport GladosTelegramClient\nimport GladosDiscordClient\nimport GladosSlackClient\nimport GladosMQTT\n\nimport platform\nimport GladosMQTT\nimport time\n\n#Variables\nmqHost\t = \"10.0.0.20\"\nmqPort \t = 1883\nnodeName = platform.node()\nglobalCMDTopic = \"space/cmnd\"\n\nglados_bot = GladosIA.GladosBot()\ndiscord_bot = GladosDiscordClient.GladosDiscordClient()\n\ndef subscribeTopics() :\n\tgladosMQTT.subscribe(\"node\")\n\ndef on_connect(client, userdata, rc,arg):\n\tsubscribeTopics()\n\ndef on_message(client, userdata, msg):\n\tif (msg.topic == commandTopic) :\n\t\tdebug(\"cmd:\"+msg)\n\t\ndef on_disconnect(client, userdata, rc):\n\tdebug(\"Disconnected! rc: \"+str(rc))\n\n\n\ndef run_bots() :\n global mqHost,mqPort,nodeName,on_connect,on_message,on_disconnect,globalCMDTopic\n GladosMQTT.initMQTT(mqHost,mqPort,nodeName,on_connect,on_message,on_disconnect,globalCMDTopic)\n\n# GladosDiscordClient.run_bot(GladosIA.askGLaDOS)\n# GladosSlackClient.run_bot(GladosIA.askGLaDOS)\n# tg_t = threading.Thread(target=\tGladosTelegramClient.run_bot, args=(GladosIA.askGLaDOS,))\n dc_t = threading.Thread(target=\tdiscord_bot.run , args=())\n# sl_t = threading.Thread(target=\tGladosSlackClient.run_bot , args=(GladosIA.askGLaDOS,))\n\n# tg_t.start()\n dc_t.start()\n# sl_t.start() \n\n# tg_t.join()\n# dc_t.join()\n\nasync def process_msgs() :\n pending = discord_bot.getMsgs()\n for msg in pending :\n response = glados_bot.askGLaDOS(msg)\n await discord_bot.sendMsg(response)\n","repo_name":"makespacemadrid/GLaDOS","sub_path":"GLaDOS_bot/moab.py","file_name":"moab.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"14211099431","text":"import cv2\r\nimport numpy as np\r\nfrom csv_managment import comparate_with_database\r\nimport socket\r\n\r\nadress = '0.0.0.0'\r\nport = 8081\r\n\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nsock.bind((adress, port))\r\nsock.listen(1)\r\n\r\nconnections = []\r\nwithAndroid = False\r\n\r\nfinger_position_list = [[], []]\r\n\r\ndef string(vec):\r\n result = \"\"\r\n for i in vec:\r\n result += str(i) + \"!\"\r\n\r\n return result\r\n\r\ndef send(message):\r\n for connection in connections:\r\n connection.send(bytes(message + \"\\n\", 'utf-8'))\r\n\r\nif (withAndroid):\r\n print(\"Waiting for connections\")\r\n while True:\r\n client, a = sock.accept()\r\n connections.append(client)\r\n break\r\n\r\n print(\"Connected\")\r\n print(connections)\r\n\r\ncap = cv2.VideoCapture(0)\r\n_, img3 = cap.read()\r\n\r\nx1, y1, x2, y2 = 0,0,0,0\r\n\r\ncounter = 0\r\nsalidaFinal = \"\"\r\nisSend = False\r\nmensaje = \"\"\r\nwhile (cap.isOpened()):\r\n _, frame = cap.read()\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n \r\n # define range of white color in HSV\r\n # change it according to your need !\r\n #lower_white = np.array([0, 0, 215])\r\n #upper_white = np.array([180, 15, 255])\r\n #lower_white = np.array([0, 0, 230])\r\n #upper_white = np.array([180, 25, 255])\r\n \r\n # Threshold the HSV image to get only white colors\r\n #mask = cv2.inRange(hsv, lower_white, upper_white)\r\n # Bitwise-AND mask and original image\r\n #res = cv2.bitwise_and(frame,frame, mask= mask)\r\n #umbral = cv2.threshold(mask, 25, 255, cv2.THRESH_BINARY)[1]\r\n #umbral = cv2.dilate(umbral, None, iterations=2)\r\n \r\n #contornosimg = umbral.copy()\r\n # Buscamos contorno en la imagen\r\n #im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n \r\n \r\n \r\n \"\"\"\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 4000):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 40000):\r\n continue\r\n else:\r\n \r\n (xa, ya, wa, ha) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n if(xa>40 and ya>40 and wa+80(x1+30) or xa<(x1-30)):\r\n if(x1(y1+30) or ya<(y1-30)):\r\n if(y1 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n red_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"RED color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,255))\r\n xr = x\r\n yr = y\r\n \r\n contornosimg = blue.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n xb = 0\r\n yb = 0\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x +w, y+h), (255, 0, 0), 2)\r\n blue_objects.append([(x + w)/2 , (y+h)/2])\r\n xb = x\r\n yb = y\r\n cv2.putText(frame,\"BLUE color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,0.7,(255,0,0))\r\n\r\n \r\n\r\n #Tracking the YELLOW Color\r\n \r\n contornosimg = yellow.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n for c in contornos:\r\n # 
Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n yellow_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"YELLLOW color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n \r\n\r\n\r\n #Tracking the purple Color\r\n contornosimg = purple.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n purple_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"purple color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n \r\n \r\n\r\n #Tracking the green Color\r\n \r\n contornosimg = green.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n green_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"green color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n \r\n \r\n\r\n #Tracking the black Color\r\n \r\n contornosimg = black.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n black_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"black color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n \r\n \r\n\r\n #Tracking the orange Color\r\n \r\n \r\n \r\n \r\n \r\n contornosimg = orange.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n orange_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"orange color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n\r\n \r\n \r\n \r\n if(xb==0):\r\n if(xr>(x1+30) or xr<(x1-30)):\r\n if(x1(y1+30) or yr<(y1-30)):\r\n if(y1(x2+30) or xb<(x2-30)):\r\n if(x1(y2+30) or yb<(y2-30)):\r\n if(y2Go\"\n return 
mark_safe(html)\n else:\n return '-'\n\n get_link.short_description = (\"Link\")\n get_link.allow_tags = True\n\n def geo_reference(self, obj):\n if obj.pk and str(obj.url_type)=='image':\n corners=\"\"\n if obj.resource.bounding_box:\n points = []\n for b in obj.resource.bounding_box:\n for p in b:\n points.append(str(p[0]) + \" \" + str(p[1]))\n corners = \"&d=\"+','.join(points)\n solr_id=str(obj.resource.resource_id)+\"-\"+str(obj.resource.end_point.id)\n html = \"Open Georeferencer\"\n return mark_safe(html)\n else:\n return '-'\n\n geo_reference.short_description = (\"Geo Reference\")\n geo_reference.allow_tags = True\n\n\nclass Status_LogInline(admin.StackedInline):\n model = Status_Log\n extra = 0\n\nclass Change_LogInline(admin.StackedInline):\n model = Change_Log\n classes = ['collapse']\n # readonly_fields = ('field_name', \"date_\", \"change_type\")\n fieldsets = [\n (None, {'fields': [('field_name', \"date\", \"change_type\")]}),\n (None, {'fields': ['new']}),\n (None, {'fields': ['old']}),\n (None, {'fields': ['community_input']})\n\n ]\n extra = 0\n\nclass ParentInline(admin.StackedInline):\n model = Resource.parent.through\n fk_name = \"from_resource\" # not work \"parent_resource\" \"resource_id\", \"parent_id\", from_resource_id, to_resource_id\n classes = ['collapse']\n verbose_name = \"Parent Resource\"\n verbose_name_plural = \"Parent Resources\"\n extra = 0\n show_change_link=True\n\nclass ChildrenInline(admin.StackedInline):\n model = Resource.parent.through\n fk_name = \"to_resource\" # not work \"parent_resource\" \"resource_id\", \"parent_id\", from_resource_id, to_resource_id\n classes = ['collapse']\n verbose_name = \"Child Resource\"\n verbose_name_plural = \"Child Resources\"\n extra = 0\n show_change_link=True\n\nclass ParentFilter(admin.SimpleListFilter):\n title = 'Root Resource'\n parameter_name = 'is_parent'\n\n def lookups(self, request, model_admin):\n return (\n ('Yes', 'Yes'),\n ('No', 'No'),\n )\n\n def queryset(self, request, queryset):\n value = self.value()\n if value == 'Yes':\n return queryset.filter(parent=None)\n elif value == 'No':\n return queryset.exclude(parent=None)\n return queryset\n\n# @admin.register(Resource)\nclass ResourceAdmin(OSMGeoAdmin):\n list_filter = ('end_point',\"type\",\"status_type\",\"owner\",ParentFilter,\"missing\")\n search_fields = ('title','alt_title','description','resource_id',)\n list_display = ('title', 'year','end_point','get_thumb_small','type','get_category','status_type',\"child_count\",\"accessioned\")\n\n readonly_fields = ('get_thumb',\"_layer_json\",\"_raw_json\",\"get_tags\",\"get_named_places\",\"get_category\",\"child_count\",\"preview\")\n\n autocomplete_fields =(\"tag\",\"named_place\",\"owner\", \"publisher\")\n fieldsets = [\n (None, {'fields': [('resource_id','preview'),'year','temporal_coverage']}),\n (None, {'fields': [('title', 'alt_title')]}),\n (None, {'fields': ['status_type','end_point',\"missing\"]}),\n (None, {'fields': [('resource_type')]}),\n (None, {'fields': [('type', 'geometry_type', \"format\")]}),\n\n (None, {'fields': [\"get_thumb\", \"thumbnail\"]}),\n (None, {'fields': [(\"owner\", \"publisher\")]}),\n (None, {'fields': [(\"created\",\"modified\",\"accessioned\")]}),\n\n (None, {'fields': ['description']}),\n (None, {'fields': ['bounding_box']}),\n\n (None, {'fields': [\"languages\",\"category\"]}),\n (None, {'fields': [( \"get_tags\",\"tag\")]}),\n (None, {'fields': [(\"get_named_places\",\"named_place\")]}),\n\n\n (None, {'fields': [\"_raw_json\"]}),\n (None, {'fields': 
[\"_layer_json\"]}),\n (None, {'fields': [\"license_info\"]}),\n\n ]\n\n def child_count(self, obj=None):\n with connection.cursor() as cursor:\n cursor.execute(\"Select count(id) from resources_resource_parent where to_resource_id={};\".format(obj.id))\n\n return (cursor.fetchone()[0])\n\n\n\n def get_tags(self, obj=None):\n print(obj.tag.all())\n return \", \".join([t.name for t in obj.tag.all()])\n\n def get_named_places(self, obj=None):\n return \", \".join([p.name for p in obj.named_place.all()])\n\n def get_category(self, obj):\n return \",\".join([p.name for p in obj.category.all()])\n\n def get_thumb(self, obj=None):\n html = ''.format(obj.thumbnail) if obj.thumbnail else \"\"\n return mark_safe(html)\n\n def get_thumb_small(self, obj=None):\n\n html = ''.format(obj.thumbnail) if obj.thumbnail else \"\"\n return mark_safe(html)\n\n def _raw_json(self, obj=None):\n return mark_safe(get_pretty_json(obj.raw_json)) if obj.raw_json else \"\"\n\n def _layer_json(self, obj=None):\n return mark_safe(get_pretty_json(obj.layer_json)) if obj.layer_json else \"\"\n\n inlines = [\n ParentInline,\n ChildrenInline,\n URLInline,\n Status_LogInline,\n Change_LogInline\n ]\n\n def get_actions(self, request):\n actions = super().get_actions(request)\n if 'delete_selected' in actions:\n del actions['delete_selected']\n return actions\n\n actions = [\"add_selected_resources_to_staging\",\"delete_selected_resources\", 'remove_selected_resources_from_index_staging']\n\n def add_selected_resources_to_staging(self, request, queryset):\n # first export\n\n directory = os.path.dirname(os.path.realpath(__file__)) + \"/ingester\"\n verbosity=1\n # clear the directory\n if os.path.exists(directory + \"/json\"):\n files = glob.glob(directory + \"/json/*\")\n if (verbosity>1):\n print(\"removing existing files from past ingest for a fresh start!\")\n\n for f in files:\n os.remove(f)\n\n #if a child is selected we should ingest the parent instead\n for r in queryset:\n # todo - need a better way than just relying upon the parent status\n r.layers = Resource.objects.filter(status_type=r.status_type, parent=r.id)\n print(\"The layers are:\", r.layers)\n # return\n # associate the children\n for r in queryset:\n #todo - need a better way than just relying upon the parent status\n r.layers = Resource.objects.filter(status_type=r.status_type,parent=r.id)\n print(\"The layers are:\",r.layers)\n\n exporter = db_to_gbl.DB_ToGBL({\n \"resources\": queryset,\n \"path\": directory + \"/\",\n \"verbosity\": verbosity\n })\n # then ingest\n publish_to_gbl.Publish_ToGBL({\n \"path\": directory + \"/json\",\n \"verbosity\": verbosity\n })\n # set status to remove from staging\n updated =queryset.update(status_type='is')\n self.message_user(request, ngettext(\n '%d resource was successfully ingested to Staging.',\n '%d resources were successfully ingested to Staging.',\n updated,\n ) % updated, messages.SUCCESS)\n\n add_selected_resources_to_staging.short_description = \"Ingest to Staging\"\n\n def remove_selected_resources_from_index_staging(self, request, queryset):\n deleter = Delete_From_Solr.Delete_From_Solr({})\n # set status to remove from staging\n updated =queryset.update(status_type='rs')\n for obj in queryset:\n # remove from solr\n print(\"DELETE---\", obj.resource_id+\"-\"+str(obj.end_point.id))\n deleter.interface.delete_one_record(\"\\\"\"+obj.resource_id+\"-\"+str(obj.end_point.id)+\"\\\"\")\n\n self.message_user(request, ngettext(\n '%d resource was successfully removed from Staging.',\n '%d resources were 
successfully removed from Staging.',\n updated,\n ) % updated, messages.SUCCESS)\n\n remove_selected_resources_from_index_staging.short_description = \"Remove from Staging\"\n\n def delete_selected_resources(self, request, queryset):\n\n if 'apply' in request.POST:\n # The user clicked submit on the intermediate form.\n # Perform our update action:\n # # prevent postgres from hanging - https://stackoverflow.com/questions/62439261/postgres-delete-hangs-on-a-table-with-a-self-referential-foreign-key\n with connection.cursor() as cursor:\n cursor.execute(\"ALTER TABLE resources_resource DISABLE TRIGGER ALL;\")\n\n for obj in queryset:\n print(\"WERE DELETING SOMETHING #############\")\n obj.delete()\n\n with connection.cursor() as cursor:\n cursor.execute(\"ALTER TABLE resources_resource ENABLE TRIGGER ALL;\")\n # Redirect to our admin view after our update has\n # completed with a nice little info message saying\n # our models have been updated:\n self.message_user(request,\n \" {} Resources Deleted!\".format(queryset.count()))\n return HttpResponseRedirect(request.get_full_path())\n\n return render(request,\n 'admin/delete.html',\n context={'resources':queryset})\n\n def save_model(self, request, obj, form, change):\n\n try:\n # attempt to match precision and prevent unexpected change\n # use first point as determinant\n #todo make this more robust\n first_point = str(self.model.objects.get(id=obj.id).bounding_box[0][0][0])\n precision = len(first_point[first_point.index(\".\") + 1:])\n wkt_w = WKTWriter()\n wkt_w.precision = precision\n obj.bounding_box = GEOSGeometry(wkt_w.write(obj.bounding_box))\n except:\n pass\n print(\"first point\",)\n \"\"\"pass request to save to distinguish between automation and admin\n \"\"\"\n try:\n obj.save(request.user)\n except:\n pass\n\n def preview(self, obj):\n if obj.pk:\n\n html = \"Preview\"\n return mark_safe(html)\n else:\n return '-'\n\n preview.short_description = (\"Preview\")\n preview.allow_tags = True\n\n\n\ndef get_pretty_json(_json):\n \"\"\"Function to display pretty version of our data REF: https://www.pydanny.com/pretty-formatting-json-django-admin.html\"\"\"\n # Convert the data to sorted, indented JSON\n response = json.dumps(_json, sort_keys=True, indent=2)\n\n # Get the Pygments formatter\n formatter = HtmlFormatter(style='colorful')\n # Highlight the data\n response = highlight(response, JsonLexer(), formatter)\n\n # Get the stylesheet\n return \"